mkosi-24.3 (git commit 26673f65e17ae099bb0cf65928fe70c93b8a6a37)

mkosi-24.3/.dir-locals.el

; Sets emacs variables based on mode.
; A list of (major-mode . ((var1 . value1) (var2 . value2)))
; Mode can be nil, which gives default values.
; Note that we set a wider line width for source files, but for everything else we
; stick to a more conservative 79 characters.
; NOTE: Keep this file in sync with .editorconfig.
((python-mode . ((indent-tabs-mode . nil)
                 (tab-width . 4)
                 (fill-column . 99)))
 (sh-mode . ((sh-basic-offset . 4)
             (sh-indentation . 4)))
 (nil . ((indent-tabs-mode . nil)
         (tab-width . 4)
         (fill-column . 79))))

mkosi-24.3/.editorconfig

root = true

[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
charset = utf-8
indent_style = space
indent_size = 4

[*.{yaml,yml}]
indent_size = 2

mkosi-24.3/.github/ISSUE_TEMPLATE/bug_report.yml

name: Bug Report
description: A report of an error in mkosi
labels: ["bug"]
body:
  - type: markdown
    attributes:
      value: Thanks for taking the time to fill out this bug report!
  - type: input
    id: version
    attributes:
      label: mkosi commit the issue has been seen with
      description: |
        Please do not submit bug reports against older releases, but use your distribution bug tracker.
        Please also test whether your bug has already been resolved on the current git main.
      placeholder: 'main'
    validations:
      required: true
  - type: input
    id: hostdistro
    attributes:
      label: Used host distribution
      description: Used distribution on the host (or in the tools tree) and its version
      placeholder: Fedora 39
    validations:
      required: false
  - type: input
    id: targetdistro
    attributes:
      label: Used target distribution
      description: Used distribution for the image and its version
      placeholder: Fedora 39
    validations:
      required: false
  - type: input
    id: kernel
    attributes:
      label: Linux kernel version used
      description: |
        Please use `uname -r` to get the Linux kernel version.
      placeholder: kernel-6.6.8-200.fc39.x86_64
    validations:
      required: false
  - type: dropdown
    id: architecture
    attributes:
      label: CPU architectures issue was seen on
      options:
        - aarch64
        - alpha
        - arm
        - i686
        - ia64
        - loongarch
        - mips
        - parisc
        - ppc (big endian)
        - ppc64 (big endian)
        - ppc64le
        - riscv64
        - s390x
        - sparc
        - sparc64
        - x86_64
        - other
    validations:
      required: false
  - type: textarea
    id: unexpected-behaviour
    attributes:
      label: Unexpected behaviour you saw
    validations:
      required: false
  - type: textarea
    id: config
    attributes:
      label: Used mkosi config
      description: |
        Please add a, preferably minimised, mkosi config to reproduce the issue here.
      placeholder: This will be automatically formatted into code, so no need for backticks.
      render: ini
    validations:
      required: false
  - type: textarea
    id: logs
    attributes:
      label: mkosi output
      description: |
        Please paste the full mkosi debug output here.
      placeholder: This will be automatically formatted into code, so no need for backticks.
      render: sh
    validations:
      required: false
mkosi-24.3/.github/ISSUE_TEMPLATE/config.yml

---
# vi: ts=2 sw=2 et:
# SPDX-License-Identifier: LGPL-2.1-or-later
blank_issues_enabled: true
contact_links:
  - name: mkosi Matrix room
    url: https://matrix.to/#/#mkosi:matrix.org
    about: Please ask (and answer) questions here, use the issue tracker only for issues.

mkosi-24.3/.github/dependabot.yml

# SPDX-License-Identifier: LGPL-2.1-or-later
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "monthly"
    open-pull-requests-limit: 2

mkosi-24.3/.github/workflows/ci.yml

name: CI

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  unit-test:
    runs-on: ubuntu-24.04
    concurrency:
      group: ${{ github.workflow }}-${{ github.ref }}
      cancel-in-progress: true
    steps:
      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332
      - name: Install
        run: |
          # This is added by default, and it is often broken, but we don't need anything from it
          sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
          sudo apt-get update
          sudo apt-get install pandoc python3-pytest
          python3 -m pip install --break-system-packages --upgrade setuptools wheel pip
          python3 -m pip install --break-system-packages mypy ruff
          npm install -g pyright
      - name: Run ruff
        run: |
          ruff --version
          ruff check mkosi/ tests/ kernel-install/50-mkosi.install
      - name: Check that tabs are not used in code
        run: sh -c '! git grep -P "\\t" "*.py"'
      - name: Type Checking (mypy)
        run: |
          python3 -m mypy --version
          python3 -m mypy mkosi/ tests/ kernel-install/50-mkosi.install
      - name: Type Checking (pyright)
        run: |
          pyright --version
          pyright mkosi/ tests/ kernel-install/50-mkosi.install
      - name: Unit Tests
        run: |
          python3 -m pytest --version
          python3 -m pytest -sv tests/
      - name: Test execution from current working directory
        run: python3 -m mkosi -h
      - name: Test execution from current working directory (sudo call)
        run: sudo python3 -m mkosi -h
      - name: Test venv installation
        run: |
          python3 -m venv testvenv
          testvenv/bin/python3 -m pip install --upgrade setuptools wheel pip
          testvenv/bin/python3 -m pip install .
          testvenv/bin/mkosi -h
          rm -rf testvenv
      - name: Test editable venv installation
        run: |
          python3 -m venv testvenv
          testvenv/bin/python3 -m pip install --upgrade setuptools wheel pip
          testvenv/bin/python3 -m pip install --editable .
          testvenv/bin/mkosi -h
          rm -rf testvenv
      - name: Test zipapp creation
        run: |
          ./tools/generate-zipapp.sh
          ./builddir/mkosi -h
          ./builddir/mkosi documentation
      - name: Run shellcheck on scripts
        run: |
          sudo apt-get update && sudo apt-get install --no-install-recommends shellcheck
          bash -c 'shopt -s globstar; shellcheck bin/mkosi tools/*.sh'
          bin/mkosi completion bash | shellcheck -
      - name: Test man page generation
        run: pandoc -s mkosi.md -o mkosi.1

  integration-test:
    runs-on: ubuntu-24.04
    needs: unit-test
    concurrency:
      group: ${{ github.workflow }}-${{ matrix.distro }}-${{ matrix.tools }}-${{ github.ref }}
      cancel-in-progress: true
    strategy:
      fail-fast: false
      matrix:
        distro:
          - arch
          - centos
          - debian
          - fedora
          - opensuse
          - ubuntu
        tools:
          - arch
          - debian
          - fedora
          - opensuse
          - ubuntu
          # TODO: Add CentOS once they have systemd v254 or newer.
        exclude:
          # pacman and archlinux-keyring are not packaged in OpenSUSE.
          - distro: arch
            tools: opensuse
          # apt, debian-keyring and ubuntu-keyring are not packaged in OpenSUSE.
          - distro: debian
            tools: opensuse
          - distro: ubuntu
            tools: opensuse
    steps:
      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332
      - uses: ./
      # Freeing up disk space with rm -rf can take multiple minutes. Since we don't need the extra free space
      # immediately, we remove the files in the background. However, we first move them to a different location so that
      # nothing tries to use anything in these directories anymore while we're busy deleting them.
      - name: Free disk space
        run: |
          sudo mv /usr/local /usr/local.trash
          sudo mv /opt/hostedtoolcache /opt/hostedtoolcache.trash
          sudo systemd-run rm -rf /usr/local.trash /opt/hostedtoolcache.trash
      - name: Install
        run: |
          sudo apt-get update
          sudo apt-get install python3-pytest lvm2 cryptsetup-bin btrfs-progs
          # Make sure the latest changes from the pull request are used.
          sudo ln -svf $PWD/bin/mkosi /usr/bin/mkosi
        working-directory: ./
      - name: Configure
        run: |
          tee mkosi.local.conf <

Neal Gompa (ニール・ゴンパ)

mkosi-24.3/LICENSE

GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999

Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.]

Preamble

The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it.
And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". 
The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. 
(For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. 
Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. 
For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. 
For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Libraries

If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

<one line to give the library's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>

This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names:

Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker.

<signature of Ty Coon>, 1 April 1990
Ty Coon, President of Vice

That's all there is to it!

mkosi-24.3/LICENSE.GPL2

GNU GENERAL PUBLIC LICENSE
Version 2, June 1991

Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

Preamble

The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price.
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. 
You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>

This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode:

Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names:

Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker.

<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice

This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License.

mkosi-24.3/MANIFEST.in

include LICENSE

mkosi-24.3/NEWS.md

# mkosi Changelog

## v24

- The default kernel command line of `console=ttyS0` (or equivalent for other architectures) has been removed. The required `console=` argument to have the kernel output to the serial console has to be added manually from `v24` onwards.
- Support for installing local packages located in directories in `BuildSources=` was dropped. Instead, the packages can be made available for installation via `PackageManagerTrees=`.
- Configuration parsing was reworked to remove the need for the `@` specifier and to streamline building multiple images with `mkosi.images/`. If you were building multiple images with `mkosi.images/`, you'll need to adapt your configuration to the rework. Read the **Building multiple images** section in the documentation for more information.
- mkosi has gained the option to generate completion scripts for bash, fish and zsh. Packagers should generate the scripts during packaging and ship them in the appropriate places.
- Added support for CentOS Stream 10.
- mkosi now installs a separate `mkosi-initrd` script that can be used to build initramfs images intended for use on the local system.
- We do not automatically append `centos-stream` or `fedora` anymore to CentOS (and derivatives) and Fedora mirrors specified with `Mirror=` as not all mirrors store the repository metadata under these subdirectories. Users are now required to add these subdirectories themselves in `Mirror=`. If the EPEL repositories are enabled for CentOS Stream (and derivatives) and `Mirror=` is used, we look for the EPEL repositories in `../fedora` relative to the mirror specified in `Mirror=`.
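
  For example, a Fedora image built against a local mirror now has to spell out the repository subdirectory itself (a minimal sketch; the mirror URL is a placeholder):

  ```ini
  [Distribution]
  Distribution=fedora
  # mkosi no longer appends the 'fedora' subdirectory automatically.
  Mirror=https://mirror.example.com/fedora
  ```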
- We now support compressed tar archives wherever we already accept tar archives as input.
- We now always rerun the build if `Format=none` and don't remove previous outputs in that case (unless `--force` is specified). This allows using `mkosi -t none` to rerun the build scripts without removing the previous image. This can then be combined with `RuntimeBuildSources=yes` to make the build script outputs available in a booted container or virtual machine so they can be installed without having to rebuild the image.
- We now use `virtconsole` to provide the serial console when booting with `qemu`.
- `root=PARTUUID` and `mount.usr=PARTUUID` on the kernel command line are now automatically extended with the actual PARTUUID of the corresponding partition.
- All available OpenSUSE repositories are now supported and can be enabled with `Repositories=`.
- Building OpenSUSE `aarch64` images is now supported.
- `mkosi dependencies` was beefed up to handle more scenarios properly.
- The default list of kernel modules that are always added to the initramfs was extended with various virtualization modules.
- Added a `Repositories=` match.
- Cached images are now invalidated if packages specified via `PackageDirectories=` change.
- Added `VolatilePackageDirectories=` which can be used to provide local packages that do not invalidate cached images.
- `mkosi.pkgmngr` is now used as the default path for `PackageManagerTrees=`.
- The package directory that build scripts can use to make built packages available for installation (`$PACKAGEDIR`) is now shared between all image builds. This means that packages built in earlier images and stored in `$PACKAGEDIR` become available for installation in all subsequent image builds.
- The default tools tree distribution is now chosen based on the host distribution instead of the target distribution.
- mkosi can now be invoked from the initramfs.

## v23.1

- Respin due to a git tag mismatch.

## v23

- Added `CleanScripts=` to allow running custom cleanup code whenever mkosi cleans up the output directory. This allows cleaning up extra outputs produced by e.g. a build script that mkosi doesn't know about.
- Added `ConfigureScripts=` to allow dynamically modifying the mkosi configuration. Each configure script receives the current config as JSON on stdin and should output the new config as JSON on stdout.
- When building a UKI, we don't measure for the TPM SHA1 PCR bank anymore.
- All keys in the mkosi config JSON output are now in pascal case, except for credentials and environments, where the keys encode names of credentials and environment variables and are therefore case sensitive.
- Added various settings to allow running mkosi behind a proxy.
- Various fixes to kernel module filtering that should result in fewer modules being pulled into the default initrd when `KernelModulesExclude=` or `KernelModulesInitrdExclude=` are used.
- Added a `ToolsTreeDistribution=` match.
- Removed the `vmspawn` verb and replaced it with `VirtualMachineMonitor=`.
- New specifiers for various directories were added: `%D` resolves to the directory that mkosi was invoked in, `%P` to the current working directory, and `%C` to the parent directory of the config file.
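
  A small sketch of how such a specifier might be used (`extra-tree/` is a hypothetical directory next to the invocation directory):

  ```ini
  [Content]
  # %D expands to the directory mkosi was invoked in.
  ExtraTrees=%D/extra-tree
  ```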
- Added `ForwardJournal=` to have systemd inside a container/VM forward its journal to the specified file or directory.
- Systemd scopes are now allocated for qemu, swtpm, virtiofsd and systemd-journal-remote if available.
- The `mkosi qemu` virtual machine is now registered with systemd-machined if available.
- Added a new `oci` output format.
- Runtime trees without a target are now mounted to `/root/src` instead of a subdirectory of it (to have the same behaviour as `BuildSources=`).
- Added `RuntimeBuildSources=` to mount build and source directories when booting the image with `mkosi nspawn` or `mkosi qemu`.
- Introduced `--append` to allow command line settings to be parsed after parsing configuration files.
- `distribution-release` is not installed by default anymore on OpenSUSE.
- Setting `QemuSmp=` to `0` will now make qemu use all available CPUs.
- Free page reporting and discard request processing are now enabled by default in VMs spawned by `mkosi qemu`.
- Added `ToolsTreeCertificates=` to allow configuring whether to use certificates and keys from the tools tree (if one is used) or the host.
- Added `never` for `CacheOnly=` to specify that repository metadata should always be refreshed.
- Renamed the `none` option for `CacheOnly=` to `auto`.
- Added `ProxyExclude=` to configure hostnames for which requests should not go through the configured proxy.
- The default tools tree is now reused on incremental builds.
- Added `VolatilePackages=` and `InitrdVolatilePackages=` to configure packages that should be installed after executing build scripts and which should not be cached when using `Incremental=`.
- `PackageDirectories=` now has an associated default path `mkosi.packages`.
- `reprepro` is now used to generate local apt repositories.
- Support for BSD tar/cpio was dropped.
- When both `ExtraSearchPaths=` and `ToolsTree=` are used, mkosi will now prefer running a binary found in `ExtraSearchPaths=` without the tools tree over running the binary from the tools tree. If a binary is not found in `ExtraSearchPaths=`, the tools tree is used instead.
- An artifact directory is now made available when running scripts which can be used to pass around data between different scripts. mkosi will also look for microcode and initrds in the artifact directory under the `io.mkosi.microcode` and `io.mkosi.initrd` subdirectories.
- Added an `Environment=` match setting to check for environment variables defined with the `Environment=` setting.
- The `basesystem` package is now always installed in Fedora and CentOS images instead of the `filesystem` package.
- The `qemu`, `shell` and `boot` verbs do not automatically build the image anymore unless `--force` is specified.
- `SplitArtifacts=` is now supported for the portable, sysext and confext outputs.
- The `WithDocs=` option was implemented for pacman-based distributions.
- The default Fedora release was bumped to 40.
- `QemuSwtpm=` can now be used with `QemuFirmware=` set to `linux` or `bios`.
- Added `UnitProperties=` to allow configuring properties on the scopes generated by `systemd-nspawn` and `systemd-run`.
- mkosi now only builds a single default tools tree per build using the settings from the last regular image that we'll build.
- Configure scripts are now only executed for verbs which imply an image build and are executed with the tools tree instead of without it.
- `$QEMU_ARCHITECTURE` is now set for configure scripts to easily allow scripts to figure out which qemu binary will be used to run qemu.
- A file ID can now be specified for `QemuDrives=`. This allows adding multiple qemu drives that are backed by the same file.
- mkosi doesn't fail anymore if images already exist when running `mkosi build`.
- Image names from `mkosi.images/` are now preferred over the specified image ID when determining the output filename to use for an image.
- `--include` now has a shorthand option `-I`.
- The `WITH_NETWORK` environment variable is now passed to build and finalize scripts.
- We now clamp mtimes to the specified source date epoch timestamp instead of resetting all mtimes. This means that we won't touch any mtimes that are already older than the given source date epoch timestamp.
- Removed support for CentOS 8 Stream as it is now EOL.
- The `coredumpctl` and `journalctl` verbs now operate on the path specified in `ForwardJournal=` if one is set.
- Added the `UnifiedKernelImageFormat=` setting to allow configuring the naming of unified kernel images generated by mkosi.
- The `versionlock` plugin is now enabled by default for dnf with a noop configuration.
- `Repositories=` is now implemented for zypper.
- `KernelModulesInclude=` and `KernelModulesInitrdInclude=` now take the special values `host` and `default` to include the host's loaded modules and the default kernel modules defined in `mkosi-initrd` respectively.
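
  A minimal sketch, assuming the settings live in the `[Content]` section alongside the other kernel module options:

  ```ini
  [Content]
  # 'host' pulls in the modules currently loaded on the build host;
  # 'default' would pull in the baseline module set defined by mkosi-initrd.
  KernelModulesInitrdInclude=host
  ```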
- `SplitArtifacts=` is now supported for the portable, sysext and confext outputs.
- The `WithDocs=` option was implemented for pacman-based distributions.
- The default Fedora release was bumped to 40.
- `QemuSwtpm=` can now be used with `QemuFirmware=` set to `linux` or `bios`.
- Added `UnitProperties=` to allow configuring properties on the scopes generated by `systemd-nspawn` and `systemd-run`.
- mkosi now only builds a single default tools tree per build using the settings from the last regular image that we'll build.
- Configure scripts are now only executed for verbs which imply an image build and are executed with the tools tree instead of without it.
- `$QEMU_ARCHITECTURE` is now set for configure scripts to easily allow scripts to figure out which qemu binary will be used to run qemu.
- A file ID can now be specified for `QemuDrives=`. This allows adding multiple qemu drives that are backed by the same file.
- mkosi doesn't fail anymore if images already exist when running `mkosi build`.
- Image names from `mkosi.images/` are now preferred over the specified image ID when determining the output filename to use for an image.
- `--include` now has a shorthand option `-I`.
- The `WITH_NETWORK` environment variable is now passed to build and finalize scripts.
- We now clamp mtimes to the specified source date epoch timestamp instead of resetting all mtimes. This means that we won't touch any mtimes that are already older than the given source date epoch timestamp.
- Removed support for CentOS 8 Stream as it is now EOL.
- The `coredumpctl` and `journalctl` verbs now operate on the path specified in `ForwardJournal=` if one is set.
- Added `UnifiedKernelImageFormat=` format setting to allow configuring the naming of unified kernel images generated by mkosi.
- The `versionlock` plugin is now enabled by default for dnf with a noop configuration.
- `Repositories=` is now implemented for zypper.
- `KernelModulesInclude=` and `KernelModulesInitrdInclude=` now take the special values `host` and `default` to include the host's loaded modules and the default kernel modules defined in `mkosi-initrd` respectively.
- `KernelModulesIncludeHost=` and `KernelModulesInitrdIncludeHost=` are now deprecated.
- Added `mkosi dependencies` to output the list of packages required by mkosi to build and boot images.

## v22

- We'll now try to delete btrfs subvolumes with `btrfs subvolume delete` first before falling back to recursively deleting the directory.
- The invoking user is now always mapped to `root` when running sync scripts. This fixes an issue where we would fail when a package manager tree or skeleton tree contained a `/usr` directory as we would not have permissions to run mount in the sandbox.
- We now use qemu's official firmware descriptions to find EDK2/OVMF UEFI firmware. Additionally, `QemuFirmware=uefi` now boots without SecureBoot support, and `QemuFirmware=uefi-secure-boot` was introduced to boot with SecureBoot support. By default we will still boot with SecureBoot support if `QemuFirmware=auto`.
- Added support for `QemuFirmwareVariables=custom` and `QemuFirmwareVariables=microsoft` to use OVMF/EDK2 variables with either the user's custom keys enrolled or with the Microsoft keys enrolled.
- Added `UnifiedKernelImages=` to control whether we generate unified kernel images or not.
- `Bootloader=grub` will now generate a grub EFI image and install it. If `SecureBoot=` is enabled and `ShimBootloader=` is not set to `signed`, the grub EFI image will be signed for SecureBoot.
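For example, the grub behaviour described in the last entry could be exercised with a configuration along these lines (a sketch; the key and certificate files are the ones `mkosi genkey` generates by default):

```conf
[Content]
Bootloader=grub

[Validation]
SecureBoot=yes
SecureBootKey=mkosi.key
SecureBootCertificate=mkosi.crt
```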
- `ShimBootloader=signed` will now also instruct mkosi to look for and install already signed grub, systemd-boot, kernel and UKI binaries. - We now build grub images with a fixed set of modules and don't copy any grub modules to the ESP anymore. - The configuration is now made available as a JSON file to all mkosi scripts via the `$MKOSI_CONFIG` environment variable. - `$PROFILE` is now set for all mkosi scripts containing the value of `Profile=` if it is set. ## v21 - We now handle unmerged-usr systems correctly - Builtin configs (`mkosi-initrd`, `mkosi-tools`) can now be included using `Include=` (e.g. `Include=mkosi-initrd`) - The kernel-install plugin now uses the builtin `mkosi-initrd` config so there's no need anymore to copy the full `mkosi-initrd` config into `/usr/lib/mkosi-initrd`. - We don't require a build anymore for the `journalctl` and `coredumpctl` verbs. - `mkosi ssh` works again when used with `ToolsTree=default` - We now use `.zst` instead of `.zstd` for compressed split artifacts produced by `systemd-repart`. - `systemd-repart` uses a persistent temporary directory again for assembling images instead of a tmpfs. - Added `MicrocodeHost=` setting to only include the CPU specific microcode for the current host system. - The kernel-install plugin now only includes the CPU specific microcode - Introduced `PackageCacheDirectory=` to set the directory for package manager caches. This setting defaults to a suitable location in the system or user directory depending on how mkosi is invoked. `CacheDirectory=` is only used for incremental cached images now. - Repository metadata is now synced once at the start of each image build and never during an image build. Each image includes a snapshot of the repository metadata in the canonical locations in `/var` so that incremental images and extension images can reuse the same snapshot. When building an image intended to be used with `BaseTrees=`, disable `CleanPackageMetadata=` to make sure the repository metadata in `/var` is not cleaned up, otherwise any extension images using this image as their base tree will not be able to install additional packages. - Implemented `CacheOnly=metadata`. Note that in the JSON output, the value of `CacheOnly=` will now be a string instead of a boolean. - Added `CompressLevel=` to set the compression level to use. - Dropped experimental Gentoo support. - Added `TriggerMatch=` to specify multiple match sections of which only one should be satisfied. - Added `jq`, `attr`, `acl`, `git`, `sed`, `grep` and `findutils` to the default tools tree. - Added `mkosi-install`, `mkosi-upgrade`, `mkosi-remove` and `mkosi-reinstall` scripts which allow writing scripts that are independent of the package manager being used to build the image. - We now expand specifiers in `Match` section values - Made GPG key handling for Fedora rawhide more robust - If systemd-repart 256 or newer is available, mkosi will instruct it to generate `/etc/fstab` and `/etc/crypttab` for the image if any partition definitions contain the corresponding settings (`MountPoint=` and `EncryptedVolume=`). - `bash` is now started in the debug shell instead of `sh`. - The default release for Ubuntu is now `noble`. - Ubuntu is now used as the default tools tree distribution for Ubuntu instead of Debian. - Added `mkosi vmspawn` which boots the image with `systemd-vmspawn`. Note that `systemd-vmspawn` is experimental and its interface may still change. As such `mkosi vmspawn` is also considered experimental. 
Note that `systemd-vmspawn` version `256` or newer is required.
- Added `SyncScripts=` which can be used to update various build sources before starting the image build.
- The `DISTRIBUTION=` and `RELEASE=` environment variables are now set when running scripts.
- Added `ToolsTreeRepositories=` and `ToolsTreePackageManagerTrees=`.
- Added `RuntimeNetwork=` to configure the networking used when booting the image.
- Added `SecureBootKeySource=` and `VerityKeySource=` to support signing images with OpenSSL engines. Note that these settings require various systemd tools to be version `256` or newer.
- We don't clean up package manager metadata anymore unless explicitly requested with `CleanPackageManagerMetadata=yes` when building `directory` and `tar` images.

## v20.2

- Fixed a bug in signing unsigned shim EFI binaries.
- We now build an early microcode initrd in the mkosi kernel-install plugin.
- Added `PackageDirectories=` to allow providing extra packages to be made available during the build.
- Fixed issue where `KernelModulesIncludeHost` was including unnecessary modules.
- Fixed `--mirror` specification for CentOS (and variants) and Fedora. Previously a subdirectory within the mirror had to be specified which prevented using CentOS and EPEL repositories from the same mirror. Now only the URL has to be specified.
- We now mount package manager cache directories when running scripts on the host so that any packages installed in scripts are properly cached.
- We don't download filelists on Fedora anymore.
- Nested build sources don't cause errors anymore when trying to install packages.
- We don't try to build the same tools tree more than once anymore when building multiple images.
- We now create the `/etc/mtab` compatibility symlink in mkosi's sandbox.
- We now always hash the root password ourselves instead of leaving it to `systemd-firstboot`.
- `/srv` and `/mnt` are not mounted read-only anymore during builds.
- Fixed a crash when running mkosi in a directory with fewer than two parent directories.
- Implemented `RepositoryKeyCheck=` for apt-based distributions.

## v20.1

- `BuildSources=` are now mounted when we install packages so local packages can be made available in the sandbox.
- Fixed check to see if we're running as root which makes sure we don't do shared mounts when running as root.
- The extension release file is now actually written when building system or configuration extensions.
- The nspawn settings are copied to the output directory again.
- Incremental caching is now skipped when `Overlay=` is enabled as this combination isn't supported.
- The SELinux relabel check is more granular and now checks for all required files instead of just whether there's a policy configured.
- `qemu-system-xxx` binaries are now preferred over the generic `qemu` and `qemu-kvm` binaries.
- Grub tools from the tools tree are now used to install grub instead of grub tools from the image itself. The grub tools were added to the default tools trees as well.
- The pacman keyring in tools trees is now only populated from the Arch Linux keyring (and not the Debian/Ubuntu ones anymore).
- `gpg` is allowed to access `/run/pscsd/pscsd.comm` on the host if it exists to allow interaction with smartcards.

## v20

- The current working directory is not mounted unconditionally to `/work/src` anymore. Instead, the default value for `BuildSources=` now mounts the current working directory to `/work/src`.
This means that the current working directory is no longer implicitly included when `BuildSources=` is explicitly configured.
- Assigning the empty string to a setting that takes a list of values now overrides any configured default value as well.
- The github action does not build and install systemd from source anymore. Instead, `ToolsTree=default` can be used to make sure a recent version of systemd is used to do the image build.
- Added `EnvironmentFiles=` to read environment variables from environment files.
- We drastically reduced how much of the host system we expose to scripts. Aside from `/usr`, a few directories in `/etc`, `/tmp`, `/var/tmp` and various directories configured in mkosi settings, all host directories are hidden from scripts, package managers and other tools executed by mkosi.
- Added `RuntimeScratch=` to automatically mount a directory with extra scratch space into mkosi-spawned containers and virtual machines.
- Package manager trees can now be used to configure every tool invoked by mkosi while building an image that reads config files from `/etc` or `/usr`.
- Added `SELinuxRelabel=` to specify whether to relabel selinux files or not.
- Many fixes to tools trees were made and tools trees are now covered by CI. Some combinations aren't possible yet but we're actively working to make these possible.
- `mkosi qemu` now supports direct kernel boots of `s390x` and `powerpc` images.
- Added `HostArchitecture=` match to match against the host architecture.
- We don't use the user's SSH public/private keypair anymore for `mkosi ssh` but instead use a separate key pair which can be generated by `mkosi genkey`. Users using `mkosi ssh` will have to run `mkosi genkey` once to generate the necessary files to keep `mkosi ssh` working.
- We don't automatically set `--offline=no` anymore when we detect the `Subvolumes=` setting is used in a `systemd-repart` partition definition file. Instead, use the new `RepartOffline=` option to explicitly disable running `systemd-repart` in offline mode.
- During the image build we now install UKIs/kernels/initrds to `/boot` instead of `/efi`. While this will generally not be noticeable, users with custom systemd-repart ESP partition definitions will need to add `CopyFiles=/boot:/` along with the usual `CopyFiles=/efi:/` to their ESP partition definitions. By installing UKIs/kernels/initrds to `/boot`, it becomes possible to use `/boot` to populate an XBOOTLDR partition which wasn't possible before. Note that this is also safe to do before `v20` so `CopyFiles=/boot:/` can unconditionally be added to any ESP partition definition files.
- Added `QemuFirmwareVariables=` to allow specifying a custom OVMF variables file to use.
- Added `MinimumVersion=` to allow specifying the minimum required mkosi version to build an image.
- Added support for Arch Linux's debug repositories.
- Merged the mkosi-initrd project into mkosi itself. mkosi-initrd is now used to build the default initrd.
- Implemented mkosi-initrd for all supported distributions.
- Added `ShimBootloader=` to support installing shim to the ESP.
- Added sysext, confext and portable output formats. These will produce signed disk images that can be used as sysexts, confexts and portable services respectively.
- Added `QemuVsockConnectionId=` to configure how to allocate the vsock connection ID when `QemuVsock=` is enabled.
- Added documentation on how to build sysexts with mkosi.
- Global systemd user presets are now also configured.
- Implemented `WithDocs=` for `apt`.
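To illustrate the `/boot` change described above, a custom ESP partition definition that picks up both trees might look like this sketch (the file name is hypothetical):

```conf
# mkosi.repart/00-esp.conf
[Partition]
Type=esp
Format=vfat
CopyFiles=/efi:/
CopyFiles=/boot:/
SizeMinBytes=512M
```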
- On supported package managers, locale data for other locales is now stripped if the locale is explicitly configured using `Locale=`.
- All `rpm` plugins are now disabled when building images.
- Added `KernelModulesIncludeHost=` and `KernelModulesInitrdIncludeHost=` to only include modules loaded on the host system in the image/initrd respectively.
- Implemented `RemovePackages=` for Arch Linux.
- Added `useradd` and `groupadd` scripts to configure these binaries to operate on the image during builds instead of on the host.
- Added microcode support. If installed into the image, an early microcode initrd will automatically be built and prepended to the initrd.
- A passwordless root account may now be created by specifying `hashed:`.
- The `Autologin=` feature was extended with support for `arm64`, `s390x` and `powerpc` architectures.
- Added `SecureBootAutoEnroll=` to control automatic enrollment of secureboot keys separately from signing `systemd-boot` and generated UKIs.
- `ImageVersion=` is no longer automatically appended to the output files, instead this is automatically appended to `Output=` if not specified and results in the `%o` specifier being equivalent to `%i` or `%i_%v` depending on whether `ImageVersion=` is specified.

## v19

- Support for RHEL was added!
- Added `journalctl` and `coredumpctl` verbs for running the respective tools on built directory or disk images.
- Added a `burn` verb to write the output image to a block device.
- Added a new `esp` output format, which is largely similar to the existing `uki` output format but wraps it in a disk image with only an ESP.
- `Presets` were renamed to `Images`. `mkosi.images/` is now used instead of `mkosi.presets/`, the `Presets=` setting was renamed to `Images=` and the `Presets` section was merged into the `Config` section. The old names can still be used for backwards compatibility.
- Added profiles to support building variants of the same image in one repository. Profiles can be defined in `mkosi.profiles/` and one can be selected using the new `Profile=` setting.
- mkosi will now parse `mkosi.local.conf` before any other config files if that exists.
- Added a kernel-install plugin. This is only shipped in source tree and not included in the Python module.
- Added a `--json` option to get the output of `mkosi summary` as JSON.
- Added shorthand `-a` for `--autologin`.
- Added a `--debug-workspace` option to not remove the workspace directory after a build. This is useful to inspect the workspace after failing builds. As a consequence, the default workspace directory prefix has been changed from `.mkosi-tmp` to `mkosi-workspace`.
- Scripts with the `.chroot` extension are now executed in the image automatically (see the sketch after this list).
- Added `rpm` helper script to have `rpm` automatically operate on the image when running scripts.
- Added `mkosi-as-caller` helper script that can be used in scripts to run commands as the user invoking mkosi.
- `mkosi-chroot` will now start a shell if no arguments are specified.
- Added `WithRecommends=` to configure whether to install recommended packages by default or not where this is supported. It is disabled by default.
- Added `ToolsTreeMirror=` setting for configuring the mirror to use for the default tools tree.
- `WithDocs=` is now enabled by default.
- Added `BuildSourcesEphemeral=` to make source directories ephemeral when running scripts. This means any changes made to source directories while running scripts will be undone after the scripts have finished executing.
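As an example of the `.chroot` script behaviour mentioned above, a post-install script that should run inside the image rather than on the host could look like this sketch (the enabled unit is purely illustrative):

```sh
#!/bin/sh
# mkosi.postinst.chroot: the .chroot suffix makes mkosi execute this script
# inside the image, so commands operate directly on the image's root filesystem.
set -e
systemctl enable systemd-networkd.service
```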
- Added `QemuDrives=` to have mkosi create extra qemu drives and pass them to qemu when using the `qemu` verb. - Added `BuildSources=` match to match against configured build source targets. - `PackageManagerTrees=` was moved to the `Distribution` section. - We now automatically configure the qemu firmware, kernel cmdline and initrd based on what type of kernel is passed by the user via `-kernel` or `QemuKernel=`. - The mkosi repository itself now ships configuration to build basic bootable images that can be used to test mkosi. - Added support for enabling `updates-testing` repositories for Fedora. - GPG keys for CentOS, Fedora, Alma and Rocky are now looked up locally first before fetching them remotely. - Signatures are not required for local packages on Arch anymore. - Packages on opensuse are now always downloaded in advance before installation when using zypper. - The tar output is now reproducible. - We now make sure `git` can be executed from mkosi scripts without running into permission errors. - We don't create subdirectories beneath the configured cache directory anymore. - Workspace directories are now created outside of any source directories. mkosi will either use `XDG_CACHE_HOME`, `$HOME/.cache` or `/var/tmp` depending on the situation. - Added environment variable `MKOSI_DNF` to override which dnf to use for building images (`dnf` or `dnf5`). - The rootfs can now be modified when running build scripts (with all changes thrown away after the last build script has been executed). - mkosi now fails if configuration specified via the CLI does not apply to any image (because it is overridden). - Added a new doc on building rpms from source with mkosi (`docs/building-rpms-from-source.md`). - `/etc/resolv.conf` will now only be mounted for scripts when they are run with network access. ## v18 - `$SCRIPT` was renamed to `$CHROOT_SCRIPT`. `$SCRIPT` can still be used but is considered deprecated. - Added `RuntimeTrees=` setting to mount directories when booting images via `mkosi boot`, `mkosi shell` or `mkosi qemu`. The directories are mounted with a uid map that maps the user invoking mkosi to the root user so that all files in the directory appear as if owned by the root user in the container or virtual machine and any new files created in the directories are owned by the user invoking mkosi. To make this work in VMs, we use `VirtioFS` via `virtiofsd`. Note that this requires systemd v254 or newer to be installed in the image. - Added support for booting directory images with `mkosi qemu` via `VirtioFS`. When `CONFIG_VIRTIOFS` and `CONFIG_VIRTIO_PCI` are builtin modules, no initramfs is required to make this work. - Added `Include=` or `--include` to include extra configuration files or directories. - Added support for specifiers to access the current value of certain settings during configuration file parsing. - `mkosi` will now exit with an error when no configuration was provided. - Multiple scripts of the same type are now supported. - Custom distributions are now supported via the new `custom` distribution. When using `custom` as the distribution, the rootfs must be provided via base trees, skeleton trees or prepare scripts. - We now use local GPG keys for rpm based distributions if the `distribution-gpg-keys` package is installed on the host. - Added `RuntimeSize=` to grow the image to a specific size before booting it when using `mkosi boot` or `mkosi qemu`. 
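For instance, assuming the usual mapping of settings to command line options, the `RuntimeSize=` setting from the last entry can be used like this:

```sh
# Sketch: grow the image to 8G before booting it so there is space to play with.
mkosi --runtime-size=8G qemu
```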
- We now set `MKOSI_UID` and `MKOSI_GID` when running scripts which are set to the uid and gid of the user invoking mkosi respectively. These can be used to run commands as the user that invoked mkosi. - Added an `Architecture=` match - Initrds specified with `Initrds=` are now used for grub menuentries as well. - `ImageId=` and `ImageVersion=` are now written to os-release as `IMAGE_ID` and `IMAGE_VERSION` if provided. - We pass command line arguments passed to the `build` verb to the build script again. - We added support for the "RHEL Universal Base Image" distribution. ## v17.1 - Fixed bug where `--autologin` was broken when used in combination with a tools tree when using a packaged version of mkosi. ## v17 - Added `ToolsTreePackages=` to add extra packages to the default tools tree. - Added `SystemdVersion=` match to match on the host's systemd version - Added `Format=` match to match on the configured output format - `Presets=` can now be configured in global configuration files to select which presets to build - UKIs can now be booted using direct linux boot. - We don't try to make images UEFI bootable anymore on architectures that do not support UEFI - Fixed `--help` to show all options again - We now warn when settings are configured in the wrong section ## v16 - `mkosi.version` is now picked up from preset and dropin directories as well following the usual config precedence logic - Removed the "first assignment wins" logic from configuration parsing. Settings parsed later will now override earlier values - Removed the `!` operator for lists. Instead, assign the empty string to the list to remove all previous values. - Added support for configuring custom default values for settings by prefixing their name in the configuration file with `@`. - Added `QemuCdrom=` to attach the image to the virtual machine as a CD-ROM instead of a block device. - Added `SectorSize=` to set the sector size of the disk images built by systemd-repart. - Added back grub support (BIOS/UEFI). Note that we don't install grub on UEFI yet but we do add the necessary configuration and partitions. - Added `Bootloader=` option to configure which EFI bootloader to install. Added `uki` option to install just the UKI without systemd-boot and `grub` to generate grub configuration to chainload into the built UKIs. - Added `BiosBootloader=` to configure whether grub for BIOS gets installed or not. - Added `QemuFirmware=` to select which qemu firmware to use (OVMF, Seabios or direct kernel boot). - Added `QemuKernel=` to specify the kernel that should be used with direct kernel boot. - `/var/lib/dbus/machine-id` is now removed if it was added by a package manager postinstall script. - The manifest is not generated by default anymore. Use `ManifestFormat=json` to make sure the manifest is generated. - Added `SourceDateEpoch=` to enable more reproducible image builds. - Added `Seed=` to set the seed passed to systemd-repart. - Updated the default Fedora release to Fedora 39. - If `ToolsTree=` is set to `default`, mkosi will now build a default tools tree containing all the necessary tools to build images. The distribution and release to use can be configured with `ToolsTreeDistribution=` and `ToolsTreeRelease=` or are determined automatically based on the image being built. - Added `uki` output format. This is similar to `cpio`, except the cpio is packaged up as a UKI with a kernel image and stub picked up from the rootfs. ## v15.1 - The man page can be generated from the markdown file via `tools/make-man-page.sh`. 
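For reference, regenerating the man page locally then amounts to something like the following (the explicit pandoc invocation follows the v15 notes further down):

```sh
tools/make-man-page.sh
# or, roughly equivalently:
pandoc -t man -s -o mkosi.1 mkosi.md
```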
- Fixed issue where not all packages and data files were included in the generated python package.
- mkosi doesn't try to unshare the network namespace anymore when it doesn't have `CAP_NET_ADMIN`.
- Fixed issue when the workspace was located in `/tmp`.
- Don't try to run `timedatectl` or `ssh-add` when they're not installed.

## v15

- Migrated to systemd-repart. Many options are dropped in favor of specifying them directly in repart partition definition files:
  - Format=gpt_xxx options are replaced with a single "disk" option. Filesystem to use can now be specified with repart's Format= option.
  - Format=plain_squashfs (Can be reproduced by a single repart squashfs root partition combined with SplitArtifacts=yes)
  - Verity= (Replaced by repart's Verity= options)
  - Encrypt= (Replaced by repart's Encrypt= option)
  - RootSize=, HomeSize=, VarSize=, TmpSize=, ESPSize=, SwapSize=, SrvSize= (Replaced by repart's size options)
  - UsrOnly= (replaced with `CopyFiles=/:/usr` in a usr partition definition)
  - OutputSplitRoot=, OutputSplitVerity= (Replaced by repart's SplitName= option)
  - OutputSplitKernel= (UKI is now always written to its own output file)
  - GPTFirstLBA (Removed, no equivalent in repart)
  - ReadOnly= (Replaced by repart's ReadOnly= option per partition)
  - Minimize= (Replaced by repart's Minimize= option per partition)
  - CompressFs= (No equivalent in repart, can be replicated by replacing mkfs. in $PATH with a script that adds the necessary command line option)
  - MkSquashfs= (Can be replaced with a script in $PATH that invokes the correct binary)

  We also removed the WithoutUnifiedKernelImages= switch as building unified kernel images is trivial and fast these days.
- Support for --qemu-boot was dropped.
- Support for --use-host-repositories was dropped, use --repository-directory instead.
- `RepositoryDirectory` was removed, use `PackageManagerTrees=` or `SkeletonTrees=` instead.
- `--repositories` is now only usable on Debian/RPM based distros and can only be used to enable additional repositories. Specifically, it cannot be used on Arch Linux anymore to add new repositories.
- The `_epel` distributions were removed. Use `--repositories=epel` instead to enable the EPEL repository.
- Removed `-stream` from CentOS release specifiers. Instead of specifying `8-stream`, you now just specify `8`.
- Removed default kernel command line arguments `rhgb`, `selinux=0` and `audit=0`.
- Dropped --all and --all-directory as this functionality is better implemented by using a build system.
- mkosi now builds images without needing root privileges.
- Removed `--no-chown`, `--idmap` and `--nspawn-keep-unit` options as they were made obsolete by moving to rootless builds.
- Removed `--source-file-transfer`, `--source-file-transfer-final`, `--source-resolve-symlinks` and `--source-resolve-symlinks-final` in favor of always mounting the source directory into the build image. `--source-file-transfer-final` might be reimplemented in the future using virtiofsd.
- Dropped `--include-dir` option. Usage can be replaced by using `--incremental` and reading includes from the cached build image tree.
- Removed `--machine-id` in favor of shipping images without a machine ID at all.
- Removed `--skip-final-phase` as we only have a single phase now.
- The post install script is only called for the final image now and not for the build image anymore. Use the prepare script instead.
- `--ssh-key`, `--ssh-agent`, `--ssh-port` and `--ssh-timeout` options were dropped as the SSH support was reimplemented using VSock.
`mkosi ssh` can only be used with images booted with `mkosi qemu`. Use `machinectl` to access images booted with `mkosi boot`. Use --extra-tree or --credential with the `.ssh.authorized_keys.root` credential as alternatives for provisioning the public key inside the image.
- Only configuration files matching `*.conf` are parsed in dropin directories now.
- Removed `--qemu-headless`; we now start qemu in the terminal by default and configure the serial console at runtime. Use the new `--qemu-gui` option to start qemu in its graphical interface.
- Removed `--netdev`. Can be replaced by manually installing systemd-networkd, putting a network file in the image and enabling systemd-networkd.
- If `mkosi.extra/` or `mkosi.skeleton/` exist, they are now always used instead of only when no explicit extra/skeleton trees are defined.
- mkosi doesn't install any default packages anymore aside from packages required by the distro or the base filesystem layout package if there are no required packages. In practice, this means systemd and other basic tools have to be installed explicitly from now on.
- Removed `--base-packages` as it's not needed anymore since we don't install any packages by default anymore aside from the base filesystem layout package.
- Removed `--qcow2` option in favor of supporting only raw disk images as the disk image output format.
- Removed `--bmap` option as it can be trivially added manually by utilizing a finalize script.
- The `never` value for `--with-network` was spun off into its own custom option `--cache-only`.
- `--bootable` now defaults to `auto`. When set to `auto`, mkosi will generate a bootable image only if all the necessary packages are installed. Documentation was added in docs/bootable.md on how a bootable image can be generated on mainstream distros.
- The RPM db is no longer rebuilt in bdb format on CentOS Stream 8. To be able to install packages on a CentOS Stream 8 image with an RPM db in sqlite format, rewrite the db in bdb format using `rpm --rebuilddb --define _db_backend bdb`.
- Repositories are now only written to /etc/apt/sources.list if apt is installed in the image.
- Removed the dependency on `debootstrap` to build Ubuntu or Debian images.
- Apt now uses the keyring from the host instead of the keyring from the image. This means `debian-archive-keyring` or `ubuntu-archive-keyring` are now required to be installed to build Debian or Ubuntu images respectively.
- `--base-image` is split into `--base-tree` and `--overlay`.
- Removed `--cache-initrd`; instead, use a prebuilt initrd with `Initrds=` to avoid rebuilding the initrd all the time.
- Disk images are now resized to 8G when booted to give some disk space to play around with in the booted image.
- Removed `--install-directory=` option. This was originally added for caching the installation results, but this doesn't work properly as it might result in leftover files in the install directory from a previous installation, so we have to empty the directory before reusing it, invalidating the caching, so the option was removed.
- Build scripts are now executed on the host. See the `SCRIPTS` section in the manual for more information. Existing build scripts will need to be updated to make sure they keep working. Specifically, most paths in scripts will need to be prefixed with $BUILDROOT to have them operate on the image instead of on the host system.
To ensure the host system cannot be modified when running a script, most host directories are mounted read-only when running a script to ensure a script cannot modify the host in any way. As an alternative to running the script on the host, the script can also still be executed in the image itself by putting the following snippet at the top of the script:

  ```sh
  if [ "$container" != "mkosi" ]; then
      exec mkosi-chroot "$SCRIPT" "$@"
  fi
  ```
- Removed `--tar-strip-selinux-context=` option. We now label all files properly if selinux is enabled and if users don't want the labels, they can simply exclude them when extracting the archive.
- Gentoo is now marked as experimental and unsupported and there's no guarantee at all that it will work. Issues related to Gentoo will generally not receive attention from core maintainers. All Gentoo-specific hacks outside of the Gentoo implementation module have been removed.
- A verb `documentation` has been added. Calling mkosi with this verb will show the documentation. This is useful when running mkosi during development to always have the documentation in the correct version available. By default it will try several ways to output the documentation, but a specific option can be chosen with the `--doc-format` option. Distro packagers are encouraged to add a file `mkosi.1` into the `mkosi/resources` directory of the Python package, if it is missing, as well as install it in the appropriate search path for man pages. The man page can be generated from the markdown file `mkosi/resources/mkosi.md` e.g. via `pandoc -t man -s -o mkosi.1 mkosi.md`.
- BuildSources= now takes source:target pairs which specify the source directory and where to mount it relative to the top level source directory when running scripts. (e.g. BuildSources=../my-project:my-project)

## v14

- Support for Clear Linux was dropped. See https://github.com/systemd/mkosi/pull/1037 for more information.
- Support for Photon was dropped. See https://github.com/systemd/mkosi/pull/1048 for more information.
- The Arch kernel/bootloader pacman hooks were removed. For anyone that still wants to use them, they can be found [here](https://github.com/systemd/mkosi/tree/v13/mkosi/resources/arch).
- mkosi now creates `distro~release` subdirectories inside the build, cache and output directories for each `distro~release` combination that is built. This allows building for multiple distros without throwing away the results of a previous distro build every time.
- The preferred names for mkosi configuration files and directories are now `mkosi.conf` and `mkosi.conf.d/` respectively. The old names (`mkosi.default` and `mkosi.default.d`) have been removed from the docs but are still supported for backwards compatibility.
- `plain_squashfs` type images will now also be named with a `.raw` suffix.
- `tar` type images will now respect the `--compress` option.
- Pacman's `SigLevel` option was changed to use the same default value as used on Arch which is `SigLevel = Required DatabaseOptional`. If this results in keyring errors, you need to update the keyring by running `pacman-key --populate archlinux`.
- Support for CentOS 7 was dropped. If you still need to support CentOS 7, we recommend using any mkosi version up to 13.
- Support for BIOS/grub was dropped because EFI hardware is widely available and legacy BIOS systems do not support the feature set to fully verify a boot chain from firmware to userland and it has become bothersome to maintain for little use.
To generate BIOS images you can use any version of mkosi up to mkosi 13 or the new `--bios-size` option. This can be used to add a BIOS boot partition of the specified size on which `grub` (or any other bootloader) can be installed with the help of mkosi's script support (depending on your needs most likely `mkosi.postinst` or `mkosi.finalize`). This method can also be used for other EFI bootloaders that mkosi intentionally does not support.
- mkosi now unconditionally copies the kernel, initrd and kernel cmdline from the image that were previously only copied out for Qemu boot.
- mkosi now runs apt and dpkg on the host. As such, we now require apt and dpkg to be installed on the host along with debootstrap in order to be able to build Debian/Ubuntu images.
- The default names of split dm-verity artifacts have been changed to match what `systemd` and other tools expect: `image.root.raw`, `image.root.verity`, `image.root.roothash`, `image.root.roothash.p7s` (same for `usr` variants).
- `mkosi` will again default to the same OS release as the host system when the host system uses the same distribution as the image that's being built.
- By default, `mkosi` will now change the owner of newly created directories to `SUDO_UID` or `PKEXEC_UID` if defined, unless `--no-chown` is used.
- If `systemd-nspawn` v252 or newer is used, bind-mounted directories with `systemd-nspawn` will use the new `rootidmap` option so files and directories created from within the container will be owned by the actual directory owner on the host.

## v13

- The `--network-veth` option has been renamed to `--netdev`. The old name made sense with virtual ethernet devices, but when booting images with qemu a TUN/TAP device is used instead.
- The network config file installed by mkosi when the `--netdev` (previously `--network-veth`) option is used (formerly `/etc/systemd/network/80-mkosi-network-veth.network` in the image) now only matches network interfaces using the `virtio_net` driver. Please make sure you weren't relying on this file to configure any network interfaces other than the tun/tap virtio-net interface created by mkosi when booting the image in QEMU with the `--netdev` option. If you were relying on this config file to configure other network interfaces, you will need to provide your own network configuration files instead.
- `mkosi` no longer defaults to the same OS release as the host system when the host system uses the same distribution as the image that's being built. Instead, when no release is specified, mkosi will now always default to the default version embedded in mkosi itself.
- `mkosi` will now use the `pacman` keyring from the host when building Arch images. This means that users will, on top of installing `archlinux-keyring`, also have to run `pacman-key --init` and `pacman-key --populate archlinux` on the host system to be able to build Arch images (concrete commands are shown after this list). Also, unless the package manager is configured to do it automatically, the host keyring will have to be updated after `archlinux-keyring` updates by running `pacman-key --populate archlinux` and `pacman-key --updatedb`.
- Direct qemu linux boot is now supported with `BootProtocols=linux`. When enabled, the kernel image, initrd, and cmdline will be extracted from the image and passed to `qemu` by `mkosi qemu` to directly boot into the kernel image without a bootloader. This can be used to boot for example s390x images in `qemu`.
- The initrd will now always be rebuilt after the extra trees and build artifacts have been installed into the image.
- The github action has been migrated to Ubuntu Jammy. To migrate any jobs using the action, add `runs-on: ubuntu-22.04` to the job config.
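Concretely, the keyring preparation described above amounts to running the following on the host:

```sh
sudo pacman-key --init
sudo pacman-key --populate archlinux
# and after every archlinux-keyring update:
sudo pacman-key --populate archlinux
sudo pacman-key --updatedb
```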
- All images are now configured by default with the `C.UTF-8` locale.
- New `--repository-directory` option can be used to configure a directory with extra repository files to be used by the package manager when building an image. Note that this option is currently only supported for `pacman` and `dnf`-based distros.
- Option `--skeleton-tree` is now supported on Debian-based distros.
- Removed `--hostname` as it's trivial to configure using systemd-firstboot.
- Removed default locale configuration as it's trivial to configure using systemd-firstboot and systemd writes a default locale as well.

## v12

- Fix handling of baselayout in Gentoo installations.

## v11

- Support for Rocky Linux, Alma Linux, and Gentoo has been added!
- A new `ManifestFormat=` option can be used to generate "manifest" files that describe what packages were installed. With `json`, a JSON file that shows the names and versions of all installed packages will be created. With `changelog`, a longer human-readable file that shows package descriptions and changelogs will be generated. This latter format should be considered experimental and likely to change in later versions.
- A new `RemovePackages=` option can be used to uninstall packages after the build and finalize scripts have been done. This is useful for the case where packages are required by the build scripts, or pulled in as dependencies for scriptlets of other packages, but are not necessary in the final image.
- A new `BaseImage=` option can be used to build "system extensions" a.k.a. "sysexts" — partial images which are mounted on top of an existing system to provide additional files under `/usr/`. See the [systemd-sysext man page](https://www.freedesktop.org/software/systemd/man/systemd-sysext.html) for more information.
- A new `CleanPackageMetadata=` option can be used to force or disable the removal of package manager files. When this option is not used, they are removed when the package manager is not installed in the final image.
- A new `UseHostRepositories=` option instructs mkosi to use repository configuration from the host system, instead of the internal list.
- A new `SshAgent=` option configures the path to the ssh agent.
- A new `SshPort=` option overrides the port used for ssh.
- The `Verity=` setting supports a new value `signed`. When set, verity data will be signed and the result inserted as an additional partition in the image. See https://systemd.io/DISCOVERABLE_PARTITIONS for details about signed disk images. This information is used by `systemd-nspawn`, `systemd-dissect`, `systemd-sysext`, `systemd-portabled` and `systemd`'s `RootImage=` setting (among others) to cryptographically validate the image file systems before use.
- The `--build-environment=` option was renamed to `--environment=` and extended to cover *all* invoked scripts, not just the `mkosi.build`. The old name is still understood.
- With `--with-network=never`, `dnf` is called with `--cacheonly`, so that the package lists are not refreshed. This gives a degree of reproducibility when doing repeated installs with the same package set (and also makes installs significantly faster).
- The `--debug=` option gained a new value `disk` to show information about disk sizes and partition allocations.
- Some sections and settings have been renamed for clarity: [Packages] is now [Content], `Password=`, `PasswordIsHashed=`, and `Autologin=` are now in [Content]. The old names are still supported, but not documented.
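A sketch of the renamed settings in their new home (the values are placeholders):

```conf
[Content]
Packages=systemd
Password=root
Autologin=yes
```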
- When `--prepare-script=`/`--build-script=`/`--finalize-script=` is used with an empty argument, the corresponding script will not be called.
- Python 3.7 is the minimum supported version.
- Note to packagers: the Python `cryptography` module is needed for signing of verity data.

## v10

- Minimum supported Python version is now 3.7.
- Automatic configuration of the network for Arch Linux was removed to bring different distros more in line with each other. To add it back, add a postinstall script to configure your network manager of choice.
- The `--default` option was changed to not affect the search location of `mkosi.default.d/`. mkosi now always searches for `mkosi.default.d/` in the working directory.
- `quiet` was dropped from the default kernel command line.
- `--source-file-transfer` and `--source-file-transfer-final` now accept an empty value as the argument which can be used to override a previous setting.
- A new command `mkosi serve` can be used to serve build artifacts using a small embedded HTTP server. This is useful for `machinectl pull-raw …` and `machinectl pull-tar …`.
- A new command `mkosi genkey` can be used to generate secure boot keys for use with mkosi's `--secure-boot` options. The number of days the keys should remain valid can be specified via `--secure-boot-valid-days=` and their CN via `--secure-boot-common-name=`.
- When booting images with `qemu`, firmware that supports Secure Boot will be used if available.
- `--source-resolve-symlinks` and `--source-resolve-symlinks-final` options are added to control how symlinks in the build sources are handled when `--source-file-transfer[-final]=copy-all` is used.
- `--build-environment=` option was added to set variables for the build script.
- `--usr-only` option was added to build images that comprise only the `/usr/` directory, instead of the whole root file system. This is useful for stateless systems where `/etc/` and `/var/` are populated by `systemd-tmpfiles`/`systemd-sysusers` and related calls at boot, or systems that are originally shipped without a root file system, but where `systemd-repart` adds one on the first boot.
- Support for "image versions" has been added. The version number can be set with `--version-number=`. It is included in the default output filename and passed as `$IMAGE_VERSION` to the build script. In addition, `mkosi bump` can be used to increase the version number by one, and `--auto-bump` can be used to increase it automatically after successful builds.
- Support for "image identifiers" has been added. The id can be set with `--image-id=` and is passed to the build script as `$IMAGE_ID`.
- The list of packages to install can be configured with `--base-packages=`. With `--base-packages=no`, only packages specified with `--packages=` will be installed. With `--base-packages=conditional`, various packages will be installed "conditionally", i.e. only if some other package is otherwise pulled in. For example, `systemd-udev` may be installed only if `systemd` is listed in `--packages=`.
- CPIO output format has been added. This is useful for kernel initramfs images.
- Output compression can be configured with `--compress-fs=` and `--compress-output=`, and support for `zstd` has been added.
- `--ssh-key=` option was added to control the ssh key used to connect to the image.
- `--remove-files=` option was added to remove files from the generated images.
- Inline comments are now allowed in config files (anything from `#` until the end of the line will be ignored).
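For example, the new inline comment support makes the following valid:

```conf
[Content]
Packages=systemd  # anything from '#' until the end of the line is ignored
```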
- The development branch was renamed from `master` to `main`.

## v9

### Highlighted Changes

- The mkosi GitHub action now defaults to the current release of mkosi instead of the tip of the master branch.
- Add an `ssh` verb and accompanying `--ssh` option. The latter sets up SSH keys for direct SSH access into a booted image, whereas the former can be used to start an SSH connection to the image.
- Allow for distribution-specific `mkosi.*` files in subdirectories of `mkosi.default.d/`. These files are only processed if a subdirectory named after the target distribution of the image is found in `mkosi.default.d/`.
- The summary of used options for the image is now only printed when building the image for the first time or when the `summary` verb is used.
- All of mkosi's output, except for the build script, will now go to stderr. There was no clear policy on this before and this choice makes it easier to use images generated and booted via mkosi with language servers using stdin and stdout for communication.
- `--source-file-transfer` now defaults to `copy-git-others` to also include untracked files.
- [black](https://github.com/psf/black) is now used as a code style and conformance with it is checked in CI.
- Add a new `--ephemeral` option to boot into a temporary snapshot of the image that will be thrown away on shutdown.
- Add a new option `--network-veth` to set up a virtual Ethernet link between the host and the image for usage with nspawn or QEMU.
- Add a new `--autologin` option to automatically log into the root account upon boot of the image. This is useful when using mkosi for boot tests.
- Add a new `--hostonly` option to generate host-specific initrds. This is useful when using mkosi for boot tests.
- Add a new `--install-directory` option and special directory `mkosi.installdir/` that will be used as `$DESTDIR` for the build script, so that the contents of this directory can be shared between builds.
- Add a new `--include-directory` option and special directory `mkosi.includedir/` that will be mounted at `/usr/include` during the build. This way header files installed during the build can be made available to the host system, which is useful for usage with language servers.
- Add a new `--source-file-transfer-final` option to complement `--source-file-transfer`. It does the same as `--source-file-transfer` does for the build image, but for the final one.
- Add a new `--tar-strip-selinux-context` option to remove SELinux xattrs. This is useful when an image with a target distribution not using SELinux is generated on a host that is using it.
- Document the `--no-chown` option. Using this option, artifacts generated by mkosi are not chowned to the user invoking mkosi when it is invoked via sudo. It has been with us for a while, but hasn't been documented until now.
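For example, combining two of the v9 options above gives a convenient boot-test loop (a sketch):

```sh
# Boot a throwaway snapshot with automatic root login; all changes made in the
# image are discarded on shutdown.
mkosi --autologin --ephemeral boot
```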
### Fixed Issues

- [#506](https://github.com/systemd/mkosi/issues/506)
- [#559](https://github.com/systemd/mkosi/issues/559)
- [#561](https://github.com/systemd/mkosi/issues/561)
- [#562](https://github.com/systemd/mkosi/issues/562)
- [#575](https://github.com/systemd/mkosi/issues/575)
- [#580](https://github.com/systemd/mkosi/issues/580)
- [#593](https://github.com/systemd/mkosi/issues/593)

### Authors

- Daan De Meyer
- Joerg Behrmann
- Luca Boccassi
- Peter Hutterer
- ValdikSS
mkosi-24.3/README.md000066400000000000000000000117731465176501400140630ustar00rootroot00000000000000
# mkosi — Build Bespoke OS Images

A fancy wrapper around `dnf --installroot`, `apt`, `pacman` and `zypper` that generates customized disk images with a number of bells and whistles.

For a longer description and available features and options, see the [man page](mkosi/resources/mkosi.md).

# Installation

You can install mkosi from your distribution using its package manager or install the development version from git. If you install mkosi using your distribution's package manager, make sure it installs at least mkosi v16 or newer (use `mkosi --version` to check). If your distribution only packages an older version of mkosi, it is recommended to install mkosi using one of the alternative installation methods listed below instead.

## Running mkosi from the repository

To run mkosi straight from its git repository, you can invoke the shim `bin/mkosi`. The `MKOSI_INTERPRETER` environment variable can be set when using the `bin/mkosi` shim to configure the python interpreter used to execute mkosi. The shim can be symlinked to e.g. `/usr/local/bin` to make it accessible from the `PATH`.

```shell
git clone https://github.com/systemd/mkosi
ln -s $PWD/mkosi/bin/mkosi /usr/local/bin/mkosi
mkosi --version
```

## Python installation methods

mkosi can also be installed straight from the git repository url using `pipx`:

```shell
pipx install git+https://github.com/systemd/mkosi.git
mkosi --version
```

which will transparently install mkosi into a Python virtual environment and a mkosi binary to `~/.local/bin`. This is, up to the path of the virtual environment and the mkosi binary, equivalent to

```shell
python3 -m venv mkosivenv
mkosivenv/bin/pip install git+https://github.com/systemd/mkosi.git
mkosivenv/bin/mkosi --version
```

You can also package mkosi as a [zipapp](https://docs.python.org/3/library/zipapp.html) that you can deploy anywhere in your `PATH`. Running this will leave a `mkosi` binary in `builddir/`

```shell
git clone https://github.com/systemd/mkosi
cd mkosi
tools/generate-zipapp.sh
builddir/mkosi --version
```

Besides the mkosi binary, you can also call mkosi via

```shell
python3 -m mkosi
```

when not installed as a zipapp.

Please note that the python module exists solely for the usage of the mkosi binary and is not to be considered a public API.

## kernel-install plugin

mkosi can also be used as a kernel-install plugin to build initrds. To enable this feature, install `kernel-install/50-mkosi.install` into `/usr/lib/kernel/install.d`. Extra distro configuration for the initrd can be configured in `/usr/lib/mkosi-initrd`. Users can add their own customizations in `/etc/mkosi-initrd`.

Once installed, the mkosi plugin can be enabled by writing `initrd_generator=mkosi-initrd` to `/usr/lib/kernel/install.conf` or to `/etc/kernel/install.conf`.
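Concretely, enabling the plugin as described above might look like the following sketch (run as root, paths as per the text):

```sh
install -D -m 0755 kernel-install/50-mkosi.install \
    /usr/lib/kernel/install.d/50-mkosi.install
echo initrd_generator=mkosi-initrd >>/etc/kernel/install.conf
```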
# Hacking on mkosi To hack on mkosi itself you will also need [mypy](https://github.com/python/mypy), for type checking, and [pytest](https://github.com/pytest-dev/pytest), to run tests. We check tests and typing in CI (see `.github/workflows`), but you can run the tests locally as well. # References * [Primary mkosi git repository on GitHub](https://github.com/systemd/mkosi/) * [A re-introduction to mkosi — A Tool for Generating OS Images](https://0pointer.net/blog/a-re-introduction-to-mkosi-a-tool-for-generating-os-images.html) * [The mkosi OS generation tool](https://lwn.net/Articles/726655/) story on LWN (2017) * [systemd-repart: Building Discoverable Disk Images](https://media.ccc.de/v/all-systems-go-2023-191-systemd-repart-building-discoverable-disk-images) and [mkosi: Building Bespoke Operating System Images](https://media.ccc.de/v/all-systems-go-2023-190-mkosi-building-bespoke-operating-system-images) talks at All Systems Go! 2023 * [Building RHEL and RHEL UBI images with mkosi](https://fedoramagazine.org/create-images-directly-from-rhel-and-rhel-ubi-package-using-mkosi/) an article in Fedora Magazine (2023) * [Building USIs with mkosi](https://overhead.neocities.org/blog/build-usi-mkosi/) * [Constellation 💖 mkosi — Minimal TCB, tailor-made for measured boot](https://www.edgeless.systems/blog/constellation-mkosi-minimal-tcb-tailor-made-for-measured-boot/) * [Streamlining kernel hacking with mkosi-kernel](https://video.fosdem.org/2024/ub5132/fosdem-2024-2209-streamlining-kernel-hacking-with-mkosi-kernel.av1.webm) * [mkosi-initrd: Building initrds out of distribution packages](https://video.fosdem.org/2024/ua2118/fosdem-2024-2888-mkosi-initrd-building-initrds-out-of-distribution-packages.av1.webm) * [Running systemd integration tests with mkosi](https://video.fosdem.org/2024/ud2208/fosdem-2024-3431-running-systemd-integration-tests-with-mkosi.av1.webm) * [Arch Linux rescue image with mkosi](https://swsnr.de/archlinux-rescue-image-with-mkosi) ## Community Find us on Matrix at [#mkosi:matrix.org](https://matrix.to/#/#mkosi:matrix.org). 
mkosi-24.3/action.yaml000066400000000000000000000060321465176501400147370ustar00rootroot00000000000000name: setup-mkosi description: Install mkosi runs: using: composite steps: - name: Permit unprivileged access to kvm, vhost-vsock and vhost-net devices shell: bash run: | sudo mkdir -p /etc/tmpfiles.d sudo cp /usr/lib/tmpfiles.d/static-nodes-permissions.conf /etc/tmpfiles.d/ sudo sed -i '/kvm/s/0660/0666/g' /etc/tmpfiles.d/static-nodes-permissions.conf sudo sed -i '/vhost/s/0660/0666/g' /etc/tmpfiles.d/static-nodes-permissions.conf sudo tee /etc/udev/rules.d/99-kvm4all.rules <<- EOF KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm" KERNEL=="vhost-vsock", GROUP="kvm", MODE="0666", OPTIONS+="static_node=vhost-vsock" KERNEL=="vhost-net", GROUP="kvm", MODE="0666", OPTIONS+="static_node=vhost-net" EOF sudo udevadm control --reload-rules sudo modprobe kvm sudo modprobe vhost_vsock sudo modprobe vhost_net [[ -e /dev/kvm ]] && sudo udevadm trigger --name-match=kvm sudo udevadm trigger --name-match=vhost-vsock sudo udevadm trigger --name-match=vhost-net [[ -e /dev/kvm ]] && sudo chmod 666 /dev/kvm sudo chmod 666 /dev/vhost-vsock sudo chmod 666 /dev/vhost-net lsmod [[ -e /dev/kvm ]] && ls -l /dev/kvm ls -l /dev/vhost-* id - name: Check clock source shell: bash run: cat /sys/devices/system/clocksource/clocksource0/current_clocksource - name: Show environment shell: bash run: env - name: Enable unprivileged user namespaces shell: bash run: | sudo sysctl --ignore --write kernel.apparmor_restrict_unprivileged_unconfined=0 sudo sysctl --ignore --write kernel.apparmor_restrict_unprivileged_userns=0 # Both the unix-chkpwd and swtpm profiles are broken (https://gitlab.com/apparmor/apparmor/-/issues/402) so let's # just disable and remove apparmor completely. It's not relevant in this context anyway. # TODO: Remove if https://github.com/actions/runner-images/issues/10015 is ever fixed. - name: Disable and mask apparmor service shell: bash run: | # This command fails with a non-zero error code even though it unloads the apparmor profiles. # https://gitlab.com/apparmor/apparmor/-/issues/403 sudo aa-teardown || true sudo apt-get remove apparmor - name: Dependencies shell: bash run: | # This is added by default, and it is often broken, but we don't need anything from it sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list # For archlinux-keyring and pacman sudo add-apt-repository ppa:michel-slm/kernel-utils sudo apt-get update sudo apt-get install --assume-yes --no-install-recommends \ archlinux-keyring \ bubblewrap \ debian-archive-keyring \ dnf \ makepkg \ pacman-package-manager \ systemd-container \ zypper sudo pacman-key --init sudo pacman-key --populate archlinux - name: Install shell: bash run: sudo ln -svf ${{ github.action_path }}/bin/mkosi /usr/bin/mkosi mkosi-24.3/bin/000077500000000000000000000000001465176501400133455ustar00rootroot00000000000000mkosi-24.3/bin/mkosi000077500000000000000000000012711465176501400144160ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: LGPL-2.1-or-later set -e PYTHONPATH="$(dirname "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")")" export PYTHONPATH if [ -z "$MKOSI_INTERPRETER" ]; then # Note the check seems to be inverted here because the if branch is executed when the exit status is 0 # which is equal to "False" in python. 
    if python3 -c "import sys; sys.exit(sys.version_info < (3, 9))"; then
        MKOSI_INTERPRETER=python3
    elif command -v python3.9 >/dev/null; then
        MKOSI_INTERPRETER=python3.9
    else
        echo "mkosi needs python 3.9 or newer (found $(python3 --version))"
        exit 1
    fi
fi

exec "$MKOSI_INTERPRETER" -B -m mkosi "$@"
mkosi-24.3/blog/000077500000000000000000000000001465176501400135205ustar00rootroot00000000000000mkosi-24.3/blog/content/000077500000000000000000000000001465176501400151725ustar00rootroot00000000000000mkosi-24.3/blog/content/a-reintroduction-to-mkosi.md000066400000000000000000000366501465176501400225520ustar00rootroot00000000000000Title: A re-introduction to mkosi -- A Tool for Generating OS Images
Date: 2024-01-10

> This is a guest post written by Daan De Meyer, systemd and mkosi
> maintainer

Almost 7 years ago, Lennart first [wrote](https://0pointer.net/blog/mkosi-a-tool-for-generating-os-images.html) about `mkosi` on this blog. Some years ago, I took over development and there have been a huge number of changes and improvements since then. So I figure this is a good time to re-introduce `mkosi`.

[`mkosi`](https://github.com/systemd/mkosi) stands for *Make Operating System Image*. It generates OS images that can be used for a variety of purposes.

If you prefer watching a video over reading a blog post, you can also watch my [presentation](https://www.youtube.com/watch?v=6EelcbjbUa8) on `mkosi` at All Systems Go 2023.

## What is mkosi?

`mkosi` was originally written as a tool to simplify hacking on systemd and for experimenting with images using many of the new concepts being introduced in systemd at the time. In the meantime, it has evolved into a general purpose image builder that can be used in a multitude of scenarios.

Instructions to install `mkosi` can be found in its [readme](https://github.com/systemd/mkosi/blob/main/README.md). We recommend running the latest version to take advantage of all the latest features and bug fixes. You'll also need `bubblewrap` and the package manager of your favorite distribution to get started.

At its core, the workflow of `mkosi` can be divided into 3 steps:

1. Generate an OS tree for some distribution by installing a set of packages.
2. Package up that OS tree in a variety of output formats.
3. (Optionally) Boot the resulting image in `qemu` or `systemd-nspawn`.

Images can be built for any of the following distributions:

- Fedora Linux
- Ubuntu
- OpenSUSE
- Debian
- Arch Linux
- CentOS Stream
- RHEL
- Rocky Linux
- Alma Linux

And the following output formats are supported:

- GPT disk images built with `systemd-repart`
- Tar archives
- CPIO archives (for building initramfs images)
- USIs (Unified System Images, which are full OS images packed in a UKI)
- Sysext, confext and portable images
- Directory trees

For example, to build an Arch Linux GPT disk image and boot it in `qemu`, you can run the following command:

```sh
$ mkosi -d arch -p systemd -p udev -p linux -t disk qemu
```

To instead boot the image in systemd-nspawn, replace `qemu` with `boot`:

```sh
$ mkosi -d arch -p systemd -p udev -p linux -t disk boot
```

The actual image, named `image.raw`, can be found in the current working directory. However, using a separate output directory is recommended, which is as simple as running `mkdir mkosi.output`.

To rebuild the image after it's already been built once, add `-f` to the command line before the verb. Any arguments passed after the verb are forwarded to either `systemd-nspawn` or `qemu` itself.
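For instance, to give the virtual machine more memory and CPUs, you can append ordinary qemu options after the verb (a sketch; `-m` and `-smp` are standard qemu flags, not mkosi options):

```sh
$ mkosi -d arch -p systemd -p udev -p linux -t disk -f qemu -m 4G -smp 4
```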
To build the image without booting it, pass `build` instead of `boot` or `qemu`, or don't pass a verb at all.

By default, the disk image will have an appropriately sized root partition and an ESP partition, but the partition layout and contents can be fully customized using `systemd-repart` by creating partition definition files in `mkosi.repart/`. This allows you to customize the partitions as you see fit:

- The root partition can be encrypted.
- Partition sizes can be customized.
- Partitions can be protected with signed dm-verity.
- You can opt out of having a root partition and only have a /usr partition instead.
- You can add various other partitions, e.g. an XBOOTLDR partition or a swap partition.
- ...

As part of building the image, we'll run various tools such as `systemd-sysusers`, `systemd-firstboot`, `depmod`, `systemd-hwdb` and more to make sure the image is set up correctly.

## Configuring mkosi image builds

Naturally, with extended use you don't want to specify all settings on the command line every time, so `mkosi` supports configuration files where the same settings that can be specified on the command line can be written down.

For example, the command we used above can be written down in a configuration file `mkosi.conf`:

```conf
[Distribution]
Distribution=arch

[Output]
Format=disk

[Content]
Packages=
        systemd
        udev
        linux
```

Like systemd, `mkosi` uses INI configuration files. We also support dropins which can be placed in `mkosi.conf.d`. Configuration files can also be conditionalized using the `[Match]` section. For example, to only install a specific package on Arch Linux, you can write the following to `mkosi.conf.d/10-arch.conf`:

```conf
[Match]
Distribution=arch

[Content]
Packages=pacman
```

Because not everything you need will be supported in `mkosi`, we support running scripts at various points during the image build process where all extra image customization can be done. For example, if it is found, `mkosi.postinst` is called after packages have been installed. Scripts are executed on the host system by default (in a sandbox), but can be executed inside the image by suffixing the script with `.chroot`, so if `mkosi.postinst.chroot` is found it will be executed inside the image.

To add extra files to the image, you can place them in `mkosi.extra` in the source directory and they will be automatically copied into the image after packages have been installed.

## Bootable images

If the necessary packages are installed, `mkosi` will automatically generate a UEFI/BIOS bootable image. As `mkosi` is a systemd project, it will always build [UKIs](https://uapi-group.org/specifications/specs/unified_kernel_image/) (Unified Kernel Images), except if the image is BIOS-only (since UKIs cannot be used on BIOS).

The initramfs is built like a regular image by installing distribution packages and packaging them up in a CPIO archive instead of a disk image. Specifically, we do not use `dracut`, `mkinitcpio` or `initramfs-tools` to generate the initramfs from the host system. `ukify` is used to assemble all the individual components into a UKI.

If you don't want `mkosi` to generate a bootable image, you can set `Bootable=no` to explicitly disable this logic.

## Using mkosi for development

The main requirement for using `mkosi` for development is that we can build our source code against the image we're building and install it into the image we're building. `mkosi` supports this via build scripts.
If a script named `mkosi.build` (or `mkosi.build.chroot`) is found, we'll execute it as part of the build. Any files put by the build script into `$DESTDIR` will be installed into the image. Required build dependencies can be installed using the `BuildPackages=` setting. These packages are installed into an overlay which is put on top of the image when running the build script, so they are available while the build script runs but don't end up in the final image.

An example `mkosi.build.chroot` script for a project using `meson` could look as follows:

```sh
#!/bin/sh
meson setup "$BUILDDIR" "$SRCDIR"
ninja -C "$BUILDDIR"
if [ "$WITH_TESTS" != "0" ]; then
    meson test -C "$BUILDDIR"
fi
meson install -C "$BUILDDIR"
```

Now, every time the image is built, the build script will be executed and the results will be installed into the image. The `$BUILDDIR` environment variable points to a directory that can be used as the build directory for build artifacts to allow for incremental builds if the build system supports it.

Of course, downloading all packages from scratch every time and re-installing them again every time the image is built is rather slow, so `mkosi` supports two modes of caching to speed things up.

The first caching mode caches all downloaded packages so they don't have to be downloaded again on subsequent builds. Enabling this is as simple as running `mkdir mkosi.cache`.

The second mode of caching caches the image after all packages have been installed but before running the build script. On subsequent builds, `mkosi` will copy the cache instead of reinstalling all packages from scratch. This mode can be enabled using the `Incremental=` setting. While there is some rudimentary cache invalidation, the cache can also forcibly be rebuilt by specifying `-ff` on the command line instead of `-f`.

Note that when running on a btrfs filesystem, `mkosi` will automatically use subvolumes for the cached images which can be snapshotted on subsequent builds for even faster rebuilds. We'll also use reflinks to do copy-on-write copies where possible.

With this setup, by running `mkosi -f qemu` in the systemd repository, it takes about 40 seconds to go from a source code change to a root shell in a virtual machine running the latest systemd with your change applied. This makes it very easy to test changes to systemd in a safe environment without risk of breaking your host system.

Of course, while 40 seconds is not a very long time, it's still more than we'd like, especially if all we're doing is modifying the kernel command line. That's why we have the `KernelCommandLineExtra=` option to configure kernel command line options that are passed to the container or virtual machine at runtime instead of being embedded into the image. These extra kernel command line options are picked up when the image is booted with qemu's direct kernel boot (using `-append`), but also when booting a disk image in UEFI mode (using SMBIOS). The same applies to systemd credentials (using the `Credentials=` setting). These settings allow configuring the image without having to rebuild it, which means that you only have to run `mkosi qemu` or `mkosi boot` again afterwards to apply the new settings.

## Building images without root privileges and loop devices

By using `newuidmap`/`newgidmap` and `systemd-repart`, `mkosi` is able to build images without needing root privileges.
As long as proper subuid and subgid mappings are set up for your user in `/etc/subuid` and `/etc/subgid`, you can run `mkosi` as your regular user without having to switch to `root`.

Note that as of the writing of this blog post this only applies to the `build` and `qemu` verbs. Booting the image in a `systemd-nspawn` container with `mkosi boot` still needs root privileges. We're hoping to fix this in a future systemd release.

Regardless of whether you're running `mkosi` with root or without root, almost every tool we execute is invoked in a sandbox to isolate as much of the build process from the host as possible. For example, `/etc` and `/var` from the host are not available in this sandbox, to avoid host configuration inadvertently affecting the build.

Because `systemd-repart` can build disk images without loop devices, `mkosi` can run from almost any environment, including containers. All that's needed is a UID range with 65536 UIDs available, either via running as the root user or via `/etc/subuid` and `newuidmap`. In a future systemd release, we're hoping to provide an alternative to `newuidmap` and `/etc/subuid` to allow running `mkosi` from all containers, even those with only a single UID available.

## Supporting older distributions

mkosi depends on very recent versions of various systemd tools (v254 or newer). To support older distributions, we implemented so-called tools trees. In short, `mkosi` can first build a tools image for you that contains all required tools to build the actual image. This can be enabled by adding `ToolsTree=default` to your mkosi configuration. Building a tools image does not require a recent version of systemd.

In the systemd mkosi configuration, we automatically use a tools tree if we detect your distribution does not have the minimum required systemd version installed.

## Configuring variants of the same image using profiles

Profiles can be defined in the `mkosi.profiles/` directory. The profile to use can be selected using the `Profile=` setting (or `--profile=`) on the command line. A profile allows you to bundle various settings behind a single recognizable name. You can also match on profiles if you want to apply some settings only to specific profiles.

For example, you could have a `bootable` profile that sets `Bootable=yes`, adds the `linux` and `systemd-boot` packages and configures `Format=disk` to end up with a bootable disk image when passing `--profile bootable` on the command line.

## Building system extension images

[System extension](https://uapi-group.org/specifications/specs/extension_image/) images may, dynamically at runtime, extend the base system with an overlay containing additional files.

To build system extensions with `mkosi`, we need a base image on top of which we can build our extension.

To keep things manageable, we'll make use of `mkosi`'s support for building multiple images so that we can build our base image and system extension in one go.

We start by creating a temporary directory with a base configuration file `mkosi.conf` with some shared settings:

```conf
[Output]
OutputDirectory=mkosi.output
CacheDirectory=mkosi.cache
```

Now let's continue with the base image definition by writing the following to `mkosi.images/base/mkosi.conf`:

```conf
[Output]
Format=directory

[Content]
CleanPackageMetadata=no
Packages=systemd
         udev
```

We use the `directory` output format here instead of the `disk` output so that we can build our extension without needing root privileges.
Now that we have our base image, we can define a sysext that builds on top of it by writing the following to `mkosi.images/btrfs/mkosi.conf`:

```conf
[Config]
Dependencies=base

[Output]
Format=sysext
Overlay=yes

[Content]
BaseTrees=%O/base
Packages=btrfs-progs
```

`BaseTrees=` points to our base image and `Overlay=yes` instructs mkosi to only package the files added on top of the base tree.

We can't sign the extension image without a key. We can generate one by running `mkosi genkey`, which will generate files that are automatically picked up when building the image.

Finally, you can build the base image and the extensions by running `mkosi -f`. You'll find `btrfs.raw` in `mkosi.output`, which is the extension image.

## Various other interesting features

- To sign any generated UKIs for secure boot, put your secure boot key and certificate in `mkosi.key` and `mkosi.crt` and enable the `SecureBoot=` setting. You can also run `mkosi genkey` to have `mkosi` generate a key and certificate itself.
- The `Ephemeral=` setting can be enabled to boot the image in an ephemeral copy that is thrown away when the container or virtual machine exits.
- `ShimBootloader=` and `BiosBootloader=` settings are available to configure shim and grub installation if needed.
- `mkosi` can boot directory trees in a virtual machine using `virtiofsd`. This is very useful for quickly rebuilding an image and booting it, as the image does not have to be packed up as a disk image.
- ...

There are many more features that we won't go over in detail in this blog post. Learn more about those by reading the [documentation](https://github.com/systemd/mkosi/blob/main/mkosi/resources/mkosi.md).

## Conclusion

I'll finish with a bunch of links to more information about `mkosi` and related tooling:

- [Github repository](https://github.com/systemd/mkosi)
- [Building RHEL and RHEL UBI images with mkosi](https://fedoramagazine.org/create-images-directly-from-rhel-and-rhel-ubi-package-using-mkosi/)
- [My presentation on systemd-repart at ASG 2023](https://media.ccc.de/v/all-systems-go-2023-191-systemd-repart-building-discoverable-disk-images)
- [mkosi's Matrix channel](https://matrix.to/#/#mkosi:matrix.org).
- [systemd's mkosi configuration](https://raw.githubusercontent.com/systemd/systemd/main/mkosi.conf) - [mkosi's mkosi configuration](https://github.com/systemd/systemd/tree/main/mkosi.conf.d) mkosi-24.3/docs/000077500000000000000000000000001465176501400135255ustar00rootroot00000000000000mkosi-24.3/docs/CNAME000066400000000000000000000000201465176501400142630ustar00rootroot00000000000000mkosi.systemd.iomkosi-24.3/docs/_data/000077500000000000000000000000001465176501400145755ustar00rootroot00000000000000mkosi-24.3/docs/_data/documentation_page.json000066400000000000000000000002701465176501400213340ustar00rootroot00000000000000[ { "category": "Documentation", "title": "A longer description and available features and options", "url": "https://github.com/systemd/mkosi/blob/main/mkosi/resources/mkosi.md" } ] mkosi-24.3/docs/_data/project_pages.json000066400000000000000000000007701465176501400203210ustar00rootroot00000000000000[ { "category": "Project", "title": "Brand", "url": "https://brand.systemd.io/" }, { "category": "Project", "title": "Releases", "url": "https://github.com/systemd/mkosi/releases" }, { "category": "Project", "title": "GitHub Project Page", "url": "https://github.com/systemd/mkosi" }, { "category": "Project", "title": "Issues", "url": "https://github.com/systemd/mkosi/issues" }, { "category": "Project", "title": "Pull Requests", "url": "https://github.com/systemd/mkosi/pulls" } ] mkosi-24.3/docs/_includes/000077500000000000000000000000001465176501400154725ustar00rootroot00000000000000mkosi-24.3/docs/_includes/footer.html000066400000000000000000000003211465176501400176520ustar00rootroot00000000000000 mkosi-24.3/docs/_includes/head.html000066400000000000000000000011441465176501400172610ustar00rootroot00000000000000 {% if page.title %}{{ page.title }}{% else %}{{ site.title }}{% endif %} mkosi-24.3/docs/_includes/header.html000066400000000000000000000004741465176501400176150ustar00rootroot00000000000000 mkosi-24.3/docs/_layouts/000077500000000000000000000000001465176501400153645ustar00rootroot00000000000000mkosi-24.3/docs/_layouts/default.html000066400000000000000000000004011465176501400176710ustar00rootroot00000000000000 {% include head.html %} {% include header.html %}
{{ content }}
{% include footer.html %} mkosi-24.3/docs/_layouts/forward.html000066400000000000000000000016041465176501400177170ustar00rootroot00000000000000 Redirecting to {{ page.target }} {% include header.html %}

This document has moved.
Redirecting to {{ page.target }}.

mkosi-24.3/docs/assets/000077500000000000000000000000001465176501400150275ustar00rootroot00000000000000mkosi-24.3/docs/assets/systemd-logo.svg000066400000000000000000000060601465176501400202000ustar00rootroot00000000000000 mkosi-24.3/docs/bootable.md000066400000000000000000000025571465176501400156470ustar00rootroot00000000000000--- title: Building a bootable image on different distros category: Tutorials layout: default SPDX-License-Identifier: LGPL-2.1-or-later --- # Building a bootable image on different distros To build a bootable image, you'll need to install a list of packages that differs depending on the distribution. We give an overview here of what's needed to generate a bootable image for some common distributions: ## Arch ```conf [Distribution] Distribution=arch [Content] Bootable=yes Packages=linux systemd ``` ## Fedora ```conf [Distribution] Distribution=fedora [Content] Bootable=yes Packages=kernel systemd systemd-boot udev util-linux ``` ## CentOS ```conf [Distribution] Distribution=centos [Content] Bootable=yes Packages=kernel systemd systemd-boot udev ``` ## Debian ```conf [Distribution] Distribution=debian [Content] Bootable=yes Packages=linux-image-generic systemd systemd-boot systemd-sysv udev dbus ``` ## Ubuntu ```conf [Distribution] Distribution=ubuntu Repositories=main,universe [Content] Bootable=yes Packages=linux-image-generic systemd systemd-sysv udev dbus ``` ## Opensuse ```conf [Distribution] Distribution=opensuse [Content] Bootable=yes Packages=kernel-default systemd udev ``` mkosi-24.3/docs/building-rpms-from-source.md000066400000000000000000000153371465176501400210730ustar00rootroot00000000000000--- title: Building RPMs from source with mkosi category: Tutorials layout: default SPDX-License-Identifier: LGPL-2.1-or-later --- # Building RPMs from source with mkosi If you want to build an RPM from source and install it within a mkosi image, you can do that with mkosi itself without using `mock`. The steps required are as follows: 1. Install `BuildRequires` dependencies in the build overlay 1. Install dynamic `BuildRequires` dependencies in the build overlay 1. Build the RPM with `rpmbuild` 1. Install the built rpms in the image In the following examples, we'll use mkosi itself and its Fedora RPM spec as an example. To keep things snappy, we execute the first 3 steps in a prepare script so that they're cached on subsequent runs of mkosi if the `Incremental=` setting is enabled. First, we need access to the upstream sources and the RPM spec and related files. These can be mounted into the current working directory when running mkosi scripts by using the `BuildSources=` setting. For example, in `mkosi.local.conf`, we could have the following settings: ```conf [Content] BuildSources=../mkosi:mkosi ../fedora/mkosi:mkosi/rpm BuildSourcesEphemeral=yes ``` Which instructs mkosi to mount the local version of the mkosi upstream repository at `../mkosi` to `mkosi` in the current working directory when running mkosi. The Fedora RPM spec is mounted at `mkosi/rpm`. We enable the `BuildSourcesEphemeral=` option as `rpmbuild` will write quite a few files to the source directory as part of building the rpm which we don't want to remain there after the build finishes. We use `rpmspec` and `rpmbuild`, but these do not really support running from outside of the image that the RPM is being built in, so we have to make sure they're available inside the image by adding the following to `mkosi.conf`: ```conf [Content] Packages=rpm-build # If you don't want rpm-build in the final image. 
RemovePackages=rpm-build ``` The prepare script `mkosi.prepare` then looks as follows: ```shell #!/bin/sh set -e if [ "$1" = "final" ]; then exit 0 fi mkosi-chroot \ env --chdir=mkosi \ rpmspec \ --query \ --buildrequires \ --define "_topdir /var/tmp" \ --define "_sourcedir rpm" \ rpm/mkosi.spec | sort --unique | tee /tmp/buildrequires | xargs --delimiter '\n' mkosi-install until mkosi-chroot \ env --chdir=mkosi \ rpmbuild \ -bd \ --build-in-place \ --define "_topdir /var/tmp" \ --define "_sourcedir rpm" \ --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \ rpm/mkosi.spec do EXIT_STATUS=$? if [ $EXIT_STATUS -ne 11 ]; then exit $EXIT_STATUS fi mkosi-chroot \ rpm \ --query \ --package \ --requires \ /var/tmp/SRPMS/mkosi-*.buildreqs.nosrc.rpm | grep --invert-match '^rpmlib(' | sort --unique >/tmp/dynamic-buildrequires sort /tmp/buildrequires /tmp/dynamic-buildrequires | uniq --unique | tee --append /tmp/buildrequires | xargs --delimiter '\n' mkosi-install done ``` To install non-dynamic dependencies, we use `rpmspec`. What's important is to set `_sourcedir` to the directory containing the RPM sources for the RPM spec that we want to build. We run `rpmspec` inside the image to make sure all the RPM macros have their expected values and then run `mkosi-install` outside the image to install the required dependencies. `mkosi-install` will invoke the package manager that's being used to build the image to install the given packages. We always set `_topdir` to `/var/tmp` to avoid polluting the image with `rpmbuild` artifacts. After installing non-dynamic `BuildRequires` dependencies, we have to install the dynamic `BuildRequires` dependencies by running `rpmbuild -bd` until it succeeds or fails with an exit code that's not `11`. After each run of `rpmbuild -bd` that exits with exit code `11`, there will be an SRPM in the `SRPMS` subdirectory of the rpm working directory (`_topdir`) of which the `BuildRequires` dependencies have to be installed. We retrieve the list of `BuildRequires` dependencies with `rpm` this time (because we're operating on a package instead of a spec), remove all `rpmlib` style dependencies which can't be installed and store them in a temporary file after filtering duplicates. Because the `BuildRequires` dependencies from the SRPM will also contain the non-dynamic `BuildRequires` dependencies, we have to filter those out as well. Now we have an image and build overlay with all the necessary dependencies installed to be able to build the RPM. Next is the build script. We suffix the build script with `.chroot` so that mkosi runs it entirely inside the image. In the build script, we invoke `rpmbuild -bb --build-in-place` to have `rpmbuild` build the RPM in place from the upstream sources. Because `--build-in-place` configures `_builddir` to the current working directory, we change directory to the upstream sources before invoking `rpmbuild`. Again, `_sourcedir` has to point to the RPM spec sources. We also have to override `_rpmdir` to point to the mkosi output directory (stored in `$OUTPUTDIR`). 
The build script `mkosi.build.chroot` then looks as follows:

```shell
#!/bin/sh
set -e

env --chdir=mkosi \
    rpmbuild \
    -bb \
    --build-in-place \
    $([ "$WITH_TESTS" = "0" ] && echo --nocheck) \
    --define "_topdir /var/tmp" \
    --define "_sourcedir rpm" \
    --define "_rpmdir $OUTPUTDIR" \
    ${BUILDDIR:+--define} \
    ${BUILDDIR:+"_vpath_builddir $BUILDDIR"} \
    --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \
    --define "_binary_payload w.ufdio" \
    --define "debug_package %{nil}" \
    --define "__brp_strip %{nil}" \
    --define "__brp_compress %{nil}" \
    --define "__brp_mangle_shebangs %{nil}" \
    --define "__brp_strip_comment_note %{nil}" \
    --define "__brp_strip_static_archive %{nil}" \
    rpm/mkosi.spec
```

The `_vpath_builddir` directory will be used to store out-of-tree build artifacts for build systems that support out-of-tree builds (CMake, Meson), so we set it to mkosi's out-of-tree build directory in `$BUILDDIR` if one is provided. This will make subsequent RPM builds much faster as CMake or Meson will be able to do an incremental build.

Setting `_binary_payload` to `w.ufdio` disables compression to speed up the build. We also disable debug package generation using `debug_package` and various rpm build root policy scripts to speed up the build. Note that the build root policy macros we use here are CentOS/Fedora specific.

After the build script finishes, the produced rpms will be located in `$OUTPUTDIR`. We can now install them from the `mkosi.postinst` post-installation script:

```shell
#!/bin/sh
set -e

rpm --install "$OUTPUTDIR"/*mkosi*.rpm
```
mkosi-24.3/docs/distribution-policy.md000066400000000000000000000055341465176501400200700ustar00rootroot00000000000000---
title: Adding new distributions
category: Tutorials
layout: default
SPDX-License-Identifier: LGPL-2.1-or-later
---

# Adding new distributions

Merging support for a new distribution in mkosi depends on a few factors. Not all of these are required, but the more of these requirements are satisfied, the better the chances of us merging support for your distribution:

1. Is the distribution somewhat popular? mkosi's goal is not to support every distribution under the sun; the distribution should have a substantial number of users.
2. Does the distribution differentiate itself somehow from the distributions that are already supported? We're generally not interested in supporting distributions that only consist of minimal configuration changes to another distribution.
3. Is there a long-term maintainer for the distribution in mkosi? When proposing support for a new distribution, we expect you to be the maintainer for the distribution and to respond when pinged for support on distribution-specific issues.
4. Does the distribution use a custom package manager or one of the already supported ones (apt, dnf, pacman, zypper)? Supporting new package managers in mkosi is generally a lot of work. We can support new ones if needed for a new distribution, but we will insist on the package manager having a somewhat sane design, with official support for building in a chroot and running unprivileged in a user namespace being the bare minimum features we expect from any new package manager.

We will only consider new distributions that satisfy all or most of these requirements. However, you can still use mkosi with such a distribution by setting the `Distribution` setting to `custom` and providing the rootfs yourself, either via a skeleton tree or base tree, or via a prepare script, as sketched below.
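For example, a minimal sketch of the skeleton-tree approach (the `vendor-rootfs.tar` archive is hypothetical; any pre-assembled rootfs would do):

```sh
# Unpack a pre-built rootfs into mkosi.skeleton/, which mkosi copies
# into the image before anything else runs.
mkdir -p mkosi.skeleton
tar -C mkosi.skeleton -xf vendor-rootfs.tar

# Build a disk image from it; with Distribution=custom, no package
# manager is invoked to populate the rootfs.
mkosi --distribution custom --format disk --force
```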
# Implementing new distributions

To actually implement a new distribution, the following checklist can be used:

- Add the distribution to the `Distribution` enum.
- Add the implementation of the distribution in `mkosi/distributions`. If the distribution is a variant of an existing distribution, inherit from the existing distribution's installer class and only override the necessary methods.
- Update any relevant methods on the `Distribution` enum to take the new distribution into account.
- Update the documentation in `mkosi/resources/mkosi.md`.
- Update the default initrd, tools and default image configurations in `mkosi/resources/mkosi-initrd`, `mkosi/resources/mkosi-tools` and `mkosi.conf.d` respectively. If the distribution is a variant of another existing distribution, update the `[Match]` blocks for the existing distribution to also match against the new distribution.

To test whether all necessary changes were made, you can run `mkosi -d <distribution> --tools-tree -t disk -f qemu`.
mkosi-24.3/docs/favicon.png000066400000000000000000000006121465176501400156570ustar00rootroot00000000000000
mkosi-24.3/docs/fonts/000077500000000000000000000000001465176501400146565ustar00rootroot00000000000000
mkosi-24.3/docs/fonts/heebo-bold.woff000066400000000000000000001235341465176501400175470ustar00rootroot00000000000000
mkosi-24.3/docs/fonts/heebo-regular.woff000066400000000000000000001232601465176501400202660ustar00rootroot00000000000000
mkosi-24.3/docs/index.md000066400000000000000000000014151465176501400151570ustar00rootroot00000000000000---
layout: default
SPDX-License-Identifier: LGPL-2.1-or-later
---

# mkosi — Build Bespoke OS Images

A fancy wrapper around `dnf --installroot`, `apt`, `pacman` and `zypper` that generates customized disk images with a number of bells and whistles.

---

{% assign tutorials = site.pages | group_by:"category" %}
{% assign project = site.data.project_pages | group_by:"category" %}
{% assign documentation = site.data.documentation_page | group_by:"category" %}
{% assign merged = documentation | concat: tutorials | concat: project %}
{% for pair in merged %}
{% if pair.name != "" %}

## {{ pair.name }}

{% assign sorted = pair.items | sort:"title" %}{% for page in sorted %}
* [{{ page.title }}]({{ page.url | relative_url }}){% endfor %}
{% endif %}
{% endfor %}

---
mkosi-24.3/docs/initrd.md000066400000000000000000000017351465176501400153460ustar00rootroot00000000000000---
title: Building a custom initrd and using it in a mkosi image
category: Tutorials
layout: default
SPDX-License-Identifier: LGPL-2.1-or-later
---

# Building a custom initrd and using it in a mkosi image

Building an image with a mkosi-built initrd is a two-step process, because you will build two images: the initrd and your distribution image.

1. Build an initrd image using the `cpio` output format with the same target distribution as you want to use for your distribution image. mkosi compresses the `cpio` output format by default.

```conf
[Output]
Format=cpio

[Content]
Packages=systemd
         udev
         kmod
```

2. Invoke `mkosi` passing the initrd image via the `--initrd` option, or add the `Initrd=` option to your mkosi config, when building your distribution image.

```bash
mkosi --initrd=<path-to-initrd-image> ...
```

This will build an image using the provided initrd image. mkosi will add the kernel modules found in the distribution image to this initrd.
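Putting the two steps together, the full flow might look as follows (a sketch; the `initrd/` directory holding the initrd config and the exact name of the produced initrd file are assumptions, so adjust the path to what step 1 actually produces):

```bash
# Step 1: build the initrd image from its own config directory.
mkosi -C initrd/ -f

# Step 2: build the distribution image with the initrd from step 1.
mkosi --initrd=initrd/image.cpio.zst -f
```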
mkosi-24.3/docs/style.css000066400000000000000000000221421465176501400154000ustar00rootroot00000000000000/* SPDX-License-Identifier: LGPL-2.1-or-later */ @font-face { font-family: 'Heebo'; src: url('fonts/heebo-regular.woff'); font-weight: 400; } @font-face { font-family: 'Heebo'; src: url('fonts/heebo-bold.woff'); font-weight: 600; } /* Variables */ :root { --sd-brand-black: hsl(270, 19%, 13%); /* #201A26; */ --sd-brand-green: hsl(145, 66%, 51%); /* #30D475; */ --sd-brand-white: #fff; --sd-black: hsl(270, 7%, 13%); --sd-green: hsl(145, 66%, 43%); /* #26b763 */ --sd-gray-extralight: hsl(30, 10%, 96%); /* #f6f5f4 */ --sd-gray-light: hsl(30, 10%, 92%); --sd-gray: hsl(30, 10%, 85%); --sd-gray-dark: hsl(257, 23%, 20%); --sd-gray-extradark: hsl(257, 23%, 16%); /* #241f31 */ --sd-blue: hsl(200, 66%, 55%); --sd-highlight-bg-light: rgba(255, 255, 255, 1); --sd-highlight-bg-dark: rgba(0, 0, 0, .6); --sd-highlight-inline-bg-light: rgba(0, 0, 0, 0.07); --sd-highlight-inline-bg-dark: rgba(255, 255, 255, 0.1); --sd-font-weight-normal: 400; --sd-font-weight-bold: 600; /* Light mode variables */ --sd-foreground-color: var(--sd-gray-extradark); --sd-background-color: var(--sd-gray-extralight); --sd-logo-color: var(--sd-brand-black); --sd-link-color: var(--sd-green); --sd-small-color: var(--sd-gray-dark); --sd-highlight-bg: var(--sd-highlight-bg-light); --sd-highlight-inline-bg: var(--sd-highlight-inline-bg-light); --sd-link-font-weight: var(--sd-font-weight-bold); --sd-table-row-bg: var(--sd-highlight-inline-bg-light); --sd-table-row-hover-bg: var(--sd-gray); } @media (prefers-color-scheme: dark) { :root { color-scheme: dark; --sd-foreground-color: var(--sd-gray); --sd-background-color: var(--sd-black); --sd-logo-color: var(--sd-brand-white); --sd-link-color: var(--sd-brand-green); --sd-small-color: var(--sd-gray); --sd-highlight-bg: var(--sd-highlight-bg-dark); --sd-highlight-inline-bg: var(--sd-highlight-inline-bg-dark); --sd-link-font-weight: var(--sd-font-weight-normal); --sd-table-row-bg: var(--sd-highlight-inline-bg-dark); --sd-table-row-hover-bg: var(--sd-highlight-bg-dark); } } /* Typography */ * { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; } html, body { margin: 0; padding: 0; font-size: 1rem; font-family: "Heebo", sans-serif; font-weight: 400; line-height: 1.6; } body { color: var(--sd-foreground-color); background-color: var(--sd-background-color); } h1, h2, h3, h4, h5, h6 { margin: 1rem 0 0.625rem; font-weight: 600; line-height: 1.25; } h1 { text-align: center; font-size: 1.87rem; font-weight: 400; font-style: normal; margin-bottom: 2rem; } @media screen and (min-width: 650px) { img { margin-left: 10%; margin-right: 10%; } h1 { font-size: 2.375em; } } h2 { font-size: 1.25rem; margin-top: 2.5em; } h3 { font-size: 1.15rem; } a { font-weight: var(--sd-link-font-weight); text-decoration: none; color: var(--sd-link-color); cursor: pointer; } a:hover { text-decoration: underline; } b { font-weight: 600; } small { color: var(--sd-small-color); } hr { margin: 3rem auto 4rem; width: 40%; opacity: 40%; } /* Layout */ .container { width: 80%; margin-left: auto; margin-right: auto; max-width: 720px; } /* Singletons */ .page-logo { display: block; padding: 5rem 0 3rem; color: var(--sd-logo-color); } .page-logo > svg { display: block; width: 12.625em; height: auto; margin: 0 auto; } .color-green { color: var(--sd-brand-green); } .color-blue { color: var(--sd-blue); } .page-link::after { content: " ➜"; } /* Footer */ footer { text-align: center; padding: 3em 0 3em; 
font-size: 1em; margin-top: 4rem; } @media (prefers-color-scheme: light) { .highlight .cm { color: #999988; font-style: italic; } .highlight .cp { color: #999999; font-weight: bold; } .highlight .c1 { color: #999988; font-style: italic; } .highlight .cs { color: #999999; font-weight: bold; font-style: italic; } .highlight .c, .highlight .ch, .highlight .cd, .highlight .cpf { color: #999988; font-style: italic; } .highlight .err { color: #a61717; background-color: #e3d2d2; } .highlight .gd { color: #000000; background-color: #ffdddd; } .highlight .ge { color: #000000; font-style: italic; } .highlight .gr { color: #aa0000; } .highlight .gh { color: #999999; } .highlight .gi { color: #000000; background-color: #ddffdd; } .highlight .go { color: #888888; } .highlight .gp { color: #555555; } .highlight .gs { font-weight: bold; } .highlight .gu { color: #aaaaaa; } .highlight .gt { color: #aa0000; } .highlight .kc { color: #000000; font-weight: bold; } .highlight .kd { color: #000000; font-weight: bold; } .highlight .kn { color: #000000; font-weight: bold; } .highlight .kp { color: #000000; font-weight: bold; } .highlight .kr { color: #000000; font-weight: bold; } .highlight .kt { color: #445588; font-weight: bold; } .highlight .k, .highlight .kv { color: #000000; font-weight: bold; } .highlight .mf { color: #009999; } .highlight .mh { color: #009999; } .highlight .il { color: #009999; } .highlight .mi { color: #009999; } .highlight .mo { color: #009999; } .highlight .m, .highlight .mb, .highlight .mx { color: #009999; } .highlight .sa { color: #000000; font-weight: bold; } .highlight .sb { color: #d14; } .highlight .sc { color: #d14; } .highlight .sd { color: #d14; } .highlight .s2 { color: #d14; } .highlight .se { color: #d14; } .highlight .sh { color: #d14; } .highlight .si { color: #d14; } .highlight .sx { color: #d14; } .highlight .sr { color: #009926; } .highlight .s1 { color: #d14; } .highlight .ss { color: #990073; } .highlight .s, .highlight .dl { color: #d14; } .highlight .na { color: #008080; } .highlight .bp { color: #999999; } .highlight .nb { color: #0086B3; } .highlight .nc { color: #445588; font-weight: bold; } .highlight .no { color: #008080; } .highlight .nd { color: #3c5d5d; font-weight: bold; } .highlight .ni { color: #800080; } .highlight .ne { color: #990000; font-weight: bold; } .highlight .nf, .highlight .fm { color: #990000; font-weight: bold; } .highlight .nl { color: #990000; font-weight: bold; } .highlight .nn { color: #555555; } .highlight .nt { color: #000080; } .highlight .vc { color: #008080; } .highlight .vg { color: #008080; } .highlight .vi { color: #008080; } .highlight .nv, .highlight .vm { color: #008080; } .highlight .ow { color: #000000; font-weight: bold; } .highlight .o { color: #000000; font-weight: bold; } .highlight .w { color: #bbbbbb; } } @media (prefers-color-scheme: dark) { /* rouge "base16.dark" code highlight */ /* generated with: rougify style base16.dark | sed '/background-color: #151515/d' */ .highlight, .highlight .w { color: #d0d0d0; } .highlight .err { color: #151515; background-color: #ac4142; } .highlight .c, .highlight .ch, .highlight .cd, .highlight .cm, .highlight .cpf, .highlight .c1, .highlight .cs { color: #505050; } .highlight .cp { color: #f4bf75; } .highlight .nt { color: #f4bf75; } .highlight .o, .highlight .ow { color: #d0d0d0; } .highlight .p, .highlight .pi { color: #d0d0d0; } .highlight .gi { color: #90a959; } .highlight .gd { color: #ac4142; } .highlight .gh { color: #6a9fb5; font-weight: bold; } .highlight .k, .highlight 
.kn, .highlight .kp, .highlight .kr, .highlight .kv { color: #aa759f; } .highlight .kc { color: #d28445; } .highlight .kt { color: #d28445; } .highlight .kd { color: #d28445; } .highlight .s, .highlight .sb, .highlight .sc, .highlight .dl, .highlight .sd, .highlight .s2, .highlight .sh, .highlight .sx, .highlight .s1 { color: #90a959; } .highlight .sa { color: #aa759f; } .highlight .sr { color: #75b5aa; } .highlight .si { color: #8f5536; } .highlight .se { color: #8f5536; } .highlight .nn { color: #f4bf75; } .highlight .nc { color: #f4bf75; } .highlight .no { color: #f4bf75; } .highlight .na { color: #6a9fb5; } .highlight .m, .highlight .mb, .highlight .mf, .highlight .mh, .highlight .mi, .highlight .il, .highlight .mo, .highlight .mx { color: #90a959; } .highlight .ss { color: #90a959; } } /* Code Blocks */ .highlighter-rouge { padding: 2px 1rem; border-radius: 5px; color: var(--sd-foreground-color); background-color: var(--sd-highlight-bg); overflow: auto; } .highlighter-rouge .highlight .err { background: transparent !important; color: inherit !important; } /* Inline Code */ code.highlighter-rouge { padding: 2px 6px; background-color: var(--sd-highlight-inline-bg); } a code.highlighter-rouge { color: inherit; } mkosi-24.3/docs/sysext.md000066400000000000000000000051311465176501400154060ustar00rootroot00000000000000--- title: Building system extensions with mkosi category: Tutorials layout: default SPDX-License-Identifier: LGPL-2.1-or-later --- # Building system extensions with mkosi [System extension](https://uapi-group.org/specifications/specs/extension_image/) images may – dynamically at runtime — extend the base system with an overlay containing additional files. To build system extensions with mkosi, we first have to create a base image on top of which we can build our extension. To keep things manageable, we'll use mkosi's support for building multiple images so that we can build our base image and system extension in one go. Start by creating a temporary directory with a base configuration file `mkosi.conf` with some shared settings: ```conf [Output] OutputDirectory=mkosi.output CacheDirectory=mkosi.cache ``` From now on we'll assume all steps are executed inside the temporary directory. Now let's continue with the base image definition by writing the following to `mkosi.images/base/mkosi.conf`: ```conf [Output] Format=directory [Content] CleanPackageMetadata=no Packages=systemd udev ``` We use the `directory` output format here instead of the `disk` output so that we can build our extension without needing root privileges. Now that we have our base image, we can define a sysext that builds on top of it by writing the following to `mkosi.images/btrfs/mkosi.conf`: ```conf [Config] Dependencies=base [Output] Format=sysext Overlay=yes [Content] BaseTrees=%O/base Packages=btrfs-progs ``` `BaseTrees=` points to our base image and `Overlay=yes` instructs mkosi to only package the files added on top of the base tree. We can't sign the extension image without a key, so let's generate one with `mkosi genkey` (or write your own private key and certificate yourself to `mkosi.key` and `mkosi.crt` respectively). Note that this key will need to be loaded into your kernel keyring either at build time or via MOK for systemd to accept the system extension at runtime as trusted. Finally, you can build the base image and the extension by running `mkosi -f`. You'll find `btrfs.raw` in `mkosi.output` which is the extension image. 
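Before moving on, you can give the extension a quick test. The following is a
sketch that assumes a host running systemd with `systemd-sysext` available,
and that the host trusts the extension's signing key as described above; the
paths are illustrative.

```bash
# Inspect the generated extension image.
systemd-dissect mkosi.output/btrfs.raw

# Make the extension visible to systemd-sysext and merge it into /usr.
sudo mkdir -p /run/extensions
sudo cp mkosi.output/btrfs.raw /run/extensions/
sudo systemd-sysext merge

# Confirm the overlay is active, then undo it when done.
systemd-sysext status
sudo systemd-sysext unmerge
```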
You'll also find the main image `image.raw` in `mkosi.output`, but it will be
almost empty. What we can do now is package up the base image as the main
image, but in another format, for example as an initrd. We can do that by
adding the following to `mkosi.conf`:

```conf
[Output]
Format=cpio
Output=initrd

[Content]
MakeInitrd=yes
BaseTrees=%O/base
```

If we now run `mkosi -f` again, we'll find `initrd.cpio.zst` in `mkosi.output`
with its accompanying extension still in `btrfs.raw`.
mkosi-24.3/kernel-install/000077500000000000000000000000001465176501400155215ustar00rootroot00000000000000mkosi-24.3/kernel-install/50-mkosi.install000066400000000000000000000114061465176501400204570ustar00rootroot00000000000000#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later
import argparse
import dataclasses
import logging
import os
import shutil
import tempfile
from pathlib import Path
from typing import Optional

from mkosi import identify_cpu
from mkosi.archive import make_cpio
from mkosi.config import OutputFormat, __version__
from mkosi.log import die, log_setup
from mkosi.run import run, uncaught_exception_handler
from mkosi.types import PathString
from mkosi.util import umask


@dataclasses.dataclass(frozen=True)
class Context:
    command: str
    kernel_version: str
    entry_dir: Path
    kernel_image: Path
    initrds: list[Path]
    staging_area: Path
    layout: str
    image_type: str
    initrd_generator: Optional[str]
    uki_generator: Optional[str]
    verbose: bool


def we_are_wanted(context: Context) -> bool:
    return context.uki_generator == "mkosi" or context.initrd_generator in ("mkosi", "mkosi-initrd")


def mandatory_variable(name: str) -> str:
    try:
        return os.environ[name]
    except KeyError:
        die(f"${name} must be set in the environment")


def build_microcode_initrd(output: Path) -> Optional[Path]:
    vendor, ucode = identify_cpu(Path("/"))

    if vendor is None:
        logging.warning("Unable to determine the vendor of your CPU, not adding microcode")
        return None

    if ucode is None:
        logging.warning("Unable to find microcode for your CPU in /usr/lib/firmware, not adding microcode")
        return None

    with tempfile.TemporaryDirectory() as tmp:
        root = Path(tmp) / "initrd-microcode-root"
        destdir = root / "kernel/x86/microcode"

        with umask(~0o755):
            destdir.mkdir(parents=True, exist_ok=True)

        with (destdir / f"{vendor}.bin").open("wb") as f:
            f.write(ucode.read_bytes())

        make_cpio(root, output)

    return output


@uncaught_exception_handler()
def main() -> None:
    log_setup()

    parser = argparse.ArgumentParser(
        description='kernel-install plugin to build initrds or Unified Kernel Images using mkosi',
        allow_abbrev=False,
        usage='50-mkosi.install COMMAND KERNEL_VERSION ENTRY_DIR KERNEL_IMAGE INITRD…',
    )
    parser.add_argument(
        "command",
        metavar="COMMAND",
        help="The action to perform.
Only 'add' is supported.", ) parser.add_argument( "kernel_version", metavar="KERNEL_VERSION", help="Kernel version string", ) parser.add_argument( "entry_dir", metavar="ENTRY_DIR", type=Path, nargs="?", help="Type#1 entry directory (ignored)", ) parser.add_argument( "kernel_image", metavar="KERNEL_IMAGE", type=Path, nargs="?", help="Kernel image", ) parser.add_argument( "initrds", metavar="INITRD…", type=Path, nargs="*", help="Initrd files", ) parser.add_argument( "--version", action="version", version=f"mkosi {__version__}", ) context = Context( **vars(parser.parse_args()), staging_area=Path(mandatory_variable("KERNEL_INSTALL_STAGING_AREA")), layout=mandatory_variable("KERNEL_INSTALL_LAYOUT"), image_type=mandatory_variable("KERNEL_INSTALL_IMAGE_TYPE"), initrd_generator=os.getenv("KERNEL_INSTALL_INITRD_GENERATOR"), uki_generator=os.getenv("KERNEL_INSTALL_UKI_GENERATOR"), verbose=int(os.getenv("KERNEL_INSTALL_VERBOSE", 0)) > 0, ) if context.command != "add" or not we_are_wanted(context): return # If kernel-install was passed a UKI, there's no need to build anything ourselves. if context.image_type == "uki": return # If the initrd was provided on the kernel command line, we shouldn't generate our own. if context.layout != "uki" and context.initrds: return format = OutputFormat.uki if context.layout == "uki" and context.uki_generator == "mkosi" else OutputFormat.cpio output = "initrd" if format == OutputFormat.cpio else "uki" cmdline: list[PathString] = [ "mkosi-initrd", "--kernel-version", context.kernel_version, "--format", str(format), "--output", output, "--output-dir", context.staging_area, ] if context.verbose: cmdline += ["--debug"] logging.info(f"Building {output}") run(cmdline) (context.staging_area / output).unlink() if format == OutputFormat.cpio: shutil.move(next(context.staging_area.glob("initrd*.cpio*")), context.staging_area / "initrd") build_microcode_initrd(context.staging_area / "microcode") else: (context.staging_area / f"{output}.vmlinuz").unlink() (context.staging_area / f"{output}.initrd").unlink() if __name__ == '__main__': main() mkosi-24.3/mkosi-initrd000077700000000000000000000000001465176501400226142mkosi/resources/mkosi-initrdustar00rootroot00000000000000mkosi-24.3/mkosi.conf000066400000000000000000000020541465176501400145670ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Output] # These images are (among other things) used for running mkosi which means we need some disk space available so # default to directory output where disk space isn't a problem. Format=directory CacheDirectory=mkosi.cache OutputDirectory=mkosi.output [Content] Autologin=yes SELinuxRelabel=no ShimBootloader=unsigned BuildSources=. BuildSourcesEphemeral=yes Packages= binutils gdb less nano strace sudo systemd tmux udev InitrdPackages= less RemoveFiles= # The grub install plugin doesn't play nice with booting from virtiofs. /usr/lib/kernel/install.d/20-grub.install # The dracut install plugin doesn't honor KERNEL_INSTALL_INITRD_GENERATOR. /usr/lib/kernel/install.d/50-dracut.install # Make sure that SELinux doesn't run in enforcing mode even if it's pulled in as a dependency. 
KernelCommandLine=enforcing=0 KernelModulesInitrdExclude=.* KernelModulesInitrdInclude=default [Host] QemuMem=4G mkosi-24.3/mkosi.conf.d/000077500000000000000000000000001465176501400150655ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/15-bootable.conf000066400000000000000000000002371465176501400177500ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Format=|disk Format=|directory [Match] Architecture=|x86-64 Architecture=|arm64 [Content] Bootable=yes mkosi-24.3/mkosi.conf.d/15-memory.conf000066400000000000000000000001561465176501400174710ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Format=|esp Format=|uki Format=|cpio [Host] QemuMem=8G mkosi-24.3/mkosi.conf.d/15-metadata.conf000066400000000000000000000003601465176501400177360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # Remove package manager metadata on cpio based output formats # to keep memory usage within reasonable limits. [Match] Format=|uki Format=|esp Format=|cpio [Content] CleanPackageMetadata=yes mkosi-24.3/mkosi.conf.d/15-x86-64.conf000066400000000000000000000005011465176501400170270ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 # We cannot install the grub tools in the OpenSUSE tools tree due to # https://bugzilla.opensuse.org/show_bug.cgi?id=1227464. # TODO: Remove this again when the above bug is resolved. ToolsTreeDistribution=!opensuse [Content] BiosBootloader=grub mkosi-24.3/mkosi.conf.d/20-arch.conf000066400000000000000000000005201465176501400170650ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=arch [Content] Packages= base bash dbus-broker dbus-broker-units grub iproute iputils linux openssh perf polkit python qemu-user-static shim strace mkosi-24.3/mkosi.conf.d/20-centos/000077500000000000000000000000001465176501400165775ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-centos/mkosi.conf000066400000000000000000000003761465176501400205760ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky [Distribution] Release=9 [Content] # CentOS Stream 10 does not ship an unsigned shim ShimBootloader=none Packages= linux-firmware mkosi-24.3/mkosi.conf.d/20-centos/mkosi.conf.d/000077500000000000000000000000001465176501400210675ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-centos/mkosi.conf.d/epel.conf000066400000000000000000000002641465176501400226650ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Release=9 [Distribution] Repositories=epel,epel-next [Content] Packages= rpmautospec rpmautospec-rpm-macros mkosi-24.3/mkosi.conf.d/20-debian/000077500000000000000000000000001465176501400165265ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-debian/mkosi.conf000066400000000000000000000002611465176501400205160ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=debian [Distribution] Release=testing Repositories=non-free-firmware [Content] Packages= linux-perf mkosi-24.3/mkosi.conf.d/20-debian/mkosi.conf.d/000077500000000000000000000000001465176501400210165ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-debian/mkosi.conf.d/20-arm64.conf000066400000000000000000000001761465176501400230410ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=arm64 [Content] Packages= linux-image-cloud-arm64 
mkosi-24.3/mkosi.conf.d/20-debian/mkosi.conf.d/20-x86-64.conf000066400000000000000000000001771465176501400227650ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= linux-image-cloud-amd64 mkosi-24.3/mkosi.conf.d/20-debian/mkosi.conf.d/20-x86.conf000066400000000000000000000001641465176501400225320ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86 [Content] Packages= linux-image-686 mkosi-24.3/mkosi.conf.d/20-fedora/000077500000000000000000000000001465176501400165445ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-fedora/mkosi.conf000066400000000000000000000003051465176501400205330ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Distribution] Release=rawhide [Content] Packages= qemu-user-static rpmautospec systemd-networkd mkosi-24.3/mkosi.conf.d/20-fedora/mkosi.conf.d/000077500000000000000000000000001465176501400210345ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-fedora/mkosi.conf.d/20-arm64.conf000066400000000000000000000001741465176501400230550ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=arm64 [Content] Packages= shim-unsigned-aarch64 mkosi-24.3/mkosi.conf.d/20-fedora/mkosi.conf.d/20-x86_64.conf000066400000000000000000000002241465176501400230560ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= amd-ucode-firmware shim-unsigned-x64 mkosi-24.3/mkosi.conf.d/20-opensuse/000077500000000000000000000000001465176501400171455ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-opensuse/mkosi.conf000066400000000000000000000011371465176501400211400ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Distribution] Release=tumbleweed [Content] # OpenSUSE does not ship an unsigned shim ShimBootloader=none Packages= bash diffutils iproute iputils kernel-kvmsmall libtss2-esys0 libtss2-mu0 libtss2-rc0 libtss2-tcti-device0 openssh-clients openssh-server patterns-base-minimal_base perf polkit python3 qemu-linux-user shim strace sudo-policy-wheel-auth-self systemd-boot mkosi-24.3/mkosi.conf.d/20-opensuse/mkosi.conf.d/000077500000000000000000000000001465176501400214355ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-opensuse/mkosi.conf.d/x86-64.conf000066400000000000000000000003061465176501400231570ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= grub2-efi grub2-i386-pc grub2-x86_64-efi ucode-amd ucode-intel mkosi-24.3/mkosi.conf.d/20-rhel-ubi.conf000066400000000000000000000001751465176501400176650ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=rhel-ubi [Distribution] Release=9 [Content] Bootable=no mkosi-24.3/mkosi.conf.d/20-ubuntu/000077500000000000000000000000001465176501400166265ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-ubuntu/mkosi.conf000066400000000000000000000003131465176501400206140ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=ubuntu [Distribution] Release=noble Repositories=universe [Content] Packages= linux-image-virtual linux-tools-virtual 
mkosi-24.3/mkosi.conf.d/20-ubuntu/mkosi.conf.d/000077500000000000000000000000001465176501400211165ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-ubuntu/mkosi.conf.d/20-jammy.conf000066400000000000000000000001661465176501400233240ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Release=jammy [Distribution] PackageManagerTrees=mkosi.pkgmngr mkosi-24.3/mkosi.conf.d/20-ubuntu/mkosi.pkgmngr/000077500000000000000000000000001465176501400214145ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-ubuntu/mkosi.pkgmngr/etc/000077500000000000000000000000001465176501400221675ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-ubuntu/mkosi.pkgmngr/etc/apt/000077500000000000000000000000001465176501400227535ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-ubuntu/mkosi.pkgmngr/etc/apt/sources.list.d/000077500000000000000000000000001465176501400256325ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/20-ubuntu/mkosi.pkgmngr/etc/apt/sources.list.d/kernel-utils.sources000066400000000000000000000035131465176501400316570ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later Enabled: yes Types: deb URIs: https://ppa.launchpadcontent.net/michel-slm/kernel-utils/ubuntu Suites: jammy Components: main Signed-By: -----BEGIN PGP PUBLIC KEY BLOCK----- . xsFNBGJyEb0BEADR6CoHmzotr+Z1UmqkNQZ/z+tm4u3/KbjD/UKHOloAJK2L2Mf6 Eq1hbs2MlEYa7VtYcfq+NqluvTtqHckgE6sTFGbxQXMUDK+bcxqXmQUQcRw/Wytl BgRr6fCA+pK82W6Z5eFyYsfbZMqnIqw3rbtx02K43KFGOiP8Pj/FFcPXzf9q3+3k EXELs8y6N7OEYeloEs45RnBwkKETvMX08zbTZfn5owfYZRd4VpIZJ2BnZprzdzfP Z1ZTkkDTAUpZvpXFi5WtDx6rVP92+7OYxOjUKzQ9wFbKdkZVzqhfJIR2SHM2tHVz GwIZl6vYsAqLLZccQSS4nBCXIzUwRO28LwqzacjKabl197fk0C/IKFw2Z4/ZsCHb VcrnttD3G1AvQ+DMvEPTzB9vD4R7uNEkmg4UYokzk6yW9/KQm3lNbQBS+D5jZREv hRyz4ZLW9wzz13H655nXzJCIZvLWVpRLNzQDscxYlkYBONoH+HiGafsgZqw+ITc3 tDPTw0HqRNe+/oendXqOhtKhY2PRhD3vu5NgXLX7GuAHr7Dq9HMUyA5MxKH90e28 vaYUmwU0jfYGgNnSCpRrAOx0SlqKpMlwW9VkpJctGrYMZ/ts6yPdJC2OSWiJRlMa Xmf4IvsLrCGobd27y03TYl5Uq6OOpD0DSP2hGZQxYHOgMhhZxT99IJUlMwARAQAB zShMYXVuY2hwYWQgUFBBIGZvciBNaWNoZWwgQWxleGFuZHJlIFNhbGltwsGOBBMB CgA4FiEE+8ojhpUuuImlrBJjySgIhqCuA8oFAmJyEb0CGwMFCwkIBwIGFQoJCAsC BBYCAwECHgECF4AACgkQySgIhqCuA8orpw//YWpifbMc2F3zNx8oW2UyTsr2IXtu 4/pHVtDroYokMOvCy2IR6FhzXSMM35yQBVfn92T5MiG0pHqXNUIZstt/m3Qo7tnj 1AR9f0mRLTKHONQCUP91CiVHGGKfaYHiyQ9Pxxp+LUxFkoEXUfQPl6N0wfGCd0Rb k9gcFlOo3+duOFsd+1Aw3Gi83SFcl6Bc4P/3i+dfB4g1Nbte0ZzPnFWKKYRV0K6z 5uDbJJYdIS+nwIxVXb7cnMUrrHBr4cUDsIXnAwVN+zeK7Q4CrJOpR6ZDGNb5SGcO TaJPEOpCIcIKkzW9IzYm5NTzxhQHg7jvCPrGBuX3nTt1fEzCn5L2se9iwehtsMat WXwi+yIYlpce2vHRPZEb8ILMoCL50veAAZ2tAlHx9UnAPNtT+1PPzrKPcIVCAB2e fBgUBcCaQ62LWsIQX1B9qL4xhGX7Z4nFk2aXNlrHjnnf5gwFCJ/XiVuFgGetfRrV r2PgfFFOfUanJ4LMu8sfqurrNJXrYMHfA8+qIbTLyltlqsOiEROOa/Qje5KEqmbe vg/hbqRpGNHdYKP1OynqBK8VAgG9/g5qGR8FLXr1DXl5dzlqyiIkRQINd9O6XjnX LWPl1wsOXOCY/jWgMxktt8Mv9qaaZ4CT9cuwsm/aml270A3GKRYHLDFP3CkuMnqd 0vsZgWMIQtgQmXU= =7vwW -----END PGP PUBLIC KEY BLOCK----- mkosi-24.3/mkosi.conf.d/30-centos-fedora/000077500000000000000000000000001465176501400200365ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/30-centos-fedora/mkosi.conf000066400000000000000000000005621465176501400220320ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|fedora [Content] Packages= bash iproute iputils kernel-core openssh-clients openssh-server perf polkit python3 rpm-build strace systemd-resolved 
mkosi-24.3/mkosi.conf.d/30-centos-fedora/mkosi.conf.d/000077500000000000000000000000001465176501400223265ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/30-centos-fedora/mkosi.conf.d/20-arm64.conf000066400000000000000000000001751465176501400243500ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=arm64 [Content] Packages= grub2-efi-aa64-modules mkosi-24.3/mkosi.conf.d/30-centos-fedora/mkosi.conf.d/20-uefi.conf000066400000000000000000000002501465176501400243410ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=|x86-64 Architecture=|arm64 [Content] Packages= grub2-efi shim systemd-boot mkosi-24.3/mkosi.conf.d/30-centos-fedora/mkosi.conf.d/20-x86-64.conf000066400000000000000000000002741465176501400242730ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= grub2-efi-x64-modules grub2-pc kernel-uki-virt microcode_ctl mkosi-24.3/mkosi.conf.d/30-debian-ubuntu/000077500000000000000000000000001465176501400200475ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/30-debian-ubuntu/mkosi.conf000066400000000000000000000007621465176501400220450ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|ubuntu [Content] Packages= ^libtss2-esys-[0-9.]+-0$ ^libtss2-mu[0-9.-]+$ bash dbus-broker iproute2 iputils-ping libtss2-rc0 libtss2-tcti-device0 openssh-client openssh-server polkitd python3 qemu-user-static shim-signed strace systemd-coredump systemd-sysv tzdata mkosi-24.3/mkosi.conf.d/30-debian-ubuntu/mkosi.conf.d/000077500000000000000000000000001465176501400223375ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/30-debian-ubuntu/mkosi.conf.d/20-ext4-orphan-file.conf000066400000000000000000000004551465176501400265170ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=ubuntu Release=jammy [Content] # "orphan_file" is enabled by default in recent versions of mkfs.ext4 but not supported by the Jammy kernel so we # explicitly disable it. 
Environment=SYSTEMD_REPART_MKFS_OPTIONS_EXT4="-O ^orphan_file" mkosi-24.3/mkosi.conf.d/30-debian-ubuntu/mkosi.conf.d/20-systemd-extra.conf000066400000000000000000000002101465176501400262270ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Release=!jammy [Content] Packages= systemd-boot systemd-resolved mkosi-24.3/mkosi.conf.d/30-debian-ubuntu/mkosi.conf.d/20-x86-64.conf000066400000000000000000000003131465176501400242760ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= amd64-microcode grub-efi grub-efi-amd64 grub-pc-bin intel-microcode mkosi-24.3/mkosi.conf.d/30-rpm/000077500000000000000000000000001465176501400161035ustar00rootroot00000000000000mkosi-24.3/mkosi.conf.d/30-rpm/mkosi.build.chroot000077500000000000000000000005501465176501400215460ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later set -ex rpmbuild \ -bb \ --build-in-place \ $([ "$WITH_TESTS" = "0" ] && echo --nocheck) \ --define "_topdir /var/tmp" \ --define "_sourcedir rpm" \ --define "_rpmdir $PACKAGEDIR" \ --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \ rpm/mkosi.spec mkosi-24.3/mkosi.conf.d/30-rpm/mkosi.conf000066400000000000000000000002331465176501400200720ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] BuildSources=rpm Distribution=fedora [Content] VolatilePackages= mkosi mkosi-initrd mkosi-24.3/mkosi.conf.d/30-rpm/mkosi.prepare000077500000000000000000000020461465176501400206120ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later set -e mkosi-chroot \ rpmspec \ --query \ --buildrequires \ --define "_topdir /var/tmp" \ --define "_sourcedir rpm" \ rpm/mkosi.spec | sort --unique | tee /tmp/buildrequires | xargs --delimiter '\n' mkosi-install until mkosi-chroot \ rpmbuild \ -bd \ --build-in-place \ --define "_topdir /var/tmp" \ --define "_sourcedir rpm" \ --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \ rpm/mkosi.spec do EXIT_STATUS=$? 
if [ $EXIT_STATUS -ne 11 ]; then exit $EXIT_STATUS fi mkosi-chroot \ rpm \ --query \ --package \ --requires \ /var/tmp/SRPMS/mkosi-*.buildreqs.nosrc.rpm | grep --invert-match '^rpmlib(' | sort --unique >/tmp/dynamic-buildrequires sort /tmp/buildrequires /tmp/dynamic-buildrequires | uniq --unique | tee --append /tmp/buildrequires | xargs --delimiter '\n' mkosi-install done mkosi-24.3/mkosi.extra/000077500000000000000000000000001465176501400150415ustar00rootroot00000000000000mkosi-24.3/mkosi.extra/usr/000077500000000000000000000000001465176501400156525ustar00rootroot00000000000000mkosi-24.3/mkosi.extra/usr/lib/000077500000000000000000000000001465176501400164205ustar00rootroot00000000000000mkosi-24.3/mkosi.extra/usr/lib/repart.d/000077500000000000000000000000001465176501400201375ustar00rootroot00000000000000mkosi-24.3/mkosi.extra/usr/lib/repart.d/root.conf000066400000000000000000000001041465176501400217640ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] Type=root mkosi-24.3/mkosi.extra/usr/lib/systemd/000077500000000000000000000000001465176501400201105ustar00rootroot00000000000000mkosi-24.3/mkosi.extra/usr/lib/systemd/mkosi-check-and-shutdown.sh000077500000000000000000000003511465176501400252540ustar00rootroot00000000000000#!/bin/bash # SPDX-License-Identifier: LGPL-2.1-or-later set -eux systemctl --failed --no-legend | tee /failed-services # Exit with non-zero EC if the /failed-services file is not empty (we have -e set) [[ ! -s /failed-services ]] mkosi-24.3/mkosi.extra/usr/lib/systemd/system-preset/000077500000000000000000000000001465176501400227345ustar00rootroot00000000000000mkosi-24.3/mkosi.extra/usr/lib/systemd/system-preset/00-mkosi.preset000066400000000000000000000016111465176501400255160ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # mkosi adds its own ssh units via the --ssh switch so disable the default ones. disable ssh.service disable sshd.service # Make sure dbus-broker is started by default on Debian/Ubuntu. enable dbus-broker.service # Make sure we have networking available. enable systemd-networkd.service enable systemd-networkd-wait-online.service enable systemd-resolved.service # We install dnf in some images but it's only going to be used rarely, # so let's not have dnf create its cache. disable dnf-makecache.* # The rpmdb is already in the right location, don't try to migrate it. disable rpmdb-migrate.service # We have journald to receive audit data so let's make sure we're not running auditd as well disable auditd.service # systemd-timesyncd is not enabled by default in the default systemd preset so enable it here instead. enable systemd-timesyncd.service mkosi-24.3/mkosi.extra/usr/lib/systemd/system-preset/99-mkosi.preset000066400000000000000000000002111465176501400255330ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # Make sure that services are disabled by default (primarily for Debian/Ubuntu). 
disable * mkosi-24.3/mkosi.extra/usr/lib/systemd/system/000077500000000000000000000000001465176501400214345ustar00rootroot00000000000000mkosi-24.3/mkosi.extra/usr/lib/systemd/system/mkosi-check-and-shutdown.service000066400000000000000000000005251465176501400276260ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Unit] Description=Check if any service failed and then shut down the machine After=multi-user.target network-online.target Requires=multi-user.target SuccessAction=exit FailureAction=exit SuccessActionExitStatus=123 [Service] Type=oneshot ExecStart=/usr/lib/systemd/mkosi-check-and-shutdown.sh mkosi-24.3/mkosi.extra/usr/lib/systemd/system/systemd-machine-id-commit.service.d/000077500000000000000000000000001465176501400302675ustar00rootroot00000000000000mkosi-24.3/mkosi.extra/usr/lib/systemd/system/systemd-machine-id-commit.service.d/timeout.conf000066400000000000000000000000311465176501400326160ustar00rootroot00000000000000[Service] TimeoutSec=90s mkosi-24.3/mkosi.md000077700000000000000000000000001465176501400210342mkosi/resources/mkosi.mdustar00rootroot00000000000000mkosi-24.3/mkosi.postinst000077500000000000000000000005441465176501400155320ustar00rootroot00000000000000#!/bin/bash # SPDX-License-Identifier: LGPL-2.1-or-later set -e if [[ "$DISTRIBUTION" =~ ubuntu|debian ]]; then SUDO_GROUP=sudo else SUDO_GROUP=wheel fi mkosi-chroot \ useradd \ --user-group \ --create-home \ --password "$(openssl passwd -1 mkosi)" \ --groups "$SUDO_GROUP",systemd-journal \ --shell /bin/bash \ mkosi mkosi-24.3/mkosi.prepare000077500000000000000000000002701465176501400153010ustar00rootroot00000000000000#!/bin/bash # SPDX-License-Identifier: LGPL-2.1-or-later set -e if [ "$1" = "build" ]; then exit 0 fi mkosi-chroot "$SRCDIR"/bin/mkosi dependencies | xargs -d '\n' mkosi-install mkosi-24.3/mkosi.prepare.chroot000077500000000000000000000002611465176501400165760ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later set -e if [ "$1" = "final" ] && command -v pacman-key; then pacman-key --init pacman-key --populate archlinux fi mkosi-24.3/mkosi/000077500000000000000000000000001465176501400137175ustar00rootroot00000000000000mkosi-24.3/mkosi/__init__.py000066400000000000000000005627061465176501400160500ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import dataclasses import datetime import hashlib import io import itertools import json import logging import os import re import resource import shlex import shutil import socket import stat import subprocess import sys import tempfile import textwrap import uuid from collections.abc import Iterator, Mapping, Sequence from contextlib import AbstractContextManager from pathlib import Path from typing import Optional, Union, cast from mkosi.archive import can_extract_tar, extract_tar, make_cpio, make_tar from mkosi.burn import run_burn from mkosi.completion import print_completion from mkosi.config import ( PACKAGE_GLOBS, Args, BiosBootloader, Bootloader, Cacheonly, Compression, Config, ConfigFeature, DocFormat, JsonEncoder, KeySource, ManifestFormat, Network, OutputFormat, SecureBootSignTool, ShimBootloader, Verb, Vmm, __version__, format_bytes, parse_config, summary, systemd_tool_version, want_selinux_relabel, yes_no, ) from mkosi.context import Context from mkosi.distributions import Distribution from mkosi.installer import clean_package_manager_metadata from mkosi.kmod import gen_required_kernel_modules, loaded_modules, process_kernel_modules from mkosi.log 
import ARG_DEBUG, complete_step, die, log_notice, log_step from mkosi.manifest import Manifest from mkosi.mounts import finalize_crypto_mounts, finalize_source_mounts, mount_overlay from mkosi.pager import page from mkosi.partition import Partition, finalize_root, finalize_roothash from mkosi.qemu import KernelType, copy_ephemeral, run_qemu, run_ssh, start_journal_remote from mkosi.run import ( find_binary, fork_and_wait, run, ) from mkosi.sandbox import Mount, chroot_cmd, finalize_passwd_mounts from mkosi.tree import copy_tree, move_tree, rmtree from mkosi.types import PathString from mkosi.user import CLONE_NEWNS, INVOKING_USER, become_root, unshare from mkosi.util import ( flatten, flock, flock_or_die, format_rlimit, hash_file, make_executable, one_zero, read_env_file, round_up, scopedenv, umask, ) from mkosi.versioncomp import GenericVersion from mkosi.vmspawn import run_vmspawn @contextlib.contextmanager def mount_base_trees(context: Context) -> Iterator[None]: if not context.config.base_trees or not context.config.overlay: yield return with complete_step("Mounting base trees…"), contextlib.ExitStack() as stack: bases = [] (context.workspace / "bases").mkdir(exist_ok=True) for path in context.config.base_trees: d = context.workspace / f"bases/{path.name}-{uuid.uuid4().hex}" path = path.resolve() if path.is_dir(): bases += [path] elif can_extract_tar(path): extract_tar(path, d, sandbox=context.sandbox) bases += [d] elif path.suffix == ".raw": run(["systemd-dissect", "--mount", "--mkdir", path, d]) stack.callback(lambda: run(["systemd-dissect", "--umount", "--rmdir", d])) bases += [d] else: die(f"Unsupported base tree source {path}") stack.enter_context(mount_overlay(bases, context.root, context.root)) yield def remove_files(context: Context) -> None: """Remove files based on user-specified patterns""" if not context.config.remove_files: return with complete_step("Removing files…"): for pattern in context.config.remove_files: rmtree(*context.root.glob(pattern.lstrip("/")), sandbox=context.sandbox) def install_distribution(context: Context) -> None: if context.config.base_trees: if not context.config.packages: return with complete_step(f"Installing extra packages for {str(context.config.distribution).capitalize()}"): context.config.distribution.install_packages(context, context.config.packages) else: if context.config.overlay or context.config.output_format in (OutputFormat.sysext, OutputFormat.confext): if context.config.packages: die("Cannot install packages in extension images without a base tree", hint="Configure a base tree with the BaseTrees= setting") return with complete_step(f"Installing {str(context.config.distribution).capitalize()}"): context.config.distribution.install(context) if not (context.root / "etc/machine-id").exists(): # Uninitialized means we want it to get initialized on first boot. with umask(~0o444): (context.root / "etc/machine-id").write_text("uninitialized\n") # Ensure /efi exists so that the ESP is mounted there, as recommended by # https://0pointer.net/blog/linux-boot-partitions.html. Use the most restrictive access mode we # can without tripping up mkfs tools since this directory is only meant to be overmounted and # should not be read from or written to. 
    with umask(~0o500):
        (context.root / "efi").mkdir(exist_ok=True)

    if context.config.packages:
        context.config.distribution.install_packages(context, context.config.packages)

    for f in ("var/lib/systemd/random-seed",
              "var/lib/systemd/credential.secret",
              "etc/machine-info",
              "var/lib/dbus/machine-id"):
        # Using missing_ok=True still causes an OSError if the mount is read-only even if the
        # file doesn't exist so do an explicit exists() check first.
        if (context.root / f).exists():
            (context.root / f).unlink()


def install_build_packages(context: Context) -> None:
    if not context.config.build_scripts or not context.config.build_packages:
        return

    with (
        complete_step(f"Installing build packages for {context.config.distribution.pretty_name()}"),
        mount_build_overlay(context),
    ):
        context.config.distribution.install_packages(context, context.config.build_packages)


def install_volatile_packages(context: Context) -> None:
    if not context.config.volatile_packages:
        return

    with complete_step(f"Installing volatile packages for {context.config.distribution.pretty_name()}"):
        context.config.distribution.install_packages(context, context.config.volatile_packages)


def remove_packages(context: Context) -> None:
    """Remove packages listed in config.remove_packages"""
    if not context.config.remove_packages:
        return

    with complete_step(f"Removing {len(context.config.remove_packages)} packages…"):
        try:
            context.config.distribution.remove_packages(context, context.config.remove_packages)
        except NotImplementedError:
            die(f"Removing packages is not supported for {context.config.distribution}")


def check_root_populated(context: Context) -> None:
    """Check that the root was populated by looking for an os-release file."""
    if context.config.output_format in (OutputFormat.sysext, OutputFormat.confext):
        return

    osrelease = context.root / "usr/lib/os-release"
    if not osrelease.exists():
        die(
            f"{osrelease} not found.",
            hint=(
                "The root must be populated by the distribution, or from base trees, "
                "skeleton trees, and prepare scripts."
) ) def configure_os_release(context: Context) -> None: """Write IMAGE_ID and IMAGE_VERSION to /usr/lib/os-release in the image.""" if not context.config.image_id and not context.config.image_version: return if context.config.overlay or context.config.output_format in (OutputFormat.sysext, OutputFormat.confext): return for candidate in ["usr/lib/os-release", "usr/lib/initrd-release"]: osrelease = context.root / candidate # at this point we know we will either change or add to the file newosrelease = osrelease.with_suffix(".new") if not osrelease.is_file() or osrelease.is_symlink(): continue image_id_written = image_version_written = False with osrelease.open("r") as old, newosrelease.open("w") as new: # fix existing values for line in old.readlines(): if context.config.image_id and line.startswith("IMAGE_ID="): new.write(f'IMAGE_ID="{context.config.image_id}"\n') image_id_written = True elif context.config.image_version and line.startswith("IMAGE_VERSION="): new.write(f'IMAGE_VERSION="{context.config.image_version}"\n') image_version_written = True else: new.write(line) # append if they were missing if context.config.image_id and not image_id_written: new.write(f'IMAGE_ID="{context.config.image_id}"\n') if context.config.image_version and not image_version_written: new.write(f'IMAGE_VERSION="{context.config.image_version}"\n') newosrelease.rename(osrelease) def configure_extension_release(context: Context) -> None: if context.config.output_format not in (OutputFormat.sysext, OutputFormat.confext): return prefix = "SYSEXT" if context.config.output_format == OutputFormat.sysext else "CONFEXT" d = "usr/lib" if context.config.output_format == OutputFormat.sysext else "etc" p = context.root / d / f"extension-release.d/extension-release.{context.config.output}" p.parent.mkdir(parents=True, exist_ok=True) osrelease = read_env_file(q) if (q := context.root / "usr/lib/os-release").exists() else {} extrelease = read_env_file(p) if p.exists() else {} new = p.with_suffix(".new") with new.open("w") as f: for k, v in extrelease.items(): f.write(f"{k}={v}\n") if "ID" not in extrelease: f.write(f"ID={osrelease.get('ID', '_any')}\n") if "VERSION_ID" not in extrelease and (version := osrelease.get("VERSION_ID")): f.write(f"VERSION_ID={version}\n") if f"{prefix}_ID" not in extrelease and context.config.image_id: f.write(f"{prefix}_ID={context.config.image_id}\n") if f"{prefix}_VERSION_ID" not in extrelease and context.config.image_version: f.write(f"{prefix}_VERSION_ID={context.config.image_version}\n") if f"{prefix}_SCOPE" not in extrelease: f.write(f"{prefix}_SCOPE=initrd system portable\n") if "ARCHITECTURE" not in extrelease: f.write(f"ARCHITECTURE={context.config.architecture}\n") new.rename(p) def configure_autologin_service(context: Context, service: str, extra: str) -> None: dropin = context.root / f"usr/lib/systemd/system/{service}.d/autologin.conf" with umask(~0o755): dropin.parent.mkdir(parents=True, exist_ok=True) with umask(~0o644): dropin.write_text( textwrap.dedent( f"""\ [Service] ExecStart= ExecStart=-agetty -o '-f -p -- \\\\u' --autologin root {extra} $TERM StandardInput=tty StandardOutput=tty """ ) ) def configure_autologin(context: Context) -> None: if not context.config.autologin: return with complete_step("Setting up autologin…"): configure_autologin_service(context, "console-getty.service", "--noclear --keep-baud console 115200,38400,9600") configure_autologin_service(context, "getty@tty1.service", "--noclear -") configure_autologin_service(context, "serial-getty@hvc0.service", 
"--keep-baud 115200,57600,38400,9600 -") @contextlib.contextmanager def mount_build_overlay(context: Context, volatile: bool = False) -> Iterator[Path]: d = context.workspace / "build-overlay" if not d.is_symlink(): with umask(~0o755): d.mkdir(exist_ok=True) with contextlib.ExitStack() as stack: lower = [context.root] if volatile: lower += [d] upper = None else: upper = d stack.enter_context(mount_overlay(lower, upper, context.root)) yield context.root @contextlib.contextmanager def finalize_scripts(config: Config, scripts: Mapping[str, Sequence[PathString]]) -> Iterator[Path]: with tempfile.TemporaryDirectory(prefix="mkosi-scripts-") as d: # Make sure than when mkosi-as-caller is used the scripts can still be accessed. os.chmod(d, 0o755) for name, script in scripts.items(): # Make sure we don't end up in a recursive loop when we name a script after the binary it execs # by removing the scripts directory from the PATH when we execute a script. with (Path(d) / name).open("w") as f: f.write("#!/bin/sh\n") if config.find_binary(name): f.write( textwrap.dedent( """\ DIR="$(cd "$(dirname "$0")" && pwd)" PATH="$(echo "$PATH" | tr ':' '\\n' | grep -v "$DIR" | tr '\\n' ':')" export PATH """ ) ) f.write(f'exec {shlex.join(str(s) for s in script)} "$@"\n') make_executable(Path(d) / name) os.chmod(Path(d) / name, 0o755) os.utime(Path(d) / name, (0, 0)) yield Path(d) GIT_ENV = { "GIT_CONFIG_COUNT": "1", "GIT_CONFIG_KEY_0": "safe.directory", "GIT_CONFIG_VALUE_0": "*", } def mkosi_as_caller() -> tuple[str, ...]: return ( "setpriv", f"--reuid={INVOKING_USER.uid}", f"--regid={INVOKING_USER.gid}", "--clear-groups", ) def finalize_host_scripts( context: Context, helpers: Mapping[str, Sequence[PathString]] = {}, ) -> AbstractContextManager[Path]: scripts: dict[str, Sequence[PathString]] = {} for binary in ("useradd", "groupadd"): if context.config.find_binary(binary): scripts[binary] = (binary, "--root", "/buildroot") if ukify := context.config.find_binary("ukify"): # A script will always run with the tools tree mounted so we pass binary=None to disable the conditional search # logic of python_binary() depending on whether the binary is in an extra search path or not. 
scripts["ukify"] = (python_binary(context.config, binary=None), ukify) return finalize_scripts(context.config, scripts | dict(helpers)) @contextlib.contextmanager def finalize_config_json(config: Config) -> Iterator[Path]: with tempfile.NamedTemporaryFile(mode="w") as f: f.write(config.to_json()) f.flush() yield Path(f.name) def run_configure_scripts(config: Config) -> Config: if not config.configure_scripts: return config for script in config.configure_scripts: if not os.access(script, os.X_OK): die(f"{script} is not executable") env = dict( DISTRIBUTION=str(config.distribution), RELEASE=config.release, ARCHITECTURE=str(config.architecture), QEMU_ARCHITECTURE=config.architecture.to_qemu(), DISTRIBUTION_ARCHITECTURE=config.distribution.architecture(config.architecture), SRCDIR="/work/src", MKOSI_UID=str(INVOKING_USER.uid), MKOSI_GID=str(INVOKING_USER.gid), ) if config.profile: env["PROFILE"] = config.profile with finalize_source_mounts(config, ephemeral=False) as sources: for script in config.configure_scripts: with complete_step(f"Running configure script {script}…"): result = run( ["/work/configure"], env=env | config.environment, sandbox=config.sandbox( binary=None, vartmp=True, mounts=[*sources, Mount(script, "/work/configure", ro=True)], options=["--dir", "/work/src", "--chdir", "/work/src"] ), input=config.to_json(indent=None), stdout=subprocess.PIPE, ) config = Config.from_json(result.stdout) return config def run_sync_scripts(context: Context) -> None: if not context.config.sync_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.architecture(context.config.architecture), SRCDIR="/work/src", MKOSI_UID=str(INVOKING_USER.uid), MKOSI_GID=str(INVOKING_USER.gid), MKOSI_CONFIG="/work/config.json", CACHED=one_zero(have_cache(context.config)), ) if context.config.profile: env["PROFILE"] = context.config.profile # We make sure to mount everything in to make ssh work since syncing might involve git which could invoke ssh. if agent := os.getenv("SSH_AUTH_SOCK"): env["SSH_AUTH_SOCK"] = agent with ( finalize_source_mounts(context.config, ephemeral=False) as sources, finalize_config_json(context.config) as json, ): for script in context.config.sync_scripts: mounts = [ *sources, *finalize_crypto_mounts(context.config), Mount(script, "/work/sync", ro=True), Mount(json, "/work/config.json", ro=True), ] if (p := INVOKING_USER.home()).exists() and p != Path("/"): # We use a writable mount here to keep git worktrees working which encode absolute paths to the parent # git repository and might need to modify the git config in the parent git repository when submodules # are in use as well. 
mounts += [Mount(p, p)] env["HOME"] = os.fspath(p) if (p := Path(f"/run/user/{INVOKING_USER.uid}")).exists(): mounts += [Mount(p, p, ro=True)] with complete_step(f"Running sync script {script}…"): run( ["/work/sync", "final"], env=env | context.config.environment, stdin=sys.stdin, sandbox=context.sandbox( binary=None, network=True, vartmp=True, mounts=mounts, options=["--dir", "/work/src", "--chdir", "/work/src"] ), ) def run_prepare_scripts(context: Context, build: bool) -> None: if not context.config.prepare_scripts: return if build and not context.config.build_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.architecture(context.config.architecture), BUILDROOT="/buildroot", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", ARTIFACTDIR="/work/artifacts", SCRIPT="/work/prepare", CHROOT_SCRIPT="/work/prepare", MKOSI_UID=str(INVOKING_USER.uid), MKOSI_GID=str(INVOKING_USER.gid), MKOSI_CONFIG="/work/config.json", WITH_DOCS=one_zero(context.config.with_docs), WITH_NETWORK=one_zero(context.config.with_network), WITH_TESTS=one_zero(context.config.with_tests), **GIT_ENV, ) if context.config.profile: env["PROFILE"] = context.config.profile if context.config.build_dir is not None: env |= dict(BUILDDIR="/work/build") with ( mount_build_overlay(context) if build else contextlib.nullcontext(), finalize_source_mounts(context.config, ephemeral=context.config.build_sources_ephemeral) as sources, ): if build: step_msg = "Running prepare script {} in build overlay…" arg = "build" else: step_msg = "Running prepare script {}…" arg = "final" for script in context.config.prepare_scripts: chroot = chroot_cmd(resolve=True, work=True) helpers = { "mkosi-chroot": chroot, "mkosi-as-caller": mkosi_as_caller(), **context.config.distribution.package_manager(context.config).scripts(context), } with ( finalize_host_scripts(context, helpers) as hd, finalize_config_json(context.config) as json, complete_step(step_msg.format(script)), ): run( ["/work/prepare", arg], env=env | context.config.environment, stdin=sys.stdin, sandbox=context.sandbox( binary=None, network=True, vartmp=True, mounts=[ *sources, Mount(script, "/work/prepare", ro=True), Mount(json, "/work/config.json", ro=True), Mount(context.root, "/buildroot"), Mount(context.artifacts, "/work/artifacts"), Mount(context.package_dir, "/work/packages"), *( [Mount(context.config.build_dir, "/work/build", ro=True)] if context.config.build_dir else [] ), *context.config.distribution.package_manager(context.config).mounts(context), ], options=["--dir", "/work/src", "--chdir", "/work/src"], scripts=hd, extra=chroot if script.suffix == ".chroot" else [], ) ) def run_build_scripts(context: Context) -> None: if not context.config.build_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.architecture(context.config.architecture), BUILDROOT="/buildroot", DESTDIR="/work/dest", CHROOT_DESTDIR="/work/dest", OUTPUTDIR="/work/out", CHROOT_OUTPUTDIR="/work/out", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", ARTIFACTDIR="/work/artifacts", SCRIPT="/work/build-script", CHROOT_SCRIPT="/work/build-script", MKOSI_UID=str(INVOKING_USER.uid), MKOSI_GID=str(INVOKING_USER.gid), MKOSI_CONFIG="/work/config.json", 
WITH_DOCS=one_zero(context.config.with_docs), WITH_NETWORK=one_zero(context.config.with_network), WITH_TESTS=one_zero(context.config.with_tests), **GIT_ENV, ) if context.config.profile: env["PROFILE"] = context.config.profile if context.config.build_dir is not None: env |= dict( BUILDDIR="/work/build", CHROOT_BUILDDIR="/work/build", ) with ( mount_build_overlay(context, volatile=True), finalize_source_mounts(context.config, ephemeral=context.config.build_sources_ephemeral) as sources, ): for script in context.config.build_scripts: chroot = chroot_cmd(resolve=context.config.with_network, work=True) helpers = { "mkosi-chroot": chroot, "mkosi-as-caller": mkosi_as_caller(), **context.config.distribution.package_manager(context.config).scripts(context), } cmdline = context.args.cmdline if context.args.verb == Verb.build else [] with ( finalize_host_scripts(context, helpers) as hd, finalize_config_json(context.config) as json, complete_step(f"Running build script {script}…"), ): run( ["/work/build-script", *cmdline], env=env | context.config.environment, stdin=sys.stdin, sandbox=context.sandbox( binary=None, network=context.config.with_network, vartmp=True, mounts=[ *sources, Mount(script, "/work/build-script", ro=True), Mount(json, "/work/config.json", ro=True), Mount(context.root, "/buildroot"), Mount(context.install_dir, "/work/dest"), Mount(context.staging, "/work/out"), Mount(context.artifacts, "/work/artifacts"), Mount(context.package_dir, "/work/packages"), *( [Mount(context.config.build_dir, "/work/build")] if context.config.build_dir else [] ), *context.config.distribution.package_manager(context.config).mounts(context), ], options=["--dir", "/work/src", "--chdir", "/work/src"], scripts=hd, extra=chroot if script.suffix == ".chroot" else [], ), ) def run_postinst_scripts(context: Context) -> None: if not context.config.postinst_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.architecture(context.config.architecture), BUILDROOT="/buildroot", OUTPUTDIR="/work/out", CHROOT_OUTPUTDIR="/work/out", SCRIPT="/work/postinst", CHROOT_SCRIPT="/work/postinst", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", ARTIFACTDIR="/work/artifacts", MKOSI_UID=str(INVOKING_USER.uid), MKOSI_GID=str(INVOKING_USER.gid), MKOSI_CONFIG="/work/config.json", WITH_NETWORK=one_zero(context.config.with_network), **GIT_ENV, ) if context.config.profile: env["PROFILE"] = context.config.profile if context.config.build_dir is not None: env |= dict(BUILDDIR="/work/build") with ( finalize_source_mounts(context.config, ephemeral=context.config.build_sources_ephemeral) as sources, ): for script in context.config.postinst_scripts: chroot = chroot_cmd(resolve=context.config.with_network, work=True) helpers = { "mkosi-chroot": chroot, "mkosi-as-caller": mkosi_as_caller(), **context.config.distribution.package_manager(context.config).scripts(context), } with ( finalize_host_scripts(context, helpers) as hd, finalize_config_json(context.config) as json, complete_step(f"Running postinstall script {script}…"), ): run( ["/work/postinst", "final"], env=env | context.config.environment, stdin=sys.stdin, sandbox=context.sandbox( binary=None, network=context.config.with_network, vartmp=True, mounts=[ *sources, Mount(script, "/work/postinst", ro=True), Mount(json, "/work/config.json", ro=True), Mount(context.root, "/buildroot"), Mount(context.staging, 
"/work/out"), Mount(context.artifacts, "/work/artifacts"), Mount(context.package_dir, "/work/packages"), *( [Mount(context.config.build_dir, "/work/build", ro=True)] if context.config.build_dir else [] ), *context.config.distribution.package_manager(context.config).mounts(context), ], options=["--dir", "/work/src", "--chdir", "/work/src"], scripts=hd, extra=chroot if script.suffix == ".chroot" else [], ), ) def run_finalize_scripts(context: Context) -> None: if not context.config.finalize_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.architecture(context.config.architecture), BUILDROOT="/buildroot", OUTPUTDIR="/work/out", CHROOT_OUTPUTDIR="/work/out", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", ARTIFACTDIR="/work/artifacts", SCRIPT="/work/finalize", CHROOT_SCRIPT="/work/finalize", MKOSI_UID=str(INVOKING_USER.uid), MKOSI_GID=str(INVOKING_USER.gid), MKOSI_CONFIG="/work/config.json", WITH_NETWORK=one_zero(context.config.with_network), **GIT_ENV, ) if context.config.profile: env["PROFILE"] = context.config.profile if context.config.build_dir is not None: env |= dict(BUILDDIR="/work/build") with finalize_source_mounts(context.config, ephemeral=context.config.build_sources_ephemeral) as sources: for script in context.config.finalize_scripts: chroot = chroot_cmd(resolve=context.config.with_network, work=True) helpers = { "mkosi-chroot": chroot, "mkosi-as-caller": mkosi_as_caller(), **context.config.distribution.package_manager(context.config).scripts(context), } with ( finalize_host_scripts(context, helpers) as hd, finalize_config_json(context.config) as json, complete_step(f"Running finalize script {script}…"), ): run( ["/work/finalize"], env=env | context.config.environment, stdin=sys.stdin, sandbox=context.sandbox( binary=None, network=context.config.with_network, vartmp=True, mounts=[ *sources, Mount(script, "/work/finalize", ro=True), Mount(json, "/work/config.json", ro=True), Mount(context.root, "/buildroot"), Mount(context.staging, "/work/out"), Mount(context.artifacts, "/work/artifacts"), Mount(context.package_dir, "/work/packages"), *( [Mount(context.config.build_dir, "/work/build", ro=True)] if context.config.build_dir else [] ), *context.config.distribution.package_manager(context.config).mounts(context), ], options=["--dir", "/work/src", "--chdir", "/work/src"], scripts=hd, extra=chroot if script.suffix == ".chroot" else [], ), ) def run_postoutput_scripts(context: Context) -> None: if not context.config.postoutput_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.architecture(context.config.architecture), SRCDIR="/work/src", OUTPUTDIR="/work/out", MKOSI_UID=str(INVOKING_USER.uid), MKOSI_GID=str(INVOKING_USER.gid), MKOSI_CONFIG="/work/config.json", ) if context.config.profile: env["PROFILE"] = context.config.profile with ( finalize_source_mounts(context.config, ephemeral=context.config.build_sources_ephemeral) as sources, finalize_config_json(context.config) as json, ): for script in context.config.postoutput_scripts: with complete_step(f"Running post-output script {script}…"): run( ["/work/postoutput"], env=env | context.config.environment, sandbox=context.sandbox( binary=None, vartmp=True, mounts=[ *sources, Mount(script, 
"/work/postoutput", ro=True), Mount(json, "/work/config.json", ro=True), Mount(context.staging, "/work/out"), ], options=["--dir", "/work/src", "--chdir", "/work/src", "--dir", "/work/out"] ), stdin=sys.stdin, ) def certificate_common_name(context: Context, certificate: Path) -> str: output = run( [ "openssl", "x509", "-noout", "-subject", "-nameopt", "multiline", "-in", certificate, ], stdout=subprocess.PIPE, sandbox=context.sandbox(binary="openssl", mounts=[Mount(certificate, certificate, ro=True)]), ).stdout for line in output.splitlines(): if not line.strip().startswith("commonName"): continue _, sep, value = line.partition("=") if not sep: die("Missing '=' delimiter in openssl output") return value.strip() die(f"Certificate {certificate} is missing Common Name") def pesign_prepare(context: Context) -> None: assert context.config.secure_boot_key assert context.config.secure_boot_certificate if (context.workspace / "pesign").exists(): return (context.workspace / "pesign").mkdir() # pesign takes a certificate directory and a certificate common name as input arguments, so we have # to transform our input key and cert into that format. Adapted from # https://www.mankier.com/1/pesign#Examples-Signing_with_the_certificate_and_private_key_in_individual_files with open(context.workspace / "secure-boot.p12", "wb") as f: run( [ "openssl", "pkcs12", "-export", # Arcane incantation to create a pkcs12 certificate without a password. "-keypbe", "NONE", "-certpbe", "NONE", "-nomaciter", "-passout", "pass:", "-inkey", context.config.secure_boot_key, "-in", context.config.secure_boot_certificate, ], stdout=f, sandbox=context.sandbox( binary="openssl", mounts=[ Mount(context.config.secure_boot_key, context.config.secure_boot_key, ro=True), Mount(context.config.secure_boot_certificate, context.config.secure_boot_certificate, ro=True), ], ), ) (context.workspace / "pesign").mkdir(exist_ok=True) run( [ "pk12util", "-K", "", "-W", "", "-i", context.workspace / "secure-boot.p12", "-d", context.workspace / "pesign", ], sandbox=context.sandbox( binary="pk12util", mounts=[ Mount(context.workspace / "secure-boot.p12", context.workspace / "secure-boot.p12", ro=True), Mount(context.workspace / "pesign", context.workspace / "pesign"), ], ), ) def efi_boot_binary(context: Context) -> Path: arch = context.config.architecture.to_efi() assert arch return Path(f"efi/EFI/BOOT/BOOT{arch.upper()}.EFI") def shim_second_stage_binary(context: Context) -> Path: arch = context.config.architecture.to_efi() assert arch if context.config.distribution == Distribution.opensuse: return Path("efi/EFI/BOOT/grub.EFI") else: return Path(f"efi/EFI/BOOT/grub{arch}.EFI") def sign_efi_binary(context: Context, input: Path, output: Path) -> Path: assert context.config.secure_boot_key assert context.config.secure_boot_certificate if ( context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign or context.config.secure_boot_sign_tool == SecureBootSignTool.auto and context.config.find_binary("sbsign") is not None ): with tempfile.NamedTemporaryFile(dir=output.parent, prefix=output.name) as f: os.chmod(f.name, stat.S_IMODE(input.stat().st_mode)) cmd: list[PathString] = [ "sbsign", "--key", context.config.secure_boot_key, "--cert", context.config.secure_boot_certificate, "--output", "/dev/stdout", ] mounts = [ Mount(context.config.secure_boot_certificate, context.config.secure_boot_certificate, ro=True), Mount(input, input, ro=True), ] if context.config.secure_boot_key_source.type == KeySource.Type.engine: cmd += ["--engine", 


def pesign_prepare(context: Context) -> None:
    assert context.config.secure_boot_key
    assert context.config.secure_boot_certificate

    if (context.workspace / "pesign").exists():
        return

    (context.workspace / "pesign").mkdir()

    # pesign takes a certificate directory and a certificate common name as input arguments, so we have
    # to transform our input key and cert into that format. Adapted from
    # https://www.mankier.com/1/pesign#Examples-Signing_with_the_certificate_and_private_key_in_individual_files
    with open(context.workspace / "secure-boot.p12", "wb") as f:
        run(
            [
                "openssl",
                "pkcs12",
                "-export",
                # Arcane incantation to create a pkcs12 certificate without a password.
                "-keypbe",
                "NONE",
                "-certpbe",
                "NONE",
                "-nomaciter",
                "-passout",
                "pass:",
                "-inkey",
                context.config.secure_boot_key,
                "-in",
                context.config.secure_boot_certificate,
            ],
            stdout=f,
            sandbox=context.sandbox(
                binary="openssl",
                mounts=[
                    Mount(context.config.secure_boot_key, context.config.secure_boot_key, ro=True),
                    Mount(context.config.secure_boot_certificate, context.config.secure_boot_certificate, ro=True),
                ],
            ),
        )

    (context.workspace / "pesign").mkdir(exist_ok=True)

    run(
        [
            "pk12util",
            "-K",
            "",
            "-W",
            "",
            "-i",
            context.workspace / "secure-boot.p12",
            "-d",
            context.workspace / "pesign",
        ],
        sandbox=context.sandbox(
            binary="pk12util",
            mounts=[
                Mount(context.workspace / "secure-boot.p12", context.workspace / "secure-boot.p12", ro=True),
                Mount(context.workspace / "pesign", context.workspace / "pesign"),
            ],
        ),
    )


def efi_boot_binary(context: Context) -> Path:
    arch = context.config.architecture.to_efi()
    assert arch
    return Path(f"efi/EFI/BOOT/BOOT{arch.upper()}.EFI")


def shim_second_stage_binary(context: Context) -> Path:
    arch = context.config.architecture.to_efi()
    assert arch
    if context.config.distribution == Distribution.opensuse:
        return Path("efi/EFI/BOOT/grub.EFI")
    else:
        return Path(f"efi/EFI/BOOT/grub{arch}.EFI")


def sign_efi_binary(context: Context, input: Path, output: Path) -> Path:
    assert context.config.secure_boot_key
    assert context.config.secure_boot_certificate

    if (
        context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign
        or context.config.secure_boot_sign_tool == SecureBootSignTool.auto
        and context.config.find_binary("sbsign") is not None
    ):
        with tempfile.NamedTemporaryFile(dir=output.parent, prefix=output.name) as f:
            os.chmod(f.name, stat.S_IMODE(input.stat().st_mode))
            cmd: list[PathString] = [
                "sbsign",
                "--key",
                context.config.secure_boot_key,
                "--cert",
                context.config.secure_boot_certificate,
                "--output",
                "/dev/stdout",
            ]
            mounts = [
                Mount(context.config.secure_boot_certificate, context.config.secure_boot_certificate, ro=True),
                Mount(input, input, ro=True),
            ]
            if context.config.secure_boot_key_source.type == KeySource.Type.engine:
                cmd += ["--engine", context.config.secure_boot_key_source.source]
            if context.config.secure_boot_key.exists():
                mounts += [Mount(context.config.secure_boot_key, context.config.secure_boot_key, ro=True)]
            cmd += [input]
            run(
                cmd,
                stdout=f,
                sandbox=context.sandbox(
                    binary="sbsign",
                    mounts=mounts,
                    devices=context.config.secure_boot_key_source.type != KeySource.Type.file,
                )
            )
            output.unlink(missing_ok=True)
            os.link(f.name, output)
    elif (
        context.config.secure_boot_sign_tool == SecureBootSignTool.pesign
        or context.config.secure_boot_sign_tool == SecureBootSignTool.auto
        and context.config.find_binary("pesign") is not None
    ):
        pesign_prepare(context)
        with tempfile.NamedTemporaryFile(dir=output.parent, prefix=output.name) as f:
            os.chmod(f.name, stat.S_IMODE(input.stat().st_mode))
            run(
                [
                    "pesign",
                    "--certdir",
                    context.workspace / "pesign",
                    "--certificate",
                    certificate_common_name(context, context.config.secure_boot_certificate),
                    "--sign",
                    "--force",
                    "--in",
                    input,
                    "--out",
                    "/dev/stdout",
                ],
                stdout=f,
                sandbox=context.sandbox(
                    binary="pesign",
                    mounts=[
                        Mount(context.workspace / "pesign", context.workspace / "pesign", ro=True),
                        Mount(input, input, ro=True),
                    ]
                ),
            )
            output.unlink(missing_ok=True)
            os.link(f.name, output)
    else:
        die("One of sbsign or pesign is required to use SecureBoot=")

    return output
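
# Usage note (inferred from the call sites in this file): because the signed binary is written
# to a temporary file next to the destination and only linked over it at the end, input and
# output may refer to the same path to sign a binary in place, e.g.:
#
#   sign_efi_binary(context, output, output)  # as install_grub() does below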


def install_systemd_boot(context: Context) -> None:
    if not want_efi(context.config):
        return

    if context.config.bootloader != Bootloader.systemd_boot:
        return

    if not any(gen_kernel_images(context)) and context.config.bootable == ConfigFeature.auto:
        return

    if not context.config.find_binary("bootctl"):
        if context.config.bootable == ConfigFeature.enabled:
            die("An EFI bootable image with systemd-boot was requested but bootctl was not found")
        return

    directory = context.root / "usr/lib/systemd/boot/efi"
    signed = context.config.shim_bootloader == ShimBootloader.signed
    # Note: glob() returns a generator which is always truthy, so wrap it in any() to actually
    # check whether a matching binary exists.
    if not any(directory.glob("*.efi.signed" if signed else "*.efi")):
        if context.config.bootable == ConfigFeature.enabled:
            die(f"An EFI bootable image with systemd-boot was requested but a {'signed ' if signed else ''}"
                f"systemd-boot binary was not found at {directory.relative_to(context.root)}")
        return

    if context.config.secure_boot and not signed:
        with complete_step("Signing systemd-boot binaries…"):
            for input in itertools.chain(directory.glob('*.efi'), directory.glob('*.EFI')):
                output = directory / f"{input}.signed"
                sign_efi_binary(context, input, output)

    with complete_step("Installing systemd-boot…"):
        run(
            ["bootctl", "install", "--root=/buildroot", "--all-architectures", "--no-variables"],
            env={"SYSTEMD_ESP_PATH": "/efi", "SYSTEMD_XBOOTLDR_PATH": "/boot"},
            sandbox=context.sandbox(binary="bootctl", mounts=[Mount(context.root, "/buildroot")]),
        )

        if context.config.shim_bootloader != ShimBootloader.none:
            shutil.copy2(
                context.root / f"efi/EFI/systemd/systemd-boot{context.config.architecture.to_efi()}.efi",
                context.root / shim_second_stage_binary(context),
            )

    if context.config.secure_boot and context.config.secure_boot_auto_enroll:
        assert context.config.secure_boot_key
        assert context.config.secure_boot_certificate

        with complete_step("Setting up secure boot auto-enrollment…"):
            keys = context.root / "efi/loader/keys/auto"
            with umask(~0o700):
                keys.mkdir(parents=True, exist_ok=True)

            # sbsiglist expects a DER certificate.
            with umask(~0o600), open(context.workspace / "mkosi.der", "wb") as f:
                run(
                    [
                        "openssl",
                        "x509",
                        "-outform",
                        "DER",
                        "-in",
                        context.config.secure_boot_certificate,
                    ],
                    stdout=f,
                    sandbox=context.sandbox(
                        binary="openssl",
                        mounts=[
                            Mount(
                                context.config.secure_boot_certificate,
                                context.config.secure_boot_certificate,
                                ro=True
                            ),
                        ],
                    ),
                )

            with umask(~0o600), open(context.workspace / "mkosi.esl", "wb") as f:
                run(
                    [
                        "sbsiglist",
                        "--owner",
                        str(uuid.uuid4()),
                        "--type",
                        "x509",
                        "--output",
                        "/dev/stdout",
                        context.workspace / "mkosi.der",
                    ],
                    stdout=f,
                    sandbox=context.sandbox(
                        binary="sbsiglist",
                        mounts=[Mount(context.workspace / "mkosi.der", context.workspace / "mkosi.der", ro=True)]
                    ),
                )

            # We reuse the key for all secure boot databases to keep things simple.
            for db in ["PK", "KEK", "db"]:
                with umask(~0o600), open(keys / f"{db}.auth", "wb") as f:
                    cmd: list[PathString] = [
                        "sbvarsign",
                        "--attr",
                        "NON_VOLATILE,BOOTSERVICE_ACCESS,RUNTIME_ACCESS,TIME_BASED_AUTHENTICATED_WRITE_ACCESS",
                        "--key",
                        context.config.secure_boot_key,
                        "--cert",
                        context.config.secure_boot_certificate,
                        "--output",
                        "/dev/stdout",
                    ]
                    mounts = [
                        Mount(
                            context.config.secure_boot_certificate,
                            context.config.secure_boot_certificate,
                            ro=True
                        ),
                        Mount(context.workspace / "mkosi.esl", context.workspace / "mkosi.esl", ro=True),
                    ]
                    if context.config.secure_boot_key_source.type == KeySource.Type.engine:
                        cmd += ["--engine", context.config.secure_boot_key_source.source]
                    if context.config.secure_boot_key.exists():
                        mounts += [Mount(context.config.secure_boot_key, context.config.secure_boot_key, ro=True)]
                    cmd += [db, context.workspace / "mkosi.esl"]
                    run(
                        cmd,
                        stdout=f,
                        sandbox=context.sandbox(
                            binary="sbvarsign",
                            mounts=mounts,
                            devices=context.config.secure_boot_key_source.type != KeySource.Type.file,
                        ),
                    )
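
# Illustrative resulting ESP layout (assuming an x86-64 image with SecureBoot= and
# SecureBootAutoEnroll= enabled; exact names depend on the architecture):
#
#   efi/EFI/systemd/systemd-bootx64.efi   installed by bootctl
#   efi/EFI/BOOT/BOOTX64.EFI              default boot path, also installed by bootctl
#   efi/loader/keys/auto/PK.auth          auto-enrollment databases signed above
#   efi/loader/keys/auto/KEK.auth
#   efi/loader/keys/auto/db.auth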


def find_and_install_shim_binary(
    context: Context,
    name: str,
    signed: Sequence[str],
    unsigned: Sequence[str],
    output: Path,
) -> None:
    if context.config.shim_bootloader == ShimBootloader.signed:
        for pattern in signed:
            for p in context.root.glob(pattern):
                if p.is_symlink() and p.readlink().is_absolute():
                    logging.warning(f"Ignoring signed {name} EFI binary which is an absolute path to {p.readlink()}")
                    continue

                rel = p.relative_to(context.root)
                if (context.root / output).is_dir():
                    output /= rel.name

                log_step(f"Installing signed {name} EFI binary from /{rel} to /{output}")
                shutil.copy2(p, context.root / output)
                return

        if context.config.bootable == ConfigFeature.enabled:
            die(f"Couldn't find signed {name} EFI binary installed in the image")
    else:
        for pattern in unsigned:
            for p in context.root.glob(pattern):
                if p.is_symlink() and p.readlink().is_absolute():
                    logging.warning(f"Ignoring unsigned {name} EFI binary which is an absolute path to {p.readlink()}")
                    continue

                rel = p.relative_to(context.root)
                if (context.root / output).is_dir():
                    output /= rel.name

                if context.config.secure_boot:
                    log_step(f"Signing and installing unsigned {name} EFI binary from /{rel} to /{output}")
                    sign_efi_binary(context, p, context.root / output)
                else:
                    log_step(f"Installing unsigned {name} EFI binary /{rel} to /{output}")
                    shutil.copy2(p, context.root / output)

                return

        if context.config.bootable == ConfigFeature.enabled:
            die(f"Couldn't find unsigned {name} EFI binary installed in the image")


def install_shim(context: Context) -> None:
    if not want_efi(context.config):
        return

    if context.config.shim_bootloader == ShimBootloader.none:
        return

    if not any(gen_kernel_images(context)) and context.config.bootable == ConfigFeature.auto:
        return

    dst = efi_boot_binary(context)
    with umask(~0o700):
        (context.root / dst).parent.mkdir(parents=True, exist_ok=True)

    arch = context.config.architecture.to_efi()

    signed = [
        f"usr/lib/shim/shim{arch}.efi.signed.latest",  # Ubuntu
        f"usr/lib/shim/shim{arch}.efi.signed",  # Debian
        f"boot/efi/EFI/*/shim{arch}.efi",  # Fedora/CentOS
        "usr/share/efi/*/shim.efi",  # OpenSUSE
    ]

    unsigned = [
        f"usr/lib/shim/shim{arch}.efi",  # Debian/Ubuntu
        f"usr/share/shim/*/*/shim{arch}.efi",  # Fedora/CentOS
        f"usr/share/shim/shim{arch}.efi",  # Arch
    ]

    find_and_install_shim_binary(context, "shim", signed, unsigned, dst)

    signed = [
        f"usr/lib/shim/mm{arch}.efi.signed",  # Debian
        f"usr/lib/shim/mm{arch}.efi",  # Ubuntu
        f"boot/efi/EFI/*/mm{arch}.efi",  # Fedora/CentOS
        "usr/share/efi/*/MokManager.efi",  # OpenSUSE
    ]

    unsigned = [
        f"usr/lib/shim/mm{arch}.efi",  # Debian/Ubuntu
        f"usr/share/shim/*/*/mm{arch}.efi",  # Fedora/CentOS
        f"usr/share/shim/mm{arch}.efi",  # Arch
    ]

    find_and_install_shim_binary(context, "mok", signed, unsigned, dst.parent)


def find_grub_directory(context: Context, *, target: str) -> Optional[Path]:
    for d in ("usr/lib/grub", "usr/share/grub2"):
        if (p := context.root / d / target).exists() and any(p.iterdir()):
            return p

    return None


def find_grub_binary(config: Config, binary: str) -> Optional[Path]:
    assert "grub" not in binary

    # Debian has a bespoke setup where if only grub-pc-bin is installed, grub-bios-setup is installed in
    # /usr/lib/i386-pc instead of in /usr/bin. Let's take that into account and look for binaries in
    # /usr/lib/grub/i386-pc as well.
    return config.find_binary(f"grub-{binary}", f"grub2-{binary}", f"/usr/lib/grub/i386-pc/grub-{binary}")


def want_grub_efi(context: Context) -> bool:
    if not want_efi(context.config):
        return False

    if context.config.bootloader != Bootloader.grub:
        return False

    if context.config.shim_bootloader != ShimBootloader.signed:
        have = find_grub_directory(context, target="x86_64-efi") is not None
        if not have and context.config.bootable == ConfigFeature.enabled:
            die("An EFI bootable image with grub was requested but grub for EFI is not installed")

    return True


def want_grub_bios(context: Context, partitions: Sequence[Partition] = ()) -> bool:
    if context.config.bootable == ConfigFeature.disabled:
        return False

    if context.config.output_format != OutputFormat.disk:
        return False

    if context.config.bios_bootloader != BiosBootloader.grub:
        return False

    if context.config.overlay:
        return False

    have = find_grub_directory(context, target="i386-pc") is not None
    if not have and context.config.bootable == ConfigFeature.enabled:
        die("A BIOS bootable image with grub was requested but grub for BIOS is not installed")

    bios = any(p.type == Partition.GRUB_BOOT_PARTITION_UUID for p in partitions)
    if partitions and not bios and context.config.bootable == ConfigFeature.enabled:
        die("A BIOS bootable image with grub was requested but no BIOS Boot Partition was configured")

    esp = any(p.type == "esp" for p in partitions)
    if partitions and not esp and context.config.bootable == ConfigFeature.enabled:
        die("A BIOS bootable image with grub was requested but no ESP partition was configured")

    root = any(p.type.startswith("root") or p.type.startswith("usr") for p in partitions)
    if partitions and not root and context.config.bootable == ConfigFeature.enabled:
        die("A BIOS bootable image with grub was requested but no root or usr partition was configured")

    installed = True

    for binary in ("mkimage", "bios-setup"):
        if find_grub_binary(context.config, binary):
            continue

        if context.config.bootable == ConfigFeature.enabled:
            die(f"A BIOS bootable image with grub was requested but {binary} was not found")

        installed = False

    return (have and bios and esp and root and installed) if partitions else have


def prepare_grub_config(context: Context) -> Optional[Path]:
    config = context.root / "efi" / context.config.distribution.grub_prefix() / "grub.cfg"
    with umask(~0o700):
        config.parent.mkdir(exist_ok=True)

    # For some unknown reason, if we don't set the timeout to zero, grub never leaves its menu, so we default
    # to a zero timeout, but only if the config file hasn't been provided by the user.
    if not config.exists():
        with umask(~0o600), config.open("w") as f:
            f.write("set timeout=0\n")

    if want_grub_efi(context):
        # Signed EFI grub shipped by distributions reads its configuration from /EFI/<distribution>/grub.cfg
        # (except in OpenSUSE) in the ESP so let's put a shim there to redirect to the actual configuration file.
        if context.config.distribution == Distribution.opensuse:
            earlyconfig = context.root / "efi/EFI/BOOT/grub.cfg"
        else:
            earlyconfig = context.root / "efi/EFI" / context.config.distribution.name / "grub.cfg"

        with umask(~0o700):
            earlyconfig.parent.mkdir(parents=True, exist_ok=True)

        # Read the actual config file from the root of the ESP.
        earlyconfig.write_text(f"configfile /{context.config.distribution.grub_prefix()}/grub.cfg\n")

    return config
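
# Illustrative example (assuming a Fedora image, where grub_prefix() would be "grub2"): the
# redirect written above to efi/EFI/fedora/grub.cfg consists of a single line pointing at the
# real configuration in the root of the ESP:
#
#   configfile /grub2/grub.cfg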


def grub_mkimage(
    context: Context,
    *,
    target: str,
    modules: Sequence[str] = (),
    output: Optional[Path] = None,
    sbat: Optional[Path] = None,
) -> None:
    mkimage = find_grub_binary(context.config, "mkimage")
    assert mkimage

    directory = find_grub_directory(context, target=target)
    assert directory

    with (
        complete_step(f"Generating grub image for {target}"),
        tempfile.NamedTemporaryFile("w", prefix="grub-early-config") as earlyconfig,
    ):
        earlyconfig.write(
            textwrap.dedent(
                f"""\
                search --no-floppy --set=root --file /{context.config.distribution.grub_prefix()}/grub.cfg
                set prefix=($root)/{context.config.distribution.grub_prefix()}
                """
            )
        )

        earlyconfig.flush()

        run(
            [
                mkimage,
                "--directory", "/grub",
                "--config", earlyconfig.name,
                "--prefix", f"/{context.config.distribution.grub_prefix()}",
                "--output", output or ("/grub/core.img"),
                "--format", target,
                *(["--sbat", str(sbat)] if sbat else []),
                *(["--disable-shim-lock"] if context.config.shim_bootloader == ShimBootloader.none else []),
                "cat",
                "cmp",
                "div",
                "echo",
                "fat",
                "hello",
                "help",
                "keylayouts",
                "linux",
                "loadenv",
                "ls",
                "normal",
                "part_gpt",
                "read",
                "reboot",
                "search_fs_file",
                "search",
                "sleep",
                "test",
                "tr",
                "true",
                *modules,
            ],
            sandbox=context.sandbox(
                binary=mkimage,
                mounts=[
                    Mount(directory, "/grub"),
                    Mount(earlyconfig.name, earlyconfig.name, ro=True),
                    *([Mount(output.parent, output.parent)] if output else []),
                    *([Mount(str(sbat), str(sbat), ro=True)] if sbat else []),
                ],
            ),
        )


def find_signed_grub_image(context: Context) -> Optional[Path]:
    arch = context.config.architecture.to_efi()

    patterns = [
        f"usr/lib/grub/*-signed/grub{arch}.efi.signed",  # Debian/Ubuntu
        f"boot/efi/EFI/*/grub{arch}.efi",  # Fedora/CentOS
        "usr/share/efi/*/grub.efi",  # OpenSUSE
    ]

    for p in flatten(context.root.glob(pattern) for pattern in patterns):
        if p.is_symlink() and p.readlink().is_absolute():
            logging.warning(f"Ignoring signed grub EFI binary which is an absolute path to {p.readlink()}")
            continue

        return p

    return None


def install_grub(context: Context) -> None:
    if not want_grub_bios(context) and not want_grub_efi(context):
        return

    if want_grub_bios(context):
        grub_mkimage(context, target="i386-pc", modules=("biosdisk",))

    if want_grub_efi(context):
        if context.config.shim_bootloader != ShimBootloader.none:
            output = context.root / shim_second_stage_binary(context)
        else:
            output = context.root / efi_boot_binary(context)

        with umask(~0o700):
            output.parent.mkdir(parents=True, exist_ok=True)

        if context.config.shim_bootloader == ShimBootloader.signed:
            if not (signed := find_signed_grub_image(context)):
                if context.config.bootable == ConfigFeature.enabled:
                    die("Couldn't find a signed grub EFI binary installed in the image")

                return

            rel = output.relative_to(context.root)
            log_step(f"Installing signed grub EFI binary from /{signed.relative_to(context.root)} to /{rel}")
            shutil.copy2(signed, output)
        else:
            if context.config.secure_boot and context.config.shim_bootloader != ShimBootloader.none:
                if not (signed := find_signed_grub_image(context)):
                    die("Couldn't find a signed grub EFI binary installed in the image to extract SBAT from")

                sbat = extract_pe_section(context, signed, ".sbat", context.workspace / "sbat")
            else:
                sbat = None

            grub_mkimage(context, target="x86_64-efi", output=output, modules=("chain",), sbat=sbat)
            if context.config.secure_boot:
                sign_efi_binary(context, output, output)

    dst = context.root / "efi" / context.config.distribution.grub_prefix() / "fonts"
    with umask(~0o700):
        dst.mkdir(parents=True, exist_ok=True)

    for d in ("grub", "grub2"):
        unicode = context.root / "usr/share" / d / "unicode.pf2"
        if unicode.exists():
            shutil.copy2(unicode, dst)


def grub_bios_setup(context: Context, partitions: Sequence[Partition]) -> None:
    if not want_grub_bios(context, partitions):
        return

    setup = find_grub_binary(context.config, "bios-setup")
    assert setup

    directory = find_grub_directory(context, target="i386-pc")
    assert directory

    with (
        complete_step("Installing grub boot loader for BIOS…"),
        tempfile.NamedTemporaryFile(mode="w") as mountinfo,
    ):
        # grub-bios-setup insists on being able to open the root device that --directory is located on, which
        # needs root privileges. However, it only uses the root device when it is unable to embed itself in the
        # bios boot partition. To make installation work unprivileged, we trick grub to think that the root
        # device is our image by mounting over its /proc/self/mountinfo file (where it gets its information from)
        # with our own file correlating the root directory to our image file.
        mountinfo.write(f"1 0 1:1 / / - fat {context.staging / context.config.output_with_format}\n")
        mountinfo.flush()

        # We don't setup the mountinfo bind mount with bwrap because we need to know the child process pid to
        # be able to do the mount and we don't know the pid beforehand.
        run(
            [
                setup,
                "--directory", "/grub",
                context.staging / context.config.output_with_format,
            ],
            sandbox=context.sandbox(
                binary=setup,
                mounts=[
                    Mount(directory, "/grub"),
                    Mount(context.staging, context.staging),
                    Mount(mountinfo.name, mountinfo.name),
                ],
                extra=["sh", "-c", f"mount --bind {mountinfo.name} /proc/$$/mountinfo && exec $0 \"$@\""],
            ),
        )


def install_tree(
    config: Config,
    src: Path,
    dst: Path,
    *,
    target: Optional[Path] = None,
    preserve: bool = True,
) -> None:
    src = src.resolve()

    t = dst
    if target:
        t = dst / target.relative_to("/")

    with umask(~0o755):
        t.parent.mkdir(parents=True, exist_ok=True)

    def copy() -> None:
        copy_tree(
            src, t,
            preserve=preserve,
            use_subvolumes=config.use_subvolumes,
            sandbox=config.sandbox,
        )

    if src.is_dir() or (src.is_file() and target):
        copy()
    elif can_extract_tar(src):
        extract_tar(src, t, sandbox=config.sandbox)
    elif src.suffix == ".raw":
        run(
            ["systemd-dissect", "--copy-from", src, "/", t],
            sandbox=config.sandbox(
                binary="systemd-dissect",
                devices=True,
                network=True,
                mounts=[Mount(src, src, ro=True), Mount(t.parent, t.parent)],
            ),
        )
    else:
        # If we get an unknown file without a target, we just copy it into /.
        copy()
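
# Usage note: install_tree() dispatches on the type of the source, so the callers below can
# hand it a plain directory (copied with copy_tree()), a tar archive (unpacked with
# extract_tar()) or a .raw disk image (extracted with systemd-dissect --copy-from), e.g.
# (illustrative paths):
#
#   install_tree(config, Path("mkosi.skeleton"), root)  # directory
#   install_tree(config, Path("base.tar"), root)        # archive
#   install_tree(config, Path("base.raw"), root)        # disk image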
if Path("/etc/static").is_symlink(): (context.pkgmngr / "etc/static").symlink_to(Path("/etc/static").readlink()) (context.pkgmngr / "var/log").mkdir(parents=True) with (context.pkgmngr / "etc/passwd").open("w") as passwd: passwd.write("root:x:0:0:root:/root:/bin/sh\n") if INVOKING_USER.uid != 0: name = INVOKING_USER.name() home = INVOKING_USER.home() passwd.write(f"{name}:x:{INVOKING_USER.uid}:{INVOKING_USER.gid}:{name}:{home}:/bin/sh\n") os.fchown(passwd.fileno(), INVOKING_USER.uid, INVOKING_USER.gid) with (context.pkgmngr / "etc/group").open("w") as group: group.write("root:x:0:\n") if INVOKING_USER.uid != 0: group.write(f"{INVOKING_USER.name()}:x:{INVOKING_USER.gid}:\n") os.fchown(group.fileno(), INVOKING_USER.uid, INVOKING_USER.gid) if (p := context.config.tools() / "etc/crypto-policies").exists(): copy_tree( p, context.pkgmngr / "etc/crypto-policies", preserve=False, dereference=True, sandbox=context.config.sandbox, ) if not context.config.package_manager_trees: return with complete_step("Copying in package manager file trees…"): for tree in context.config.package_manager_trees: install_tree(context.config, tree.source, context.pkgmngr, target=tree.target, preserve=False) def install_package_directories(context: Context, directories: Sequence[Path]) -> None: directories = [d for d in directories if any(d.iterdir())] if not directories: return with complete_step("Copying in extra packages…"): for d in directories: for p in itertools.chain(*(d.glob(glob) for glob in PACKAGE_GLOBS)): shutil.copy(p, context.repository, follow_symlinks=True) def install_extra_trees(context: Context) -> None: if not context.config.extra_trees: return with complete_step("Copying in extra file trees…"): for tree in context.config.extra_trees: install_tree(context.config, tree.source, context.root, target=tree.target, preserve=False) def install_build_dest(context: Context) -> None: if not any(context.install_dir.iterdir()): return with complete_step("Copying in build tree…"): copy_tree( context.install_dir, context.root, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) def gzip_binary(context: Context) -> str: return "pigz" if context.config.find_binary("pigz") else "gzip" def fixup_vmlinuz_location(context: Context) -> None: # Some architectures ship an uncompressed vmlinux (ppc64el, riscv64) for type in ("vmlinuz", "vmlinux"): for d in context.root.glob(f"boot/{type}-*"): if d.is_symlink(): continue kver = d.name.removeprefix(f"{type}-") vmlinuz = context.root / "usr/lib/modules" / kver / type if not vmlinuz.parent.exists(): continue # Some distributions (OpenMandriva) symlink /usr/lib/modules//vmlinuz to /boot/vmlinuz-, so # get rid of the symlink and copy the actual vmlinuz to /usr/lib/modules/. if vmlinuz.is_symlink() and vmlinuz.is_relative_to("/boot"): vmlinuz.unlink() if not vmlinuz.exists(): shutil.copy2(d, vmlinuz) def gen_kernel_images(context: Context) -> Iterator[tuple[str, Path]]: if not (context.root / "usr/lib/modules").exists(): return for kver in sorted( (k for k in (context.root / "usr/lib/modules").iterdir() if k.is_dir()), key=lambda k: GenericVersion(k.name), reverse=True ): # Make sure we look for anything that remotely resembles vmlinuz, as # the arch specific install scripts in the kernel source tree sometimes # do weird stuff. But let's make sure we're not returning UKIs as the # UKI on Fedora is named vmlinuz-virt.efi. Also look for uncompressed # images (vmlinux) as some architectures ship those. Prefer vmlinuz if # both are present. 


def gen_kernel_images(context: Context) -> Iterator[tuple[str, Path]]:
    if not (context.root / "usr/lib/modules").exists():
        return

    for kver in sorted(
        (k for k in (context.root / "usr/lib/modules").iterdir() if k.is_dir()),
        key=lambda k: GenericVersion(k.name),
        reverse=True
    ):
        # Make sure we look for anything that remotely resembles vmlinuz, as
        # the arch specific install scripts in the kernel source tree sometimes
        # do weird stuff. But let's make sure we're not returning UKIs as the
        # UKI on Fedora is named vmlinuz-virt.efi. Also look for uncompressed
        # images (vmlinux) as some architectures ship those. Prefer vmlinuz if
        # both are present.
        for kimg in kver.glob("vmlinuz*"):
            if KernelType.identify(context.config, kimg) != KernelType.uki:
                yield kver.name, kimg
                break
        else:
            for kimg in kver.glob("vmlinux*"):
                if KernelType.identify(context.config, kimg) != KernelType.uki:
                    yield kver.name, kimg
                    break


def want_initrd(context: Context) -> bool:
    if context.config.bootable == ConfigFeature.disabled:
        return False

    if context.config.output_format not in (OutputFormat.disk, OutputFormat.directory):
        return False

    if not any((context.artifacts / "io.mkosi.initrd").glob("*")) and not any(gen_kernel_images(context)):
        return False

    return True


def finalize_default_initrd(
    args: Args,
    config: Config,
    *,
    resources: Path,
    output_dir: Optional[Path] = None,
) -> Config:
    if config.root_password:
        password, hashed = config.root_password
        rootpwopt = f"hashed:{password}" if hashed else password
    else:
        rootpwopt = None

    relabel = ConfigFeature.auto if config.selinux_relabel == ConfigFeature.enabled else config.selinux_relabel

    # Default values are assigned via the parser so we go via the argument parser to construct
    # the config for the initrd.
    cmdline = [
        "--directory", "",
        "--distribution", str(config.distribution),
        "--release", config.release,
        "--architecture", str(config.architecture),
        *(["--mirror", config.mirror] if config.mirror else []),
        "--repository-key-check", str(config.repository_key_check),
        "--repositories", ",".join(config.repositories),
        "--package-manager-tree", ",".join(str(t) for t in config.package_manager_trees),
        # Note that when compress_output == Compression.none == 0 we don't pass --compress-output which means the
        # default compression will get picked. This is exactly what we want so that initrds are always compressed.
        *(["--compress-output", str(config.compress_output)] if config.compress_output else []),
        "--compress-level", str(config.compress_level),
        "--with-network", str(config.with_network),
        "--cache-only", str(config.cacheonly),
        *(["--output-dir", str(output_dir)] if output_dir else []),
        *(["--workspace-dir", str(config.workspace_dir)] if config.workspace_dir else []),
        *(["--cache-dir", str(config.cache_dir)] if config.cache_dir else []),
        *(["--package-cache-dir", str(config.package_cache_dir)] if config.package_cache_dir else []),
        *(["--local-mirror", str(config.local_mirror)] if config.local_mirror else []),
        "--incremental", str(config.incremental),
        "--acl", str(config.acl),
        *(f"--package={package}" for package in config.initrd_packages),
        *(f"--volatile-package={package}" for package in config.initrd_volatile_packages),
        *(f"--package-directory={d}" for d in config.package_directories),
        *(f"--volatile-package-directory={d}" for d in config.volatile_package_directories),
        "--output", "initrd",
        *(["--image-id", config.image_id] if config.image_id else []),
        *(["--image-version", config.image_version] if config.image_version else []),
        *(
            ["--source-date-epoch", str(config.source_date_epoch)]
            if config.source_date_epoch is not None
            else []
        ),
        *(["--locale", config.locale] if config.locale else []),
        *(["--locale-messages", config.locale_messages] if config.locale_messages else []),
        *(["--keymap", config.keymap] if config.keymap else []),
        *(["--timezone", config.timezone] if config.timezone else []),
        *(["--hostname", config.hostname] if config.hostname else []),
        *(["--root-password", rootpwopt] if rootpwopt else []),
        *([f"--environment={k}='{v}'" for k, v in config.environment.items()]),
        *(["--tools-tree", str(config.tools_tree)] if config.tools_tree else []),
        *([f"--extra-search-path={p}" for p in config.extra_search_paths]),
        *(["--proxy-url", config.proxy_url] if config.proxy_url else []),
        *([f"--proxy-exclude={host}" for host in config.proxy_exclude]),
        *(["--proxy-peer-certificate", str(p)] if (p := config.proxy_peer_certificate) else []),
        *(["--proxy-client-certificate", str(p)] if (p := config.proxy_client_certificate) else []),
        *(["--proxy-client-key", str(p)] if (p := config.proxy_client_key) else []),
        "--selinux-relabel", str(relabel),
        *(["-f"] * args.force),
    ]

    cmdline += ["--include=mkosi-initrd"]

    for include in config.initrd_include:
        cmdline += ["--include", os.fspath(include)]

    _, [config] = parse_config(cmdline + ["build"], resources=resources)

    run_configure_scripts(config)

    return dataclasses.replace(config, image="default-initrd")
*(["--proxy-url", config.proxy_url] if config.proxy_url else []), *([f"--proxy-exclude={host}" for host in config.proxy_exclude]), *(["--proxy-peer-certificate", str(p)] if (p := config.proxy_peer_certificate) else []), *(["--proxy-client-certificate", str(p)] if (p := config.proxy_client_certificate) else []), *(["--proxy-client-key", str(p)] if (p := config.proxy_client_key) else []), "--selinux-relabel", str(relabel), *(["-f"] * args.force), ] cmdline += ["--include=mkosi-initrd"] for include in config.initrd_include: cmdline += ["--include", os.fspath(include)] _, [config] = parse_config(cmdline + ["build"], resources=resources) run_configure_scripts(config) return dataclasses.replace(config, image="default-initrd") def build_default_initrd(context: Context) -> Path: if context.config.distribution == Distribution.custom: die("Building a default initrd is not supported for custom distributions") config = finalize_default_initrd( context.args, context.config, resources=context.resources, output_dir=context.workspace, ) assert config.output_dir config.output_dir.mkdir(exist_ok=True) if (config.output_dir / config.output).exists(): return config.output_dir / config.output with ( complete_step("Building default initrd"), setup_workspace(context.args, config) as workspace, ): build_image( Context( context.args, config, workspace=workspace, resources=context.resources, # Re-use the repository metadata snapshot from the main image for the initrd. package_cache_dir=context.package_cache_dir, package_dir=context.package_dir, ) ) return config.output_dir / config.output def identify_cpu(root: Path) -> tuple[Optional[Path], Optional[Path]]: for entry in Path("/proc/cpuinfo").read_text().split("\n\n"): vendor_id = family = model = stepping = None for line in entry.splitlines(): key, _, value = line.partition(":") key = key.strip() value = value.strip() if not key or not value: continue if key == "vendor_id": vendor_id = value elif key == "cpu family": family = int(value) elif key == "model": model = int(value) elif key == "stepping": stepping = int(value) if vendor_id is not None and family is not None and model is not None and stepping is not None: break else: return (None, None) if vendor_id == "AuthenticAMD": uroot = root / "usr/lib/firmware/amd-ucode" if family > 21: ucode = uroot / f"microcode_amd_fam{family:x}h.bin" else: ucode = uroot / "microcode_amd.bin" if ucode.exists(): return (Path(f"{vendor_id}.bin"), ucode) elif vendor_id == "GenuineIntel": uroot = root / "usr/lib/firmware/intel-ucode" if (ucode := uroot / f"{family:02x}-{model:02x}-{stepping:02x}").exists(): return (Path(f"{vendor_id}.bin"), ucode) if (ucode := uroot / f"{family:02x}-{model:02x}-{stepping:02x}.initramfs").exists(): return (Path(f"{vendor_id}.bin"), ucode) return (Path(f"{vendor_id}.bin"), None) def build_microcode_initrd(context: Context) -> list[Path]: if not context.config.architecture.is_x86_variant(): return [] microcode = context.workspace / "microcode.initrd" if microcode.exists(): return [microcode] amd = context.root / "usr/lib/firmware/amd-ucode" intel = context.root / "usr/lib/firmware/intel-ucode" if not amd.exists() and not intel.exists(): logging.warning("/usr/lib/firmware/{amd-ucode,intel-ucode} not found, not adding microcode") return [] root = context.workspace / "microcode-root" destdir = root / "kernel/x86/microcode" with umask(~0o755): destdir.mkdir(parents=True, exist_ok=True) if context.config.microcode_host: vendorfile, ucodefile = identify_cpu(context.root) if vendorfile is None or ucodefile 


def build_microcode_initrd(context: Context) -> list[Path]:
    if not context.config.architecture.is_x86_variant():
        return []

    microcode = context.workspace / "microcode.initrd"
    if microcode.exists():
        return [microcode]

    amd = context.root / "usr/lib/firmware/amd-ucode"
    intel = context.root / "usr/lib/firmware/intel-ucode"

    if not amd.exists() and not intel.exists():
        logging.warning("/usr/lib/firmware/{amd-ucode,intel-ucode} not found, not adding microcode")
        return []

    root = context.workspace / "microcode-root"
    destdir = root / "kernel/x86/microcode"

    with umask(~0o755):
        destdir.mkdir(parents=True, exist_ok=True)

    if context.config.microcode_host:
        vendorfile, ucodefile = identify_cpu(context.root)
        if vendorfile is None or ucodefile is None:
            logging.warning("Unable to identify CPU for MicrocodeHostonly=")
            return []
        with (destdir / vendorfile).open("wb") as f:
            f.write(ucodefile.read_bytes())
    else:
        if amd.exists():
            with (destdir / "AuthenticAMD.bin").open("wb") as f:
                for p in amd.iterdir():
                    f.write(p.read_bytes())

        if intel.exists():
            with (destdir / "GenuineIntel.bin").open("wb") as f:
                for p in intel.iterdir():
                    f.write(p.read_bytes())

    make_cpio(root, microcode, sandbox=context.sandbox)

    return [microcode]


def finalize_kernel_modules_include(context: Context, *, include: Sequence[str], host: bool) -> set[str]:
    final = {i for i in include if i not in ("default", "host")}
    if "default" in include:
        initrd = finalize_default_initrd(context.args, context.config, resources=context.resources)
        final.update(initrd.kernel_modules_include)
    if host or "host" in include:
        final.update(loaded_modules())

    return final


def build_kernel_modules_initrd(context: Context, kver: str) -> Path:
    kmods = context.workspace / f"kernel-modules-{kver}.initrd"
    if kmods.exists():
        return kmods

    make_cpio(
        context.root, kmods,
        files=gen_required_kernel_modules(
            context.root, kver,
            include=finalize_kernel_modules_include(
                context,
                include=context.config.kernel_modules_initrd_include,
                host=context.config.kernel_modules_initrd_include_host,
            ),
            exclude=context.config.kernel_modules_initrd_exclude,
            sandbox=context.sandbox,
        ),
        sandbox=context.sandbox,
    )

    if context.config.distribution.is_apt_distribution():
        # Ubuntu Focal's kernel does not support zstd-compressed initrds so use xz instead.
        if context.config.distribution == Distribution.ubuntu and context.config.release == "focal":
            compression = Compression.xz
        # Older Debian and Ubuntu releases do not compress their kernel modules, so we compress the initramfs
        # instead. Note that this is not ideal since the compressed kernel modules will all be decompressed on
        # boot which requires significant memory.
        elif context.config.distribution == Distribution.debian and context.config.release in ("sid", "testing"):
            compression = Compression.none
        else:
            compression = Compression.zstd

        maybe_compress(context, compression, kmods, kmods)

    return kmods


def join_initrds(initrds: Sequence[Path], output: Path) -> Path:
    assert initrds

    if len(initrds) == 1:
        shutil.copy2(initrds[0], output)
        return output

    seq = io.BytesIO()
    for p in initrds:
        initrd = p.read_bytes()
        n = len(initrd)
        padding = b'\0' * (round_up(n, 4) - n)  # pad to 32 bit alignment
        seq.write(initrd)
        seq.write(padding)

    output.write_bytes(seq.getbuffer())
    return output
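
# Worked example for the padding in join_initrds(): concatenated cpio archives have to stay
# 4-byte aligned, so an initrd of e.g. 1000001 bytes is followed by three NUL bytes
# (round_up(1000001, 4) == 1000004) before the next archive is appended.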


def python_binary(config: Config, *, binary: Optional[PathString]) -> str:
    tools = (
        not binary
        or not (path := config.find_binary(binary))
        or not any(path.is_relative_to(d) for d in config.extra_search_paths)
    )

    # If there's no tools tree, prefer the interpreter from MKOSI_INTERPRETER. If there is a tools
    # tree, just use the default python3 interpreter.
    return "python3" if tools and config.tools_tree else os.getenv("MKOSI_INTERPRETER", "python3")


def extract_pe_section(context: Context, binary: Path, section: str, output: Path) -> Path:
    # When using a tools tree, we want to use the pefile module from the tools tree instead of requiring that
    # python-pefile is installed on the host. So we execute python as a subprocess to make sure we load
    # pefile from the tools tree if one is used.

    # TODO: Use ignore_padding=True instead of length once we can depend on a newer pefile.
    # TODO: Drop KeyError logic once we drop support for Ubuntu Jammy and sdmagic will always be available.
    pefile = textwrap.dedent(
        f"""\
        import pefile
        import sys
        from pathlib import Path
        pe = pefile.PE("{binary}", fast_load=True)
        section = {{s.Name.decode().strip("\\0"): s for s in pe.sections}}.get("{section}")
        if not section:
            sys.exit(67)
        sys.stdout.buffer.write(section.get_data(length=section.Misc_VirtualSize))
        """
    )

    with open(output, "wb") as f:
        result = run(
            [python_binary(context.config, binary=None)],
            input=pefile,
            stdout=f,
            sandbox=context.sandbox(
                binary=python_binary(context.config, binary=None),
                mounts=[Mount(binary, binary, ro=True)],
            ),
            success_exit_status=(0, 67),
        )

    if result.returncode == 67:
        raise KeyError(f"{section} section not found in {binary}")

    return output
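
# Usage note (taken from callers elsewhere in this file): any PE section can be pulled out this
# way, e.g. the SBAT metadata of a signed grub binary or the version marker of systemd-stub:
#
#   extract_pe_section(context, signed, ".sbat", context.workspace / "sbat")
#   extract_pe_section(context, stub, ".sdmagic", context.workspace / "sdmagic")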
"--pcr-banks", "sha256", ] if context.config.secure_boot_key.exists(): mounts += [Mount(context.config.secure_boot_key, context.config.secure_boot_key)] if context.config.secure_boot_key_source.type == KeySource.Type.engine: cmd += [ "--signing-engine", context.config.secure_boot_key_source.source, "--pcr-public-key", context.config.secure_boot_certificate, ] mounts += [ Mount(context.config.secure_boot_certificate, context.config.secure_boot_certificate, ro=True), ] cmd += ["build", "--linux", kimg] mounts += [Mount(kimg, kimg, ro=True)] if microcodes: # new .ucode section support? if ( systemd_tool_version( python_binary(context.config, binary=ukify), ukify, sandbox=context.sandbox, ) >= "256" and (version := systemd_stub_version(context, stub)) and version >= "256" ): for microcode in microcodes: cmd += ["--microcode", microcode] mounts += [Mount(microcode, microcode, ro=True)] else: initrds = microcodes + initrds for initrd in initrds: cmd += ["--initrd", initrd] mounts += [Mount(initrd, initrd, ro=True)] with complete_step(f"Generating unified kernel image for kernel version {kver}"): run( cmd, sandbox=context.sandbox( binary=ukify, mounts=mounts, devices=context.config.secure_boot_key_source.type != KeySource.Type.file, ), ) def want_efi(config: Config) -> bool: # Do we want to make the image bootable on EFI firmware? # Note that this returns True also in the case where autodetection might later # cause the system to not be made bootable on EFI firmware after the filesystem # has been populated. if config.output_format in (OutputFormat.uki, OutputFormat.esp): return True if config.bootable == ConfigFeature.disabled: return False if config.bootloader == Bootloader.none: return False if ( (config.output_format == OutputFormat.cpio or config.output_format.is_extension_image() or config.overlay) and config.bootable == ConfigFeature.auto ): return False if config.architecture.to_efi() is None: if config.bootable == ConfigFeature.enabled: die(f"Cannot make image bootable on UEFI on {config.architecture} architecture") return False return True def systemd_stub_binary(context: Context) -> Path: arch = context.config.architecture.to_efi() stub = context.root / f"usr/lib/systemd/boot/efi/linux{arch}.efi.stub" return stub def systemd_stub_version(context: Context, stub: Path) -> Optional[GenericVersion]: try: sdmagic = extract_pe_section(context, stub, ".sdmagic", context.workspace / "sdmagic") except KeyError: return None sdmagic_text = sdmagic.read_text().strip("\x00") # Older versions of the stub have misaligned sections which results in an empty sdmagic text. Let's check for that # explicitly and treat it as no version. # TODO: Drop this logic once every distribution we support ships systemd-stub v254 or newer. 


def want_efi(config: Config) -> bool:
    # Do we want to make the image bootable on EFI firmware?
    # Note that this returns True also in the case where autodetection might later
    # cause the system to not be made bootable on EFI firmware after the filesystem
    # has been populated.

    if config.output_format in (OutputFormat.uki, OutputFormat.esp):
        return True

    if config.bootable == ConfigFeature.disabled:
        return False

    if config.bootloader == Bootloader.none:
        return False

    if (
        (config.output_format == OutputFormat.cpio or config.output_format.is_extension_image() or config.overlay)
        and config.bootable == ConfigFeature.auto
    ):
        return False

    if config.architecture.to_efi() is None:
        if config.bootable == ConfigFeature.enabled:
            die(f"Cannot make image bootable on UEFI on {config.architecture} architecture")

        return False

    return True


def systemd_stub_binary(context: Context) -> Path:
    arch = context.config.architecture.to_efi()
    stub = context.root / f"usr/lib/systemd/boot/efi/linux{arch}.efi.stub"
    return stub


def systemd_stub_version(context: Context, stub: Path) -> Optional[GenericVersion]:
    try:
        sdmagic = extract_pe_section(context, stub, ".sdmagic", context.workspace / "sdmagic")
    except KeyError:
        return None

    sdmagic_text = sdmagic.read_text().strip("\x00")

    # Older versions of the stub have misaligned sections which results in an empty sdmagic text. Let's check
    # for that explicitly and treat it as no version.
    # TODO: Drop this logic once every distribution we support ships systemd-stub v254 or newer.
    if not sdmagic_text:
        return None

    if not (version := re.match(r"#### LoaderInfo: systemd-stub (?P<version>[.~^a-zA-Z0-9-+]+) ####", sdmagic_text)):
        die(f"Unable to determine systemd-stub version, found {sdmagic_text!r}")

    return GenericVersion(version.group("version"))


def want_uki(context: Context) -> bool:
    return want_efi(context.config) and (
        context.config.bootloader == Bootloader.uki
        or context.config.unified_kernel_images == ConfigFeature.enabled
        or (
            context.config.unified_kernel_images == ConfigFeature.auto
            and systemd_stub_binary(context).exists()
            and context.config.find_binary("ukify", "/usr/lib/systemd/ukify") is not None
        )
    )


def find_entry_token(context: Context) -> str:
    if (
        not context.config.find_binary("kernel-install")
        or "--version" not in run(
            ["kernel-install", "--help"],
            stdout=subprocess.PIPE,
            sandbox=context.sandbox(binary="kernel-install"),
        ).stdout
        or systemd_tool_version("kernel-install", sandbox=context.sandbox) < "255.1"
    ):
        return context.config.image_id or context.config.distribution.name

    output = json.loads(
        run(
            ["kernel-install", "--root=/buildroot", "--json=pretty", "inspect"],
            sandbox=context.sandbox(binary="kernel-install", mounts=[Mount(context.root, "/buildroot", ro=True)]),
            stdout=subprocess.PIPE,
            env={"SYSTEMD_ESP_PATH": "/efi", "SYSTEMD_XBOOTLDR_PATH": "/boot"},
        ).stdout
    )

    logging.debug(json.dumps(output, indent=4))
    return cast(str, output["EntryToken"])


def finalize_cmdline(context: Context, partitions: Sequence[Partition], roothash: Optional[str]) -> list[str]:
    if (context.root / "etc/kernel/cmdline").exists():
        cmdline = [(context.root / "etc/kernel/cmdline").read_text().strip()]
    elif (context.root / "usr/lib/kernel/cmdline").exists():
        cmdline = [(context.root / "usr/lib/kernel/cmdline").read_text().strip()]
    else:
        cmdline = []

    if roothash:
        cmdline += [roothash]

    cmdline += context.config.kernel_command_line

    if not roothash:
        for name in ("root", "mount.usr"):
            type_prefix = name.removeprefix("mount.")
            if not (root := next((p.uuid for p in partitions if p.type.startswith(type_prefix)), None)):
                continue

            cmdline = [f"{name}=PARTUUID={root}" if c == f"{name}=PARTUUID" else c for c in cmdline]

    return cmdline


def finalize_microcode(context: Context) -> list[Path]:
    if any((context.artifacts / "io.mkosi.microcode").glob("*")):
        return sorted((context.artifacts / "io.mkosi.microcode").iterdir())
    elif microcode := build_microcode_initrd(context):
        return microcode

    return []


def finalize_initrds(context: Context) -> list[Path]:
    if context.config.initrds:
        return context.config.initrds
    elif any((context.artifacts / "io.mkosi.initrd").glob("*")):
        return sorted((context.artifacts / "io.mkosi.initrd").iterdir())

    return [build_default_initrd(context)]
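
# Illustrative Type #1 boot loader entry written by install_type1() below (assuming the entry
# token "fedora" and kernel version "6.8.9-300.fc40.x86_64"; the initrd file names depend on
# the configured initrds):
#
#   # boot/loader/entries/fedora-6.8.9-300.fc40.x86_64.conf
#   title fedora 6.8.9-300.fc40.x86_64
#   version 6.8.9-300.fc40.x86_64
#   linux /fedora/6.8.9-300.fc40.x86_64/vmlinuz
#   options root=PARTUUID=...
#   initrd /fedora/initrd
#   initrd /fedora/6.8.9-300.fc40.x86_64/kernel-modules.initrd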


def install_type1(
    context: Context,
    kver: str,
    kimg: Path,
    token: str,
    partitions: Sequence[Partition],
) -> None:
    dst = context.root / "boot" / token / kver
    entry = context.root / f"boot/loader/entries/{token}-{kver}.conf"
    with umask(~0o700):
        dst.mkdir(parents=True, exist_ok=True)
        entry.parent.mkdir(parents=True, exist_ok=True)

    kmods = build_kernel_modules_initrd(context, kver)
    cmdline = finalize_cmdline(context, partitions, finalize_roothash(partitions))

    with umask(~0o600):
        if (
            want_efi(context.config)
            and context.config.secure_boot
            and context.config.shim_bootloader != ShimBootloader.signed
            and KernelType.identify(context.config, kimg) == KernelType.pe
        ):
            kimg = sign_efi_binary(context, kimg, dst / "vmlinuz")
        else:
            kimg = Path(shutil.copy2(context.root / kimg, dst / "vmlinuz"))

        initrds = [
            Path(shutil.copy2(initrd, dst.parent / initrd.name))
            for initrd in finalize_microcode(context) + finalize_initrds(context)
        ]
        initrds += [Path(shutil.copy2(kmods, dst / "kernel-modules.initrd"))]

        with entry.open("w") as f:
            f.write(
                textwrap.dedent(
                    f"""\
                    title {token} {kver}
                    version {kver}
                    linux /{kimg.relative_to(context.root / "boot")}
                    options {" ".join(cmdline)}
                    """
                )
            )

            for initrd in initrds:
                f.write(f'initrd /{initrd.relative_to(context.root / "boot")}\n')

    if want_grub_efi(context) or want_grub_bios(context, partitions):
        config = prepare_grub_config(context)
        assert config

        if (
            not any(c.startswith("root=PARTUUID=") for c in context.config.kernel_command_line)
            and not any(c.startswith("mount.usr=PARTUUID=") for c in context.config.kernel_command_line)
            and (root := finalize_root(partitions))
        ):
            cmdline = [root] + cmdline

        with config.open("a") as f:
            f.write("if ")

            conditions = []
            if want_grub_efi(context) and not want_uki(context):
                conditions += ['[ "${grub_platform}" = efi ]']
            if want_grub_bios(context, partitions):
                conditions += ['[ "${grub_platform}" = pc ]']

            f.write(" || ".join(conditions))
            f.write("; then\n")

            f.write(
                textwrap.dedent(
                    f"""\
                    menuentry "{token}-{kver}" {{
                        linux /{kimg.relative_to(context.root / "boot")} {" ".join(cmdline)}
                        initrd {" ".join(os.fspath(Path("/") / i.relative_to(context.root / "boot")) for i in initrds)}
                    }}
                    """
                )
            )

            f.write("fi\n")


def expand_kernel_specifiers(text: str, kver: str, token: str, roothash: str, boot_count: str) -> str:
    specifiers = {
        "&": "&",
        "e": token,
        "k": kver,
        "h": roothash,
        "c": boot_count,
    }

    def replacer(match: re.Match[str]) -> str:
        m = match.group("specifier")
        if specifier := specifiers.get(m):
            return specifier

        logging.warning(f"Unknown specifier '&{m}' found in {text}, ignoring")
        return ""

    return re.sub(r"&(?P<specifier>[&a-zA-Z])", replacer, text)
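
# Worked example: with the default format "&e-&k" plus the "+&c" suffix appended in
# install_uki() below when boot counting is enabled,
#
#   expand_kernel_specifiers("&e-&k+&c", kver="6.8.0", token="fedora", roothash="", boot_count="3")
#
# evaluates to "fedora-6.8.0+3"; unknown specifiers are dropped with a warning.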


def install_uki(context: Context, kver: str, kimg: Path, token: str, partitions: Sequence[Partition]) -> None:
    bootloader_entry_format = context.config.unified_kernel_image_format or "&e-&k"

    roothash_value = ""
    if roothash := finalize_roothash(partitions):
        roothash_value = roothash.partition("=")[2]

        if not context.config.unified_kernel_image_format:
            bootloader_entry_format += "-&h"

    boot_count = ""
    if (context.root / "etc/kernel/tries").exists():
        boot_count = (context.root / "etc/kernel/tries").read_text().strip()

        if not context.config.unified_kernel_image_format:
            bootloader_entry_format += "+&c"

    bootloader_entry = expand_kernel_specifiers(
        bootloader_entry_format,
        kver=kver,
        token=token,
        roothash=roothash_value,
        boot_count=boot_count,
    )

    if context.config.bootloader == Bootloader.uki:
        if context.config.shim_bootloader != ShimBootloader.none:
            boot_binary = context.root / shim_second_stage_binary(context)
        else:
            boot_binary = context.root / efi_boot_binary(context)
    else:
        boot_binary = context.root / f"boot/EFI/Linux/{bootloader_entry}.efi"

    # Make sure the parent directory where we'll be writing the UKI exists.
    with umask(~0o700):
        boot_binary.parent.mkdir(parents=True, exist_ok=True)

    if context.config.shim_bootloader == ShimBootloader.signed:
        for p in (context.root / "usr/lib/modules" / kver).glob("*.efi"):
            log_step(f"Installing prebuilt UKI at {p} to {boot_binary}")
            shutil.copy2(p, boot_binary)
            break
        else:
            if context.config.bootable == ConfigFeature.enabled:
                die(f"Couldn't find a signed UKI binary installed at /usr/lib/modules/{kver} in the image")

            return
    else:
        microcodes = finalize_microcode(context)

        initrds = finalize_initrds(context)
        if context.config.kernel_modules_initrd:
            initrds += [build_kernel_modules_initrd(context, kver)]

        build_uki(
            context,
            systemd_stub_binary(context),
            kver,
            context.root / kimg,
            microcodes,
            initrds,
            finalize_cmdline(context, partitions, roothash),
            boot_binary,
        )

    print_output_size(boot_binary)

    if want_grub_efi(context):
        config = prepare_grub_config(context)
        assert config

        with config.open("a") as f:
            f.write('if [ "${grub_platform}" = efi ]; then\n')

            f.write(
                textwrap.dedent(
                    f"""\
                    menuentry "{boot_binary.stem}" {{
                        chainloader /{boot_binary.relative_to(context.root / "boot")}
                    }}
                    """
                )
            )

            f.write("fi\n")


def install_kernel(context: Context, partitions: Sequence[Partition]) -> None:
    # Iterates through all kernel versions included in the image and generates a combined
    # kernel+initrd+cmdline+osrelease EFI file from it and places it in the /EFI/Linux directory of the ESP.
    # sd-boot iterates through them and shows them in the menu. These "unified" single-file images have the
    # benefit that they can be signed like normal EFI binaries, and can encode everything necessary to boot a
    # specific root device, including the root hash.
    if context.config.output_format in (OutputFormat.uki, OutputFormat.esp):
        return

    if context.config.bootable == ConfigFeature.disabled:
        return

    if context.config.bootable == ConfigFeature.auto and (
        context.config.output_format == OutputFormat.cpio
        or context.config.output_format.is_extension_image()
        or context.config.overlay
    ):
        return

    stub = systemd_stub_binary(context)
    if want_uki(context) and not stub.exists():
        die(f"Unified kernel image(s) requested but systemd-stub not found at /{stub.relative_to(context.root)}")

    if context.config.bootable == ConfigFeature.enabled and not any(gen_kernel_images(context)):
        die("A bootable image was requested but no kernel was found")

    token = find_entry_token(context)

    for kver, kimg in gen_kernel_images(context):
        if want_uki(context):
            install_uki(context, kver, kimg, token, partitions)
        if not want_uki(context) or want_grub_bios(context, partitions):
            install_type1(context, kver, kimg, token, partitions)

        if context.config.bootloader == Bootloader.uki:
            break


def make_uki(context: Context, stub: Path, kver: str, kimg: Path, microcode: list[Path], output: Path) -> None:
    make_cpio(context.root, context.workspace / "initrd", sandbox=context.sandbox)
    maybe_compress(context, context.config.compress_output, context.workspace / "initrd", context.workspace / "initrd")

    initrds = [context.workspace / "initrd"]

    build_uki(context, stub, kver, kimg, microcode, initrds, context.config.kernel_command_line, output)
    extract_pe_section(context, output, ".linux", context.staging / context.config.output_split_kernel)
    extract_pe_section(context, output, ".initrd", context.staging / context.config.output_split_initrd)
f"-{context.config.compress_level}", "--stdout", "-"] elif compression == Compression.xz: return ["xz", "--check=crc32", f"-{context.config.compress_level}", "-T0", "--stdout", "-"] elif compression == Compression.zstd: return ["zstd", "-q", f"-{context.config.compress_level}", "-T0", "--stdout", "-"] else: die(f"Unknown compression {compression}") def maybe_compress(context: Context, compression: Compression, src: Path, dst: Optional[Path] = None) -> None: if not compression or src.is_dir(): if dst: move_tree( src, dst, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) return if not dst: dst = src.parent / f"{src.name}{compression.extension()}" cmd = compressor_command(context, compression) with complete_step(f"Compressing {src} with {compression}"): with src.open("rb") as i: src.unlink() # if src == dst, make sure dst doesn't truncate the src file but creates a new file. with dst.open("wb") as o: run(cmd, stdin=i, stdout=o, sandbox=context.sandbox(binary=cmd[0])) def copy_uki(context: Context) -> None: if (context.staging / context.config.output_split_uki).exists(): return if not want_efi(context.config) or context.config.unified_kernel_images == ConfigFeature.disabled: return ukis = sorted( (context.root / "boot/EFI/Linux").glob("*.efi"), key=lambda p: GenericVersion(p.name), reverse=True, ) if ( (uki := context.root / efi_boot_binary(context)).exists() and KernelType.identify(context.config, uki) == KernelType.uki ): pass elif ( (uki := context.root / shim_second_stage_binary(context)).exists() and KernelType.identify(context.config, uki) == KernelType.uki ): pass elif ukis: uki = ukis[0] else: return shutil.copy(uki, context.staging / context.config.output_split_uki) # Extract the combined initrds from the UKI so we can use it to direct kernel boot with qemu if needed. extract_pe_section(context, uki, ".initrd", context.staging / context.config.output_split_initrd) # ukify will have signed the kernel image as well. Let's make sure we put the signed kernel image in the output # directory instead of the unsigned one by reading it from the UKI. 
extract_pe_section(context, uki, ".linux", context.staging / context.config.output_split_kernel) def copy_vmlinuz(context: Context) -> None: if (context.staging / context.config.output_split_kernel).exists(): return for _, kimg in gen_kernel_images(context): shutil.copy(context.root / kimg, context.staging / context.config.output_split_kernel) break def copy_nspawn_settings(context: Context) -> None: if context.config.nspawn_settings is None: return None with complete_step("Copying nspawn settings file…"): shutil.copy2(context.config.nspawn_settings, context.staging / context.config.output_nspawn_settings) def copy_initrd(context: Context) -> None: if not want_initrd(context): return if (context.staging / context.config.output_split_initrd).exists(): return for kver, _ in gen_kernel_images(context): initrds = finalize_initrds(context) if context.config.kernel_modules_initrd: kver = next(gen_kernel_images(context))[0] initrds += [build_kernel_modules_initrd(context, kver)] join_initrds(initrds, context.staging / context.config.output_split_initrd) break def calculate_sha256sum(context: Context) -> None: if not context.config.checksum: return with complete_step("Calculating SHA256SUMS…"): with open(context.workspace / context.config.output_checksum, "w") as f: for p in context.staging.iterdir(): if p.is_dir(): logging.warning(f"Cannot checksum directory '{p}', skipping") continue print(hash_file(p) + " *" + p.name, file=f) (context.workspace / context.config.output_checksum).rename(context.staging / context.config.output_checksum) def calculate_signature(context: Context) -> None: if not context.config.sign or not context.config.checksum: return cmdline: list[PathString] = ["gpg", "--detach-sign"] # Need to specify key before file to sign if context.config.key is not None: cmdline += ["--default-key", context.config.key] cmdline += ["--output", "-", "-"] home = Path(context.config.environment.get("GNUPGHOME", INVOKING_USER.home() / ".gnupg")) if not home.exists(): die(f"GPG home {home} not found") env = dict(GNUPGHOME=os.fspath(home)) if sys.stderr.isatty(): env |= dict(GPGTTY=os.ttyname(sys.stderr.fileno())) options: list[PathString] = ["--perms", "755", "--dir", home] mounts = [Mount(home, home)] # gpg can communicate with smartcard readers via this socket so bind mount it in if it exists. if (p := Path("/run/pcscd/pcscd.comm")).exists(): options += ["--perms", "755", "--dir", p.parent] mounts += [Mount(p, p)] with ( complete_step("Signing SHA256SUMS…"), open(context.staging / context.config.output_checksum, "rb") as i, open(context.staging / context.config.output_signature, "wb") as o, ): run( cmdline, env=env, stdin=i, stdout=o, # GPG messes with the user's home directory so we run it as the invoking user. sandbox=context.sandbox( binary="gpg", mounts=mounts, options=options, extra=["setpriv", f"--reuid={INVOKING_USER.uid}", f"--regid={INVOKING_USER.gid}", "--clear-groups"], ) ) def dir_size(path: Union[Path, os.DirEntry[str]]) -> int: dir_sum = 0 for entry in os.scandir(path): if entry.is_symlink(): # We can ignore symlinks because they either point into our tree, # in which case we'll include the size of target directory anyway, # or outside, in which case we don't need to. 
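            # (st_blocks counts 512-byte blocks regardless of the filesystem block size, so the multiplication
            # below yields the space actually consumed on disk.)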
            continue
        elif entry.is_file():
            dir_sum += entry.stat().st_blocks * 512
        elif entry.is_dir():
            dir_sum += dir_size(entry)

    return dir_sum


def save_manifest(context: Context, manifest: Optional[Manifest]) -> None:
    if not manifest:
        return

    if manifest.has_data():
        if ManifestFormat.json in context.config.manifest_format:
            with complete_step(f"Saving manifest {context.config.output_manifest}"):
                with open(context.staging / context.config.output_manifest, 'w') as f:
                    manifest.write_json(f)

        if ManifestFormat.changelog in context.config.manifest_format:
            with complete_step(f"Saving report {context.config.output_changelog}"):
                with open(context.staging / context.config.output_changelog, 'w') as f:
                    manifest.write_package_report(f)


def print_output_size(path: Path) -> None:
    if path.is_dir():
        log_step(f"{path} size is " + format_bytes(dir_size(path)) + ".")
    else:
        size = format_bytes(path.stat().st_size)
        space = format_bytes(path.stat().st_blocks * 512)
        log_step(f"{path} size is {size}, consumes {space}.")


def cache_tree_paths(config: Config) -> tuple[Path, Path, Path]:
    fragments = [config.distribution, config.release, config.architecture]

    if config.image:
        fragments += [config.image]

    key = '~'.join(str(s) for s in fragments)

    assert config.cache_dir
    return (
        config.cache_dir / f"{key}.cache",
        config.cache_dir / f"{key}.build.cache",
        config.cache_dir / f"{key}.manifest",
    )


def check_inputs(config: Config) -> None:
    """
    Make sure all the inputs exist that aren't checked during config parsing because they might be created by
    an earlier build.
    """
    for base in config.base_trees:
        if not base.exists():
            die(f"Base tree {base} not found")

        if base.is_file() and base.suffix == ".raw" and os.getuid() != 0:
            die("Must run as root to use disk images in base trees")

    if config.tools_tree and not config.tools_tree.exists():
        die(f"Tools tree {config.tools_tree} not found")

    trees = [
        ("skeleton", config.skeleton_trees),
        ("package manager", config.package_manager_trees),
    ]
    if config.output_format != OutputFormat.none:
        trees += [("extra", config.extra_trees)]

    for name, paths in trees:
        for tree in paths:
            if not tree.source.exists():
                die(f"{name.capitalize()} tree {tree.source} not found")

            if tree.source.is_file() and tree.source.suffix == ".raw" and not tree.target and os.getuid() != 0:
                die(f"Must run as root to use disk images in {name} trees")

    if config.output_format != OutputFormat.none and config.bootable != ConfigFeature.disabled:
        for p in config.initrds:
            if not p.exists():
                die(f"Initrd {p} not found")
            if not p.is_file():
                die(f"Initrd {p} is not a file")

    for script in itertools.chain(
        config.sync_scripts,
        config.prepare_scripts,
        config.build_scripts,
        config.postinst_scripts,
        config.finalize_scripts,
        config.postoutput_scripts,
    ):
        if not os.access(script, os.X_OK):
            die(f"{script} is not executable")


def check_outputs(config: Config) -> None:
    if config.output_format == OutputFormat.none:
        return

    f = config.output_dir_or_cwd() / config.output_with_compression
    if f.exists() and not f.is_symlink():
        logging.info(f"Output path {f} exists already.
(Use --force to rebuild.)") def check_tool(config: Config, *tools: PathString, reason: str, hint: Optional[str] = None) -> Path: tool = config.find_binary(*tools) if not tool: die(f"Could not find '{tools[0]}' which is required to {reason}.", hint=hint) return tool def check_systemd_tool( config: Config, *tools: PathString, version: str, reason: str, hint: Optional[str] = None, ) -> None: tool = check_tool(config, *tools, reason=reason, hint=hint) v = systemd_tool_version(tool, sandbox=config.sandbox) if v < version: die(f"Found '{tool}' with version {v} but version {version} or newer is required to {reason}.", hint=f"Use ToolsTree=default to get a newer version of '{tools[0]}'.") def check_ukify( config: Config, version: str, reason: str, hint: Optional[str] = None, ) -> None: ukify = check_tool(config, "ukify", "/usr/lib/systemd/ukify", reason=reason, hint=hint) v = systemd_tool_version(python_binary(config, binary=ukify), ukify, sandbox=config.sandbox) if v < version: die(f"Found '{ukify}' with version {v} but version {version} or newer is required to {reason}.", hint="Use ToolsTree=default to get a newer version of 'ukify'.") def check_tools(config: Config, verb: Verb) -> None: check_tool(config, "bwrap", reason="execute sandboxed commands") if verb == Verb.build: if config.bootable != ConfigFeature.disabled: check_tool(config, "depmod", reason="generate kernel module dependencies") if want_efi(config) and config.unified_kernel_images == ConfigFeature.enabled: check_ukify( config, version="254", reason="build bootable images", hint="Use ToolsTree=default to download most required tools including ukify automatically or use " "Bootable=no to create a non-bootable image which doesn't require ukify", ) if config.output_format in (OutputFormat.disk, OutputFormat.esp): check_systemd_tool(config, "systemd-repart", version="254", reason="build disk images") if config.selinux_relabel == ConfigFeature.enabled: check_tool(config, "setfiles", reason="relabel files") if config.secure_boot_key_source.type != KeySource.Type.file: check_ukify( config, version="256", reason="sign Unified Kernel Image with OpenSSL engine", ) if want_signed_pcrs(config): check_systemd_tool( config, "systemd-measure", version="256", reason="sign PCR hashes with OpenSSL engine", ) if config.verity_key_source.type != KeySource.Type.file: check_systemd_tool( config, "systemd-repart", version="256", reason="sign verity roothash signature with OpenSSL engine", ) if want_efi(config) and config.secure_boot and config.secure_boot_auto_enroll: check_tool(config, "sbsiglist", reason="set up systemd-boot secure boot auto-enrollment") check_tool(config, "sbvarsign", reason="set up systemd-boot secure boot auto-enrollment") if verb == Verb.boot: check_systemd_tool(config, "systemd-nspawn", version="254", reason="boot images") if verb == Verb.qemu and config.vmm == Vmm.vmspawn: check_systemd_tool(config, "systemd-vmspawn", version="256", reason="boot images with vmspawn") def configure_ssh(context: Context) -> None: if not context.config.ssh: return unitdir = context.root / "usr/lib/systemd/system" with umask(~0o755): unitdir.mkdir(parents=True, exist_ok=True) with umask(~0o644): (unitdir / "ssh.socket").write_text( textwrap.dedent( """\ [Unit] Description=Mkosi SSH Server VSock Socket ConditionVirtualization=!container Wants=sshd-keygen.target [Socket] ListenStream=vsock::22 Accept=yes [Install] WantedBy=sockets.target """ ) ) (unitdir / "ssh@.service").write_text( textwrap.dedent( """\ [Unit] Description=Mkosi SSH Server 
After=sshd-keygen.target [Service] # We disable PAM because of an openssh-server bug where it sets PAM_RHOST=UNKNOWN when -i is # used causing a very slow reverse DNS lookup by pam. ExecStart=sshd -i -o UsePAM=no StandardInput=socket RuntimeDirectoryPreserve=yes RuntimeDirectory=sshd # ssh always exits with 255 even on normal disconnect, so let's mark that as success so we # don't get noisy logs about SSH service failures. SuccessExitStatus=255 """ ) ) preset = context.root / "usr/lib/systemd/system-preset/80-mkosi-ssh.preset" with umask(~0o755): preset.parent.mkdir(parents=True, exist_ok=True) with umask(~0o644): preset.write_text("enable ssh.socket\n") def configure_initrd(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return if ( not (context.root / "init").exists() and not (context.root / "init").is_symlink() and (context.root / "usr/lib/systemd/systemd").exists() ): (context.root / "init").symlink_to("/usr/lib/systemd/systemd") if not context.config.make_initrd: return if not (context.root / "etc/initrd-release").exists() and not (context.root / "etc/initrd-release").is_symlink(): (context.root / "etc/initrd-release").symlink_to("/etc/os-release") def configure_clock(context: Context) -> None: if context.config.overlay or context.config.output_format in (OutputFormat.sysext, OutputFormat.confext): return with umask(~0o644): (context.root / "usr/lib/clock-epoch").touch() def run_depmod(context: Context, *, cache: bool = False) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return outputs = ( "modules.dep", "modules.dep.bin", "modules.symbols", "modules.symbols.bin", ) for kver, _ in gen_kernel_images(context): modulesd = context.root / "usr/lib/modules" / kver if ( not cache and not context.config.kernel_modules_exclude and all((modulesd / o).exists() for o in outputs) ): mtime = (modulesd / "modules.dep").stat().st_mtime if all(m.stat().st_mtime <= mtime for m in modulesd.rglob("*.ko*")): continue if not cache: process_kernel_modules( context.root, kver, include=finalize_kernel_modules_include( context, include=context.config.kernel_modules_include, host=context.config.kernel_modules_include_host, ), exclude=context.config.kernel_modules_exclude, sandbox=context.sandbox, ) with complete_step(f"Running depmod for {kver}"): run( ["depmod", "--all", kver], sandbox=context.sandbox( binary=None, mounts=[Mount(context.root, "/buildroot")], extra=chroot_cmd(), ) ) def run_sysusers(context: Context) -> None: if context.config.overlay or context.config.output_format in (OutputFormat.sysext, OutputFormat.confext): return if not context.config.find_binary("systemd-sysusers"): logging.warning("systemd-sysusers is not installed, not generating system users") return with complete_step("Generating system users"): run(["systemd-sysusers", "--root=/buildroot"], sandbox=context.sandbox(binary="systemd-sysusers", mounts=[Mount(context.root, "/buildroot")])) def run_tmpfiles(context: Context) -> None: if context.config.overlay or context.config.output_format in (OutputFormat.sysext, OutputFormat.confext): return if not context.config.find_binary("systemd-tmpfiles"): logging.warning("systemd-tmpfiles is not installed, not generating volatile files") return with complete_step("Generating volatile files"): run( [ "systemd-tmpfiles", "--root=/buildroot", "--boot", "--create", "--remove", # Exclude APIVFS and temporary files directories. 
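                # For example, the generator below expands to --exclude-prefix=/tmp, --exclude-prefix=/var/tmp
                # and so on for each of the listed directories.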
*(f"--exclude-prefix={d}" for d in ("/tmp", "/var/tmp", "/run", "/proc", "/sys", "/dev")), ], env={"SYSTEMD_TMPFILES_FORCE_SUBVOL": "0"}, # systemd-tmpfiles can exit with DATAERR or CANTCREAT in some cases which are handled as success by the # systemd-tmpfiles service so we handle those as success as well. success_exit_status=(0, 65, 73), sandbox=context.sandbox( binary="systemd-tmpfiles", mounts=[ Mount(context.root, "/buildroot"), # systemd uses acl.h to parse ACLs in tmpfiles snippets which uses the host's passwd so we have to # mount the image's passwd over it to make ACL parsing work. *finalize_passwd_mounts(context.root) ], ), ) def run_preset(context: Context) -> None: if context.config.overlay or context.config.output_format in (OutputFormat.sysext, OutputFormat.confext): return if not context.config.find_binary("systemctl"): logging.warning("systemctl is not installed, not applying presets") return with complete_step("Applying presets…"): run(["systemctl", "--root=/buildroot", "preset-all"], sandbox=context.sandbox(binary="systemctl", mounts=[Mount(context.root, "/buildroot")])) run(["systemctl", "--root=/buildroot", "--global", "preset-all"], sandbox=context.sandbox(binary="systemctl", mounts=[Mount(context.root, "/buildroot")])) def run_hwdb(context: Context) -> None: if context.config.overlay or context.config.output_format in (OutputFormat.sysext, OutputFormat.confext): return if not context.config.find_binary("systemd-hwdb"): logging.warning("systemd-hwdb is not installed, not generating hwdb") return with complete_step("Generating hardware database"): run(["systemd-hwdb", "--root=/buildroot", "--usr", "--strict", "update"], sandbox=context.sandbox(binary="systemd-hwdb", mounts=[Mount(context.root, "/buildroot")])) # Remove any existing hwdb in /etc in favor of the one we just put in /usr. (context.root / "etc/udev/hwdb.bin").unlink(missing_ok=True) def run_firstboot(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return if not context.config.find_binary("systemd-firstboot"): logging.warning("systemd-firstboot is not installed, not applying first boot settings") return password, hashed = context.config.root_password or (None, False) if password and not hashed: password = run( ["openssl", "passwd", "-stdin", "-6"], sandbox=context.sandbox(binary="openssl"), input=password, stdout=subprocess.PIPE, ).stdout.strip() settings = ( ("--locale", "firstboot.locale", context.config.locale), ("--locale-messages", "firstboot.locale-messages", context.config.locale_messages), ("--keymap", "firstboot.keymap", context.config.keymap), ("--timezone", "firstboot.timezone", context.config.timezone), ("--hostname", None, context.config.hostname), ("--root-password-hashed", "passwd.hashed-password.root", password), ("--root-shell", "passwd.shell.root", context.config.root_shell), ) options = [] creds = [] for option, cred, value in settings: # Check for None as password might be the empty string if value is None: continue options += [option, value] if cred: creds += [(cred, value)] if not options and not creds: return with complete_step("Applying first boot settings"): run(["systemd-firstboot", "--root=/buildroot", "--force", *options], sandbox=context.sandbox(binary="systemd-firstboot", mounts=[Mount(context.root, "/buildroot")])) # Initrds generally don't ship with only /usr so there's not much point in putting the credentials in # /usr/lib/credstore. 
if context.config.output_format != OutputFormat.cpio or not context.config.make_initrd: with umask(~0o755): (context.root / "usr/lib/credstore").mkdir(exist_ok=True) for cred, value in creds: with umask(~0o600 if "password" in cred else ~0o644): (context.root / "usr/lib/credstore" / cred).write_text(value) def run_selinux_relabel(context: Context) -> None: if not (selinux := want_selinux_relabel(context.config, context.root)): return setfiles, policy, fc, binpolicy = selinux fc = Path("/buildroot") / fc.relative_to(context.root) binpolicy = Path("/buildroot") / binpolicy.relative_to(context.root) with complete_step(f"Relabeling files using {policy} policy"): run([setfiles, "-mFr", "/buildroot", "-c", binpolicy, fc, "/buildroot"], sandbox=context.sandbox(binary=setfiles, mounts=[Mount(context.root, "/buildroot")]), check=context.config.selinux_relabel == ConfigFeature.enabled) def need_build_overlay(config: Config) -> bool: return bool(config.build_scripts and (config.build_packages or config.prepare_scripts)) def save_cache(context: Context) -> None: if not context.config.incremental or context.config.base_trees or context.config.overlay: return final, build, manifest = cache_tree_paths(context.config) with complete_step("Installing cache copies"): rmtree(final, sandbox=context.sandbox) move_tree( context.root, final, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) if need_build_overlay(context.config) and (context.workspace / "build-overlay").exists(): rmtree(build, sandbox=context.sandbox) move_tree( context.workspace / "build-overlay", build, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) manifest.write_text( json.dumps( context.config.cache_manifest(), cls=JsonEncoder, indent=4, sort_keys=True, ) ) def have_cache(config: Config) -> bool: if not config.incremental or config.base_trees or config.overlay: return False final, build, manifest = cache_tree_paths(config) if not final.exists(): logging.info(f"{final} does not exist, not reusing cached images") return False if need_build_overlay(config) and not build.exists(): logging.info(f"{build} does not exist, not reusing cached images") return False if manifest.exists(): prev = json.loads(manifest.read_text()) new = json.dumps(config.cache_manifest(), cls=JsonEncoder, indent=4, sort_keys=True) if prev != json.loads(new): logging.info("Cache manifest mismatch, not reusing cached images") if ARG_DEBUG.get(): run(["diff", manifest, "-"], input=new, check=False, sandbox=config.sandbox(binary="diff", mounts=[Mount(manifest, manifest)])) return False else: logging.info(f"{manifest} does not exist, not reusing cached images") return False return True def reuse_cache(context: Context) -> bool: if not have_cache(context.config): return False final, build, _ = cache_tree_paths(context.config) if final.stat().st_uid != os.getuid(): return False with complete_step("Copying cached trees"): copy_tree( final, context.root, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) if need_build_overlay(context.config): (context.workspace / "build-overlay").symlink_to(build) return True def save_uki_components(context: Context) -> tuple[Optional[Path], Optional[str], Optional[Path], list[Path]]: if context.config.output_format not in (OutputFormat.uki, OutputFormat.esp): return None, None, None, [] try: kver, kimg = next(gen_kernel_images(context)) except StopIteration: die("A kernel must be installed in the image to build a UKI") kimg = shutil.copy2(context.root / kimg, context.workspace) 
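    # We copy the kernel image into the workspace since later steps (e.g. remove_packages()) may remove it
    # from the image before the UKI is assembled.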
if not context.config.architecture.to_efi(): die(f"Architecture {context.config.architecture} does not support UEFI") stub = systemd_stub_binary(context) if not stub.exists(): die(f"sd-stub not found at /{stub.relative_to(context.root)} in the image") stub = shutil.copy2(stub, context.workspace) microcode = build_microcode_initrd(context) return stub, kver, kimg, microcode def make_image( context: Context, msg: str, skip: Sequence[str] = [], split: bool = False, tabs: bool = False, root: Optional[Path] = None, definitions: Sequence[Path] = [], ) -> list[Partition]: cmdline: list[PathString] = [ "systemd-repart", "--empty=allow", "--size=auto", "--dry-run=no", "--json=pretty", "--no-pager", f"--offline={yes_no(context.config.repart_offline)}", "--seed", str(context.config.seed), context.staging / context.config.output_with_format, ] mounts = [Mount(context.staging, context.staging)] if root: cmdline += ["--root=/buildroot"] mounts += [Mount(root, "/buildroot")] if not context.config.architecture.is_native(): cmdline += ["--architecture", str(context.config.architecture)] if not (context.staging / context.config.output_with_format).exists(): cmdline += ["--empty=create"] if context.config.passphrase: cmdline += ["--key-file", context.config.passphrase] mounts += [Mount(context.config.passphrase, context.config.passphrase, ro=True)] if context.config.verity_key: cmdline += ["--private-key", context.config.verity_key] if context.config.verity_key_source.type != KeySource.Type.file: cmdline += ["--private-key-source", str(context.config.verity_key_source)] if context.config.verity_key.exists(): mounts += [Mount(context.config.verity_key, context.config.verity_key, ro=True)] if context.config.verity_certificate: cmdline += ["--certificate", context.config.verity_certificate] mounts += [Mount(context.config.verity_certificate, context.config.verity_certificate, ro=True)] if skip: cmdline += ["--defer-partitions", ",".join(skip)] if split: cmdline += ["--split=yes"] if context.config.sector_size: cmdline += ["--sector-size", str(context.config.sector_size)] if tabs and systemd_tool_version("systemd-repart", sandbox=context.sandbox) >= 256: cmdline += [ "--generate-fstab=/etc/fstab", "--generate-crypttab=/etc/crypttab", ] for d in definitions: cmdline += ["--definitions", d] mounts += [Mount(d, d, ro=True)] with complete_step(msg): output = json.loads( run( cmdline, stdout=subprocess.PIPE, env=context.config.environment, sandbox=context.sandbox( binary="systemd-repart", devices=( not context.config.repart_offline or context.config.verity_key_source.type != KeySource.Type.file ), vartmp=True, mounts=mounts, ), ).stdout ) logging.debug(json.dumps(output, indent=4)) partitions = [Partition.from_dict(d) for d in output] if split: for p in partitions: if p.split_path: maybe_compress(context, context.config.compress_output, p.split_path) return partitions def make_disk( context: Context, msg: str, skip: Sequence[str] = [], split: bool = False, tabs: bool = False, ) -> list[Partition]: if context.config.output_format != OutputFormat.disk: return [] if context.config.repart_dirs: definitions = context.config.repart_dirs else: defaults = context.workspace / "repart-definitions" if not defaults.exists(): defaults.mkdir() if arch := context.config.architecture.to_efi(): bootloader = context.root / f"efi/EFI/BOOT/BOOT{arch.upper()}.EFI" else: bootloader = None esp = ( context.config.bootable == ConfigFeature.enabled or (context.config.bootable == ConfigFeature.auto and bootloader and bootloader.exists()) ) 
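            # We want an ESP if bootable is explicitly enabled, or if it's set to auto and a bootloader binary
            # was installed at efi/EFI/BOOT/BOOT<ARCH>.EFI; a BIOS boot partition is wanted if bootable isn't
            # disabled and grub for BIOS is used.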
            bios = (context.config.bootable != ConfigFeature.disabled and want_grub_bios(context))

            if esp or bios:
                # Even if we're doing BIOS, let's still use the ESP to store the kernels, initrds and grub
                # modules. We can't use UKIs so we have to put each kernel and initrd on the ESP twice, so
                # let's make the ESP twice as big in that case.
                (defaults / "00-esp.conf").write_text(
                    textwrap.dedent(
                        f"""\
                        [Partition]
                        Type=esp
                        Format=vfat
                        CopyFiles=/boot:/
                        CopyFiles=/efi:/
                        SizeMinBytes={"1G" if bios else "512M"}
                        SizeMaxBytes={"1G" if bios else "512M"}
                        """
                    )
                )

            # If grub for BIOS is installed, let's add a BIOS boot partition onto which we can install grub.
            if bios:
                (defaults / "05-bios.conf").write_text(
                    textwrap.dedent(
                        f"""\
                        [Partition]
                        Type={Partition.GRUB_BOOT_PARTITION_UUID}
                        SizeMinBytes=1M
                        SizeMaxBytes=1M
                        """
                    )
                )

            (defaults / "10-root.conf").write_text(
                textwrap.dedent(
                    f"""\
                    [Partition]
                    Type=root
                    Format={context.config.distribution.filesystem()}
                    CopyFiles=/
                    Minimize=guess
                    """
                )
            )

        definitions = [defaults]

    return make_image(context, msg=msg, skip=skip, split=split, tabs=tabs, root=context.root, definitions=definitions)


def make_oci(context: Context, root_layer: Path, dst: Path) -> None:
    ca_store = dst / "blobs" / "sha256"
    with umask(~0o755):
        ca_store.mkdir(parents=True)

    layer_diff_digest = hash_file(root_layer)
    maybe_compress(
        context,
        context.config.compress_output,
        context.staging / "rootfs.layer",
        # Pass explicit destination to suppress adding an extension
        context.staging / "rootfs.layer",
    )

    layer_digest = hash_file(root_layer)
    root_layer.rename(ca_store / layer_digest)

    creation_time = (
        datetime.datetime.fromtimestamp(context.config.source_date_epoch, tz=datetime.timezone.utc)
        if context.config.source_date_epoch is not None
        else datetime.datetime.now(tz=datetime.timezone.utc)
    ).isoformat()

    oci_config = {
        "created": creation_time,
        "architecture": context.config.architecture.to_oci(),
        # Name of the operating system which the image is built to run on as defined by
        # https://github.com/opencontainers/image-spec/blob/v1.0.2/config.md#properties.
"os": "linux", "rootfs": { "type": "layers", "diff_ids": [f"sha256:{layer_diff_digest}"], }, "config": { "Cmd": [ "/sbin/init", *context.config.kernel_command_line, ], }, "history": [ { "created": creation_time, "comment": "Created by mkosi", }, ], } oci_config_blob = json.dumps(oci_config) oci_config_digest = hashlib.sha256(oci_config_blob.encode()).hexdigest() with umask(~0o644): (ca_store / oci_config_digest).write_text(oci_config_blob) layer_suffix = context.config.compress_output.oci_media_type_suffix() oci_manifest = { "schemaVersion": 2, "mediaType": "application/vnd.oci.image.manifest.v1+json", "config": { "mediaType": "application/vnd.oci.image.config.v1+json", "digest": f"sha256:{oci_config_digest}", "size": (ca_store / oci_config_digest).stat().st_size, }, "layers": [ { "mediaType": f"application/vnd.oci.image.layer.v1.tar{layer_suffix}", "digest": f"sha256:{layer_digest}", "size": (ca_store / layer_digest).stat().st_size, } ], "annotations": { "io.systemd.mkosi.version": __version__, **({ "org.opencontainers.image.version": context.config.image_version, } if context.config.image_version else {}), } } oci_manifest_blob = json.dumps(oci_manifest) oci_manifest_digest = hashlib.sha256(oci_manifest_blob.encode()).hexdigest() with umask(~0o644): (ca_store / oci_manifest_digest).write_text(oci_manifest_blob) (dst / "index.json").write_text( json.dumps( { "schemaVersion": 2, "mediaType": "application/vnd.oci.image.index.v1+json", "manifests": [ { "mediaType": "application/vnd.oci.image.manifest.v1+json", "digest": f"sha256:{oci_manifest_digest}", "size": (ca_store / oci_manifest_digest).stat().st_size, } ], } ) ) (dst / "oci-layout").write_text(json.dumps({"imageLayoutVersion": "1.0.0"})) def make_esp(context: Context, uki: Path) -> list[Partition]: if not (arch := context.config.architecture.to_efi()): die(f"Architecture {context.config.architecture} does not support UEFI") definitions = context.workspace / "esp-definitions" definitions.mkdir(exist_ok=True) # Use a minimum of 36MB or 260MB depending on sector size because otherwise the generated FAT filesystem will have # too few clusters to be considered a FAT32 filesystem by OVMF which will refuse to boot from it. # See https://superuser.com/questions/1702331/what-is-the-minimum-size-of-a-4k-native-partition-when-formatted-with-fat32/1717643#1717643 if context.config.sector_size == 512: m = 36 # TODO: Figure out minimum size for 2K sector size else: m = 260 # Always reserve 10MB for filesystem metadata. size = max(uki.stat().st_size, (m - 10) * 1024**2) + 10 * 1024**2 # TODO: Remove the extra 4096 for the max size once https://github.com/systemd/systemd/pull/29954 is in a stable # release. 
(definitions / "00-esp.conf").write_text( textwrap.dedent( f"""\ [Partition] Type=esp Format=vfat CopyFiles={uki}:/EFI/BOOT/BOOT{arch.upper()}.EFI SizeMinBytes={size} SizeMaxBytes={size + 4096} """ ) ) return make_image(context, msg="Generating ESP image", definitions=[definitions]) def make_extension_image(context: Context, output: Path) -> None: r = context.resources / f"repart/definitions/{context.config.output_format}.repart.d" cmdline: list[PathString] = [ "systemd-repart", "--root=/buildroot", "--json=pretty", "--dry-run=no", "--no-pager", f"--offline={yes_no(context.config.repart_offline)}", "--seed", str(context.config.seed) if context.config.seed else "random", "--empty=create", "--size=auto", "--definitions", r, output, ] mounts = [ Mount(output.parent, output.parent), Mount(context.root, "/buildroot", ro=True), Mount(r, r, ro=True), ] if not context.config.architecture.is_native(): cmdline += ["--architecture", str(context.config.architecture)] if context.config.passphrase: cmdline += ["--key-file", context.config.passphrase] mounts += [Mount(context.config.passphrase, context.config.passphrase, ro=True)] if context.config.verity_key: cmdline += ["--private-key", context.config.verity_key] if context.config.verity_key_source.type != KeySource.Type.file: cmdline += ["--private-key-source", str(context.config.verity_key_source)] if context.config.verity_key.exists(): mounts += [Mount(context.config.verity_key, context.config.verity_key, ro=True)] if context.config.verity_certificate: cmdline += ["--certificate", context.config.verity_certificate] mounts += [Mount(context.config.verity_certificate, context.config.verity_certificate, ro=True)] if context.config.sector_size: cmdline += ["--sector-size", str(context.config.sector_size)] if context.config.split_artifacts: cmdline += ["--split=yes"] with complete_step(f"Building {context.config.output_format} extension image"): j = json.loads( run( cmdline, stdout=subprocess.PIPE, env=context.config.environment, sandbox=context.sandbox( binary="systemd-repart", devices=( not context.config.repart_offline or context.config.verity_key_source.type != KeySource.Type.file ), vartmp=True, mounts=mounts, ), ).stdout ) logging.debug(json.dumps(j, indent=4)) if context.config.split_artifacts: for p in (Partition.from_dict(d) for d in j): if p.split_path: maybe_compress(context, context.config.compress_output, p.split_path) def finalize_staging(context: Context) -> None: rmtree(*(context.config.output_dir_or_cwd() / f.name for f in context.staging.iterdir())) for f in context.staging.iterdir(): # Make sure all build outputs that are not directories are owned by the user running mkosi. 
if not f.is_dir(): os.chown(f, INVOKING_USER.uid, INVOKING_USER.gid, follow_symlinks=False) if f.is_symlink(): (context.config.output_dir_or_cwd() / f.name).symlink_to(f.readlink()) os.chown(f, INVOKING_USER.uid, INVOKING_USER.gid, follow_symlinks=False) continue move_tree( f, context.config.output_dir_or_cwd(), use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) def clamp_mtime(path: Path, mtime: int) -> None: st = os.stat(path, follow_symlinks=False) orig = (st.st_atime_ns, st.st_mtime_ns) updated = (min(orig[0], mtime * 1_000_000_000), min(orig[1], mtime * 1_000_000_000)) if orig != updated: os.utime(path, ns=updated, follow_symlinks=False) def normalize_mtime(root: Path, mtime: Optional[int], directory: Path = Path("")) -> None: if mtime is None: return if not (root / directory).exists(): return with complete_step(f"Normalizing modification times of /{directory}"): clamp_mtime(root / directory, mtime) for p in (root / directory).rglob("*"): clamp_mtime(p, mtime) @contextlib.contextmanager def setup_workspace(args: Args, config: Config) -> Iterator[Path]: with contextlib.ExitStack() as stack: workspace = Path(tempfile.mkdtemp(dir=config.workspace_dir_or_default(), prefix="mkosi-workspace-")) # Discard setuid/setgid bits as these are inherited and can leak into the image. workspace.chmod(stat.S_IMODE(workspace.stat().st_mode) & ~(stat.S_ISGID|stat.S_ISUID)) stack.callback(lambda: rmtree(workspace, sandbox=config.sandbox)) (workspace / "tmp").mkdir(mode=0o1777) with scopedenv({"TMPDIR" : os.fspath(workspace / "tmp")}): try: yield Path(workspace) except BaseException: if args.debug_workspace: stack.pop_all() log_notice(f"Workspace: {workspace}") workspace.chmod(0o755) raise @contextlib.contextmanager def lock_repository_metadata(config: Config) -> Iterator[None]: subdir = config.distribution.package_manager(config).subdir(config) with contextlib.ExitStack() as stack: for d in ("cache", "lib"): if (src := config.package_cache_dir_or_default() / d / subdir).exists(): stack.enter_context(flock(src)) yield def copy_repository_metadata(context: Context) -> None: subdir = context.config.distribution.package_manager(context.config).subdir(context.config) # Don't copy anything if the repository metadata directories are already populated and we're not explicitly asked # to sync repository metadata. if ( context.config.cacheonly != Cacheonly.never and ( any((context.package_cache_dir / "cache" / subdir).glob("*")) or any((context.package_cache_dir / "lib" / subdir).glob("*")) ) ): logging.debug(f"Found repository metadata in {context.package_cache_dir}, not copying repository metadata") return with lock_repository_metadata(context.config): for d in ("cache", "lib"): src = context.config.package_cache_dir_or_default() / d / subdir if not src.exists(): logging.debug(f"{src} does not exist, not copying repository metadata from it") continue with tempfile.TemporaryDirectory() as tmp: os.chmod(tmp, 0o755) # cp doesn't support excluding directories but we can imitate it by bind mounting an empty directory # over the directories we want to exclude. 
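                # Inside the sandbox the bind-mounted empty directory hides the contents of the excluded
                # subdirectories, so cp copies them as empty directories.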
if d == "cache": exclude = [ Mount(tmp, p, ro=True) for p in context.config.distribution.package_manager(context.config).cache_subdirs(src) ] else: exclude = [ Mount(tmp, p, ro=True) for p in context.config.distribution.package_manager(context.config).state_subdirs(src) ] dst = context.package_cache_dir / d / subdir with umask(~0o755): dst.mkdir(parents=True, exist_ok=True) def sandbox( *, binary: Optional[PathString], vartmp: bool = False, mounts: Sequence[Mount] = (), extra: Sequence[PathString] = (), ) -> AbstractContextManager[list[PathString]]: return context.sandbox(binary=binary, vartmp=vartmp, mounts=[*mounts, *exclude], extra=extra) copy_tree( src, dst, preserve=False, sandbox=sandbox, ) @contextlib.contextmanager def createrepo(context: Context) -> Iterator[None]: st = context.repository.stat() try: yield finally: if context.repository.stat().st_mtime_ns != st.st_mtime_ns: with complete_step("Rebuilding local package repository"): context.config.distribution.createrepo(context) def build_image(context: Context) -> None: manifest = Manifest(context) if context.config.manifest_format else None install_package_manager_trees(context) with mount_base_trees(context): install_base_trees(context) cached = reuse_cache(context) wantrepo = ( ( not cached and ( context.config.packages or context.config.build_packages or context.config.prepare_scripts ) ) or context.config.volatile_packages or context.config.postinst_scripts or context.config.finalize_scripts ) copy_repository_metadata(context) context.config.distribution.setup(context) if wantrepo: with createrepo(context): install_package_directories(context, context.config.package_directories) install_package_directories(context, context.config.volatile_package_directories) install_package_directories(context, [context.package_dir]) if not cached: install_skeleton_trees(context) install_distribution(context) run_prepare_scripts(context, build=False) install_build_packages(context) run_prepare_scripts(context, build=True) fixup_vmlinuz_location(context) run_depmod(context, cache=True) save_cache(context) reuse_cache(context) check_root_populated(context) run_build_scripts(context) if context.config.output_format == OutputFormat.none: finalize_staging(context) rmtree(context.root) return if wantrepo: with createrepo(context): install_package_directories(context, [context.package_dir]) install_volatile_packages(context) install_build_dest(context) install_extra_trees(context) run_postinst_scripts(context) fixup_vmlinuz_location(context) configure_autologin(context) configure_os_release(context) configure_extension_release(context) configure_initrd(context) configure_ssh(context) configure_clock(context) install_systemd_boot(context) install_grub(context) install_shim(context) run_sysusers(context) run_tmpfiles(context) run_preset(context) run_depmod(context) run_firstboot(context) run_hwdb(context) # These might be removed by the next steps, so let's save them for later if needed. 
stub, kver, kimg, microcode = save_uki_components(context) remove_packages(context) if manifest: manifest.record_packages() clean_package_manager_metadata(context) remove_files(context) run_selinux_relabel(context) run_finalize_scripts(context) normalize_mtime(context.root, context.config.source_date_epoch) partitions = make_disk(context, skip=("esp", "xbootldr"), tabs=True, msg="Generating disk image") install_kernel(context, partitions) normalize_mtime(context.root, context.config.source_date_epoch, directory=Path("boot")) normalize_mtime(context.root, context.config.source_date_epoch, directory=Path("efi")) partitions = make_disk(context, msg="Formatting ESP/XBOOTLDR partitions") grub_bios_setup(context, partitions) if context.config.split_artifacts: make_disk(context, split=True, msg="Extracting partitions") copy_nspawn_settings(context) copy_uki(context) copy_vmlinuz(context) copy_initrd(context) if context.config.output_format == OutputFormat.tar: make_tar(context.root, context.staging / context.config.output_with_format, sandbox=context.sandbox) elif context.config.output_format == OutputFormat.oci: make_tar(context.root, context.staging / "rootfs.layer", sandbox=context.sandbox) make_oci( context, context.staging / "rootfs.layer", context.staging / context.config.output_with_format, ) elif context.config.output_format == OutputFormat.cpio: make_cpio(context.root, context.staging / context.config.output_with_format, sandbox=context.sandbox) elif context.config.output_format == OutputFormat.uki: assert stub and kver and kimg make_uki(context, stub, kver, kimg, microcode, context.staging / context.config.output_with_format) elif context.config.output_format == OutputFormat.esp: assert stub and kver and kimg make_uki(context, stub, kver, kimg, microcode, context.staging / context.config.output_split_uki) make_esp(context, context.staging / context.config.output_split_uki) elif context.config.output_format.is_extension_image(): make_extension_image(context, context.staging / context.config.output_with_format) elif context.config.output_format == OutputFormat.directory: context.root.rename(context.staging / context.config.output_with_format) if context.config.output_format not in (OutputFormat.uki, OutputFormat.esp): maybe_compress(context, context.config.compress_output, context.staging / context.config.output_with_format, context.staging / context.config.output_with_compression) calculate_sha256sum(context) calculate_signature(context) save_manifest(context, manifest) output_base = context.staging / context.config.output if not output_base.exists() or output_base.is_symlink(): output_base.unlink(missing_ok=True) output_base.symlink_to(context.config.output_with_compression) run_postoutput_scripts(context) finalize_staging(context) rmtree(context.root) print_output_size(context.config.output_dir_or_cwd() / context.config.output_with_compression) def setfacl(config: Config, root: Path, uid: int, allow: bool) -> None: run( [ "setfacl", "--physical", "--modify" if allow else "--remove", f"user:{uid}:rwx" if allow else f"user:{uid}", "-", ], # Supply files via stdin so we don't clutter --debug run output too much input="\n".join([str(root), *(os.fspath(p) for p in root.rglob("*") if p.is_dir())]), sandbox=config.sandbox(binary="setfacl", mounts=[Mount(root, root)]), ) @contextlib.contextmanager def acl_maybe_toggle(config: Config, root: Path, uid: int, *, always: bool) -> Iterator[None]: if not config.acl: yield return # getfacl complains about absolute paths so make sure we pass a relative 
one. if root.exists(): sandbox = config.sandbox(binary="getfacl", mounts=[Mount(root, root)], options=["--chdir", root]) has_acl = f"user:{uid}:rwx" in run(["getfacl", "-n", "."], sandbox=sandbox, stdout=subprocess.PIPE).stdout if not has_acl and not always: yield return else: has_acl = False try: if has_acl: with complete_step(f"Removing ACLs from {root}"): setfacl(config, root, uid, allow=False) yield finally: if has_acl or always: with complete_step(f"Adding ACLs to {root}"): setfacl(config, root, uid, allow=True) @contextlib.contextmanager def acl_toggle_build(config: Config, uid: int) -> Iterator[None]: if not config.acl: yield return extras = [t.source for t in config.extra_trees] skeletons = [t.source for t in config.skeleton_trees] with contextlib.ExitStack() as stack: for p in (*config.base_trees, *extras, *skeletons): if p and p.is_dir(): stack.enter_context(acl_maybe_toggle(config, p, uid, always=False)) for p in (config.cache_dir, config.build_dir): if p: stack.enter_context(acl_maybe_toggle(config, p, uid, always=True)) if config.output_format == OutputFormat.directory: stack.enter_context(acl_maybe_toggle(config, config.output_dir_or_cwd() / config.output, uid, always=True)) yield @contextlib.contextmanager def acl_toggle_boot(config: Config, uid: int) -> Iterator[None]: if not config.acl or config.output_format != OutputFormat.directory: yield return with acl_maybe_toggle(config, config.output_dir_or_cwd() / config.output, uid, always=False): yield def run_shell(args: Args, config: Config) -> None: opname = "acquire shell in" if args.verb == Verb.shell else "boot" if config.output_format in (OutputFormat.tar, OutputFormat.cpio): die(f"Sorry, can't {opname} a {config.output_format} archive.") if config.output_format.use_outer_compression() and config.compress_output: die(f"Sorry, can't {opname} a compressed image.") cmdline: list[PathString] = ["systemd-nspawn", "--quiet", "--link-journal=no"] if config.runtime_network == Network.user: cmdline += ["--resolv-conf=auto"] elif config.runtime_network == Network.interface: if os.getuid() != 0: die("RuntimeNetwork=interface requires root privileges") cmdline += ["--private-network", "--network-veth"] elif config.runtime_network == Network.none: cmdline += ["--private-network"] # If we copied in a .nspawn file, make sure it's actually honoured if config.nspawn_settings: cmdline += ["--settings=trusted"] if args.verb == Verb.boot: cmdline += ["--boot"] else: cmdline += [ f"--rlimit=RLIMIT_CORE={format_rlimit(resource.RLIMIT_CORE)}", "--console=autopipe", ] # Underscores are not allowed in machine names so replace them with hyphens. name = config.machine_or_name().replace("_", "-") cmdline += ["--machine", name] for k, v in config.credentials.items(): cmdline += [f"--set-credential={k}:{v}"] with contextlib.ExitStack() as stack: # Make sure the latest nspawn settings are always used. 
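        # systemd-nspawn looks for <name>.nspawn next to the image, so we copy the configured settings file
        # there and remove it again on exit if we created it.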
        if config.nspawn_settings:
            if not (config.output_dir_or_cwd() / f"{name}.nspawn").exists():
                stack.callback(lambda: (config.output_dir_or_cwd() / f"{name}.nspawn").unlink(missing_ok=True))
            shutil.copy2(config.nspawn_settings, config.output_dir_or_cwd() / f"{name}.nspawn")

        if config.ephemeral:
            fname = stack.enter_context(copy_ephemeral(config, config.output_dir_or_cwd() / config.output))
        else:
            fname = stack.enter_context(flock_or_die(config.output_dir_or_cwd() / config.output))

        if config.output_format == OutputFormat.disk and args.verb == Verb.boot:
            run(
                [
                    "systemd-repart",
                    "--image", fname,
                    *([f"--size={config.runtime_size}"] if config.runtime_size else []),
                    "--no-pager",
                    "--dry-run=no",
                    "--offline=no",
                    "--pretty=no",
                    fname,
                ],
                stdin=sys.stdin,
                env=config.environment,
                sandbox=config.sandbox(
                    binary="systemd-repart",
                    network=True,
                    devices=True,
                    vartmp=True,
                    mounts=[Mount(fname, fname)],
                ),
            )

        if config.output_format == OutputFormat.directory:
            cmdline += ["--directory", fname]

            owner = os.stat(fname).st_uid
            if owner != 0:
                cmdline += [f"--private-users={str(owner)}"]
        else:
            cmdline += ["--image", fname]

        if config.runtime_build_sources:
            with finalize_source_mounts(config, ephemeral=False) as mounts:
                for mount in mounts:
                    uidmap = "rootidmap" if Path(mount.src).stat().st_uid == INVOKING_USER.uid else "noidmap"
                    cmdline += ["--bind", f"{mount.src}:{mount.dst}:norbind,{uidmap}"]

            if config.build_dir:
                cmdline += ["--bind", f"{config.build_dir}:/work/build:norbind,noidmap"]

        for tree in config.runtime_trees:
            target = Path("/root/src") / (tree.target or "")

            # We add norbind because RuntimeTrees= is often used to mount the source directory into the
            # container, and the output directory we're running from is usually a subdirectory of that source
            # directory. A recursive bind mount would then mount the container root directory as a
            # subdirectory of itself, which tends to lead to all kinds of weird issues. With norbind no
            # recursive mount is done, so the container root directory mounts are skipped.
            uidmap = "rootidmap" if tree.source.stat().st_uid == INVOKING_USER.uid else "noidmap"
            cmdline += ["--bind", f"{tree.source}:{target}:norbind,{uidmap}"]

        if config.runtime_scratch == ConfigFeature.enabled or (
            config.runtime_scratch == ConfigFeature.auto and config.output_format == OutputFormat.disk
        ):
            scratch = stack.enter_context(tempfile.TemporaryDirectory(dir="/var/tmp"))
            os.chmod(scratch, 0o1777)
            cmdline += ["--bind", f"{scratch}:/var/tmp"]

        if args.verb == Verb.boot and config.forward_journal:
            with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
                addr = Path(os.getenv("TMPDIR", "/tmp")) / f"mkosi-journal-remote-unix-{uuid.uuid4().hex[:16]}"
                sock.bind(os.fspath(addr))
                sock.listen()
                if config.output_format == OutputFormat.directory and (stat := os.stat(fname)).st_uid != 0:
                    os.chown(addr, stat.st_uid, stat.st_gid)
                stack.enter_context(start_journal_remote(config, sock.fileno()))
            cmdline += [
                "--bind", f"{addr}:/run/host/journal/socket",
                "--set-credential=journal.forward_to_socket:/run/host/journal/socket",
            ]

        for p in config.unit_properties:
            cmdline += ["--property", p]

        if args.verb == Verb.boot:
            # Add nspawn options first since systemd-nspawn ignores all options after the first argument.
            argv = args.cmdline

            # When invoked by the kernel, all unknown arguments are passed as environment variables to pid1.
            # Let's mimic the same behavior when we invoke nspawn as a container.
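            # For example, foo-bar=baz on the kernel command line becomes the environment variable foo_bar=baz
            # for pid1, while arguments with a "." in their name (like systemd.unit=...) are passed through as
            # arguments below.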
for arg in itertools.chain(config.kernel_command_line, config.kernel_command_line_extra): name, sep, value = arg.partition("=") # If there's a '.' in the argument name, it's not considered an environment variable by the kernel. if sep and "." not in name: cmdline += ["--setenv", f"{name.replace('-', '_')}={value}"] else: # kernel cmdline config of the form systemd.xxx= get interpreted by systemd when running in nspawn # as well. argv += [arg] cmdline += argv elif args.cmdline: cmdline += ["--"] cmdline += args.cmdline run( cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.environment, log=False, sandbox=config.sandbox(binary="systemd-nspawn", devices=True, network=True, relaxed=True), ) def run_systemd_tool(tool: str, args: Args, config: Config) -> None: if config.output_format not in (OutputFormat.disk, OutputFormat.directory) and not config.forward_journal: die(f"{config.output_format} images cannot be inspected with {tool}") if ( args.verb in (Verb.journalctl, Verb.coredumpctl) and config.output_format == OutputFormat.disk and not config.forward_journal and os.getuid() != 0 ): die(f"Must be root to run the {args.verb} command") if (tool_path := config.find_binary(tool)) is None: die(f"Failed to find {tool}") if config.ephemeral and not config.forward_journal: die(f"Images booted in ephemeral mode cannot be inspected with {tool}") output = config.output_dir_or_cwd() / config.output if config.forward_journal and not config.forward_journal.exists(): die(f"Journal directory/file configured with ForwardJournal= does not exist, cannot inspect with {tool}") elif not output.exists(): die(f"Output {config.output_dir_or_cwd() / config.output} does not exist, cannot inspect with {tool}") cmd: list[PathString] = [tool_path] if config.forward_journal: cmd += ["--directory" if config.forward_journal.is_dir() else "--file", config.forward_journal] else: cmd += ["--root" if output.is_dir() else "--image", output] run( [*cmd, *args.cmdline], stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.environment, log=False, preexec_fn=become_root if not config.forward_journal else None, sandbox=config.sandbox( binary=tool_path, network=True, devices=config.output_format == OutputFormat.disk, relaxed=True, ), ) def run_journalctl(args: Args, config: Config) -> None: run_systemd_tool("journalctl", args, config) def run_coredumpctl(args: Args, config: Config) -> None: run_systemd_tool("coredumpctl", args, config) def run_serve(args: Args, config: Config) -> None: """Serve the output directory via a tiny HTTP server""" run( [python_binary(config, binary=None), "-m", "http.server", "8081"], stdin=sys.stdin, stdout=sys.stdout, sandbox=config.sandbox( binary=python_binary(config, binary=None), network=True, relaxed=True, options=["--chdir", config.output_dir_or_cwd()], ), ) def generate_key_cert_pair(args: Args) -> None: """Generate a private key and accompanying X509 certificate using openssl""" keylength = 2048 expiration_date = datetime.date.today() + datetime.timedelta(int(args.genkey_valid_days)) cn = expand_specifier(args.genkey_common_name) for f in ("mkosi.key", "mkosi.crt"): if Path(f).exists() and not args.force: die(f"{f} already exists", hint=("To generate new keys, first remove mkosi.key and mkosi.crt")) log_step(f"Generating keys rsa:{keylength} for CN {cn!r}.") logging.info( textwrap.dedent( f""" The keys will expire in {args.genkey_valid_days} days ({expiration_date:%A %d. %B %Y}). Remember to roll them over to new ones before then. 
""" ) ) run( [ "openssl", "req", "-new", "-x509", "-newkey", f"rsa:{keylength}", "-keyout", "mkosi.key", "-out", "mkosi.crt", "-days", str(args.genkey_valid_days), "-subj", f"/CN={cn}/", "-nodes" ], env=dict(OPENSSL_CONF="/dev/null"), ) def bump_image_version() -> None: """Write current image version plus one to mkosi.version""" version = Path("mkosi.version").read_text().strip() v = version.split(".") try: m = int(v[-1]) except ValueError: new_version = version + ".2" logging.info( "Last component of current version is not a decimal integer, " f"appending '.2', bumping '{version}' → '{new_version}'." ) else: new_version = ".".join(v[:-1] + [str(m + 1)]) logging.info(f"Increasing last component of version by one, bumping '{version}' → '{new_version}'.") Path("mkosi.version").write_text(f"{new_version}\n") os.chown("mkosi.version", INVOKING_USER.uid, INVOKING_USER.gid) def show_docs(args: Args, *, resources: Path) -> None: if args.doc_format == DocFormat.auto: formats = [DocFormat.man, DocFormat.pandoc, DocFormat.markdown, DocFormat.system] else: formats = [args.doc_format] while formats: form = formats.pop(0) try: if form == DocFormat.man: man = resources / "mkosi.1" if not man.exists(): raise FileNotFoundError() run(["man", "--local-file", man]) return elif form == DocFormat.pandoc: if not find_binary("pandoc"): logging.error("pandoc is not available") pandoc = run(["pandoc", "-t", "man", "-s", resources / "mkosi.md"], stdout=subprocess.PIPE) run(["man", "--local-file", "-"], input=pandoc.stdout) return elif form == DocFormat.markdown: page((resources / "mkosi.md").read_text(), args.pager) return elif form == DocFormat.system: run(["man", "mkosi"]) return except (FileNotFoundError, subprocess.CalledProcessError) as e: if not formats: if isinstance(e, FileNotFoundError): die("The mkosi package does not contain the man page.") raise e def expand_specifier(s: str) -> str: return s.replace("%u", INVOKING_USER.name()) @contextlib.contextmanager def prepend_to_environ_path(config: Config) -> Iterator[None]: if not config.extra_search_paths: yield return with tempfile.TemporaryDirectory(prefix="mkosi.path-") as d: for path in config.extra_search_paths: if not path.is_dir(): (Path(d) / path.name).symlink_to(path.absolute()) news = [os.fspath(path) for path in [Path(d), *config.extra_search_paths] if path.is_dir()] olds = os.getenv("PATH", "").split(":") os.environ["PATH"] = ":".join(news + olds) try: yield finally: os.environ["PATH"] = ":".join(olds) def finalize_default_tools(args: Args, config: Config, *, resources: Path) -> Config: if not config.tools_tree_distribution: die(f"{config.distribution} does not have a default tools tree distribution", hint="use ToolsTreeDistribution= to set one explicitly") cmdline = [ "--directory", "", "--distribution", str(config.tools_tree_distribution), *(["--release", config.tools_tree_release] if config.tools_tree_release else []), *(["--mirror", config.tools_tree_mirror] if config.tools_tree_mirror else []), "--repositories", ",".join(config.tools_tree_repositories), "--package-manager-tree", ",".join(str(t) for t in config.tools_tree_package_manager_trees), "--repository-key-check", str(config.repository_key_check), "--cache-only", str(config.cacheonly), *(["--output-dir", str(config.output_dir)] if config.output_dir else []), *(["--workspace-dir", str(config.workspace_dir)] if config.workspace_dir else []), *(["--cache-dir", str(config.cache_dir)] if config.cache_dir else []), *(["--package-cache-dir", str(config.package_cache_dir)] if 
config.package_cache_dir else []), "--incremental", str(config.incremental), "--acl", str(config.acl), *([f"--package={package}" for package in config.tools_tree_packages]), "--output", f"{config.tools_tree_distribution}-tools", *(["--source-date-epoch", str(config.source_date_epoch)] if config.source_date_epoch is not None else []), *([f"--environment={k}='{v}'" for k, v in config.environment.items()]), *([f"--extra-search-path={p}" for p in config.extra_search_paths]), *(["--proxy-url", config.proxy_url] if config.proxy_url else []), *([f"--proxy-exclude={host}" for host in config.proxy_exclude]), *(["--proxy-peer-certificate", str(p)] if (p := config.proxy_peer_certificate) else []), *(["--proxy-client-certificate", str(p)] if (p := config.proxy_client_certificate) else []), *(["--proxy-client-key", str(p)] if (p := config.proxy_client_key) else []), *(["-f"] * args.force), ] _, [tools] = parse_config( cmdline + ["--include=mkosi-tools", "build"], resources=resources, ) tools = dataclasses.replace(tools, image=f"{config.tools_tree_distribution}-tools") return tools def check_workspace_directory(config: Config) -> None: wd = config.workspace_dir_or_default() for tree in config.build_sources: if wd.is_relative_to(tree.source): die(f"The workspace directory ({wd}) cannot be a subdirectory of any source directory ({tree.source})", hint="Set BuildSources= to the empty string or use WorkspaceDirectory= to configure a different " "workspace directory") def run_clean_scripts(config: Config) -> None: if not config.clean_scripts: return for script in config.clean_scripts: if not os.access(script, os.X_OK): die(f"{script} is not executable") env = dict( DISTRIBUTION=str(config.distribution), RELEASE=config.release, ARCHITECTURE=str(config.architecture), DISTRIBUTION_ARCHITECTURE=config.distribution.architecture(config.architecture), SRCDIR="/work/src", OUTPUTDIR="/work/out", MKOSI_UID=str(INVOKING_USER.uid), MKOSI_GID=str(INVOKING_USER.gid), MKOSI_CONFIG="/work/config.json", ) if config.profile: env["PROFILE"] = config.profile with ( finalize_source_mounts(config, ephemeral=False) as sources, finalize_config_json(config) as json, ): for script in config.clean_scripts: with complete_step(f"Running clean script {script}…"): run( ["/work/clean"], env=env | config.environment, sandbox=config.sandbox( binary=None, vartmp=True, tools=False, mounts=[ *sources, Mount(script, "/work/clean", ro=True), Mount(json, "/work/config.json", ro=True), *([Mount(o, "/work/out")] if (o := config.output_dir_or_cwd()).exists() else []), ], options=["--dir", "/work/src", "--chdir", "/work/src", "--dir", "/work/out"] ), stdin=sys.stdin, ) def needs_clean(args: Args, config: Config, force: int = 1) -> bool: return ( args.verb == Verb.clean or args.force >= force or not (config.output_dir_or_cwd() / config.output_with_compression).exists() or # When the output is a directory, its name is the same as the symlink we create that points to the actual # output when not building a directory. So if the full output path exists, we have to check that it's not # a symlink as well. (config.output_dir_or_cwd() / config.output_with_compression).is_symlink() ) def run_clean(args: Args, config: Config, *, resources: Path) -> None: become_root() # We remove any cached images if either the user used --force twice, or he/she called "clean" with it # passed once. Let's also remove the downloaded package cache if the user specified one additional # "--force". 
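    # In other words: "mkosi clean" removes the outputs, "mkosi clean -f" additionally removes cached images
    # and "mkosi clean -ff" also removes the package cache; during a build, one extra "-f" is needed for each
    # level.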
if args.verb == Verb.clean: remove_build_cache = args.force > 0 remove_package_cache = args.force > 1 else: remove_build_cache = args.force > 1 remove_package_cache = args.force > 2 if config.output_format != OutputFormat.none or args.force: outputs = { config.output_dir_or_cwd() / output for output in config.outputs if (config.output_dir_or_cwd() / output).exists() or (config.output_dir_or_cwd() / output).is_symlink() } # Make sure we resolve the symlink we create in the output directory and remove its target as well, as it # might not be in the list of outputs anymore if the compression or output format was changed. outputs |= {o.resolve() for o in outputs} if outputs: with ( complete_step(f"Removing output files of {config.name()} image…"), flock_or_die(config.output_dir_or_cwd() / config.output) if (config.output_dir_or_cwd() / config.output).exists() else contextlib.nullcontext() ): rmtree(*outputs) if remove_build_cache: if config.cache_dir: initrd = ( cache_tree_paths(finalize_default_initrd(args, config, resources=resources)) if config.distribution != Distribution.custom else [] ) if any(p.exists() for p in itertools.chain(cache_tree_paths(config), initrd)): with complete_step(f"Removing cache entries of {config.name()} image…"): rmtree(*(p for p in itertools.chain(cache_tree_paths(config), initrd) if p.exists())) if config.build_dir and config.build_dir.exists() and any(config.build_dir.iterdir()): with complete_step(f"Clearing out build directory of {config.name()} image…"): rmtree(*config.build_dir.iterdir()) if ( remove_package_cache and any(config.package_cache_dir_or_default().glob("*")) ): subdir = config.distribution.package_manager(config).subdir(config) with ( complete_step(f"Clearing out package cache of {config.name()} image…"), lock_repository_metadata(config), ): rmtree( *( config.package_cache_dir_or_default() / d / subdir for d in ("cache", "lib") ), ) run_clean_scripts(config) @contextlib.contextmanager def rchown_package_manager_dirs(config: Config) -> Iterator[None]: try: yield finally: if INVOKING_USER.is_regular_user(): with complete_step("Fixing ownership of package manager cache directory"): subdir = config.distribution.package_manager(config).subdir(config) for d in ("cache", "lib"): INVOKING_USER.rchown(config.package_cache_dir_or_default() / d / subdir) def sync_repository_metadata(context: Context) -> None: if ( context.config.cacheonly != Cacheonly.never and (have_cache(context.config) or context.config.cacheonly != Cacheonly.auto) ): return with ( complete_step(f"Syncing package manager metadata for {context.config.name()} image"), lock_repository_metadata(context.config), ): context.config.distribution.package_manager(context.config).sync( context, force=context.args.force > 1 or context.config.cacheonly == Cacheonly.never, ) def run_sync(args: Args, config: Config, *, resources: Path) -> None: if os.getuid() == 0: os.setgroups(INVOKING_USER.extra_groups()) os.setresgid(INVOKING_USER.gid, INVOKING_USER.gid, INVOKING_USER.gid) os.setresuid(INVOKING_USER.uid, INVOKING_USER.uid, INVOKING_USER.uid) if not (p := config.package_cache_dir_or_default()).exists(): p.mkdir(parents=True, exist_ok=True) subdir = config.distribution.package_manager(config).subdir(config) for d in ("cache", "lib"): (config.package_cache_dir_or_default() / d / subdir).mkdir(parents=True, exist_ok=True) with ( prepend_to_environ_path(config), setup_workspace(args, config) as workspace, ): context = Context( args, config, workspace=workspace, resources=resources,
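# Note: the package cache directory defaults to a location keyed by distribution, release and
# architecture (see Config.package_cache_dir_or_default()), so it is shared between builds that
# target the same distribution.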
package_cache_dir=config.package_cache_dir_or_default(), ) install_package_manager_trees(context) context.config.distribution.setup(context) sync_repository_metadata(context) src = config.package_cache_dir_or_default() / "cache" / subdir for p in config.distribution.package_manager(config).cache_subdirs(src): p.mkdir(parents=True, exist_ok=True) run_sync_scripts(context) def run_build(args: Args, config: Config, *, resources: Path, package_dir: Optional[Path] = None) -> None: if (uid := os.getuid()) != 0: become_root() unshare(CLONE_NEWNS) if uid == 0: run(["mount", "--make-rslave", "/"]) for p in ( config.output_dir, config.cache_dir, config.package_cache_dir_or_default(), config.build_dir, config.workspace_dir, ): if not p or p.exists(): continue p.mkdir(parents=True, exist_ok=True) INVOKING_USER.chown(p) if config.build_dir: # Make sure the build directory is owned by root (in the user namespace) so that the correct uid-mapping is # applied if it is used in RuntimeTrees= os.chown(config.build_dir, os.getuid(), os.getgid()) # Discard setuid/setgid bits as these are inherited and can leak into the image. config.build_dir.chmod(stat.S_IMODE(config.build_dir.stat().st_mode) & ~(stat.S_ISGID|stat.S_ISUID)) # For extra safety when running as root, remount a bunch of stuff read-only. # Because some build systems use output directories in /usr, we only remount # /usr read-only if the output directory is not relative to it. if INVOKING_USER.invoked_as_root: remount = ["/etc", "/opt", "/boot", "/efi", "/media"] if not config.output_dir_or_cwd().is_relative_to("/usr"): remount += ["/usr"] for d in remount: if Path(d).exists(): options = "ro" if d in ("/usr", "/opt") else "ro,nosuid,nodev,noexec" run(["mount", "--rbind", d, d, "--options", options]) with ( complete_step(f"Building {config.name()} image"), prepend_to_environ_path(config), ): check_tools(config, Verb.build) with ( acl_toggle_build(config, INVOKING_USER.uid), rchown_package_manager_dirs(config), setup_workspace(args, config) as workspace, ): build_image(Context(args, config, workspace=workspace, resources=resources, package_dir=package_dir)) def ensure_root_is_mountpoint() -> None: """ bubblewrap uses pivot_root(), which requires / to be a mountpoint, but in the initramfs / is a plain directory of the rootfs pseudo-filesystem rather than a mountpoint, so pivot_root() fails there. To make sure mkosi works from within the initramfs, let's make / a mountpoint by recursively bind-mounting / (the directory) to another location and then switching root into the bind mount directory.
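The body below does exactly that: it uses findmnt to check whether / is the special 'rootfs'
filesystem (i.e. we are in the initramfs), unshares a new mount namespace, recursively
bind-mounts / onto /run/mkosi/mkosi-root, and finally moves that mount over / and chroots into it.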
""" fstype = run( ["findmnt", "--target", "/", "--output", "FSTYPE", "--noheadings"], stdout=subprocess.PIPE, ).stdout.strip() if fstype != "rootfs": return if os.getuid() != 0: die("mkosi can only be run as root from the initramfs") unshare(CLONE_NEWNS) run(["mount", "--make-rslave", "/"]) mountpoint = Path("/run/mkosi/mkosi-root") mountpoint.mkdir(parents=True, exist_ok=True) run(["mount", "--rbind", "/", mountpoint]) os.chdir(mountpoint) run(["mount", "--move", ".", "/"]) os.chroot(".") def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: images = list(images) if args.verb.needs_root() and os.getuid() != 0: die(f"Must be root to run the {args.verb} command") if args.verb == Verb.completion: return print_completion(args, resources=resources) if args.verb == Verb.documentation: return show_docs(args, resources=resources) if args.verb == Verb.genkey: return generate_key_cert_pair(args) if args.verb == Verb.bump: return bump_image_version() if args.verb == Verb.dependencies: _, [deps] = parse_config( ["--directory", "", "--repositories", "", "--include=mkosi-tools", "build"], resources=resources, ) for p in deps.packages: print(p) return if all(config == Config.default() for config in images): die("No configuration found", hint="Make sure you're running mkosi from a directory with configuration files") if args.verb == Verb.summary: if args.json: text = json.dumps( {"Images": [config.to_dict() for config in images]}, cls=JsonEncoder, indent=4, sort_keys=True ) else: text = "\n".join(summary(config) for config in images) page(text, args.pager) return ensure_root_is_mountpoint() if args.verb in (Verb.journalctl, Verb.coredumpctl, Verb.ssh): # We don't use a tools tree for verbs that don't need an image build. last = dataclasses.replace(images[-1], tools_tree=None) return { Verb.ssh: run_ssh, Verb.journalctl: run_journalctl, Verb.coredumpctl: run_coredumpctl, }[args.verb](args, last) assert args.verb.needs_build() or args.verb == Verb.clean for config in images: if args.verb == Verb.build and not args.force: check_outputs(config) last = images[-1] if last.tools_tree and last.tools_tree == Path("default"): tools = finalize_default_tools(args, last, resources=resources) # If we're doing an incremental build and the cache is not out of date, don't clean up the tools tree # so that we can reuse the previous one. if ( not tools.incremental or ((args.verb == Verb.build or args.force > 0) and not have_cache(tools)) or needs_clean(args, tools, force=2) ): fork_and_wait(run_clean, args, tools, resources=resources) else: tools = None # First, process all directory removals because otherwise if different images share directories a later # image build could end up deleting the output generated by an earlier image build. 
for config in images: if needs_clean(args, config): fork_and_wait(run_clean, args, config, resources=resources) if args.verb == Verb.clean: return for config in images: if (minversion := config.minimum_version) and minversion > __version__: die(f"mkosi {minversion} or newer is required to build this configuration (found {__version__})") if not config.repart_offline and os.getuid() != 0: die(f"Must be root to build {config.name()} image configured with RepartOffline=no") check_workspace_directory(config) if tools and not (tools.output_dir_or_cwd() / tools.output).exists(): if args.verb == Verb.build or args.force > 0: fork_and_wait(run_sync, args, tools, resources=resources) fork_and_wait(run_build, args, tools, resources=resources) else: die(f"Default tools tree requested for image '{last.name()}' but it has not been built yet", hint="Make sure to build the image first with 'mkosi build' or use '--force'") build = False with tempfile.TemporaryDirectory(dir=last.workspace_dir_or_default(), prefix="mkosi-packages-") as package_dir: for i, config in enumerate(images): images[i] = config = dataclasses.replace( config, tools_tree=( tools.output_dir_or_cwd() / tools.output if tools and config.tools_tree == Path("default") else config.tools_tree ) ) images[i] = config = run_configure_scripts(config) if args.verb != Verb.build and args.force == 0: continue if ( config.output_format != OutputFormat.none and (config.output_dir_or_cwd() / config.output_with_compression).exists() ): continue check_inputs(config) fork_and_wait(run_sync, args, config, resources=resources) fork_and_wait(run_build, args, config, resources=resources, package_dir=Path(package_dir)) build = True if build and args.auto_bump: bump_image_version() if args.verb == Verb.build: return # The images array has been modified so we need to reevaluate last again. last = images[-1] if not (last.output_dir_or_cwd() / last.output_with_compression).exists(): die(f"Image '{last.name()}' has not been built yet", hint="Make sure to build the image first with 'mkosi build' or use '--force'") with prepend_to_environ_path(last): check_tools(last, args.verb) with ( acl_toggle_boot(last, INVOKING_USER.uid) if args.verb in (Verb.shell, Verb.boot) else contextlib.nullcontext() ): run_vm = { Vmm.qemu: run_qemu, Vmm.vmspawn: run_vmspawn, }[last.vmm] { Verb.shell: run_shell, Verb.boot: run_shell, Verb.qemu: run_vm, Verb.serve: run_serve, Verb.burn: run_burn, }[args.verb](args, last) mkosi-24.3/mkosi/__main__.py000066400000000000000000000023331465176501400160120ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # PYTHON_ARGCOMPLETE_OK import faulthandler import signal import sys from types import FrameType from typing import Optional import mkosi.resources from mkosi import run_verb from mkosi.config import parse_config from mkosi.log import log_setup from mkosi.run import find_binary, run, uncaught_exception_handler from mkosi.user import INVOKING_USER from mkosi.util import resource_path def onsigterm(signal: int, frame: Optional[FrameType]) -> None: raise KeyboardInterrupt() @uncaught_exception_handler() def main() -> None: signal.signal(signal.SIGTERM, onsigterm) log_setup() # Ensure that the name and home of the user we are running as are resolved as early as possible. 
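# (Presumably because later stages may drop privileges or enter namespaces where these
# lookups would no longer resolve against the host's user database.)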
INVOKING_USER.init() with resource_path(mkosi.resources) as resources: args, images = parse_config(sys.argv[1:], resources=resources) if args.debug: faulthandler.enable() try: run_verb(args, images, resources=resources) finally: if sys.stderr.isatty() and find_binary("tput"): run(["tput", "cnorm"], check=False) run(["tput", "smam"], check=False) if __name__ == "__main__": main() mkosi-24.3/mkosi/archive.py000066400000000000000000000070601465176501400157150ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os from collections.abc import Iterable, Sequence from pathlib import Path from typing import Optional from mkosi.log import log_step from mkosi.run import run from mkosi.sandbox import Mount, SandboxProtocol, finalize_passwd_mounts, nosandbox from mkosi.types import PathString from mkosi.util import chdir, umask def tar_exclude_apivfs_tmp() -> list[str]: return [ "--exclude", "./dev/*", "--exclude", "./proc/*", "--exclude", "./sys/*", "--exclude", "./tmp/*", "--exclude", "./run/*", "--exclude", "./var/tmp/*", ] def make_tar(src: Path, dst: Path, *, sandbox: SandboxProtocol = nosandbox) -> None: log_step(f"Creating tar archive {dst}…") with dst.open("wb") as f: run( [ "tar", "--create", "--file", "-", "--directory", src, "--acls", "--selinux", # --xattrs implies --format=pax "--xattrs", # PAX format emits additional headers for atime, ctime and mtime # that would make the archive non-reproducible. "--pax-option=delete=atime,delete=ctime,delete=mtime", "--sparse", "--force-local", *tar_exclude_apivfs_tmp(), ".", ], stdout=f, # Make sure tar uses user/group information from the root directory instead of the host. sandbox=sandbox(binary="tar", mounts=[Mount(src, src, ro=True), *finalize_passwd_mounts(src)]), ) def can_extract_tar(src: Path) -> bool: return ".tar" in src.suffixes[-2:] def extract_tar( src: Path, dst: Path, *, log: bool = True, options: Sequence[PathString] = (), sandbox: SandboxProtocol = nosandbox, ) -> None: if log: log_step(f"Extracting tar archive {src}…") with umask(~0o755): dst.mkdir(exist_ok=True) run( [ "tar", "--extract", "--file", src, "--directory", dst, "--keep-directory-symlink", "--no-overwrite-dir", "--same-permissions", "--same-owner" if (dst / "etc/passwd").exists() else "--numeric-owner", "--same-order", "--acls", "--selinux", "--xattrs", "--force-local", *tar_exclude_apivfs_tmp(), *options, ], sandbox=sandbox( binary="tar", # Make sure tar uses user/group information from the root directory instead of the host. 
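# finalize_passwd_mounts(dst) additionally exposes the image's etc/passwd and etc/group inside
# the sandbox, which is what allows the --same-owner branch above to map names to the image's IDs.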
mounts=[Mount(src, src, ro=True), Mount(dst, dst), *finalize_passwd_mounts(dst)] ), ) def make_cpio( src: Path, dst: Path, *, files: Optional[Iterable[Path]] = None, sandbox: SandboxProtocol = nosandbox, ) -> None: if not files: with chdir(src): files = sorted(Path(".").rglob("*")) else: files = sorted(files) log_step(f"Creating cpio archive {dst}…") with dst.open("wb") as f: run( [ "cpio", "--create", "--reproducible", "--null", "--format=newc", "--quiet", "--directory", src, ], input="\0".join(os.fspath(f) for f in files), stdout=f, sandbox=sandbox(binary="cpio", mounts=[Mount(src, src, ro=True), *finalize_passwd_mounts(src)]), ) mkosi-24.3/mkosi/burn.py000066400000000000000000000021331465176501400152360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os import sys from mkosi.config import Args, Config, OutputFormat from mkosi.log import complete_step, die from mkosi.run import run def run_burn(args: Args, config: Config) -> None: if config.output_format not in (OutputFormat.disk, OutputFormat.esp): die(f"{config.output_format} images cannot be burned to disk") fname = config.output_dir_or_cwd() / config.output if len(args.cmdline) != 1: die("Expected device argument.") device = args.cmdline[0] cmd = [ "systemd-repart", "--no-pager", "--pretty=no", "--offline=yes", "--empty=force", "--dry-run=no", f"--copy-from={fname}", device, ] with complete_step("Burning 🔥🔥🔥 to medium…", "Burnt. 🔥🔥🔥"): run( cmd, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.environment, log=False, sandbox=config.sandbox(binary="systemd-repart", devices=True, network=True, relaxed=True), ) mkosi-24.3/mkosi/completion.py000066400000000000000000000201751465176501400164470ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import argparse import dataclasses import enum import io import shlex from collections.abc import Iterable, Mapping from pathlib import Path from textwrap import indent from typing import Optional, Union from mkosi import config from mkosi.log import die from mkosi.util import StrEnum class CompGen(StrEnum): default = enum.auto() files = enum.auto() dirs = enum.auto() @staticmethod def from_action(action: argparse.Action) -> "CompGen": if isinstance(action.default, Path): if action.default.is_dir(): return CompGen.dirs else: return CompGen.files # TODO: the type of action.type is Union[Callable[[str], Any], FileType] # the type of Path is type, but Path also works in this position, # because the constructor is a callable from str -> Path elif action.type is not None and (isinstance(action.type, type) and issubclass(action.type, Path)): # type: ignore if isinstance(action.default, Path) and action.default.is_dir(): # type: ignore return CompGen.dirs else: return CompGen.files return CompGen.default def to_bash(self) -> str: return f"_mkosi_compgen_{self}" def to_fish(self) -> str: if self == CompGen.files: return "--force-files" elif self == CompGen.dirs: return "--force-files -a '(__fish_complete_directories)'" else: return "-f" def to_zsh(self) -> str: if self == CompGen.files: return ":path:_files -/" elif self == CompGen.dirs: return ":directory:_files -f" else: return "" @dataclasses.dataclass(frozen=True) class CompletionItem: short: Optional[str] long: Optional[str] help: Optional[str] nargs: Union[str, int] choices: list[str] compgen: CompGen def collect_completion_arguments() -> list[CompletionItem]: parser = config.create_argument_parser() options = [ CompletionItem( short=next((s for s in action.option_strings if not 
s.startswith("--")), None), long=next((s for s in action.option_strings if s.startswith("--")), None), help=action.help, nargs=action.nargs or 0, choices=[str(c) for c in action.choices] if action.choices is not None else [], compgen=CompGen.from_action(action), ) for action in parser._actions if (action.option_strings and action.help != argparse.SUPPRESS and action.dest not in config.SETTINGS_LOOKUP_BY_DEST) ] options += [ CompletionItem( short=setting.short, long=setting.long, help=setting.help, nargs=setting.nargs or 1, choices=[str(c) for c in setting.choices] if setting.choices is not None else [], compgen=CompGen.default, ) for setting in config.SETTINGS ] return options def finalize_completion_bash(options: list[CompletionItem], resources: Path) -> str: def to_bash_array(name: str, entries: Iterable[str]) -> str: return f"{name.replace('-', '_')}=(" + " ".join(shlex.quote(str(e)) for e in entries) + ")" def to_bash_hasharray(name: str, entries: Mapping[str, Union[str, int]]) -> str: return ( f"{name.replace('-', '_')}=(" + " ".join(f"[{shlex.quote(str(k))}]={shlex.quote(str(v))}" for k, v in entries.items()) + ")" ) completion = resources / "completion.bash" options_by_key = {o.short: o for o in options if o.short} | {o.long: o for o in options if o.long} template = completion.read_text() with io.StringIO() as c: c.write(to_bash_array("_mkosi_options", options_by_key.keys())) c.write("\n\n") nargs = to_bash_hasharray("_mkosi_nargs", {optname: v.nargs for optname, v in options_by_key.items()}) c.write(nargs) c.write("\n\n") choices = to_bash_hasharray( "_mkosi_choices", {optname: " ".join(v.choices) for optname, v in options_by_key.items() if v.choices} ) c.write(choices) c.write("\n\n") compgen = to_bash_hasharray( "_mkosi_compgen", {optname: v.compgen.to_bash() for optname, v in options_by_key.items() if v.compgen != CompGen.default}, ) c.write(compgen) c.write("\n\n") c.write(to_bash_array("_mkosi_verbs", [str(v) for v in config.Verb])) definitions = c.getvalue() return template.replace("##VARIABLEDEFINITIONS##", indent(definitions, " " * 4)) def finalize_completion_fish(options: list[CompletionItem], resources: Path) -> str: with io.StringIO() as c: c.write("# SPDX-License-Identifier: LGPL-2.1-or-later\n\n") c.write("complete -c mkosi -f\n") c.write("complete -c mkosi -n '__fish_is_first_token' -a \"") c.write(" ".join(str(v) for v in config.Verb)) c.write("\"\n") for option in options: if not option.short and not option.long: continue c.write("complete -c mkosi ") if option.short: c.write(f"-s {option.short.lstrip('-')} ") if option.long: c.write(f"-l {option.long.lstrip('-')} ") if isinstance(option.nargs, int) and option.nargs > 0: c.write("-r ") if option.choices: c.write("-a \"") c.write(" ".join(option.choices)) c.write("\" ") if option.help is not None: help = option.help.replace("'", "\\'") c.write(f"-d \"{help}\" ") c.write(option.compgen.to_fish()) c.write("\n") return c.getvalue() def finalize_completion_zsh(options: list[CompletionItem], resources: Path) -> str: def to_zsh_array(name: str, entries: Iterable[str]) -> str: return f"declare -a {name.replace('-', '_')}=(" + " ".join(shlex.quote(str(e)) for e in entries) + ")" completion = resources / "completion.zsh" with io.StringIO() as c: c.write(completion.read_text()) c.write("\n") c.write(to_zsh_array("_mkosi_verbs", [str(v) for v in config.Verb])) c.write("\n\n") c.write("_arguments -s \\\n") c.write(" '(- *)'{-h,--help}'[Show this help]' \\\n") c.write(" '(- *)--version[Show package version]' \\\n") for option in 
options: if not option.short and not option.long: continue posix = option.help and "'" in option.help open_quote = "$'" if posix else "'" if option.short and option.long: c.write(f" '({option.short} {option.long})'{{{option.short},{option.long}}}{open_quote}") else: c.write(f" {open_quote}{option.short or option.long}") if option.help: help = option.help.replace("'", r"\'") c.write(f"[{help}]") if option.choices: # TODO: maybe use metavar here? At least for me it's not shown, though c.write(":arg:(") c.write(" ".join(option.choices)) c.write(")") c.write(option.compgen.to_zsh()) c.write("' \\\n") c.write(" '*::mkosi verb:_mkosi_verb'\n\n") return c.getvalue() def print_completion(args: config.Args, *, resources: Path) -> None: if not args.cmdline: die( "No shell to generate completion script for specified", hint="Please specify either one of: bash, fish, zsh" ) shell = args.cmdline[0] if shell == "bash": func = finalize_completion_bash elif shell == "fish": func = finalize_completion_fish elif shell == "zsh": func = finalize_completion_zsh else: die( f"{shell!r} is not supported for completion scripts.", hint="Please specify either one of: bash, fish, zsh" ) completion_args = collect_completion_arguments() print(func(completion_args, resources)) mkosi-24.3/mkosi/config.py000066400000000000000000004450621465176501400155510ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import argparse import base64 import contextlib import copy import dataclasses import enum import fnmatch import functools import graphlib import inspect import json import logging import math import operator import os.path import platform import re import shlex import shutil import string import subprocess import sys import tempfile import textwrap import typing import uuid from collections.abc import Collection, Iterable, Iterator, Sequence from contextlib import AbstractContextManager from pathlib import Path from typing import Any, Callable, Optional, TypeVar, Union, cast from mkosi.distributions import Distribution, detect_distribution from mkosi.log import ARG_DEBUG, ARG_DEBUG_SHELL, Style, die from mkosi.pager import page from mkosi.run import find_binary, run from mkosi.sandbox import Mount, SandboxProtocol, nosandbox, sandbox_cmd from mkosi.types import PathString, SupportsRead from mkosi.user import INVOKING_USER from mkosi.util import ( StrEnum, chdir, flatten, is_power_of_2, make_executable, startswith, ) from mkosi.versioncomp import GenericVersion __version__ = "24.3" ConfigParseCallback = Callable[[Optional[str], Optional[Any]], Any] ConfigMatchCallback = Callable[[str, Any], bool] ConfigDefaultCallback = Callable[[argparse.Namespace], Any] BUILTIN_CONFIGS = ("mkosi-tools", "mkosi-initrd") class Verb(StrEnum): build = enum.auto() clean = enum.auto() summary = enum.auto() shell = enum.auto() boot = enum.auto() qemu = enum.auto() ssh = enum.auto() serve = enum.auto() bump = enum.auto() help = enum.auto() genkey = enum.auto() documentation = enum.auto() journalctl = enum.auto() coredumpctl = enum.auto() burn = enum.auto() dependencies = enum.auto() completion = enum.auto() def supports_cmdline(self) -> bool: return self in ( Verb.build, Verb.shell, Verb.boot, Verb.qemu, Verb.ssh, Verb.journalctl, Verb.coredumpctl, Verb.burn, Verb.completion, ) def needs_build(self) -> bool: return self in ( Verb.build, Verb.shell, Verb.boot, Verb.qemu, Verb.serve, Verb.burn, ) def needs_root(self) -> bool: return self in (Verb.shell, Verb.boot, Verb.burn) def needs_config(self) -> bool: return self not in 
( Verb.help, Verb.genkey, Verb.documentation, Verb.dependencies, Verb.completion, ) class ConfigFeature(StrEnum): auto = enum.auto() enabled = enum.auto() disabled = enum.auto() def to_tristate(self) -> str: if self == ConfigFeature.enabled: return "yes" if self == ConfigFeature.disabled: return "no" return "" @dataclasses.dataclass(frozen=True) class ConfigTree: source: Path target: Optional[Path] def with_prefix(self, prefix: Path = Path("/")) -> tuple[Path, Path]: return (self.source, prefix / os.fspath(self.target).lstrip("/") if self.target else prefix) def __str__(self) -> str: return f"{self.source}:{self.target}" if self.target else f"{self.source}" @dataclasses.dataclass(frozen=True) class QemuDrive: id: str size: int directory: Optional[Path] options: Optional[str] file_id: str # We use negative numbers for specifying special constants # for VSock CIDs since they're not valid CIDs anyway. class QemuVsockCID(enum.IntEnum): auto = -1 hash = -2 @classmethod def format(cls, cid: int) -> str: if cid == QemuVsockCID.auto: return "auto" if cid == QemuVsockCID.hash: return "hash" return str(cid) class SecureBootSignTool(StrEnum): auto = enum.auto() sbsign = enum.auto() pesign = enum.auto() class OutputFormat(StrEnum): confext = enum.auto() cpio = enum.auto() directory = enum.auto() disk = enum.auto() esp = enum.auto() none = enum.auto() portable = enum.auto() sysext = enum.auto() tar = enum.auto() uki = enum.auto() oci = enum.auto() def extension(self) -> str: return { OutputFormat.confext: ".raw", OutputFormat.cpio: ".cpio", OutputFormat.disk: ".raw", OutputFormat.esp: ".raw", OutputFormat.portable: ".raw", OutputFormat.sysext: ".raw", OutputFormat.tar: ".tar", OutputFormat.uki: ".efi", }.get(self, "") def use_outer_compression(self) -> bool: return self in (OutputFormat.tar, OutputFormat.cpio, OutputFormat.disk) or self.is_extension_image() def is_extension_image(self) -> bool: return self in (OutputFormat.sysext, OutputFormat.confext, OutputFormat.portable) class ManifestFormat(StrEnum): json = enum.auto() # the standard manifest in json format changelog = enum.auto() # human-readable text file with package changelogs class Compression(StrEnum): none = enum.auto() zstd = enum.auto() zst = zstd xz = enum.auto() bz2 = enum.auto() gz = enum.auto() gzip = gz lz4 = enum.auto() lzma = enum.auto() def __bool__(self) -> bool: return self != Compression.none def extension(self) -> str: return { Compression.zstd: ".zst" }.get(self, f".{self}") def oci_media_type_suffix(self) -> str: suffix = { Compression.none: "", Compression.gz: "+gzip", Compression.zstd: "+zstd", }.get(self) if not suffix: die(f"Compression {self} not supported for OCI layers") return suffix class DocFormat(StrEnum): auto = enum.auto() markdown = enum.auto() man = enum.auto() pandoc = enum.auto() system = enum.auto() class Bootloader(StrEnum): none = enum.auto() uki = enum.auto() systemd_boot = enum.auto() grub = enum.auto() class BiosBootloader(StrEnum): none = enum.auto() grub = enum.auto() class ShimBootloader(StrEnum): none = enum.auto() signed = enum.auto() unsigned = enum.auto() class Cacheonly(StrEnum): always = enum.auto() auto = enum.auto() none = auto metadata = enum.auto() never = enum.auto() class QemuFirmware(StrEnum): auto = enum.auto() linux = enum.auto() uefi = enum.auto() uefi_secure_boot = enum.auto() bios = enum.auto() def is_uefi(self) -> bool: return self in (QemuFirmware.uefi, QemuFirmware.uefi_secure_boot) class Network(StrEnum): interface = enum.auto() user = enum.auto() none = enum.auto() class 
Vmm(StrEnum): qemu = enum.auto() vmspawn = enum.auto() class Architecture(StrEnum): alpha = enum.auto() arc = enum.auto() arm = enum.auto() arm64 = enum.auto() ia64 = enum.auto() loongarch64 = enum.auto() mips_le = enum.auto() mips64_le = enum.auto() parisc = enum.auto() ppc = enum.auto() ppc64 = enum.auto() ppc64_le = enum.auto() riscv32 = enum.auto() riscv64 = enum.auto() s390 = enum.auto() s390x = enum.auto() tilegx = enum.auto() x86 = enum.auto() x86_64 = enum.auto() @staticmethod def from_uname(s: str) -> "Architecture": a = { "aarch64" : Architecture.arm64, "aarch64_be" : Architecture.arm64, "armv8l" : Architecture.arm, "armv8b" : Architecture.arm, "armv7ml" : Architecture.arm, "armv7mb" : Architecture.arm, "armv7l" : Architecture.arm, "armv7b" : Architecture.arm, "armv6l" : Architecture.arm, "armv6b" : Architecture.arm, "armv5tl" : Architecture.arm, "armv5tel" : Architecture.arm, "armv5tejl" : Architecture.arm, "armv5tejb" : Architecture.arm, "armv5teb" : Architecture.arm, "armv5tb" : Architecture.arm, "armv4tl" : Architecture.arm, "armv4tb" : Architecture.arm, "armv4l" : Architecture.arm, "armv4b" : Architecture.arm, "alpha" : Architecture.alpha, "arc" : Architecture.arc, "arceb" : Architecture.arc, "x86_64" : Architecture.x86_64, "i686" : Architecture.x86, "i586" : Architecture.x86, "i486" : Architecture.x86, "i386" : Architecture.x86, "ia64" : Architecture.ia64, "parisc64" : Architecture.parisc, "parisc" : Architecture.parisc, "loongarch64" : Architecture.loongarch64, "mips64" : Architecture.mips64_le, "mips" : Architecture.mips_le, "ppc64le" : Architecture.ppc64_le, "ppc64" : Architecture.ppc64, "ppc" : Architecture.ppc, "riscv64" : Architecture.riscv64, "riscv32" : Architecture.riscv32, "riscv" : Architecture.riscv64, "s390x" : Architecture.s390x, "s390" : Architecture.s390, "tilegx" : Architecture.tilegx, }.get(s) if not a: die(f"Architecture {s} is not supported") return a def to_efi(self) -> Optional[str]: return { Architecture.x86_64 : "x64", Architecture.x86 : "ia32", Architecture.arm64 : "aa64", Architecture.arm : "arm", Architecture.riscv64 : "riscv64", Architecture.loongarch64 : "loongarch64", }.get(self) def to_qemu(self) -> str: a = { Architecture.alpha : "alpha", Architecture.arm : "arm", Architecture.arm64 : "aarch64", Architecture.loongarch64 : "loongarch64", Architecture.mips64_le : "mips", Architecture.mips_le : "mips", Architecture.parisc : "hppa", Architecture.ppc : "ppc", Architecture.ppc64 : "ppc64", Architecture.ppc64_le : "ppc64", Architecture.riscv32 : "riscv32", Architecture.riscv64 : "riscv64", Architecture.s390x : "s390x", Architecture.x86 : "i386", Architecture.x86_64 : "x86_64", }.get(self) if not a: die(f"Architecture {self} not supported by QEMU") return a def to_oci(self) -> str: a = { Architecture.arm : "arm", Architecture.arm64 : "arm64", Architecture.loongarch64 : "loong64", Architecture.mips64_le : "mips64le", Architecture.mips_le : "mipsle", Architecture.ppc : "ppc", Architecture.ppc64 : "ppc64", Architecture.ppc64_le : "ppc64le", Architecture.riscv32 : "riscv", Architecture.riscv64 : "riscv64", Architecture.s390x : "s390x", Architecture.x86 : "386", Architecture.x86_64 : "amd64", }.get(self) if not a: die(f"Architecture {self} not supported by OCI") return a def supports_smbios(self, firmware: QemuFirmware) -> bool: if self.is_x86_variant(): return True return self.is_arm_variant() and firmware.is_uefi() def supports_fw_cfg(self) -> bool: return self.is_x86_variant() or self.is_arm_variant() def supports_smm(self) -> bool: return 
self.is_x86_variant() def can_kvm(self) -> bool: return ( self == Architecture.native() or (Architecture.native() == Architecture.x86_64 and self == Architecture.x86) ) def default_qemu_machine(self) -> str: m = { Architecture.x86 : "q35", Architecture.x86_64 : "q35", Architecture.arm : "virt", Architecture.arm64 : "virt", Architecture.s390 : "s390-ccw-virtio", Architecture.s390x : "s390-ccw-virtio", Architecture.ppc : "pseries", Architecture.ppc64 : "pseries", Architecture.ppc64_le : "pseries", } if self not in m: die(f"No qemu machine defined for architecture {self}") return m[self] def default_qemu_nic_model(self) -> str: return { Architecture.s390 : "virtio", Architecture.s390x : "virtio", }.get(self, "virtio-net-pci") def is_native(self) -> bool: return self == self.native() def is_x86_variant(self) -> bool: return self in (Architecture.x86, Architecture.x86_64) def is_arm_variant(self) -> bool: return self in (Architecture.arm, Architecture.arm64) @classmethod def native(cls) -> "Architecture": return cls.from_uname(platform.machine()) def parse_boolean(s: str) -> bool: "Parse 1/true/yes/y/t/on/always as true and 0/false/no/n/f/off/never as false" s_l = s.lower() if s_l in {"1", "true", "yes", "y", "t", "on", "always"}: return True if s_l in {"0", "false", "no", "n", "f", "off", "never"}: return False die(f"Invalid boolean literal: {s!r}") def parse_path(value: str, *, required: bool = True, resolve: bool = True, expanduser: bool = True, expandvars: bool = True, secret: bool = False, absolute: bool = False, constants: Sequence[str] = ()) -> Path: if value in constants: return Path(value) if expandvars: value = os.path.expandvars(value) path = Path(value) if expanduser: if path.is_relative_to("~") and not INVOKING_USER.is_running_user(): path = INVOKING_USER.home() / path.relative_to("~") path = path.expanduser() if required and not path.exists(): die(f"{value} does not exist") if absolute and not path.is_absolute(): die(f"{value} must be an absolute path") if resolve: path = path.resolve() if secret and path.exists(): mode = path.stat().st_mode & 0o777 if mode & 0o007: die(textwrap.dedent(f"""\ Permissions '{mode:04o}' of '{path}' are too open. When creating secret files use an access mode that restricts access to the owner only.
""")) return path def config_parse_key(value: Optional[str], old: Optional[str]) -> Optional[Path]: if not value: return None return parse_path(value, secret=True) if Path(value).exists() else Path(value) def make_tree_parser(absolute: bool = True) -> Callable[[str], ConfigTree]: def parse_tree(value: str) -> ConfigTree: src, sep, tgt = value.partition(':') return ConfigTree( source=parse_path(src, required=False), target=parse_path( tgt, required=False, resolve=False, expanduser=False, absolute=absolute, ) if sep else None, ) return parse_tree def config_match_build_sources(match: str, value: list[ConfigTree]) -> bool: return Path(match.lstrip("/")) in [tree.target for tree in value if tree.target] def config_match_repositories(match: str, value: list[str]) -> bool: return match in value def config_parse_string(value: Optional[str], old: Optional[str]) -> Optional[str]: return value or None def config_make_string_matcher(allow_globs: bool = False) -> ConfigMatchCallback: def config_match_string(match: str, value: str) -> bool: if allow_globs: return fnmatch.fnmatchcase(value, match) else: return match == value return config_match_string def config_match_key_value(match: str, value: dict[str, str]) -> bool: k, sep, v = match.partition("=") if not sep: return k in value return value.get(k, None) == v def config_parse_boolean(value: Optional[str], old: Optional[bool]) -> Optional[bool]: if value is None: return False if not value: return None return parse_boolean(value) def parse_feature(value: str) -> ConfigFeature: try: return ConfigFeature(value) except ValueError: return ConfigFeature.enabled if parse_boolean(value) else ConfigFeature.disabled def config_parse_feature(value: Optional[str], old: Optional[ConfigFeature]) -> Optional[ConfigFeature]: if value is None: return ConfigFeature.auto if not value: return None return parse_feature(value) def config_match_feature(match: str, value: ConfigFeature) -> bool: return value == parse_feature(match) def config_parse_compression(value: Optional[str], old: Optional[Compression]) -> Optional[Compression]: if not value: return None try: return Compression[value] except KeyError: return Compression.zstd if parse_boolean(value) else Compression.none def config_parse_seed(value: Optional[str], old: Optional[str]) -> Optional[uuid.UUID]: if not value or value == "random": return None try: return uuid.UUID(value) except ValueError: die(f"{value} is not a valid UUID") def config_parse_source_date_epoch(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None try: timestamp = int(value) except ValueError: die(f"Timestamp {value!r} is not a valid integer") if timestamp < 0: die(f"Source date epoch timestamp cannot be negative (got {value})") return timestamp def config_parse_compress_level(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None try: level = int(value) except ValueError: die(f"Compression level {value!r} is not a valid integer") if level < 0: die(f"Compression level cannot be negative (got {value})") return level def config_default_compression(namespace: argparse.Namespace) -> Compression: if namespace.output_format in (OutputFormat.tar, OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp): if namespace.distribution == Distribution.ubuntu and namespace.release == "focal": return Compression.xz else: return Compression.zstd elif namespace.output_format == OutputFormat.oci: return Compression.gz else: return Compression.none def config_default_output(namespace: 
argparse.Namespace) -> str: output = namespace.image or namespace.image_id or "image" if namespace.image_version: output += f"_{namespace.image_version}" return output def config_default_distribution(namespace: argparse.Namespace) -> Distribution: detected = detect_distribution()[0] if not detected: logging.info( "Distribution of your host can't be detected or isn't a supported target. " "Defaulting to Distribution=custom." ) return Distribution.custom return detected def config_default_release(namespace: argparse.Namespace) -> str: # If the configured distribution matches the host distribution, use the same release as the host. hd, hr = detect_distribution() if namespace.distribution == hd and hr is not None: return hr return cast(str, namespace.distribution.default_release()) def config_default_tools_tree_distribution(namespace: argparse.Namespace) -> Distribution: detected = detect_distribution()[0] if not detected: return Distribution.custom return detected.default_tools_tree_distribution() def config_default_source_date_epoch(namespace: argparse.Namespace) -> Optional[int]: for env in namespace.environment: if s := startswith(env, "SOURCE_DATE_EPOCH="): break else: s = os.environ.get("SOURCE_DATE_EPOCH") return config_parse_source_date_epoch(s, None) def config_default_proxy_url(namespace: argparse.Namespace) -> Optional[str]: names = ("http_proxy", "https_proxy", "HTTP_PROXY", "HTTPS_PROXY") for env in namespace.environment: k, _, v = env.partition("=") if k in names: return cast(str, v) for k, v in os.environ.items(): if k in names: return cast(str, v) return None def config_default_dependencies(namespace: argparse.Namespace) -> Optional[list[str]]: if namespace.directory is None or not Path("mkosi.images").exists(): return [] if namespace.image: return [] dependencies = [] for p in sorted(Path("mkosi.images").iterdir()): if not p.is_dir() and not p.suffix == ".conf": continue dependencies += [p.name.removesuffix(".conf")] return dependencies def make_enum_parser(type: type[StrEnum]) -> Callable[[str], StrEnum]: def parse_enum(value: str) -> StrEnum: try: return type(value) except ValueError: die(f"'{value}' is not a valid {type.__name__}") return parse_enum def config_make_enum_parser(type: type[StrEnum]) -> ConfigParseCallback: def config_parse_enum(value: Optional[str], old: Optional[StrEnum]) -> Optional[StrEnum]: return make_enum_parser(type)(value) if value else None return config_parse_enum def config_make_enum_parser_with_boolean(type: type[StrEnum], *, yes: StrEnum, no: StrEnum) -> ConfigParseCallback: def config_parse_enum(value: Optional[str], old: Optional[StrEnum]) -> Optional[StrEnum]: if not value: return None if value in type.values(): return type(value) return yes if parse_boolean(value) else no return config_parse_enum def config_make_enum_matcher(type: type[StrEnum]) -> ConfigMatchCallback: def config_match_enum(match: str, value: StrEnum) -> bool: return make_enum_parser(type)(match) == value return config_match_enum def config_make_list_parser(delimiter: str, *, parse: Callable[[str], Any] = str, unescape: bool = False, reset: bool = True) -> ConfigParseCallback: def config_parse_list(value: Optional[str], old: Optional[list[Any]]) -> Optional[list[Any]]: new = old.copy() if old else [] if value is None: return [] # Empty strings reset the list. 
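# With unescape=True the value is shell-lexed, so quoting and escaping are honoured; otherwise
# it is split plainly on the delimiter and on newlines.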
if unescape: lex = shlex.shlex(value, posix=True) lex.whitespace_split = True lex.whitespace = f"\n{delimiter}" lex.commenters = "" values = list(lex) if reset and not values: return None else: values = value.replace(delimiter, "\n").split("\n") if reset and len(values) == 1 and values[0] == "": return None return new + [parse(v) for v in values if v] return config_parse_list def config_match_version(match: str, value: str) -> bool: version = GenericVersion(value) for sigil, opfunc in { "==": operator.eq, "!=": operator.ne, "<=": operator.le, ">=": operator.ge, ">": operator.gt, "<": operator.lt, }.items(): if (rhs := startswith(match, sigil)) is not None: op = opfunc comp_version = GenericVersion(rhs) break else: # default to equality if no operation is specified op = operator.eq comp_version = GenericVersion(match) # all constraints must be fulfilled if not op(version, comp_version): return False return True def config_make_dict_parser(delimiter: str, *, parse: Callable[[str], tuple[str, Any]], unescape: bool = False, allow_paths: bool = False, reset: bool = True) -> ConfigParseCallback: def config_parse_dict(value: Optional[str], old: Optional[dict[str, Any]]) -> Optional[dict[str, Any]]: new = old.copy() if old else {} if value is None: return {} if allow_paths and value and "=" not in value: if Path(value).is_dir(): for p in sorted(Path(value).iterdir()): if p.is_dir(): continue if os.access(p, os.X_OK): new[p.name] = run([p], stdout=subprocess.PIPE, env=os.environ).stdout else: new[p.name] = p.read_text() elif (p := Path(value)).exists(): if os.access(p, os.X_OK): new[p.name] = run([p], stdout=subprocess.PIPE, env=os.environ).stdout else: new[p.name] = p.read_text() else: die(f"{p} does not exist") return new # Empty strings reset the dict. if unescape: lex = shlex.shlex(value, posix=True) lex.whitespace_split = True lex.whitespace = f"\n{delimiter}" lex.commenters = "" values = list(lex) if reset and not values: return None else: values = value.replace(delimiter, "\n").split("\n") if reset and len(values) == 1 and values[0] == "": return None return new | dict(parse(v) for v in values if v) return config_parse_dict def parse_environment(value: str) -> tuple[str, str]: key, sep, value = value.partition("=") key, value = key.strip(), value.strip() value = value if sep else os.getenv(key, "") return (key, value) def parse_credential(value: str) -> tuple[str, str]: key, _, value = value.partition("=") key, value = key.strip(), value.strip() return (key, value) def make_path_parser(*, required: bool = True, resolve: bool = True, expanduser: bool = True, expandvars: bool = True, secret: bool = False, constants: Sequence[str] = ()) -> Callable[[str], Path]: return functools.partial( parse_path, required=required, resolve=resolve, expanduser=expanduser, expandvars=expandvars, secret=secret, constants=constants, ) def config_make_path_parser(*, required: bool = True, resolve: bool = True, expanduser: bool = True, expandvars: bool = True, secret: bool = False, constants: Sequence[str] = ()) -> ConfigParseCallback: def config_parse_path(value: Optional[str], old: Optional[Path]) -> Optional[Path]: if not value: return None return parse_path( value, required=required, resolve=resolve, expanduser=expanduser, expandvars=expandvars, secret=secret, constants=constants, ) return config_parse_path def is_valid_filename(s: str) -> bool: s = s.strip() return not (s == "." or s == ".." 
or "/" in s) def config_make_filename_parser(hint: str) -> ConfigParseCallback: def config_parse_filename(value: Optional[str], old: Optional[str]) -> Optional[str]: if not value: return None if not is_valid_filename(value): die(f"{value!r} is not a valid filename.", hint=hint) return value return config_parse_filename def match_path_exists(value: str) -> bool: if not value: return False return Path(value).exists() def config_parse_root_password(value: Optional[str], old: Optional[tuple[str, bool]]) -> Optional[tuple[str, bool]]: if not value: return None value = value.strip() hashed = value.startswith("hashed:") value = value.removeprefix("hashed:") return (value, hashed) def match_systemd_version(value: str) -> bool: if not value: return False version = run(["systemctl", "--version"], stdout=subprocess.PIPE).stdout.strip().split()[1] return config_match_version(value, version) def match_host_architecture(value: str) -> bool: return Architecture(value) == Architecture.native() def parse_bytes(value: str) -> int: if value.endswith("G"): factor = 1024**3 elif value.endswith("M"): factor = 1024**2 elif value.endswith("K"): factor = 1024 else: factor = 1 if factor > 1: value = value[:-1] result = math.ceil(float(value) * factor) if result <= 0: die("Size out of range") rem = result % 4096 if rem != 0: result += 4096 - rem return result def config_parse_bytes(value: Optional[str], old: Optional[int] = None) -> Optional[int]: if not value: return None return parse_bytes(value) def config_parse_number(value: Optional[str], old: Optional[int] = None) -> Optional[int]: if not value: return None try: return int(value) except ValueError: die(f"{value!r} is not a valid number") def config_parse_profile(value: Optional[str], old: Optional[int] = None) -> Optional[str]: if not value: return None if not is_valid_filename(value): die(f"{value!r} is not a valid profile", hint="Profile= or --profile= requires a name with no path components.") return value def parse_drive(value: str) -> QemuDrive: # A drive is specified as id:size[:directory[:options[:file_id]]]; split on every ":" so that # the file_id component and the "too many components" check below are actually reachable # (with the previous maxsplit=3 at most four components were ever produced). parts = value.split(":") if not parts or not parts[0]: die(f"No ID specified for drive '{value}'") if len(parts) < 2: die(f"Missing size in drive '{value}'") if len(parts) > 5: die(f"Too many components in drive '{value}'") id = parts[0] if not is_valid_filename(id): die(f"Unsupported path character in drive id '{id}'") size = parse_bytes(parts[1]) directory = parse_path(parts[2]) if len(parts) > 2 and parts[2] else None options = parts[3] if len(parts) > 3 and parts[3] else None file_id = parts[4] if len(parts) > 4 and parts[4] else id return QemuDrive(id=id, size=size, directory=directory, options=options, file_id=file_id) def config_parse_sector_size(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None try: size = int(value) except ValueError: die(f"'{value}' is not a valid number") if size < 512 or size > 4096: die(f"Sector size not between 512 and 4096: {size}") if not is_power_of_2(size): die(f"Sector size not power of 2: {size}") return size def config_parse_vsock_cid(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None if value == "auto": return QemuVsockCID.auto if value == "hash": return QemuVsockCID.hash try: cid = int(value) except ValueError: die(f"VSock connection ID '{value}' is not a valid number or one of 'auto' or 'hash'") if cid not in range(3, 0xFFFFFFFF): die(f"{cid} is not in the valid VSock connection ID range [3, 0xFFFFFFFF)") return cid def config_parse_minimum_version(value: Optional[str], old:
Optional[GenericVersion]) -> Optional[GenericVersion]: if not value: return old new = GenericVersion(value) if not old: return new return max(old, new) @dataclasses.dataclass(frozen=True) class KeySource: class Type(StrEnum): file = enum.auto() engine = enum.auto() type: Type source: str = "" def __str__(self) -> str: return f"{self.type}:{self.source}" if self.source else str(self.type) def config_parse_key_source(value: Optional[str], old: Optional[KeySource]) -> Optional[KeySource]: if not value: return old typ, _, source = value.partition(":") try: type = KeySource.Type(typ) except ValueError: die(f"'{value}' is not a valid key source") return KeySource(type=type, source=source) @dataclasses.dataclass(frozen=True) class ConfigSetting: dest: str section: str parse: ConfigParseCallback = config_parse_string match: Optional[ConfigMatchCallback] = None name: str = "" default: Any = None default_factory: Optional[ConfigDefaultCallback] = None default_factory_depends: tuple[str, ...] = tuple() paths: tuple[str, ...] = () path_read_text: bool = False path_secret: bool = False specifier: str = "" universal: bool = False # settings for argparse short: Optional[str] = None long: str = "" choices: Optional[Any] = None metavar: Optional[str] = None nargs: Optional[str] = None const: Optional[Any] = None help: Optional[str] = None # backward compatibility compat_names: tuple[str, ...] = () def __post_init__(self) -> None: if not self.name: object.__setattr__(self, 'name', ''.join(x.capitalize() for x in self.dest.split('_') if x)) if not self.long: object.__setattr__(self, "long", f"--{self.dest.replace('_', '-')}") @dataclasses.dataclass(frozen=True) class Match: name: str match: Callable[[str], bool] @dataclasses.dataclass(frozen=True) class Specifier: char: str callback: Callable[[argparse.Namespace, Path], str] depends: tuple[str, ...] = tuple() class CustomHelpFormatter(argparse.HelpFormatter): def _format_action_invocation(self, action: argparse.Action) -> str: if not action.option_strings or action.nargs == 0: return super()._format_action_invocation(action) default = self._get_default_metavar_for_optional(action) args_string = self._format_args(action, default) return ", ".join(action.option_strings) + " " + args_string def _split_lines(self, text: str, width: int) -> list[str]: """Wraps text to width, each line separately. If the first line of text ends in a colon, we assume that this is a list of option descriptions, and subindent them. Otherwise, the text is wrapped without indentation. 
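For example, when the first line is "Output formats:" the continuation lines produced by
wrapping are subindented, so the listed options stay visually aligned.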
""" lines = text.splitlines() subindent = ' ' if lines[0].endswith(':') else '' return flatten(textwrap.wrap(line, width, break_long_words=False, break_on_hyphens=False, subsequent_indent=subindent) for line in lines) def parse_chdir(path: str) -> Optional[Path]: if not path: # The current directory should be ignored return None # Immediately change the current directory so that it's taken into # account when parsing the following options that take a relative path try: os.chdir(path) except (FileNotFoundError, NotADirectoryError): die(f"{path} is not a directory!") except OSError as e: die(f"Cannot change the directory to {path}: {e}") # Keep track of the current directory return Path.cwd() class IgnoreAction(argparse.Action): """Argparse action for deprecated options that can be ignored.""" def __init__( self, option_strings: Sequence[str], dest: str, nargs: Union[int, str, None] = None, default: Any = argparse.SUPPRESS, help: Optional[str] = argparse.SUPPRESS, ) -> None: super().__init__(option_strings, dest, nargs=nargs, default=default, help=help) def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], option_string: Optional[str] = None ) -> None: logging.warning(f"{option_string} is no longer supported") class PagerHelpAction(argparse._HelpAction): def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None] = None, option_string: Optional[str] = None ) -> None: page(parser.format_help(), namespace.pager) parser.exit() def dict_with_capitalised_keys_factory(pairs: Any) -> dict[str, Any]: def key_transformer(k: str) -> str: if (s := SETTINGS_LOOKUP_BY_DEST.get(k)) is not None: return s.name return "".join(p.capitalize() for p in k.split("_")) return {key_transformer(k): v for k, v in dict(pairs).items()} @dataclasses.dataclass(frozen=True) class Args: verb: Verb cmdline: list[str] force: int directory: Optional[Path] debug: bool debug_shell: bool debug_workspace: bool pager: bool genkey_valid_days: str genkey_common_name: str auto_bump: bool doc_format: DocFormat json: bool @classmethod def default(cls) -> "Args": """Alternative constructor to generate an all-default MkosiArgs. This prevents MkosiArgs being generated with defaults values implicitly. 
""" with tempfile.TemporaryDirectory() as tempdir: with chdir(tempdir): args, _ = parse_config([]) return args @classmethod def from_namespace(cls, ns: argparse.Namespace) -> "Args": return cls(**{ k: v for k, v in vars(ns).items() if k in inspect.signature(cls).parameters }) def to_dict(self) -> dict[str, Any]: return dataclasses.asdict(self, dict_factory=dict_with_capitalised_keys_factory) def to_json(self, *, indent: Optional[int] = 4, sort_keys: bool = True) -> str: """Dump MkosiArgs as JSON string.""" return json.dumps(self.to_dict(), cls=JsonEncoder, indent=indent, sort_keys=sort_keys) @classmethod def _load_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> dict[str, Any]: """Load JSON and transform it into a dictionary suitable compatible with instantiating a MkosiArgs object.""" if isinstance(s, str): j = json.loads(s) elif isinstance(s, dict): j = s elif hasattr(s, "read"): j = json.load(s) else: raise ValueError(f"{cls.__name__} can only be constructed from JSON from strings, dictionaries and files.") value_transformer = json_type_transformer(cls) def key_transformer(k: str) -> str: return "_".join(part.lower() for part in FALLBACK_NAME_TO_DEST_SPLITTER.split(k)) return {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()} @classmethod def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Args": """Instantiate a MkosiArgs object from a full JSON dump.""" j = cls._load_json(s) return cls(**j) @classmethod def from_partial_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Args": """Return a new MkosiArgs with defaults overwritten by the attributes from passed in JSON.""" j = cls._load_json(s) return dataclasses.replace(cls.default(), **j) PACKAGE_GLOBS = ( "*.rpm", "*.pkg.tar*", "*.deb", "*.ddeb", ) @dataclasses.dataclass(frozen=True) class Config: """Type-hinted storage for command line arguments. Only user configuration is stored here while dynamic state exists in Mkosicontext. If a field of the same name exists in both classes always access the value from context. 
""" profile: Optional[str] include: list[Path] initrd_include: list[Path] dependencies: list[str] minimum_version: Optional[GenericVersion] pass_environment: list[str] distribution: Distribution release: str architecture: Architecture mirror: Optional[str] local_mirror: Optional[str] repository_key_check: bool repositories: list[str] cacheonly: Cacheonly package_manager_trees: list[ConfigTree] output_format: OutputFormat manifest_format: list[ManifestFormat] output: str compress_output: Compression compress_level: int output_dir: Optional[Path] workspace_dir: Optional[Path] cache_dir: Optional[Path] package_cache_dir: Optional[Path] build_dir: Optional[Path] image_id: Optional[str] image_version: Optional[str] split_artifacts: bool repart_dirs: list[Path] sector_size: Optional[int] repart_offline: bool overlay: bool use_subvolumes: ConfigFeature seed: uuid.UUID packages: list[str] build_packages: list[str] volatile_packages: list[str] package_directories: list[Path] volatile_package_directories: list[Path] with_recommends: bool with_docs: bool base_trees: list[Path] skeleton_trees: list[ConfigTree] extra_trees: list[ConfigTree] remove_packages: list[str] remove_files: list[str] clean_package_metadata: ConfigFeature source_date_epoch: Optional[int] configure_scripts: list[Path] sync_scripts: list[Path] prepare_scripts: list[Path] build_scripts: list[Path] postinst_scripts: list[Path] finalize_scripts: list[Path] postoutput_scripts: list[Path] clean_scripts: list[Path] build_sources: list[ConfigTree] build_sources_ephemeral: bool environment: dict[str, str] environment_files: list[Path] with_tests: bool with_network: bool bootable: ConfigFeature bootloader: Bootloader bios_bootloader: BiosBootloader shim_bootloader: ShimBootloader unified_kernel_images: ConfigFeature unified_kernel_image_format: str initrds: list[Path] initrd_packages: list[str] initrd_volatile_packages: list[str] microcode_host: bool kernel_command_line: list[str] kernel_modules_include: list[str] kernel_modules_exclude: list[str] kernel_modules_include_host: bool kernel_modules_initrd: bool kernel_modules_initrd_include: list[str] kernel_modules_initrd_exclude: list[str] kernel_modules_initrd_include_host: bool locale: Optional[str] locale_messages: Optional[str] keymap: Optional[str] timezone: Optional[str] hostname: Optional[str] root_password: Optional[tuple[str, bool]] root_shell: Optional[str] autologin: bool make_initrd: bool ssh: bool selinux_relabel: ConfigFeature secure_boot: bool secure_boot_auto_enroll: bool secure_boot_key: Optional[Path] secure_boot_key_source: KeySource secure_boot_certificate: Optional[Path] secure_boot_sign_tool: SecureBootSignTool verity_key: Optional[Path] verity_key_source: KeySource verity_certificate: Optional[Path] sign_expected_pcr: ConfigFeature passphrase: Optional[Path] checksum: bool sign: bool key: Optional[str] proxy_url: Optional[str] proxy_exclude: list[str] proxy_peer_certificate: Optional[Path] proxy_client_certificate: Optional[Path] proxy_client_key: Optional[Path] incremental: bool nspawn_settings: Optional[Path] extra_search_paths: list[Path] ephemeral: bool credentials: dict[str, str] kernel_command_line_extra: list[str] acl: bool tools_tree: Optional[Path] tools_tree_distribution: Optional[Distribution] tools_tree_release: Optional[str] tools_tree_mirror: Optional[str] tools_tree_repositories: list[str] tools_tree_package_manager_trees: list[ConfigTree] tools_tree_packages: list[str] tools_tree_certificates: bool runtime_trees: list[ConfigTree] runtime_size: 
Optional[int] runtime_scratch: ConfigFeature runtime_network: Network runtime_build_sources: bool unit_properties: list[str] ssh_key: Optional[Path] ssh_certificate: Optional[Path] machine: Optional[str] forward_journal: Optional[Path] vmm: Vmm # QEMU-specific options qemu_gui: bool qemu_smp: int qemu_mem: int qemu_kvm: ConfigFeature qemu_vsock: ConfigFeature qemu_vsock_cid: int qemu_swtpm: ConfigFeature qemu_cdrom: bool qemu_firmware: QemuFirmware qemu_firmware_variables: Optional[Path] qemu_kernel: Optional[Path] qemu_drives: list[QemuDrive] qemu_args: list[str] image: Optional[str] def name(self) -> str: return self.image or self.image_id or "default" def machine_or_name(self) -> str: return self.machine or self.name() def output_dir_or_cwd(self) -> Path: return self.output_dir or Path.cwd() def workspace_dir_or_default(self) -> Path: if self.workspace_dir: return self.workspace_dir if (cache := INVOKING_USER.cache_dir()) and cache != Path("/var/cache/mkosi") and os.access(cache, os.W_OK): return cache return Path("/var/tmp") def package_cache_dir_or_default(self) -> Path: key = f"{self.distribution}~{self.release}~{self.architecture}" if self.mirror: key += f"-{self.mirror}" return self.package_cache_dir or (INVOKING_USER.cache_dir() / key) def tools(self) -> Path: return self.tools_tree or Path("/") @classmethod def default(cls) -> "Config": """Alternative constructor to generate an all-default Config. This prevents Config objects from being generated with default values implicitly. """ with chdir("/proc"): _, [config] = parse_config([]) return config @classmethod def from_namespace(cls, ns: argparse.Namespace) -> "Config": return cls(**{ k: v for k, v in vars(ns).items() if k in inspect.signature(cls).parameters }) @property def output_with_format(self) -> str: return self.output + self.output_format.extension() @property def output_with_compression(self) -> str: output = self.output_with_format if self.compress_output and self.output_format.use_outer_compression(): output += self.compress_output.extension() return output @property def output_split_uki(self) -> str: return f"{self.output}.efi" @property def output_split_kernel(self) -> str: return f"{self.output}.vmlinuz" @property def output_split_initrd(self) -> str: return f"{self.output}.initrd" @property def output_nspawn_settings(self) -> str: return f"{self.output}.nspawn" @property def output_checksum(self) -> str: return f"{self.output}.SHA256SUMS" @property def output_signature(self) -> str: return f"{self.output}.SHA256SUMS.gpg" @property def output_manifest(self) -> str: return f"{self.output}.manifest" @property def output_changelog(self) -> str: return f"{self.output}.changelog" @property def outputs(self) -> list[str]: return [ self.output, self.output_with_format, self.output_with_compression, self.output_split_uki, self.output_split_kernel, self.output_split_initrd, self.output_nspawn_settings, self.output_checksum, self.output_signature, self.output_manifest, self.output_changelog, ] def cache_manifest(self) -> dict[str, Any]: return { "distribution": self.distribution, "release": self.release, "mirror": self.mirror, "architecture": self.architecture, "package_manager": self.distribution.package_manager(self).executable(self), "packages": sorted(self.packages), "build_packages": sorted(self.build_packages), "package_directories": [ (p.name, p.stat().st_mtime_ns) for d in self.package_directories for p in sorted(flatten(d.glob(glob) for glob in PACKAGE_GLOBS)) ], "repositories": sorted(self.repositories), "overlay":
self.overlay, "prepare_scripts": sorted( base64.b64encode(script.read_bytes()).decode() for script in self.prepare_scripts ), # We don't use the full path here since tests will often use temporary directories for the output directory # which would trigger a rebuild every time. "tools_tree": self.tools_tree.name if self.tools_tree else None, "tools_tree_distribution": self.tools_tree_distribution, "tools_tree_release": self.tools_tree_release, "tools_tree_mirror": self.tools_tree_mirror, "tools_tree_packages": sorted(self.tools_tree_packages), } def to_dict(self) -> dict[str, Any]: return dataclasses.asdict(self, dict_factory=dict_with_capitalised_keys_factory) def to_json(self, *, indent: Optional[int] = 4, sort_keys: bool = True) -> str: """Dump MkosiConfig as JSON string.""" return json.dumps(self.to_dict(), cls=JsonEncoder, indent=indent, sort_keys=sort_keys) @classmethod def _load_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> dict[str, Any]: """Load JSON and transform it into a dictionary suitable compatible with instantiating a MkosiConfig object.""" if isinstance(s, str): j = json.loads(s) elif isinstance(s, dict): j = s elif hasattr(s, "read"): j = json.load(s) else: raise ValueError(f"{cls.__name__} can only be constructed from JSON from strings, dictionaries and files.") value_transformer = json_type_transformer(cls) def key_transformer(k: str) -> str: if (s := SETTINGS_LOOKUP_BY_NAME.get(k)) is not None: return s.dest return "_".join(part.lower() for part in FALLBACK_NAME_TO_DEST_SPLITTER.split(k)) return {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()} @classmethod def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Config": """Instantiate a MkosiConfig object from a full JSON dump.""" j = cls._load_json(s) return cls(**j) @classmethod def from_partial_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Config": """Return a new MkosiConfig with defaults overwritten by the attributes from passed in JSON.""" j = cls._load_json(s) return dataclasses.replace(cls.default(), **j) def find_binary(self, *names: PathString, tools: bool = True) -> Optional[Path]: return find_binary(*names, root=self.tools() if tools else Path("/"), extra=self.extra_search_paths) def sandbox( self, *, binary: Optional[PathString], network: bool = False, devices: bool = False, vartmp: bool = False, relaxed: bool = False, tools: bool = True, scripts: Optional[Path] = None, mounts: Sequence[Mount] = (), options: Sequence[PathString] = (), setup: Sequence[PathString] = (), extra: Sequence[PathString] = (), ) -> AbstractContextManager[list[PathString]]: mounts = [ *([Mount(p, "/proxy.cacert", ro=True)] if (p := self.proxy_peer_certificate) else []), *([Mount(p, "/proxy.clientcert", ro=True)] if (p := self.proxy_client_certificate) else []), *([Mount(p, "/proxy.clientkey", ro=True)] if (p := self.proxy_client_key) else []), *mounts, ] if ( binary and (path := self.find_binary(binary, tools=tools)) and any(path.is_relative_to(d) for d in self.extra_search_paths) ): tools = False mounts += [Mount(d, d, ro=True) for d in self.extra_search_paths if not relaxed] return sandbox_cmd( network=network, devices=devices, vartmp=vartmp, relaxed=relaxed, scripts=scripts, tools=self.tools() if tools else Path("/"), mounts=mounts, options=options, setup=setup, extra=extra, ) def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple[str, str, str]]: """ We have our 
own parser instead of using configparser as the latter does not support specifying the same setting multiple times in the same configuration file. """ section: Optional[str] = None setting: Optional[str] = None value: Optional[str] = None for line in textwrap.dedent(path.read_text()).splitlines(): # Systemd unit files allow both '#' and ';' to indicate comments so we do the same. for c in ("#", ";"): comment = line.find(c) if comment >= 0: line = line[:comment] if not line.strip(): continue # If we have a section, setting and value, any line that's indented is considered part of the # setting's value. if section and setting and value is not None and line[0].isspace(): value = f"{value}\n{line.strip()}" continue # So the line is not indented, that means we either found a new section or a new setting. Either way, # let's yield the previous setting and its value before parsing the new section/setting. if section and setting and value is not None: yield section, setting, value setting = value = None line = line.strip() if line[0] == '[': if line[-1] != ']': die(f"{line} is not a valid section") # Yield the section name with an empty key and value to indicate we've finished the current section. if section: yield section, "", "" section = line[1:-1].strip() if not section: die("Section name cannot be empty or whitespace") continue if not section: die(f"Setting {line} is located outside of section") if only_sections and section not in only_sections: continue setting, delimiter, value = line.partition("=") if not delimiter: die(f"Setting {setting} must be followed by '='") if not setting: die(f"Missing setting name before '=' in {line}") setting = setting.strip() value = value.strip() # Make sure we yield any final setting and its value. if section and setting and value is not None: yield section, setting, value if section: yield section, "", "" SETTINGS = ( ConfigSetting( dest="include", short="-I", section="Config", parse=config_make_list_parser( delimiter=",", reset=False, parse=make_path_parser(constants=BUILTIN_CONFIGS), ), help="Include configuration from the specified file or directory", ), ConfigSetting( dest="initrd_include", section="Config", parse=config_make_list_parser(delimiter=",", reset=False, parse=make_path_parser()), help="Include configuration from the specified file or directory when building the initrd", ), ConfigSetting( dest="profile", section="Config", specifier="p", help="Build the specified profile", parse=config_parse_profile, match=config_make_string_matcher(), universal=True, ), ConfigSetting( dest="dependencies", long="--dependency", section="Config", parse=config_make_list_parser(delimiter=","), default_factory=config_default_dependencies, help="Specify other images that this image depends on", ), ConfigSetting( dest="minimum_version", section="Config", parse=config_parse_minimum_version, help="Specify the minimum required mkosi version", ), ConfigSetting( dest="configure_scripts", long="--configure-script", metavar="PATH", section="Config", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.configure",), help="Configure script to run before doing anything", ), ConfigSetting( dest="pass_environment", metavar="NAME", section="Config", parse=config_make_list_parser(delimiter=" "), help="Environment variables to pass to subimages", ), ConfigSetting( dest="distribution", short="-d", section="Distribution", specifier="d", parse=config_make_enum_parser(Distribution), match=config_make_enum_matcher(Distribution), 
default_factory=config_default_distribution, choices=Distribution.choices(), help="Distribution to install", universal=True, ), ConfigSetting( dest="release", short="-r", section="Distribution", specifier="r", parse=config_parse_string, match=config_make_string_matcher(), default_factory=config_default_release, default_factory_depends=("distribution",), help="Distribution release to install", universal=True, ), ConfigSetting( dest="architecture", section="Distribution", specifier="a", parse=config_make_enum_parser(Architecture), match=config_make_enum_matcher(Architecture), default=Architecture.native(), choices=Architecture.choices(), help="Override the architecture of installation", universal=True, ), ConfigSetting( dest="mirror", short="-m", section="Distribution", help="Distribution mirror to use", universal=True, ), ConfigSetting( dest="local_mirror", section="Distribution", help="Use a single local, flat and plain mirror to build the image", universal=True, ), ConfigSetting( dest="repository_key_check", metavar="BOOL", nargs="?", section="Distribution", default=True, parse=config_parse_boolean, help="Controls signature and key checks on repositories", universal=True, ), ConfigSetting( dest="repositories", metavar="REPOS", section="Distribution", parse=config_make_list_parser(delimiter=","), match=config_match_repositories, help="Repositories to use", universal=True, ), ConfigSetting( dest="cacheonly", long="--cache-only", name="CacheOnly", section="Distribution", parse=config_make_enum_parser_with_boolean(Cacheonly, yes=Cacheonly.always, no=Cacheonly.auto), default=Cacheonly.auto, help="Only use the package cache when installing packages", choices=Cacheonly.choices(), universal=True, ), ConfigSetting( dest="package_manager_trees", long="--package-manager-tree", metavar="PATH", section="Distribution", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser()), default_factory=lambda ns: ns.skeleton_trees, default_factory_depends=("skeleton_trees",), help="Use a package manager tree to configure the package manager", paths=("mkosi.pkgmngr", "mkosi.pkgmngr.tar",), universal=True, ), ConfigSetting( dest="output_format", short="-t", long="--format", name="Format", section="Output", specifier="t", parse=config_make_enum_parser(OutputFormat), match=config_make_enum_matcher(OutputFormat), default=OutputFormat.disk, choices=OutputFormat.choices(), help="Output Format", ), ConfigSetting( dest="manifest_format", metavar="FORMAT", section="Output", parse=config_make_list_parser(delimiter=",", parse=make_enum_parser(ManifestFormat)), help="Manifest Format", ), ConfigSetting( dest="output", short="-o", metavar="NAME", section="Output", specifier="o", parse=config_make_filename_parser( "Output= or --output= requires a filename with no path components. " "Use OutputDirectory= or --output-dir= to configure the output directory." 
), default_factory=config_default_output, default_factory_depends=("image_id", "image_version"), help="Output name", ), ConfigSetting( dest="compress_output", metavar="ALG", nargs="?", section="Output", parse=config_parse_compression, default_factory=config_default_compression, default_factory_depends=("distribution", "release", "output_format"), help="Enable whole-output compression (with images or archives)", ), ConfigSetting( dest="compress_level", metavar="LEVEL", section="Output", parse=config_parse_compress_level, default=3, help="Set the compression level to use", ), ConfigSetting( dest="output_dir", short="-O", metavar="DIR", name="OutputDirectory", section="Output", specifier="O", parse=config_make_path_parser(required=False), paths=("mkosi.output",), help="Output directory", universal=True, ), ConfigSetting( dest="workspace_dir", metavar="DIR", name="WorkspaceDirectory", section="Output", parse=config_make_path_parser(required=False), help="Workspace directory", universal=True, ), ConfigSetting( dest="cache_dir", metavar="PATH", name="CacheDirectory", section="Output", parse=config_make_path_parser(required=False), paths=("mkosi.cache",), help="Incremental cache directory", universal=True, ), ConfigSetting( dest="package_cache_dir", metavar="PATH", name="PackageCacheDirectory", section="Output", parse=config_make_path_parser(required=False), help="Package cache directory", universal=True, ), ConfigSetting( dest="build_dir", metavar="PATH", name="BuildDirectory", section="Output", parse=config_make_path_parser(required=False), paths=("mkosi.builddir",), help="Path to use as persistent build directory", universal=True, ), ConfigSetting( dest="image_version", match=config_match_version, section="Output", specifier="v", help="Set version for image", paths=("mkosi.version",), path_read_text=True, universal=True, ), ConfigSetting( dest="image_id", match=config_make_string_matcher(allow_globs=True), section="Output", specifier="i", help="Set ID for image", universal=True, ), ConfigSetting( dest="split_artifacts", metavar="BOOL", nargs="?", section="Output", parse=config_parse_boolean, help="Generate split partitions", ), ConfigSetting( dest="repart_dirs", long="--repart-dir", metavar="PATH", name="RepartDirectories", section="Output", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.repart",), help="Directory containing systemd-repart partition definitions", ), ConfigSetting( dest="sector_size", section="Output", parse=config_parse_sector_size, help="Set the disk image sector size", universal=True, ), ConfigSetting( dest="repart_offline", section="Output", parse=config_parse_boolean, help="Build disk images without using loopback devices", default=True, universal=True, ), ConfigSetting( dest="overlay", metavar="BOOL", nargs="?", section="Output", parse=config_parse_boolean, help="Only output the additions on top of the given base trees", ), ConfigSetting( dest="use_subvolumes", metavar="FEATURE", nargs="?", section="Output", parse=config_parse_feature, help="Use btrfs subvolumes for faster directory operations where possible", universal=True, ), ConfigSetting( dest="seed", metavar="UUID", section="Output", parse=config_parse_seed, default=uuid.uuid4(), help="Set the seed for systemd-repart", ), ConfigSetting( dest="clean_scripts", long="--clean-script", metavar="PATH", section="Output", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.clean",), help="Clean script to run after cleanup", ), ConfigSetting( 
dest="packages", short="-p", long="--package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Add an additional package to the OS image", ), ConfigSetting( dest="build_packages", long="--build-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Additional packages needed for build scripts", ), ConfigSetting( dest="volatile_packages", long="--volatile-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Packages to install after executing build scripts", ), ConfigSetting( dest="package_directories", long="--package-directory", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.packages",), help="Specify a directory containing extra packages", universal=True, ), ConfigSetting( dest="volatile_package_directories", long="--volatile-package-directory", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), help="Specify a directory containing extra volatile packages", universal=True, ), ConfigSetting( dest="with_recommends", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Install recommended packages", ), ConfigSetting( dest="with_docs", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, default=True, help="Install documentation", ), ConfigSetting( dest="base_trees", long='--base-tree', metavar='PATH', section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser(required=False)), help='Use the given tree as base tree (e.g. lower sysext layer)', ), ConfigSetting( dest="skeleton_trees", long="--skeleton-tree", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser()), paths=("mkosi.skeleton", "mkosi.skeleton.tar"), help="Use a skeleton tree to bootstrap the image before installing anything", ), ConfigSetting( dest="extra_trees", long="--extra-tree", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser()), paths=("mkosi.extra", "mkosi.extra.tar"), help="Copy an extra tree on top of image", ), ConfigSetting( dest="remove_packages", long="--remove-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Remove package from the image OS image after installation", ), ConfigSetting( dest="remove_files", metavar="GLOB", section="Content", parse=config_make_list_parser(delimiter=","), help="Remove files from built image", ), ConfigSetting( dest="clean_package_metadata", metavar="FEATURE", section="Content", parse=config_parse_feature, help="Remove package manager database and other files", ), ConfigSetting( dest="source_date_epoch", metavar="TIMESTAMP", section="Content", parse=config_parse_source_date_epoch, default_factory=config_default_source_date_epoch, default_factory_depends=("environment",), help="Set the $SOURCE_DATE_EPOCH timestamp", universal=True, ), ConfigSetting( dest="sync_scripts", long="--sync-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.sync",), help="Sync script to run before starting the build", ), ConfigSetting( dest="prepare_scripts", long="--prepare-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.prepare", "mkosi.prepare.chroot"), help="Prepare 
script to run inside the image before it is cached", compat_names=("PrepareScript",), ), ConfigSetting( dest="build_scripts", long="--build-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.build", "mkosi.build.chroot"), help="Build script to run inside image", compat_names=("BuildScript",), ), ConfigSetting( dest="postinst_scripts", long="--postinst-script", metavar="PATH", name="PostInstallationScripts", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.postinst", "mkosi.postinst.chroot"), help="Postinstall script to run inside image", compat_names=("PostInstallationScript",), ), ConfigSetting( dest="finalize_scripts", long="--finalize-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.finalize", "mkosi.finalize.chroot"), help="Postinstall script to run outside image", compat_names=("FinalizeScript",), ), ConfigSetting( dest="postoutput_scripts", long="--postoutput-script", metavar="PATH", name="PostOutputScripts", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.postoutput",), help="Output postprocessing script to run outside image", ), ConfigSetting( dest="build_sources", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(absolute=False)), match=config_match_build_sources, default_factory=lambda ns: [ConfigTree(ns.directory, None)] if ns.directory else [], help="Path for sources to build", universal=True, ), ConfigSetting( dest="build_sources_ephemeral", metavar="BOOL", section="Content", parse=config_parse_boolean, help="Make build sources ephemeral when running scripts", universal=True, ), ConfigSetting( dest="environment", short="-E", metavar="NAME[=VALUE]", section="Content", parse=config_make_dict_parser(delimiter=" ", parse=parse_environment, unescape=True), match=config_match_key_value, help="Set an environment variable when running scripts", ), ConfigSetting( dest="environment_files", long="--env-file", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.env",), help="Environment files to set when running scripts", ), ConfigSetting( dest="with_tests", short="-T", long="--without-tests", nargs="?", const="no", section="Content", parse=config_parse_boolean, default=True, help="Do not run tests as part of build scripts, if supported", universal=True, ), ConfigSetting( dest="with_network", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Run build and postinst scripts with network access (instead of private network)", universal=True, ), ConfigSetting( dest="bootable", metavar="FEATURE", nargs="?", section="Content", parse=config_parse_feature, match=config_match_feature, help="Generate ESP partition with systemd-boot and UKIs for installed kernels", ), ConfigSetting( dest="bootloader", section="Content", parse=config_make_enum_parser(Bootloader), choices=Bootloader.choices(), default=Bootloader.systemd_boot, help="Specify which UEFI bootloader to use", ), ConfigSetting( dest="bios_bootloader", section="Content", parse=config_make_enum_parser(BiosBootloader), choices=BiosBootloader.choices(), default=BiosBootloader.none, help="Specify which BIOS bootloader to use", ), ConfigSetting( dest="shim_bootloader", section="Content",
parse=config_make_enum_parser(ShimBootloader), choices=ShimBootloader.choices(), default=ShimBootloader.none, help="Specify whether to use shim", ), ConfigSetting( dest="unified_kernel_images", metavar="FEATURE", section="Content", parse=config_parse_feature, help="Specify whether to use UKIs with grub/systemd-boot in UEFI mode", ), ConfigSetting( dest="unified_kernel_image_format", section="Content", parse=config_make_filename_parser( "UnifiedKernelImageFormat= or --unified-kernel-image-format= " "requires a filename with no path components." ), # The default value is set in `__init__.py` in `install_uki`. # `None` is used to determine if the roothash and boot count format # should be appended to the filename if they are found. #default= help="Specify the format used for the UKI filename", ), ConfigSetting( dest="initrds", long="--initrd", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser(required=False)), help="Add a user-provided initrd to image", ), ConfigSetting( dest="microcode_host", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, default=False, help="Only include the host CPU's microcode", ), ConfigSetting( dest="initrd_packages", long="--initrd-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Add additional packages to the default initrd", ), ConfigSetting( dest="initrd_volatile_packages", long="--initrd-volatile-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Packages to install in the initrd that are not cached", ), ConfigSetting( dest="kernel_command_line", metavar="OPTIONS", section="Content", parse=config_make_list_parser(delimiter=" "), help="Set the kernel command line (only bootable images)", ), ConfigSetting( dest="kernel_modules_include", metavar="REGEX", section="Content", parse=config_make_list_parser(delimiter=","), help="Include the specified kernel modules in the image", ), ConfigSetting( dest="kernel_modules_include_host", metavar="BOOL", section="Content", parse=config_parse_boolean, help="Include the currently loaded modules on the host in the image", ), ConfigSetting( dest="kernel_modules_exclude", metavar="REGEX", section="Content", parse=config_make_list_parser(delimiter=","), help="Exclude the specified kernel modules from the image", ), ConfigSetting( dest="kernel_modules_initrd", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, default=True, help="When building a bootable image, add an extra initrd containing the kernel modules", ), ConfigSetting( dest="kernel_modules_initrd_include", metavar="REGEX", section="Content", parse=config_make_list_parser(delimiter=","), help="When building a kernel modules initrd, include the specified kernel modules", ), ConfigSetting( dest="kernel_modules_initrd_include_host", metavar="BOOL", section="Content", parse=config_parse_boolean, help="When building a kernel modules initrd, include the currently loaded modules on the host in the image", ), ConfigSetting( dest="kernel_modules_initrd_exclude", metavar="REGEX", section="Content", parse=config_make_list_parser(delimiter=","), help="When building a kernel modules initrd, exclude the specified kernel modules", ), ConfigSetting( dest="locale", section="Content", parse=config_parse_string, help="Set the system locale", ), ConfigSetting( dest="locale_messages", metavar="LOCALE", section="Content", parse=config_parse_string, help="Set the messages locale", ), ConfigSetting(
dest="keymap", metavar="KEYMAP", section="Content", parse=config_parse_string, help="Set the system keymap", ), ConfigSetting( dest="timezone", metavar="TIMEZONE", section="Content", parse=config_parse_string, help="Set the system timezone", ), ConfigSetting( dest="hostname", metavar="HOSTNAME", section="Content", parse=config_parse_string, help="Set the system hostname", ), ConfigSetting( dest="root_password", metavar="PASSWORD", section="Content", parse=config_parse_root_password, paths=("mkosi.rootpw",), path_read_text=True, path_secret=True, help="Set the password for root", ), ConfigSetting( dest="root_shell", metavar="SHELL", section="Content", parse=config_parse_string, help="Set the shell for root", ), ConfigSetting( dest="autologin", short="-a", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Enable root autologin", ), ConfigSetting( dest="make_initrd", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Make sure the image can be used as an initramfs", ), ConfigSetting( dest="ssh", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Set up SSH access from the host to the final image via 'mkosi ssh'", ), ConfigSetting( dest="selinux_relabel", name="SELinuxRelabel", metavar="FEATURE", section="Content", parse=config_parse_feature, help="Specify whether to relabel all files with setfiles", ), ConfigSetting( dest="secure_boot", metavar="BOOL", nargs="?", section="Validation", parse=config_parse_boolean, help="Sign the resulting kernel/initrd image for UEFI SecureBoot", ), ConfigSetting( dest="secure_boot_auto_enroll", metavar="BOOL", section="Validation", parse=config_parse_boolean, default=True, help="Automatically enroll the secureboot signing key on first boot", ), ConfigSetting( dest="secure_boot_key", metavar="KEY", section="Validation", parse=config_parse_key, paths=("mkosi.key",), help="UEFI SecureBoot private key", ), ConfigSetting( dest="secure_boot_key_source", section="Validation", metavar="SOURCE[:ENGINE]", parse=config_parse_key_source, default=KeySource(type=KeySource.Type.file), help="The source to use to retrieve the secure boot signing key", ), ConfigSetting( dest="secure_boot_certificate", metavar="PATH", section="Validation", parse=config_make_path_parser(), paths=("mkosi.crt",), help="UEFI SecureBoot certificate in X509 format", ), ConfigSetting( dest="secure_boot_sign_tool", section="Validation", parse=config_make_enum_parser(SecureBootSignTool), default=SecureBootSignTool.auto, choices=SecureBootSignTool.choices(), help="Tool to use for signing PE binaries for secure boot", ), ConfigSetting( dest="verity_key", metavar="KEY", section="Validation", parse=config_parse_key, paths=("mkosi.key",), help="Private key for signing verity signature", universal=True, ), ConfigSetting( dest="verity_key_source", section="Validation", metavar="SOURCE[:ENGINE]", parse=config_parse_key_source, default=KeySource(type=KeySource.Type.file), help="The source to use to retrieve the verity signing key", universal=True, ), ConfigSetting( dest="verity_certificate", metavar="PATH", section="Validation", parse=config_make_path_parser(), paths=("mkosi.crt",), help="Certificate for signing verity signature in X509 format", universal=True, ), ConfigSetting( dest="sign_expected_pcr", metavar="FEATURE", section="Validation", parse=config_parse_feature, help="Measure the components of the unified kernel image (UKI) and embed the PCR signature into the UKI", ), ConfigSetting( dest="passphrase", 
metavar="PATH", section="Validation", parse=config_make_path_parser(required=False, secret=True), paths=("mkosi.passphrase",), help="Path to a file containing the passphrase to use when LUKS encryption is selected", ), ConfigSetting( dest="checksum", metavar="BOOL", nargs="?", section="Validation", parse=config_parse_boolean, help="Write SHA256SUMS file", ), ConfigSetting( dest="sign", metavar="BOOL", nargs="?", section="Validation", parse=config_parse_boolean, help="Write and sign SHA256SUMS file", ), ConfigSetting( dest="key", section="Validation", help="GPG key to use for signing", ), ConfigSetting( dest="proxy_url", section="Host", default_factory=config_default_proxy_url, default_factory_depends=("environment",), metavar="URL", help="Set the proxy to use", universal=True, ), ConfigSetting( dest="proxy_exclude", section="Host", metavar="HOST", parse=config_make_list_parser(delimiter=","), help="Don't use the configured proxy for the specified host(s)", universal=True, ), ConfigSetting( dest="proxy_peer_certificate", section="Host", parse=config_make_path_parser(), paths=( "/etc/pki/tls/certs/ca-bundle.crt", "/etc/ssl/certs/ca-certificates.crt", ), help="Set the proxy peer certificate", universal=True, ), ConfigSetting( dest="proxy_client_certificate", section="Host", parse=config_make_path_parser(secret=True), help="Set the proxy client certificate", universal=True, ), ConfigSetting( dest="proxy_client_key", section="Host", default_factory=lambda ns: ns.proxy_client_certificate, default_factory_depends=("proxy_client_certificate",), parse=config_make_path_parser(secret=True), help="Set the proxy client key", universal=True, ), ConfigSetting( dest="incremental", short="-i", metavar="BOOL", nargs="?", section="Host", parse=config_parse_boolean, help="Make use of and generate intermediary cache images", universal=True, ), ConfigSetting( dest="nspawn_settings", name="NSpawnSettings", long="--settings", metavar="PATH", section="Host", parse=config_make_path_parser(), paths=("mkosi.nspawn",), help="Add in .nspawn settings file", ), ConfigSetting( dest="extra_search_paths", long="--extra-search-path", metavar="PATH", section="Host", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), help="List of comma-separated paths to look for programs before looking in PATH", universal=True, ), ConfigSetting( dest="ephemeral", metavar="BOOL", section="Host", parse=config_parse_boolean, help=('If specified, the container/VM is run with a temporary snapshot of the output ' 'image that is removed immediately when the container/VM terminates'), nargs="?", ), ConfigSetting( dest="credentials", long="--credential", metavar="NAME=VALUE", section="Host", parse=config_make_dict_parser(delimiter=" ", parse=parse_credential, allow_paths=True, unescape=True), help="Pass a systemd credential to systemd-nspawn or qemu", paths=("mkosi.credentials",), ), ConfigSetting( dest="kernel_command_line_extra", metavar="OPTIONS", section="Host", parse=config_make_list_parser(delimiter=" "), help="Append extra entries to the kernel command line when booting the image", ), ConfigSetting( dest="acl", metavar="BOOL", nargs="?", section="Host", parse=config_parse_boolean, help="Set ACLs on generated directories to permit the user running mkosi to remove them", universal=True, ), ConfigSetting( dest="tools_tree", metavar="PATH", section="Host", parse=config_make_path_parser(required=False, constants=("default",)), paths=("mkosi.tools",), help="Look up programs to execute inside the given tree", nargs="?", 
const="default", universal=True, ), ConfigSetting( dest="tools_tree_distribution", section="Host", parse=config_make_enum_parser(Distribution), match=config_make_enum_matcher(Distribution), choices=Distribution.choices(), default_factory_depends=("distribution",), default_factory=config_default_tools_tree_distribution, help="Set the distribution to use for the default tools tree", ), ConfigSetting( dest="tools_tree_release", metavar="RELEASE", section="Host", parse=config_parse_string, default_factory_depends=("tools_tree_distribution",), default_factory=lambda ns: d.default_release() if (d := ns.tools_tree_distribution) else None, help="Set the release to use for the default tools tree", ), ConfigSetting( dest="tools_tree_mirror", metavar="MIRROR", section="Host", default_factory_depends=("distribution", "mirror", "tools_tree_distribution"), default_factory=lambda ns: ns.mirror if ns.mirror and ns.distribution == ns.tools_tree_distribution else None, help="Set the mirror to use for the default tools tree", ), ConfigSetting( dest="tools_tree_repositories", long="--tools-tree-repository", metavar="REPOS", section="Host", parse=config_make_list_parser(delimiter=","), help="Repositories to use for the default tools tree", ), ConfigSetting( dest="tools_tree_package_manager_trees", long="--tools-tree-package-manager-tree", metavar="PATH", section="Host", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser()), help="Package manager trees for the default tools tree", ), ConfigSetting( dest="tools_tree_packages", long="--tools-tree-package", metavar="PACKAGE", section="Host", parse=config_make_list_parser(delimiter=","), help="Add additional packages to the default tools tree", ), ConfigSetting( dest="tools_tree_certificates", metavar="BOOL", section="Host", parse=config_parse_boolean, help="Use certificates from the tools tree", default=True, universal=True, ), ConfigSetting( dest="runtime_trees", long="--runtime-tree", metavar="SOURCE:[TARGET]", section="Host", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(absolute=False)), help="Additional mounts to add when booting the image", ), ConfigSetting( dest="runtime_size", metavar="SIZE", section="Host", parse=config_parse_bytes, help="Grow disk images to the specified size before booting them", ), ConfigSetting( dest="runtime_scratch", metavar="FEATURE", section="Host", parse=config_parse_feature, help="Mount extra scratch space to /var/tmp", ), ConfigSetting( dest="runtime_network", section="Host", parse=config_make_enum_parser(Network), choices=Network.choices(), help="Set networking backend to use when booting the image", default=Network.user, ), ConfigSetting( dest="runtime_build_sources", metavar="BOOL", section="Host", parse=config_parse_boolean, help="Mount build sources and build directory in /work when booting the image", ), ConfigSetting( dest="unit_properties", long="--unit-property", metavar="PROPERTY", section="Host", parse=config_make_list_parser(delimiter=" ", unescape=True), help="Set properties on the scopes spawned by systemd-nspawn or systemd-run", ), ConfigSetting( dest="ssh_key", metavar="PATH", section="Host", parse=config_make_path_parser(secret=True), paths=("mkosi.key",), help="Private key for use with mkosi ssh in PEM format", ), ConfigSetting( dest="ssh_certificate", metavar="PATH", section="Host", parse=config_make_path_parser(), paths=("mkosi.crt",), help="Certificate for use with mkosi ssh in X509 format", ), ConfigSetting( dest="vmm", name="VirtualMachineMonitor", section="Host", 
choices=Vmm.choices(), parse=config_make_enum_parser(Vmm), default=Vmm.qemu, help="Set the virtual machine monitor to use for mkosi qemu", ), ConfigSetting( dest="machine", metavar="NAME", section="Host", help="Set the machine name to use when booting the image", ), ConfigSetting( dest="forward_journal", metavar="PATH", section="Host", parse=config_make_path_parser(required=False), help="Set the path used to store forwarded machine journals", ), ConfigSetting( dest="qemu_gui", metavar="BOOL", nargs="?", section="Host", parse=config_parse_boolean, help="Start QEMU in graphical mode", ), ConfigSetting( dest="qemu_smp", metavar="SMP", section="Host", parse=config_parse_number, default=1, help="Configure guest's SMP settings", ), ConfigSetting( dest="qemu_mem", metavar="MEM", section="Host", parse=config_parse_bytes, default=parse_bytes("2G"), help="Configure guest's RAM size", ), ConfigSetting( dest="qemu_kvm", metavar="FEATURE", nargs="?", section="Host", parse=config_parse_feature, help="Configure whether to use KVM or not", ), ConfigSetting( dest="qemu_vsock", metavar="FEATURE", nargs="?", section="Host", parse=config_parse_feature, help="Configure whether to use qemu with a vsock or not", ), ConfigSetting( dest="qemu_vsock_cid", name="QemuVsockConnectionId", long="--qemu-vsock-cid", metavar="NUMBER|auto|hash", section="Host", parse=config_parse_vsock_cid, default=QemuVsockCID.auto, help="Specify the VSock connection ID to use", ), ConfigSetting( dest="qemu_swtpm", metavar="FEATURE", nargs="?", section="Host", parse=config_parse_feature, help="Configure whether to use qemu with swtpm or not", ), ConfigSetting( dest="qemu_cdrom", metavar="BOOLEAN", nargs="?", section="Host", parse=config_parse_boolean, help="Attach the image as a CD-ROM to the virtual machine", ), ConfigSetting( dest="qemu_firmware", section="Host", parse=config_make_enum_parser(QemuFirmware), default=QemuFirmware.auto, help="Set qemu firmware to use", choices=QemuFirmware.choices(), ), ConfigSetting( dest="qemu_firmware_variables", metavar="PATH", section="Host", parse=config_make_path_parser(constants=("custom", "microsoft")), help="Set the path to the qemu firmware variables file to use", ), ConfigSetting( dest="qemu_kernel", metavar="PATH", section="Host", parse=config_make_path_parser(), help="Specify the kernel to use for qemu direct kernel boot", ), ConfigSetting( dest="qemu_drives", long="--qemu-drive", metavar="DRIVE", section="Host", parse=config_make_list_parser(delimiter=" ", parse=parse_drive), help="Specify a qemu drive that mkosi should create and pass to qemu", ), ConfigSetting( dest="qemu_args", metavar="ARGS", section="Host", parse=config_make_list_parser(delimiter=" ", unescape=True), # Suppress the command line option because it's already possible to pass qemu args as normal # arguments. 
help=argparse.SUPPRESS, ), ) SETTINGS_LOOKUP_BY_NAME = {name: s for s in SETTINGS for name in [s.name, *s.compat_names]} SETTINGS_LOOKUP_BY_DEST = {s.dest: s for s in SETTINGS} SETTINGS_LOOKUP_BY_SPECIFIER = {s.specifier: s for s in SETTINGS if s.specifier} MATCHES = ( Match( name="PathExists", match=match_path_exists, ), Match( name="SystemdVersion", match=match_systemd_version, ), Match( name="HostArchitecture", match=match_host_architecture, ), ) MATCH_LOOKUP = {m.name: m for m in MATCHES} SPECIFIERS = ( Specifier( char="C", callback=lambda ns, config: os.fspath(config.resolve().parent), ), Specifier( char="P", callback=lambda ns, config: os.fspath(Path.cwd()), ), Specifier( char="D", callback=lambda ns, config: os.fspath(ns.directory.resolve()), ), Specifier( char="F", callback=lambda ns, config: ns.distribution.filesystem(), depends=("distribution",), ), ) SPECIFIERS_LOOKUP_BY_CHAR = {s.char: s for s in SPECIFIERS} # This regular expression can be used to split "AutoBump" -> ["Auto", "Bump"] # and "NSpawnSettings" -> ["NSpawn", "Settings"] # The first part (?<=[a-z]) is a positive look behind for a lower case letter # and (?=[A-Z]) is a lookahead assertion matching an upper case letter but not # consuming it FALLBACK_NAME_TO_DEST_SPLITTER = re.compile("(?<=[a-z])(?=[A-Z])") def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: parser = argparse.ArgumentParser( prog="mkosi", description="Build Bespoke OS Images", # the synopsis below is supposed to be indented by two spaces usage="\n " + textwrap.dedent("""\ mkosi [options...] {b}summary{e} mkosi [options...] {b}build{e} [command line...] mkosi [options...] {b}shell{e} [command line...] mkosi [options...] {b}boot{e} [nspawn settings...] mkosi [options...] {b}qemu{e} [qemu parameters...] mkosi [options...] {b}ssh{e} [command line...] mkosi [options...] {b}journalctl{e} [command line...] mkosi [options...] {b}coredumpctl{e} [command line...] mkosi [options...] {b}clean{e} mkosi [options...] {b}serve{e} mkosi [options...] {b}bump{e} mkosi [options...] {b}genkey{e} mkosi [options...] {b}documentation{e} mkosi [options...] 
{b}help{e} mkosi -h | --help mkosi --version """).format(b=Style.bold, e=Style.reset), add_help=False, allow_abbrev=False, argument_default=argparse.SUPPRESS, formatter_class=CustomHelpFormatter, ) parser.add_argument( "--version", action="version", version="%(prog)s " + __version__, help=argparse.SUPPRESS, ) parser.add_argument( "-f", "--force", action="count", dest="force", default=0, help="Remove existing image file before operation", ) parser.add_argument( "-C", "--directory", type=parse_chdir if chdir else str, default=Path.cwd(), help="Change to specified directory before doing anything", metavar="PATH", ) parser.add_argument( "--debug", help="Turn on debugging output", action="store_true", default=False, ) parser.add_argument( "--debug-shell", help="Spawn an interactive shell in the image if a chroot command fails", action="store_true", default=False, ) parser.add_argument( "--debug-workspace", help="When an error occurs, the workspace directory will not be deleted", action="store_true", default=False, ) parser.add_argument( "--no-pager", action="store_false", dest="pager", default=True, help="Disable paging for long output", ) parser.add_argument( "--genkey-valid-days", metavar="DAYS", help="Number of days keys should be valid when generating keys", default="730", ) parser.add_argument( "--genkey-common-name", metavar="CN", help="Template for the CN when generating keys", default="mkosi of %u", ) parser.add_argument( "-B", "--auto-bump", help="Automatically bump image version after building", action="store_true", default=False, ) parser.add_argument( "--doc-format", help="The format to show documentation in", default=DocFormat.auto, type=DocFormat, choices=list(DocFormat), ) parser.add_argument( "--json", help="Show summary as JSON", action="store_true", default=False, ) # These can be removed once mkosi v15 is available in LTS distros and compatibility with <= v14 # is no longer needed in build infrastructure (e.g.: OBS).
parser.add_argument( "--nspawn-keep-unit", nargs=0, action=IgnoreAction, ) parser.add_argument( "--default", action=IgnoreAction, ) parser.add_argument( "--cache", action=IgnoreAction, ) parser.add_argument( "verb", type=Verb, choices=list(Verb), default=Verb.build, help=argparse.SUPPRESS, ) parser.add_argument( "cmdline", nargs=argparse.REMAINDER, help=argparse.SUPPRESS, ) parser.add_argument( "-h", "--help", action=PagerHelpAction, help=argparse.SUPPRESS, ) last_section = None for s in SETTINGS: if s.section != last_section: group = parser.add_argument_group(f"{s.section} configuration options") last_section = s.section opts = [s.short, s.long] if s.short else [s.long] group.add_argument( # type: ignore *opts, dest=s.dest, choices=s.choices, metavar=s.metavar, nargs=s.nargs, # type: ignore const=s.const, help=s.help, action=ConfigAction, ) return parser def resolve_deps(images: Sequence[argparse.Namespace], include: Sequence[str]) -> list[argparse.Namespace]: graph = {config.image: config.dependencies for config in images} if any((missing := i) not in graph for i in include): die(f"No image found with name {missing}") deps = set() queue = [*include] while queue: if (image := queue.pop(0)) not in deps: deps.add(image) queue.extend(graph[image]) images = [config for config in images if config.image in deps] graph = {config.image: config.dependencies for config in images} try: order = list(graphlib.TopologicalSorter(graph).static_order()) except graphlib.CycleError as e: die(f"Image dependency cycle detected: {' => '.join(e.args[1])}") return sorted(images, key=lambda i: order.index(i.image)) class ConfigAction(argparse.Action): def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], option_string: Optional[str] = None ) -> None: assert option_string is not None if values is None and self.nargs == "?": values = self.const or "yes" s = SETTINGS_LOOKUP_BY_DEST[self.dest] if values is None or isinstance(values, str): setattr(namespace, s.dest, s.parse(values, getattr(namespace, self.dest, None))) else: for v in values: assert isinstance(v, str) setattr(namespace, s.dest, s.parse(v, getattr(namespace, self.dest, None))) class ParseContext: def __init__(self, resources: Path = Path("/")) -> None: self.resources = resources # We keep two namespaces around, one for the settings specified on the CLI and one for the settings specified # in configuration files. This is required to implement both [Match] support and the behavior where settings # specified on the CLI always override settings specified in configuration files. self.cli = argparse.Namespace() self.config = argparse.Namespace() # Compare inodes instead of paths so we can't get tricked by bind mounts and such. self.includes: set[tuple[int, int]] = set() self.immutable: set[str] = set() def expand_specifiers(self, text: str, path: Path) -> str: percent = False result: list[str] = [] for c in text: if percent: percent = False if c == "%": result += "%" elif setting := SETTINGS_LOOKUP_BY_SPECIFIER.get(c): if (v := self.finalize_value(setting)) is None: logging.warning( f"Setting {setting.name} specified by specifier '%{c}' in {text} is not yet set, ignoring" ) continue result += str(v) elif specifier := SPECIFIERS_LOOKUP_BY_CHAR.get(c): specifierns = argparse.Namespace() # Some specifier methods might want to access the image name or directory mkosi was invoked in so # let's make sure those are available. 
setattr(specifierns, "image", getattr(self.config, "image", None)) setattr(specifierns, "directory", self.cli.directory) for d in specifier.depends: setting = SETTINGS_LOOKUP_BY_DEST[d] if (v := self.finalize_value(setting)) is None: logging.warning( f"Setting {setting.name} which specifier '%{c}' in {text} depends on is not yet set, " "ignoring" ) break setattr(specifierns, d, v) else: result += specifier.callback(specifierns, path) else: logging.warning(f"Unknown specifier '%{c}' found in {text}, ignoring") elif c == "%": percent = True else: result += c if percent: result += "%" return "".join(result) @contextlib.contextmanager def parse_new_includes(self) -> Iterator[None]: try: yield finally: # Parse any includes that were added after yielding. for p in getattr(self.cli, "include", []) + getattr(self.config, "include", []): for c in BUILTIN_CONFIGS: if p == Path(c): path = self.resources / c break else: path = p st = path.stat() if (st.st_dev, st.st_ino) in self.includes: continue self.includes.add((st.st_dev, st.st_ino)) if any(p == Path(c) for c in BUILTIN_CONFIGS): _, [config] = parse_config(["--directory", "", "--include", os.fspath(path)]) make_executable( *config.configure_scripts, *config.clean_scripts, *config.sync_scripts, *config.prepare_scripts, *config.build_scripts, *config.postinst_scripts, *config.finalize_scripts, *config.postoutput_scripts, ) with chdir(path if path.is_dir() else Path.cwd()): self.parse_config_one(path if path.is_file() else Path(".")) def finalize_value(self, setting: ConfigSetting) -> Optional[Any]: # If a value was specified on the CLI, it always takes priority. If the setting is a collection of values, we # merge the value from the CLI with the value from the configuration, making sure that the value from the CLI # always takes priority. if ( hasattr(self.cli, setting.dest) and (v := getattr(self.cli, setting.dest)) is not None ): if isinstance(v, list): return (getattr(self.config, setting.dest, None) or []) + v elif isinstance(v, dict): return (getattr(self.config, setting.dest, None) or {}) | v elif isinstance(v, set): return (getattr(self.config, setting.dest, None) or set()) | v else: return v # If the setting was assigned the empty string on the CLI, we don't use any value configured in the # configuration file. Additionally, if the setting is a collection of values, we won't use any default # value either if the setting is set to the empty string on the command line. if ( not hasattr(self.cli, setting.dest) and hasattr(self.config, setting.dest) and (v := getattr(self.config, setting.dest)) is not None ): return v if ( (hasattr(self.cli, setting.dest) or hasattr(self.config, setting.dest)) and isinstance(setting.parse(None, None), (dict, list, set)) ): default = setting.parse(None, None) elif setting.default_factory: # To determine default values, we need the final values of various settings in # a namespace object, but we don't want to copy the final values into the config # namespace object just yet so we create a new namespace object instead. factoryns = argparse.Namespace( **{d: self.finalize_value(SETTINGS_LOOKUP_BY_DEST[d]) for d in setting.default_factory_depends} ) # Some default factory methods want to access the image name or directory mkosi # was invoked in so let's make sure those are available. 
setattr(factoryns, "image", getattr(self.config, "image", None)) setattr(factoryns, "directory", self.cli.directory) default = setting.default_factory(factoryns) elif setting.default is not None: default = setting.default else: default = setting.parse(None, None) setattr(self.config, setting.dest, default) return default def match_config(self, path: Path) -> bool: condition_triggered: Optional[bool] = None match_triggered: Optional[bool] = None skip = False # If the config file does not exist, we assume it matches so that we look at the other files in the # directory as well (mkosi.conf.d/ and extra files). if not path.exists(): return True for section, k, v in parse_ini(path, only_sections=["Match", "TriggerMatch"]): if not k and not v: if section == "Match" and condition_triggered is False: return False if section == "TriggerMatch": match_triggered = bool(match_triggered) or condition_triggered is not False condition_triggered = None skip = False continue if skip: continue trigger = v.startswith("|") v = v.removeprefix("|") negate = v.startswith("!") v = v.removeprefix("!") v = self.expand_specifiers(v, path) if not v: die("Match value cannot be empty") if s := SETTINGS_LOOKUP_BY_NAME.get(k): if not s.match: die(f"{k} cannot be used in [{section}]") if k != s.name: logging.warning(f"Setting {k} is deprecated, please use {s.name} instead.") # If we encounter a setting that has not been explicitly configured yet, we assign the default value # first so that we can match on default values for settings. if (value := self.finalize_value(s)) is None: result = False else: result = s.match(v, value) elif m := MATCH_LOOKUP.get(k): result = m.match(v) else: die(f"{k} cannot be used in [{section}]") if negate: result = not result if not trigger and not result: if section == "TriggerMatch": skip = True condition_triggered = False continue return False if trigger: condition_triggered = bool(condition_triggered) or result return match_triggered is not False def parse_config_one(self, path: Path, profiles: bool = False, local: bool = False) -> bool: s: Optional[ConfigSetting] # Make mypy happy extras = path.is_dir() if path.is_dir(): path = path / "mkosi.conf" if not self.match_config(path): return False if extras: if local and (path.parent / "mkosi.local.conf").exists(): self.parse_config_one(path.parent / "mkosi.local.conf") # Configuration from mkosi.local.conf should override other file based configuration but not the CLI # itself so move the finalized values to the CLI namespace. 
for s in SETTINGS: if hasattr(self.config, s.dest): setattr(self.cli, s.dest, self.finalize_value(s)) delattr(self.config, s.dest) for s in SETTINGS: for f in s.paths: extra = parse_path( f, secret=s.path_secret, required=False, resolve=False, expanduser=False, expandvars=False, ) if extra.exists(): setattr( self.config, s.dest, s.parse( extra.read_text().rstrip("\n") if s.path_read_text else f, getattr(self.config, s.dest, None) ), ) if path.exists(): logging.debug(f"Including configuration file {Path.cwd() / path}") for section, k, v in parse_ini(path, only_sections={s.section for s in SETTINGS}): if not k and not v: continue name = k.removeprefix("@") if name != k: logging.warning(f"The '@' specifier is deprecated, please use {name} instead of {k}") if not (s := SETTINGS_LOOKUP_BY_NAME.get(name)): die(f"Unknown setting {name}") if ( s.universal and not isinstance(s.parse(None, None), (list, set, dict)) and (image := getattr(self.config, "image", None)) is not None ): die(f"Setting {name} cannot be configured in subimage {image}") if name in self.immutable: die(f"Setting {name} cannot be modified anymore at this point") if section != s.section: logging.warning(f"Setting {name} should be configured in [{s.section}], not [{section}].") if name != s.name: logging.warning(f"Setting {name} is deprecated, please use {s.name} instead.") v = self.expand_specifiers(v, path) with self.parse_new_includes(): setattr(self.config, s.dest, s.parse(v, getattr(self.config, s.dest, None))) if profiles: profile = self.finalize_value(SETTINGS_LOOKUP_BY_DEST["profile"]) self.immutable.add("Profile") if profile: for p in (profile, f"{profile}.conf"): p = Path("mkosi.profiles") / p if p.exists(): break else: die(f"Profile '{profile}' not found in mkosi.profiles/") setattr(self.config, "profile", profile) with chdir(p if p.is_dir() else Path.cwd()): self.parse_config_one(p if p.is_file() else Path(".")) if extras and (path.parent / "mkosi.conf.d").exists(): for p in sorted((path.parent / "mkosi.conf.d").iterdir()): if p.is_dir() or p.suffix == ".conf": with chdir(p if p.is_dir() else Path.cwd()): self.parse_config_one(p if p.is_file() else Path(".")) return True def parse_config(argv: Sequence[str] = (), *, resources: Path = Path("/")) -> tuple[Args, tuple[Config, ...]]: argv = list(argv) # Make sure the verb command gets explicitly passed. Insert a -- before the positional verb argument # otherwise it might be considered as an argument of a parameter with nargs='?'. For example mkosi -i # summary would be treated as -i=summary. for verb in Verb: try: v_i = argv.index(verb.name) except ValueError: continue # Hack to make sure mkosi -C build works. if argv[v_i - 1] in ("-C", "--directory"): continue if v_i > 0 and argv[v_i - 1] != "--": argv.insert(v_i, "--") break else: argv += ["--", "build"] context = ParseContext(resources) # The "image" field does not directly map to a setting but is required # to determine some default values for settings, so let's set it on the # config namespace immediately so it's available. setattr(context.config, "image", None) # First, we parse the command line arguments into a separate namespace. argparser = create_argument_parser() with context.parse_new_includes(): argparser.parse_args(argv, context.cli) args = load_args(context.cli) # If --debug was passed, apply it as soon as possible. if ARG_DEBUG.get(): logging.getLogger().setLevel(logging.DEBUG) # Do the same for help. 
if args.verb == Verb.help: PagerHelpAction.__call__(None, argparser, context.cli) # type: ignore if not args.verb.needs_config(): return args, () # One of the specifiers needs access to the directory so let's make sure it # is available. setattr(context.config, "directory", args.directory) # Parse the global configuration unless the user explicitly asked us not to. if args.directory is not None: context.parse_config_one(Path("."), profiles=True, local=True) # After we've finished parsing the configuration, we'll have values in both # namespaces (context.cli, context.config). To be able to parse the values from a # single namespace, we merge the final values of each setting into one namespace. for s in SETTINGS: setattr(context.config, s.dest, context.finalize_value(s)) # Load the configuration for the main image. config = load_config(context.config) images = [] if args.directory is not None and Path("mkosi.images").exists(): # For the subimages in mkosi.images/, we want settings that are marked as # "universal" to override whatever settings are specified in the subimage # configuration files. We achieve this by making it appear like these settings # were specified on the CLI by copying them to the CLI namespace. Any settings # that are not marked as "universal" are deleted from the CLI namespace. for s in SETTINGS: if s.universal: setattr(context.cli, s.dest, getattr(context.config, s.dest)) elif hasattr(context.cli, s.dest): delattr(context.cli, s.dest) setattr( context.cli, "environment", { name: getattr(context.config, "environment")[name] for name in getattr(context.config, "pass_environment", {}) if name in getattr(context.config, "environment", {}) } ) for p in sorted(Path("mkosi.images").iterdir()): if not p.is_dir() and not p.suffix == ".conf": continue name = p.name.removesuffix(".conf") if not name: die(f"{p} is not a valid image name") context.config = argparse.Namespace() setattr(context.config, "image", name) setattr(context.config, "directory", args.directory) # Allow subimage configuration to include everything again. context.includes = set() with chdir(p if p.is_dir() else Path.cwd()): if not context.parse_config_one(p if p.is_file() else Path("."), local=True): continue # Consolidate all settings into one namespace again. 
for s in SETTINGS: setattr(context.config, s.dest, context.finalize_value(s)) images += [context.config] images = resolve_deps(images, config.dependencies) images = [load_config(ns) for ns in images] return args, tuple(images + [config]) def load_credentials(args: argparse.Namespace) -> dict[str, str]: creds = { "agetty.autologin": "root", "login.noauth": "yes", "firstboot.locale": "C.UTF-8", **args.credentials, } if "firstboot.timezone" not in creds: if find_binary("timedatectl"): tz = run( ["timedatectl", "show", "-p", "Timezone", "--value"], stdout=subprocess.PIPE, check=False, ).stdout.strip() else: tz = "UTC" creds["firstboot.timezone"] = tz if "ssh.authorized_keys.root" not in creds: if args.ssh_certificate: pubkey = run(["openssl", "x509", "-in", args.ssh_certificate, "-pubkey", "-noout"], stdout=subprocess.PIPE, env=dict(OPENSSL_CONF="/dev/null")).stdout.strip() sshpubkey = run(["ssh-keygen", "-f", "/dev/stdin", "-i", "-m", "PKCS8"], input=pubkey, stdout=subprocess.PIPE).stdout.strip() creds["ssh.authorized_keys.root"] = sshpubkey elif args.ssh: die("Ssh= is enabled but no SSH certificate was found", hint="Run 'mkosi genkey' to automatically create one") return creds def finalize_term() -> str: term = os.getenv("TERM", "unknown") if term == "unknown": term = "vt220" if sys.stderr.isatty() else "dumb" return term if sys.stderr.isatty() else "dumb" def load_kernel_command_line_extra(args: argparse.Namespace) -> list[str]: columns, lines = shutil.get_terminal_size() term = finalize_term() cmdline = [ "rw", # Make sure we set up networking in the VM/container. "systemd.wants=network.target", # Make sure we don't load vmw_vmci which messes with virtio vsock. "module_blacklist=vmw_vmci", f"systemd.tty.term.hvc0={term}", f"systemd.tty.columns.hvc0={columns}", f"systemd.tty.rows.hvc0={lines}", ] if not any(s.startswith("ip=") for s in args.kernel_command_line_extra): cmdline += ["ip=enc0:any", "ip=enp0s1:any", "ip=enp0s2:any", "ip=host0:any", "ip=none"] if not any(s.startswith("loglevel=") for s in args.kernel_command_line_extra): cmdline += ["loglevel=4"] if not any(s.startswith("SYSTEMD_SULOGIN_FORCE=") for s in args.kernel_command_line_extra): cmdline += ["SYSTEMD_SULOGIN_FORCE=1"] if not any(s.startswith("systemd.hostname=") for s in args.kernel_command_line_extra) and args.machine: cmdline += [f"systemd.hostname={args.machine}"] if args.qemu_cdrom: # CD-ROMs are read-only so tell systemd to boot in volatile mode. 
cmdline += ["systemd.volatile=yes"] if not args.qemu_gui: cmdline += [ f"systemd.tty.term.console={term}", f"systemd.tty.columns.console={columns}", f"systemd.tty.rows.console={lines}", "console=hvc0", f"TERM={term}", ] for s in args.kernel_command_line_extra: key, sep, value = s.partition("=") if " " in value: value = f'"{value}"' cmdline += [key if not sep else f"{key}={value}"] return cmdline def load_environment(args: argparse.Namespace) -> dict[str, str]: env = { "SYSTEMD_TMPFILES_FORCE_SUBVOL": "0", "TERM": finalize_term(), } if args.image_id is not None: env["IMAGE_ID"] = args.image_id if args.image_version is not None: env["IMAGE_VERSION"] = args.image_version if args.source_date_epoch is not None: env["SOURCE_DATE_EPOCH"] = str(args.source_date_epoch) if args.proxy_url is not None: for e in ("http_proxy", "https_proxy"): env[e] = args.proxy_url env[e.upper()] = args.proxy_url if args.proxy_exclude: env["no_proxy"] = ",".join(args.proxy_exclude) env["NO_PROXY"] = ",".join(args.proxy_exclude) if args.proxy_peer_certificate: env["GIT_PROXY_SSL_CAINFO"] = "/proxy.cacert" if args.proxy_client_certificate: env["GIT_PROXY_SSL_CERT"] = "/proxy.clientcert" if args.proxy_client_key: env["GIT_PROXY_SSL_KEY"] = "/proxy.clientkey" if dnf := os.getenv("MKOSI_DNF"): env["MKOSI_DNF"] = dnf if gnupghome := os.getenv("GNUPGHOME"): env["GNUPGHOME"] = gnupghome env |= dict(parse_environment(line) for f in args.environment_files for line in f.read_text().strip().splitlines()) env |= args.environment return env def load_args(args: argparse.Namespace) -> Args: if args.cmdline and not args.verb.supports_cmdline(): die(f"Arguments after verb are not supported for {args.verb}.") if args.debug: ARG_DEBUG.set(args.debug) if args.debug_shell: ARG_DEBUG_SHELL.set(args.debug_shell) return Args.from_namespace(args) def load_config(config: argparse.Namespace) -> Config: # Make sure we don't modify the input namespace. config = copy.deepcopy(config) if config.build_dir: config.build_dir /= f"{config.distribution}~{config.release}~{config.architecture}" if config.sign: config.checksum = True if not config.image: config.credentials = load_credentials(config) config.kernel_command_line_extra = load_kernel_command_line_extra(config) config.environment = load_environment(config) if config.overlay and not config.base_trees: die("--overlay can only be used with --base-tree") if config.incremental and not config.cache_dir: die("A cache directory must be configured in order to use --incremental") # For unprivileged builds we need the userxattr OverlayFS mount option, which is only available # in Linux v5.11 and later. 
if ( (config.build_scripts or config.base_trees) and GenericVersion(platform.release()) < GenericVersion("5.11") and os.geteuid() != 0 ): die("This unprivileged build configuration requires at least Linux v5.11") return Config.from_namespace(config) def yes_no(b: bool) -> str: return "yes" if b else "no" def none_to_na(s: Optional[object]) -> str: return "n/a" if s is None else str(s) def none_to_random(s: Optional[object]) -> str: return "random" if s is None else str(s) def none_to_none(s: Optional[object]) -> str: return "none" if s is None else str(s) def none_to_default(s: Optional[object]) -> str: return "default" if s is None else str(s) def line_join_list(array: Iterable[object]) -> str: return "\n ".join(str(item) for item in array) if array else "none" def format_bytes(num_bytes: int) -> str: if num_bytes >= 1024**3: return f"{num_bytes/1024**3 :0.1f}G" if num_bytes >= 1024**2: return f"{num_bytes/1024**2 :0.1f}M" if num_bytes >= 1024: return f"{num_bytes/1024 :0.1f}K" return f"{num_bytes}B" def format_bytes_or_none(num_bytes: Optional[int]) -> str: return format_bytes(num_bytes) if num_bytes is not None else "none" def summary(config: Config) -> str: def bold(s: Any) -> str: return f"{Style.bold}{s}{Style.reset}" maniformats = (" ".join(i.name for i in config.manifest_format)) or "(none)" env = [f"{k}={v}" for k, v in config.environment.items()] summary = f"""\ {bold(f"IMAGE: {config.image or 'default'}")} {bold("CONFIG")}: Profile: {none_to_none(config.profile)} Include: {line_join_list(config.include)} Initrd Include: {line_join_list(config.initrd_include)} Dependencies: {line_join_list(config.dependencies)} Minimum Version: {none_to_none(config.minimum_version)} Configure Scripts: {line_join_list(config.configure_scripts)} Pass Environment: {line_join_list(config.pass_environment)} {bold("DISTRIBUTION")}: Distribution: {bold(config.distribution)} Release: {bold(none_to_na(config.release))} Architecture: {config.architecture} Mirror: {none_to_default(config.mirror)} Local Mirror (build): {none_to_none(config.local_mirror)} Repo Signature/Key check: {yes_no(config.repository_key_check)} Repositories: {line_join_list(config.repositories)} Use Only Package Cache: {config.cacheonly} Package Manager Trees: {line_join_list(config.package_manager_trees)} {bold("OUTPUT")}: Output Format: {config.output_format} Manifest Formats: {maniformats} Output: {bold(config.output_with_compression)} Compression: {config.compress_output} Compression Level: {config.compress_level} Output Directory: {config.output_dir_or_cwd()} Workspace Directory: {config.workspace_dir_or_default()} Cache Directory: {none_to_none(config.cache_dir)} Package Cache Directory: {none_to_default(config.package_cache_dir)} Build Directory: {none_to_none(config.build_dir)} Image ID: {config.image_id} Image Version: {config.image_version} Split Artifacts: {yes_no(config.split_artifacts)} Repart Directories: {line_join_list(config.repart_dirs)} Sector Size: {none_to_default(config.sector_size)} Repart Offline: {yes_no(config.repart_offline)} Overlay: {yes_no(config.overlay)} Use Subvolumes: {config.use_subvolumes} Seed: {none_to_random(config.seed)} Clean Scripts: {line_join_list(config.clean_scripts)} {bold("CONTENT")}: Packages: {line_join_list(config.packages)} Build Packages: {line_join_list(config.build_packages)} Volatile Packages: {line_join_list(config.volatile_packages)} Package Directories: {line_join_list(config.package_directories)} Volatile Package Directories: 
{line_join_list(config.volatile_package_directories)} With Documentation: {yes_no(config.with_docs)} Base Trees: {line_join_list(config.base_trees)} Skeleton Trees: {line_join_list(config.skeleton_trees)} Extra Trees: {line_join_list(config.extra_trees)} Remove Packages: {line_join_list(config.remove_packages)} Remove Files: {line_join_list(config.remove_files)} Clean Package Manager Metadata: {config.clean_package_metadata} Source Date Epoch: {none_to_none(config.source_date_epoch)} Sync Scripts: {line_join_list(config.sync_scripts)} Prepare Scripts: {line_join_list(config.prepare_scripts)} Build Scripts: {line_join_list(config.build_scripts)} Postinstall Scripts: {line_join_list(config.postinst_scripts)} Finalize Scripts: {line_join_list(config.finalize_scripts)} Postoutput Scripts: {line_join_list(config.postoutput_scripts)} Build Sources: {line_join_list(config.build_sources)} Build Sources Ephemeral: {yes_no(config.build_sources_ephemeral)} Script Environment: {line_join_list(env)} Environment Files: {line_join_list(config.environment_files)} Run Tests in Build Scripts: {yes_no(config.with_tests)} Scripts With Network: {yes_no(config.with_network)} Bootable: {config.bootable} Bootloader: {config.bootloader} BIOS Bootloader: {config.bios_bootloader} Shim Bootloader: {config.shim_bootloader} Unified Kernel Images: {config.unified_kernel_images} Unified Kernel Image Format: {config.unified_kernel_image_format} Initrds: {line_join_list(config.initrds)} Initrd Packages: {line_join_list(config.initrd_packages)} Initrd Volatile Packages: {line_join_list(config.initrd_volatile_packages)} Kernel Command Line: {line_join_list(config.kernel_command_line)} Kernel Modules Include: {line_join_list(config.kernel_modules_include)} Kernel Modules Exclude: {line_join_list(config.kernel_modules_exclude)} Kernel Modules Include Host: {yes_no(config.kernel_modules_include_host)} Kernel Modules Initrd: {yes_no(config.kernel_modules_initrd)} Kernel Modules Initrd Include: {line_join_list(config.kernel_modules_initrd_include)} Kernel Modules Initrd Exclude: {line_join_list(config.kernel_modules_initrd_exclude)} Kernel Modules Initrd Include Host: {yes_no(config.kernel_modules_initrd_include_host)} Locale: {none_to_default(config.locale)} Locale Messages: {none_to_default(config.locale_messages)} Keymap: {none_to_default(config.keymap)} Timezone: {none_to_default(config.timezone)} Hostname: {none_to_default(config.hostname)} Root Password: {("(set)" if config.root_password else "(default)")} Root Shell: {none_to_default(config.root_shell)} Autologin: {yes_no(config.autologin)} Make Initrd: {yes_no(config.make_initrd)} SSH: {yes_no(config.ssh)} SELinux Relabel: {config.selinux_relabel} """ if config.output_format.is_extension_image() or config.output_format in ( OutputFormat.disk, OutputFormat.uki, OutputFormat.esp, ): summary += f"""\ {bold("VALIDATION")}: UEFI SecureBoot: {yes_no(config.secure_boot)} UEFI SecureBoot AutoEnroll: {yes_no(config.secure_boot_auto_enroll)} SecureBoot Signing Key: {none_to_none(config.secure_boot_key)} SecureBoot Signing Key Source: {config.secure_boot_key_source} SecureBoot Certificate: {none_to_none(config.secure_boot_certificate)} SecureBoot Sign Tool: {config.secure_boot_sign_tool} Verity Signing Key: {none_to_none(config.verity_key)} Verity Signing Key Source: {config.verity_key_source} Verity Certificate: {none_to_none(config.verity_certificate)} Sign Expected PCRs: {config.sign_expected_pcr} Passphrase: {none_to_none(config.passphrase)} Checksum: {yes_no(config.checksum)} 
Sign: {yes_no(config.sign)} GPG Key: ({"default" if config.key is None else config.key}) """ summary += f"""\ {bold("HOST CONFIGURATION")}: Proxy URL: {none_to_none(config.proxy_url)} Proxy Peer Certificate: {none_to_none(config.proxy_peer_certificate)} Proxy Client Certificate: {none_to_none(config.proxy_client_certificate)} Proxy Client Key: {none_to_none(config.proxy_client_key)} Incremental: {yes_no(config.incremental)} NSpawn Settings: {none_to_none(config.nspawn_settings)} Extra Search Paths: {line_join_list(config.extra_search_paths)} Ephemeral: {config.ephemeral} Credentials: {line_join_list(config.credentials.keys())} Extra Kernel Command Line: {line_join_list(config.kernel_command_line_extra)} Use ACLs: {yes_no(config.acl)} Tools Tree: {config.tools_tree} Tools Tree Distribution: {none_to_none(config.tools_tree_distribution)} Tools Tree Release: {none_to_none(config.tools_tree_release)} Tools Tree Mirror: {none_to_default(config.tools_tree_mirror)} Tools Tree Repositories: {line_join_list(config.tools_tree_repositories)} Tools Tree Package Manager Trees: {line_join_list(config.tools_tree_package_manager_trees)} Tools Tree Packages: {line_join_list(config.tools_tree_packages)} Tools Tree Certificates: {yes_no(config.tools_tree_certificates)} Runtime Trees: {line_join_list(config.runtime_trees)} Runtime Size: {format_bytes_or_none(config.runtime_size)} Runtime Scratch: {config.runtime_scratch} Runtime Network: {config.runtime_network} Runtime Build Sources: {config.runtime_build_sources} Unit Properties: {line_join_list(config.unit_properties)} SSH Signing Key: {none_to_none(config.ssh_key)} SSH Certificate: {none_to_none(config.ssh_certificate)} Machine: {config.machine_or_name()} Forward Journal: {none_to_none(config.forward_journal)} Virtual Machine Monitor: {config.vmm} QEMU GUI: {yes_no(config.qemu_gui)} QEMU CPU Cores: {config.qemu_smp} QEMU Memory: {config.qemu_mem} QEMU Use KVM: {config.qemu_kvm} QEMU Use VSock: {config.qemu_vsock} QEMU VSock Connection ID: {QemuVsockCID.format(config.qemu_vsock_cid)} QEMU Use Swtpm: {config.qemu_swtpm} QEMU Use CD-ROM: {yes_no(config.qemu_cdrom)} QEMU Firmware: {config.qemu_firmware} QEMU Firmware Variables: {none_to_none(config.qemu_firmware_variables)} QEMU Kernel: {none_to_none(config.qemu_kernel)} QEMU Extra Arguments: {line_join_list(config.qemu_args)} """ return summary class JsonEncoder(json.JSONEncoder): def default(self, o: Any) -> Any: if isinstance(o, StrEnum): return str(o) elif isinstance(o, GenericVersion): return str(o) elif isinstance(o, os.PathLike): return os.fspath(o) elif isinstance(o, uuid.UUID): return str(o) elif isinstance(o, (Args, Config)): return o.to_dict() return super().default(o) E = TypeVar("E", bound=StrEnum) def json_type_transformer(refcls: Union[type[Args], type[Config]]) -> Callable[[str, Any], Any]: fields_by_name = {field.name: field for field in dataclasses.fields(refcls)} def path_transformer(path: str, fieldtype: type[Path]) -> Path: return Path(path) def optional_path_transformer(path: Optional[str], fieldtype: type[Optional[Path]]) -> Optional[Path]: return Path(path) if path is not None else None def path_list_transformer(pathlist: list[str], fieldtype: type[list[Path]]) -> list[Path]: return [Path(p) for p in pathlist] def uuid_transformer(uuidstr: str, fieldtype: type[uuid.UUID]) -> uuid.UUID: return uuid.UUID(uuidstr) def root_password_transformer( rootpw: Optional[list[Union[str, bool]]], fieldtype: type[Optional[tuple[str, bool]]] ) -> Optional[tuple[str, bool]]: if rootpw is None: return 
None return (cast(str, rootpw[0]), cast(bool, rootpw[1])) def config_tree_transformer(trees: list[dict[str, Any]], fieldtype: type[ConfigTree]) -> list[ConfigTree]: # TODO: exchange for TypeGuard and list comprehension once on 3.10 ret = [] for d in trees: assert "Source" in d assert "Target" in d ret.append( ConfigTree( source=Path(d["Source"]), target=Path(d["Target"]) if d["Target"] is not None else None, ) ) return ret def enum_transformer(enumval: str, fieldtype: type[E]) -> E: return fieldtype(enumval) def optional_enum_transformer(enumval: Optional[str], fieldtype: type[Optional[E]]) -> Optional[E]: return typing.get_args(fieldtype)[0](enumval) if enumval is not None else None def enum_list_transformer(enumlist: list[str], fieldtype: type[list[E]]) -> list[E]: enumtype = fieldtype.__args__[0] # type: ignore return [enumtype[e] for e in enumlist] def config_drive_transformer(drives: list[dict[str, Any]], fieldtype: type[QemuDrive]) -> list[QemuDrive]: # TODO: exchange for TypeGuard and list comprehension once on 3.10 ret = [] for d in drives: assert "Id" in d assert "Size" in d ret.append( QemuDrive( id=d["Id"], size=d["Size"] if isinstance(d["Size"], int) else parse_bytes(d["Size"]), directory=Path(d["Directory"]) if d.get("Directory") else None, options=d.get("Options"), file_id=d.get("FileId", d["Id"]), ) ) return ret def generic_version_transformer( version: Optional[str], fieldtype: type[Optional[GenericVersion]], ) -> Optional[GenericVersion]: return GenericVersion(version) if version is not None else None def key_source_transformer(keysource: dict[str, Any], fieldtype: type[KeySource]) -> KeySource: assert "Type" in keysource return KeySource(type=KeySource.Type(keysource["Type"]), source=keysource.get("Source", "")) # The type of this should be # dict[type, Callable[a stringy JSON object (str, null, list or dict of str), type of the key], type of the key] # though this seems impossible to express, since e.g. mypy will make this a # builtins.dict[builtins.object, builtins.function] # whereas pyright gives the type of the dict keys as the proper union of # all functions in the dict. We therefore squash all the types here to Any # to shut up the type checkers and rely on the tests. 
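    # Editorial sketch (not mkosi code): stripped of the typing noise, the dispatch below amounts to
    # looking the annotation object itself up in a dict and applying the matching function, e.g.:
    #
    #     def transform(value: Any, annotation: Any) -> Any:
    #         fn = transformers.get(annotation)  # keys are typing objects like Optional[Path]
    #         return fn(value, annotation) if fn else value
    #
    # i.e. field types are matched by equality on the annotation, not with isinstance() checks.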
    transformers: dict[Any, Callable[[Any, Any], Any]] = {
        Path: path_transformer,
        Optional[Path]: optional_path_transformer,
        list[Path]: path_list_transformer,
        uuid.UUID: uuid_transformer,
        Optional[tuple[str, bool]]: root_password_transformer,
        list[ConfigTree]: config_tree_transformer,
        Architecture: enum_transformer,
        BiosBootloader: enum_transformer,
        ShimBootloader: enum_transformer,
        Bootloader: enum_transformer,
        Compression: enum_transformer,
        ConfigFeature: enum_transformer,
        Distribution: enum_transformer,
        OutputFormat: enum_transformer,
        QemuFirmware: enum_transformer,
        SecureBootSignTool: enum_transformer,
        Optional[Distribution]: optional_enum_transformer,
        list[ManifestFormat]: enum_list_transformer,
        Verb: enum_transformer,
        DocFormat: enum_transformer,
        list[QemuDrive]: config_drive_transformer,
        GenericVersion: generic_version_transformer,
        Cacheonly: enum_transformer,
        Network: enum_transformer,
        KeySource: key_source_transformer,
        Vmm: enum_transformer,
    }

    def json_transformer(key: str, val: Any) -> Any:
        fieldtype: Optional[dataclasses.Field[Any]] = fields_by_name.get(key)
        # It is unlikely that the type of a field will be None only, so let's not bother with a different
        # sentinel value
        if fieldtype is None:
            raise ValueError(f"{refcls} has no field {key}")

        transformer = transformers.get(fieldtype.type)
        if transformer is not None:
            try:
                return transformer(val, fieldtype.type)
            except (ValueError, IndexError, AssertionError) as e:
                raise ValueError(f"Unable to parse {val!r} for attribute {key!r} for {refcls.__name__}") from e

        return val

    return json_transformer


def want_selinux_relabel(config: Config, root: Path, fatal: bool = True) -> Optional[tuple[Path, str, Path, Path]]:
    if config.selinux_relabel == ConfigFeature.disabled:
        return None

    selinux = root / "etc/selinux/config"
    if not selinux.exists():
        if fatal and config.selinux_relabel == ConfigFeature.enabled:
            die("SELinux relabel is requested but could not find selinux config at /etc/selinux/config")
        return None

    policy = run(["sh", "-c", f". {selinux} && echo $SELINUXTYPE"],
                 sandbox=config.sandbox(binary="sh", mounts=[Mount(selinux, selinux, ro=True)]),
                 stdout=subprocess.PIPE).stdout.strip()
    if not policy:
        if fatal and config.selinux_relabel == ConfigFeature.enabled:
            die("SELinux relabel is requested but no selinux policy is configured in /etc/selinux/config")
        return None

    if not (setfiles := config.find_binary("setfiles")):
        if fatal and config.selinux_relabel == ConfigFeature.enabled:
            die("SELinux relabel is requested but setfiles is not installed")
        return None

    fc = root / "etc/selinux" / policy / "contexts/files/file_contexts"
    if not fc.exists():
        if fatal and config.selinux_relabel == ConfigFeature.enabled:
            die(f"SELinux relabel is requested but SELinux file contexts not found in {fc}")
        return None

    binpolicydir = root / "etc/selinux" / policy / "policy"

    # The policy file is named policy.XX where XX is the policy version that indicates what features are
    # available. We check for string.digits instead of using isdecimal() as the latter checks for more than
    # just digits.
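    # Editorial note: str.isdecimal() (and str.isdigit()) also accept non-ASCII digits, e.g. both
    # "٣".isdecimal() and "²".isdigit() are True, so the explicit all(c in string.digits for c in ...)
    # membership test below is what guarantees a plain ASCII 0-9 suffix.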
policies = [p for p in binpolicydir.glob("*") if p.suffix and all(c in string.digits for c in p.suffix[1:])] if not policies: if fatal and config.selinux_relabel == ConfigFeature.enabled: die(f"SELinux relabel is requested but SELinux binary policy not found in {binpolicydir}") return None binpolicy = sorted(policies, key=lambda p: GenericVersion(p.name), reverse=True)[0] return setfiles, policy, fc, binpolicy def systemd_tool_version(*tool: PathString, sandbox: SandboxProtocol = nosandbox) -> GenericVersion: return GenericVersion( run( [*tool, "--version"], stdout=subprocess.PIPE, sandbox=sandbox(binary=tool[-1]), ).stdout.split()[2].strip("()").removeprefix("v") ) mkosi-24.3/mkosi/context.py000066400000000000000000000071711465176501400157630ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Sequence from contextlib import AbstractContextManager from pathlib import Path from typing import Optional from mkosi.config import Args, Config from mkosi.sandbox import Mount from mkosi.tree import make_tree from mkosi.types import PathString from mkosi.util import umask class Context: """State related properties.""" def __init__( self, args: Args, config: Config, *, workspace: Path, resources: Path, package_cache_dir: Optional[Path] = None, package_dir: Optional[Path] = None, ) -> None: self.args = args self.config = config self.workspace = workspace self.resources = resources self.package_cache_dir = package_cache_dir or (self.root / "var") self.package_dir = package_dir or (self.workspace / "packages") self.package_dir.mkdir(exist_ok=True) with umask(~0o755): # Using a btrfs subvolume as the upperdir in an overlayfs results in EXDEV so make sure we create # the root directory as a regular directory if the Overlay= option is enabled. if config.overlay: self.root.mkdir() else: make_tree( self.root, use_subvolumes=self.config.use_subvolumes, sandbox=config.sandbox, ) self.staging.mkdir() self.pkgmngr.mkdir() self.repository.mkdir() self.artifacts.mkdir() self.install_dir.mkdir() @property def root(self) -> Path: return self.workspace / "root" @property def staging(self) -> Path: return self.workspace / "staging" @property def pkgmngr(self) -> Path: return self.workspace / "pkgmngr" @property def repository(self) -> Path: return self.workspace / "repository" @property def artifacts(self) -> Path: return self.workspace / "artifacts" @property def install_dir(self) -> Path: return self.workspace / "dest" def sandbox( self, *, binary: Optional[PathString], network: bool = False, devices: bool = False, vartmp: bool = False, scripts: Optional[Path] = None, mounts: Sequence[Mount] = (), options: Sequence[PathString] = (), extra: Sequence[PathString] = (), ) -> AbstractContextManager[list[PathString]]: if (self.pkgmngr / "usr").exists(): extra = [ "sh", "-c", f"mount -t overlay -o lowerdir={self.pkgmngr / 'usr'}:/usr overlayfs /usr && exec $0 \"$@\"", *extra, ] return self.config.sandbox( binary=binary, network=network, devices=devices, vartmp=vartmp, scripts=scripts, mounts=[ # This mount is writable so bubblewrap can create extra directories or symlinks inside of it as needed. # This isn't a problem as the package manager directory is created by mkosi and thrown away when the # build finishes. 
Mount(self.pkgmngr / "etc", "/etc"), Mount(self.pkgmngr / "var/log", "/var/log"), *([Mount(p, p, ro=True)] if (p := self.pkgmngr / "usr").exists() else []), *mounts, ], options=[ "--uid", "0", "--gid", "0", "--cap-add", "ALL", *options, ], extra=extra, ) mkosi-24.3/mkosi/distributions/000077500000000000000000000000001465176501400166215ustar00rootroot00000000000000mkosi-24.3/mkosi/distributions/__init__.py000066400000000000000000000132621465176501400207360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import enum import importlib import re import urllib.parse from collections.abc import Sequence from typing import TYPE_CHECKING, Optional, cast from mkosi.util import StrEnum, read_env_file if TYPE_CHECKING: from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.installer import PackageManager class PackageType(StrEnum): none = enum.auto() rpm = enum.auto() deb = enum.auto() pkg = enum.auto() class DistributionInstaller: @classmethod def pretty_name(cls) -> str: raise NotImplementedError @classmethod def package_manager(cls, config: "Config") -> type["PackageManager"]: raise NotImplementedError @classmethod def setup(cls, context: "Context") -> None: raise NotImplementedError @classmethod def install(cls, context: "Context") -> None: raise NotImplementedError @classmethod def install_packages(cls, context: "Context", packages: Sequence[str]) -> None: raise NotImplementedError @classmethod def remove_packages(cls, context: "Context", packages: Sequence[str]) -> None: raise NotImplementedError @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def architecture(cls, arch: "Architecture") -> str: raise NotImplementedError @classmethod def package_type(cls) -> PackageType: return PackageType.none @classmethod def default_release(cls) -> str: return "" @classmethod def default_tools_tree_distribution(cls) -> Optional["Distribution"]: return None @classmethod def grub_prefix(cls) -> str: return "grub" class Distribution(StrEnum): # Please consult docs/distribution-policy.md and contact one # of the mkosi maintainers before implementing a new distribution. 
fedora = enum.auto() debian = enum.auto() ubuntu = enum.auto() arch = enum.auto() opensuse = enum.auto() mageia = enum.auto() centos = enum.auto() rhel = enum.auto() rhel_ubi = enum.auto() openmandriva = enum.auto() rocky = enum.auto() alma = enum.auto() custom = enum.auto() def is_centos_variant(self) -> bool: return self in ( Distribution.centos, Distribution.alma, Distribution.rocky, Distribution.rhel, Distribution.rhel_ubi, ) def is_apt_distribution(self) -> bool: return self in (Distribution.debian, Distribution.ubuntu) def pretty_name(self) -> str: return self.installer().pretty_name() def package_manager(self, config: "Config") -> type["PackageManager"]: return self.installer().package_manager(config) def setup(self, context: "Context") -> None: return self.installer().setup(context) def install(self, context: "Context") -> None: return self.installer().install(context) def install_packages(self, context: "Context", packages: Sequence[str]) -> None: return self.installer().install_packages(context, packages) def remove_packages(self, context: "Context", packages: Sequence[str]) -> None: return self.installer().remove_packages(context, packages) def filesystem(self) -> str: return self.installer().filesystem() def architecture(self, arch: "Architecture") -> str: return self.installer().architecture(arch) def package_type(self) -> PackageType: return self.installer().package_type() def default_release(self) -> str: return self.installer().default_release() def default_tools_tree_distribution(self) -> "Distribution": return self.installer().default_tools_tree_distribution() or self def grub_prefix(self) -> str: return self.installer().grub_prefix() def createrepo(self, context: "Context") -> None: return self.installer().package_manager(context.config).createrepo(context) def installer(self) -> type[DistributionInstaller]: modname = str(self).replace('-', '_') mod = importlib.import_module(f"mkosi.distributions.{modname}") installer = getattr(mod, "Installer") assert issubclass(installer, DistributionInstaller) return cast(type[DistributionInstaller], installer) def detect_distribution() -> tuple[Optional[Distribution], Optional[str]]: try: os_release = read_env_file("/usr/lib/os-release") except FileNotFoundError: return None, None dist_id = os_release.get("ID", "linux") dist_id_like = os_release.get("ID_LIKE", "").split() version = os_release.get("VERSION", None) version_id = os_release.get("VERSION_ID", None) version_codename = os_release.get("VERSION_CODENAME", None) extracted_codename = None if version: # extract Debian release codename m = re.search(r"\((.*?)\)", version) if m: extracted_codename = m.group(1) d: Optional[Distribution] = None for the_id in [dist_id, *dist_id_like]: d = Distribution.__members__.get(the_id, None) if d is not None: break if d in {Distribution.debian, Distribution.ubuntu} and (version_codename or extracted_codename): version_id = version_codename or extracted_codename return d, version_id def join_mirror(mirror: str, link: str) -> str: # urljoin() behaves weirdly if the base does not end with a / or the path starts with a / so fix them up as needed. 
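    # For example (editorial sketch of the stdlib behaviour being worked around; mirror.example is a
    # placeholder host):
    #
    #     >>> from urllib.parse import urljoin
    #     >>> urljoin("https://mirror.example/fedora", "updates")    # base without trailing /
    #     'https://mirror.example/updates'
    #     >>> urljoin("https://mirror.example/fedora/", "/updates")  # link with leading /
    #     'https://mirror.example/updates'
    #     >>> urljoin("https://mirror.example/fedora/", "updates")   # what we actually want
    #     'https://mirror.example/fedora/updates'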
if not mirror.endswith("/"): mirror = f"{mirror}/" link = link.removeprefix("/") return urllib.parse.urljoin(mirror, link) mkosi-24.3/mkosi/distributions/alma.py000066400000000000000000000022121465176501400201020ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from mkosi.context import Context from mkosi.distributions import centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey class Installer(centos.Installer): @classmethod def pretty_name(cls) -> str: return "AlmaLinux" @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-AlmaLinux-{context.config.release}", ) or f"https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux-{context.config.release}", ) @classmethod def repository_variants(cls, context: Context, repo: str) -> list[RpmRepository]: if context.config.mirror: url = f"baseurl={join_mirror(context.config.mirror, f'$releasever/{repo}/$basearch/os')}" else: url = f"mirrorlist=https://mirrors.almalinux.org/mirrorlist/$releasever/{repo.lower()}" return [RpmRepository(repo, url, cls.gpgurls(context))] @classmethod def sig_repositories(cls, context: Context) -> list[RpmRepository]: return [] mkosi-24.3/mkosi/distributions/arch.py000066400000000000000000000062261465176501400201160ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable, Sequence from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.distributions import DistributionInstaller, PackageType from mkosi.installer import PackageManager from mkosi.installer.pacman import Pacman, PacmanRepository from mkosi.log import die from mkosi.util import listify, sort_packages class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "Arch Linux" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def package_type(cls) -> PackageType: return PackageType.pkg @classmethod def default_release(cls) -> str: return "rolling" @classmethod def package_manager(cls, config: "Config") -> type[PackageManager]: return Pacman @classmethod def setup(cls, context: Context) -> None: Pacman.setup(context, cls.repositories(context)) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: Pacman.invoke( context, "--sync", ["--needed", "--assume-installed", "initramfs", *sort_packages(packages)], apivfs=apivfs, ) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: Pacman.invoke(context, "--remove", ["--nosave", "--recursive", *packages], apivfs=True) @classmethod @listify def repositories(cls, context: Context) -> Iterable[PacmanRepository]: if context.config.local_mirror: yield PacmanRepository("core", context.config.local_mirror) else: if context.config.architecture.is_arm_variant(): url = f"{context.config.mirror or 'http://mirror.archlinuxarm.org'}/$arch/$repo" else: url = f"{context.config.mirror or 'https://geo.mirror.pkgbuild.com'}/$repo/os/$arch" # Testing repositories have to go before regular ones to to take precedence. 
            repos = [
                repo
                for repo in (
                    "core-testing",
                    "core-testing-debug",
                    "extra-testing",
                    "extra-testing-debug",
                    "core-debug",
                    "extra-debug",
                    "multilib-testing",
                    "multilib",
                )
                if repo in context.config.repositories
            ] + ["core", "extra"]

            if context.config.architecture.is_arm_variant():
                repos += ["alarm"]

            for repo in repos:
                yield PacmanRepository(repo, url)

    @classmethod
    def architecture(cls, arch: Architecture) -> str:
        a = {
            Architecture.x86_64 : "x86_64",
            Architecture.arm64  : "aarch64",
            Architecture.arm    : "armv7h",
        }.get(arch)

        if not a:
            die(f"Architecture {arch} is not supported by Arch Linux")

        return a
mkosi-24.3/mkosi/distributions/centos.py000066400000000000000000000332611465176501400204730ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later

from collections.abc import Iterable, Sequence

from mkosi.config import Architecture, Config
from mkosi.context import Context
from mkosi.distributions import (
    Distribution,
    DistributionInstaller,
    PackageType,
    join_mirror,
)
from mkosi.installer import PackageManager
from mkosi.installer.dnf import Dnf
from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey, setup_rpm
from mkosi.log import die
from mkosi.util import listify
from mkosi.versioncomp import GenericVersion

CENTOS_SIG_REPO_PRIORITY = 50


class Installer(DistributionInstaller):
    @classmethod
    def pretty_name(cls) -> str:
        return "CentOS"

    @classmethod
    def filesystem(cls) -> str:
        return "xfs"

    @classmethod
    def package_type(cls) -> PackageType:
        return PackageType.rpm

    @classmethod
    def default_release(cls) -> str:
        return "9"

    @classmethod
    def default_tools_tree_distribution(cls) -> Distribution:
        return Distribution.fedora

    @classmethod
    def package_manager(cls, config: "Config") -> type[PackageManager]:
        return Dnf

    @classmethod
    def grub_prefix(cls) -> str:
        return "grub2"

    @classmethod
    def dbpath(cls, context: Context) -> str:
        # The Hyperscale SIG uses /usr/lib/sysimage/rpm in its rebuild of rpm for C9S that's shipped in the
        # hyperscale-packages-experimental repository.
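        # Editorial note: /usr/lib/sysimage/rpm is the dbpath newer rpm releases default to, while
        # /var/lib/rpm is the traditional location; if the two sides disagree, packages installed into the
        # image would be invisible to the rpm shipped in it, hence the version/repository check below.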
        if (
            GenericVersion(context.config.release) > 9
            or "hyperscale-packages-experimental" in context.config.repositories
        ):
            return "/usr/lib/sysimage/rpm"

        return "/var/lib/rpm"

    @classmethod
    def setup(cls, context: Context) -> None:
        if GenericVersion(context.config.release) <= 8:
            die(f"{cls.pretty_name()} Stream 8 or earlier variants are not supported")

        Dnf.setup(context, cls.repositories(context))
        (context.pkgmngr / "etc/dnf/vars/stream").write_text(f"{context.config.release}-stream\n")
        setup_rpm(context, dbpath=cls.dbpath(context))

    @classmethod
    def install(cls, context: Context) -> None:
        cls.install_packages(context, ["basesystem"], apivfs=False)

    @classmethod
    def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None:
        Dnf.invoke(context, "install", packages, apivfs=apivfs)

    @classmethod
    def remove_packages(cls, context: Context, packages: Sequence[str]) -> None:
        Dnf.invoke(context, "remove", packages, apivfs=True)

    @classmethod
    def architecture(cls, arch: Architecture) -> str:
        a = {
            Architecture.x86_64   : "x86_64",
            Architecture.ppc64_le : "ppc64le",
            Architecture.s390x    : "s390x",
            Architecture.arm64    : "aarch64",
        }.get(arch)

        if not a:
            die(f"Architecture {arch} is not supported by {cls.pretty_name()}")

        return a

    @staticmethod
    def gpgurls(context: Context) -> tuple[str, ...]:
        rel = "RPM-GPG-KEY-CentOS-Official" if context.config.release == "9" else "RPM-GPG-KEY-CentOS-Official-SHA256"
        return tuple(
            find_rpm_gpgkey(context, key) or f"https://www.centos.org/keys/{key}"
            for key in (rel, "RPM-GPG-KEY-CentOS-SIG-Extras")
        )

    @classmethod
    def repository_variants(cls, context: Context, repo: str) -> Iterable[RpmRepository]:
        if context.config.local_mirror:
            yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", cls.gpgurls(context))
        elif mirror := context.config.mirror:
            if repo == "extras":
                yield RpmRepository(
                    repo.lower(),
                    f"baseurl={join_mirror(mirror, f'SIGs/$stream/{repo}/$basearch/extras-common')}",
                    cls.gpgurls(context),
                )
                yield RpmRepository(
                    f"{repo.lower()}-source",
                    f"baseurl={join_mirror(mirror, f'SIGs/$stream/{repo}/source/extras-common')}",
                    cls.gpgurls(context),
                    enabled=False,
                )
            else:
                yield RpmRepository(
                    repo.lower(),
                    f"baseurl={join_mirror(mirror, f'$stream/{repo}/$basearch/os')}",
                    cls.gpgurls(context),
                )
                yield RpmRepository(
                    f"{repo.lower()}-debuginfo",
                    f"baseurl={join_mirror(mirror, f'$stream/{repo}/$basearch/debug/tree')}",
                    cls.gpgurls(context),
                    enabled=False,
                )
                yield RpmRepository(
                    f"{repo.lower()}-source",
                    f"baseurl={join_mirror(mirror, f'$stream/{repo}/source/tree')}",
                    cls.gpgurls(context),
                    enabled=False,
                )
        else:
            url = "metalink=https://mirrors.centos.org/metalink"

            if repo == "extras":
                yield RpmRepository(
                    repo.lower(),
                    f"{url}?arch=$basearch&repo=centos-extras-sig-extras-common-$stream",
                    cls.gpgurls(context),
                )
                yield RpmRepository(
                    f"{repo.lower()}-source",
                    f"{url}?arch=source&repo=centos-extras-sig-extras-common-source-$stream",
                    cls.gpgurls(context),
                    enabled=False,
                )
            else:
                yield RpmRepository(
                    repo.lower(),
                    f"{url}?arch=$basearch&repo=centos-{repo.lower()}-$stream",
                    cls.gpgurls(context),
                )
                yield RpmRepository(
                    f"{repo.lower()}-debuginfo",
                    f"{url}?arch=$basearch&repo=centos-{repo.lower()}-debug-$stream",
                    cls.gpgurls(context),
                    enabled=False,
                )
                yield RpmRepository(
                    f"{repo.lower()}-source",
                    f"{url}?arch=source&repo=centos-{repo.lower()}-source-$stream",
                    cls.gpgurls(context),
                    enabled=False,
                )

    @classmethod
    @listify
    def repositories(cls, context: Context) -> Iterable[RpmRepository]:
        if context.config.local_mirror:
            yield from cls.repository_variants(context, "AppStream")
            return

        yield from cls.repository_variants(context, "BaseOS")
        yield from cls.repository_variants(context, "AppStream")
        yield from cls.repository_variants(context, "extras")
        yield from cls.repository_variants(context, "CRB")

        yield from cls.epel_repositories(context)
        yield from cls.sig_repositories(context)

    @classmethod
    def epel_repositories(cls, context: Context) -> Iterable[RpmRepository]:
        gpgurls = (
            find_rpm_gpgkey(
                context,
                f"RPM-GPG-KEY-EPEL-{context.config.release}",
            ) or f"https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{context.config.release}",
        )

        if context.config.local_mirror:
            return

        if mirror := context.config.mirror:
            for repo, dir in (
                ("epel", "epel"),
                ("epel-next", "epel/next"),
                ("epel-testing", "epel/testing"),
                ("epel-next-testing", "epel/testing/next")
            ):
                # For EPEL we make the assumption that epel is mirrored in the parent directory of the
                # mirror URL and path we were given. Since this doesn't work for all scenarios, we also
                # allow overriding the mirror via an environment variable.
                url = context.config.environment.get("EPEL_MIRROR", join_mirror(mirror, "../fedora"))
                yield RpmRepository(
                    repo,
                    f"baseurl={url}/{dir}/$releasever/Everything/$basearch",
                    gpgurls,
                    enabled=False,
                )
                yield RpmRepository(
                    f"{repo}-debuginfo",
                    f"baseurl={url}/{dir}/$releasever/Everything/$basearch/debug",
                    gpgurls,
                    enabled=False,
                )
                yield RpmRepository(
                    f"{repo}-source",
                    f"baseurl={url}/{dir}/$releasever/Everything/source/tree",
                    gpgurls,
                    enabled=False,
                )
        else:
            url = "metalink=https://mirrors.fedoraproject.org/metalink?arch=$basearch"
            for repo in ("epel", "epel-next"):
                yield RpmRepository(repo, f"{url}&repo={repo}-$releasever", gpgurls, enabled=False)
                yield RpmRepository(
                    f"{repo}-debuginfo", f"{url}&repo={repo}-debug-$releasever", gpgurls, enabled=False
                )
                yield RpmRepository(
                    f"{repo}-source", f"{url}&repo={repo}-source-$releasever", gpgurls, enabled=False
                )

            yield RpmRepository(
                "epel-testing", f"{url}&repo=testing-epel$releasever", gpgurls, enabled=False
            )
            yield RpmRepository(
                "epel-testing-debuginfo", f"{url}&repo=testing-debug-epel$releasever", gpgurls, enabled=False
            )
            yield RpmRepository(
                "epel-testing-source", f"{url}&repo=testing-source-epel$releasever", gpgurls, enabled=False
            )
            yield RpmRepository(
                "epel-next-testing", f"{url}&repo=epel-testing-next-$releasever", gpgurls, enabled=False
            )
            yield RpmRepository(
                "epel-next-testing-debuginfo",
                f"{url}&repo=epel-testing-next-debug-$releasever",
                gpgurls,
                enabled=False,
            )
            yield RpmRepository(
                "epel-next-testing-source",
                f"{url}&repo=epel-testing-next-source-$releasever",
                gpgurls,
                enabled=False,
            )

    @classmethod
    def sig_repositories(cls, context: Context) -> Iterable[RpmRepository]:
        if context.config.local_mirror:
            return

        sigs = (
            (
                "hyperscale",
                (f"packages-{c}" for c in ("main", "experimental", "facebook", "hotfixes", "spin", "intel")),
                ("RPM-GPG-KEY-CentOS-SIG-HyperScale",),
            ),
        )

        for sig, components, keys in sigs:
            gpgurls = tuple(find_rpm_gpgkey(context, key) or f"https://www.centos.org/keys/{key}" for key in keys)

            for c in components:
                if mirror := context.config.mirror:
                    yield RpmRepository(
                        f"{sig}-{c}",
                        f"baseurl={join_mirror(mirror, f'SIGs/$stream/{sig}/$basearch/{c}')}",
                        gpgurls,
                        enabled=False,
                        priority=CENTOS_SIG_REPO_PRIORITY,
                    )
                    yield RpmRepository(
                        f"{sig}-{c}-debuginfo",
                        f"baseurl={join_mirror(mirror, f'SIGs/$stream/{sig}/$basearch/{c}/debug')}",
                        gpgurls,
                        enabled=False,
                        priority=CENTOS_SIG_REPO_PRIORITY,
                    )
                    yield RpmRepository(
                        f"{sig}-{c}-source",
                        f"baseurl={join_mirror(mirror, f'SIGs/$stream/{sig}/source/{c}')}",
                        gpgurls,
                        enabled=False,
                        priority=CENTOS_SIG_REPO_PRIORITY,
                    )
                else:
                    url = "metalink=https://mirrors.centos.org/metalink"
                    yield RpmRepository(
                        f"{sig}-{c}",
                        f"{url}?arch=$basearch&repo=centos-{sig}-sig-{c}-$stream",
                        gpgurls,
                        enabled=False,
                        priority=CENTOS_SIG_REPO_PRIORITY,
                    )
                    yield RpmRepository(
                        f"{sig}-{c}-debuginfo",
                        f"{url}?arch=$basearch&repo=centos-{sig}-sig-{c}-debug-$stream",
                        gpgurls,
                        enabled=False,
                        priority=CENTOS_SIG_REPO_PRIORITY,
                    )
                    yield RpmRepository(
                        f"{sig}-{c}-source",
                        f"{url}?arch=source&repo=centos-{sig}-sig-{c}-source-$stream",
                        gpgurls,
                        enabled=False,
                        priority=CENTOS_SIG_REPO_PRIORITY,
                    )

                yield RpmRepository(
                    f"{sig}-{c}-testing",
                    f"baseurl=https://buildlogs.centos.org/centos/$stream/{sig}/$basearch/{c}",
                    gpgurls,
                    enabled=False,
                    priority=CENTOS_SIG_REPO_PRIORITY,
                )
                yield RpmRepository(
                    f"{sig}-{c}-testing-debuginfo",
                    f"baseurl=https://buildlogs.centos.org/centos/$stream/{sig}/$basearch/{c}",
                    gpgurls,
                    enabled=False,
                    priority=CENTOS_SIG_REPO_PRIORITY,
                )
mkosi-24.3/mkosi/distributions/custom.py000066400000000000000000000021151465176501400205040ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later

from collections.abc import Sequence

from mkosi.config import Architecture, Config
from mkosi.context import Context
from mkosi.distributions import DistributionInstaller
from mkosi.installer import PackageManager
from mkosi.log import die


class Installer(DistributionInstaller):
    @classmethod
    def architecture(cls, arch: Architecture) -> str:
        return str(arch)

    @classmethod
    def package_manager(cls, config: Config) -> type[PackageManager]:
        return PackageManager

    @classmethod
    def setup(cls, context: Context) -> None:
        pass

    @classmethod
    def install(cls, context: Context) -> None:
        pass

    @classmethod
    def install_packages(cls, context: Context, packages: Sequence[str]) -> None:
        if packages:
            die("Installing packages is not supported for custom distributions")

    @classmethod
    def remove_packages(cls, context: Context, packages: Sequence[str]) -> None:
        if packages:
            die("Removing packages is not supported for custom distributions")
mkosi-24.3/mkosi/distributions/debian.py000066400000000000000000000241161465176501400204210ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later

import tempfile
from collections.abc import Iterable, Sequence
from pathlib import Path

from mkosi.archive import extract_tar
from mkosi.config import Architecture, Config
from mkosi.context import Context
from mkosi.distributions import DistributionInstaller, PackageType
from mkosi.installer import PackageManager
from mkosi.installer.apt import Apt, AptRepository
from mkosi.log import die
from mkosi.run import run
from mkosi.sandbox import Mount
from mkosi.util import listify, umask


class Installer(DistributionInstaller):
    @classmethod
    def pretty_name(cls) -> str:
        return "Debian"

    @classmethod
    def filesystem(cls) -> str:
        return "ext4"

    @classmethod
    def package_type(cls) -> PackageType:
        return PackageType.deb

    @classmethod
    def default_release(cls) -> str:
        return "testing"

    @classmethod
    def package_manager(cls, config: Config) -> type[PackageManager]:
        return Apt

    @staticmethod
    @listify
    def repositories(context: Context, local: bool = True) -> Iterable[AptRepository]:
        types = ("deb", "deb-src")
        components = ("main", *context.config.repositories)

        if context.config.local_mirror and local:
            yield AptRepository(
                types=("deb",),
                url=context.config.local_mirror,
                suite=context.config.release,
                components=("main",),
                signedby=None,
            )
            return

        mirror = context.config.mirror or "http://deb.debian.org/debian"
"http://deb.debian.org/debian" signedby = Path("/usr/share/keyrings/debian-archive-keyring.gpg") yield AptRepository( types=types, url=mirror, suite=context.config.release, components=components, signedby=signedby, ) # Debug repos are typically not mirrored. url = "http://deb.debian.org/debian-debug" yield AptRepository( types=types, url=url, suite=f"{context.config.release}-debug", components=components, signedby=signedby, ) if context.config.release in ("unstable", "sid"): return yield AptRepository( types=types, url=mirror, suite=f"{context.config.release}-updates", components=components, signedby=signedby, ) yield AptRepository( types=types, # Security updates repos are never mirrored. url="http://security.debian.org/debian-security", suite=f"{context.config.release}-security", components=components, signedby=signedby, ) @classmethod def setup(cls, context: Context) -> None: Apt.setup(context, cls.repositories(context)) @classmethod def install(cls, context: Context) -> None: # Instead of using debootstrap, we replicate its core functionality here. Because dpkg does not have # an option to delay running pre-install maintainer scripts when it installs a package, it's # impossible to use apt directly to bootstrap a Debian chroot since dpkg will try to run a maintainer # script which depends on some basic tool to be available in the chroot from a deb which hasn't been # unpacked yet, causing the script to fail. To avoid these issues, we have to extract all the # essential debs first, and only then run the maintainer scripts for them. # First, we set up merged usr. # This list is taken from https://salsa.debian.org/installer-team/debootstrap/-/blob/master/functions#L1369. subdirs = ["bin", "sbin", "lib"] + { "amd64" : ["lib32", "lib64", "libx32"], "i386" : ["lib64", "libx32"], "mips" : ["lib32", "lib64"], "mipsel" : ["lib32", "lib64"], "mips64el" : ["lib32", "lib64", "libo32"], "loongarch64" : ["lib32", "lib64"], "powerpc" : ["lib64"], "ppc64" : ["lib32", "lib64"], "ppc64el" : ["lib64"], "s390x" : ["lib32"], "sparc" : ["lib64"], "sparc64" : ["lib32", "lib64"], "x32" : ["lib32", "lib64", "libx32"], }.get(context.config.distribution.architecture(context.config.architecture), []) with umask(~0o755): for d in subdirs: (context.root / d).symlink_to(f"usr/{d}") (context.root / f"usr/{d}").mkdir(parents=True, exist_ok=True) # Next, we invoke apt-get install to download all the essential packages. With DPkg::Pre-Install-Pkgs, # we specify a shell command that will receive the list of packages that will be installed on stdin. # By configuring Debug::pkgDpkgPm=1, apt-get install will not actually execute any dpkg commands, so # all it does is download the essential debs and tell us their full in the apt cache without actually # installing them. with tempfile.NamedTemporaryFile(mode="r") as f: Apt.invoke( context, "install", [ "-oDebug::pkgDPkgPm=1", f"-oDPkg::Pre-Install-Pkgs::=cat >{f.name}", "?essential", "?exact-name(usr-is-merged)", "base-files", ], mounts=[Mount(f.name, f.name)], ) essential = f.read().strip().splitlines() # Now, extract the debs to the chroot by first extracting the sources tar file out of the deb and # then extracting the tar file into the chroot. for deb in essential: # If a deb path is in the form of "/var/cache/apt/", we transform it to the corresponding path in # mkosi's package cache directory. If it's relative to /repository, we transform it to the corresponding # path in mkosi's local package repository. Otherwise, we use the path as is. 
            if Path(deb).is_relative_to("/var/cache"):
                path = context.config.package_cache_dir_or_default() / Path(deb).relative_to("/var")
            elif Path(deb).is_relative_to("/repository"):
                path = context.repository / Path(deb).relative_to("/repository")
            else:
                path = Path(deb)

            with open(path, "rb") as i, tempfile.NamedTemporaryFile() as o:
                run(
                    ["dpkg-deb", "--fsys-tarfile", "/dev/stdin"],
                    stdin=i,
                    stdout=o,
                    sandbox=context.sandbox(binary="dpkg-deb"),
                )
                extract_tar(
                    Path(o.name),
                    context.root,
                    log=False,
                    options=(
                        [f"--exclude=./{glob}" for glob in Apt.documentation_exclude_globs]
                        if not context.config.with_docs
                        else []
                    ),
                    sandbox=context.sandbox
                )

        # Finally, run apt to properly install packages in the chroot without having to worry that maintainer
        # scripts won't find basic tools that they depend on.
        cls.install_packages(context, [Path(deb).name.partition("_")[0].removesuffix(".deb") for deb in essential])

    @classmethod
    def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None:
        # Debian policy is to start daemons by default. The policy-rc.d script can be used to choose which
        # ones to start. Let's install one that denies all daemon startups.
        # See https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt for more information.
        # Note: despite writing in /usr/sbin, this file is not shipped by the OS and instead should be
        # managed by the admin.
        policyrcd = context.root / "usr/sbin/policy-rc.d"
        with umask(~0o755):
            policyrcd.parent.mkdir(parents=True, exist_ok=True)
        with umask(~0o644):
            policyrcd.write_text("#!/bin/sh\nexit 101\n")

        Apt.invoke(context, "install", packages, apivfs=apivfs)
        install_apt_sources(context, cls.repositories(context, local=False))

        policyrcd.unlink()

        # systemd-gpt-auto-generator is disabled by default in Ubuntu:
        # https://git.launchpad.net/ubuntu/+source/systemd/tree/debian/systemd.links?h=ubuntu/noble-proposed.
        # Let's make sure it is enabled by default in our images.
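        # Editorial note: the mask is a symlink (conventionally pointing at /dev/null) under
        # /etc/systemd/system-generators/, which shadows the generator shipped in
        # /usr/lib/systemd/system-generators/; removing the /etc entry is what re-enables it, hence the
        # unlink() below.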
(context.root / "etc/systemd/system-generators/systemd-gpt-auto-generator").unlink(missing_ok=True) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: Apt.invoke(context, "purge", packages, apivfs=True) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.arm64 : "arm64", Architecture.arm : "armhf", Architecture.alpha : "alpha", Architecture.x86_64 : "amd64", Architecture.x86 : "i386", Architecture.ia64 : "ia64", Architecture.loongarch64 : "loongarch64", Architecture.mips64_le : "mips64el", Architecture.mips_le : "mipsel", Architecture.parisc : "hppa", Architecture.ppc64_le : "ppc64el", Architecture.ppc64 : "ppc64", Architecture.riscv64 : "riscv64", Architecture.s390x : "s390x", Architecture.s390 : "s390", }.get(arch) if not a: die(f"Architecture {arch} is not supported by Debian") return a def install_apt_sources(context: Context, repos: Iterable[AptRepository]) -> None: if not (context.root / "usr/bin/apt").exists(): return sources = context.root / f"etc/apt/sources.list.d/{context.config.release}.sources" if not sources.exists(): with sources.open("w") as f: for repo in repos: f.write(str(repo)) mkosi-24.3/mkosi/distributions/fedora.py000066400000000000000000000164541465176501400204450ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import re from collections.abc import Iterable, Sequence from pathlib import Path from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.distributions import ( DistributionInstaller, PackageType, join_mirror, ) from mkosi.installer import PackageManager from mkosi.installer.dnf import Dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey, setup_rpm from mkosi.log import die from mkosi.util import listify, startswith, tuplify @tuplify def find_fedora_rpm_gpgkeys(context: Context) -> Iterable[str]: key1 = find_rpm_gpgkey(context, key=f"RPM-GPG-KEY-fedora-{context.config.release}-primary") key2 = find_rpm_gpgkey(context, key=f"RPM-GPG-KEY-fedora-{context.config.release}-secondary") if key1: # During branching, there is always a kerfuffle with the key transition. # For Rawhide, try to load the N+1 key, just in case our local configuration # still indicates that Rawhide==N, but really Rawhide==N+1. if context.config.release == "rawhide" and (rhs := startswith(key1, "file://")): path = Path(rhs).resolve() if m := re.match(r"RPM-GPG-KEY-fedora-(\d+)-(primary|secondary)", path.name): version = int(m.group(1)) if key3 := find_rpm_gpgkey(context, key=f"RPM-GPG-KEY-fedora-{version + 1}-primary"): # We yield the resolved path for key1, to make it clear that it's # for version N, and the other key is for version N+1. 
key1 = path.as_uri() yield key3 yield key1 if key2: yield key2 if not key1 and not key2: yield "https://fedoraproject.org/fedora.gpg" class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "Fedora Linux" @classmethod def filesystem(cls) -> str: return "btrfs" @classmethod def package_type(cls) -> PackageType: return PackageType.rpm @classmethod def default_release(cls) -> str: return "40" @classmethod def grub_prefix(cls) -> str: return "grub2" @classmethod def package_manager(cls, config: Config) -> type[PackageManager]: return Dnf @classmethod def setup(cls, context: Context) -> None: Dnf.setup(context, cls.repositories(context), filelists=False) setup_rpm(context) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["basesystem"], apivfs=False) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: Dnf.invoke(context, "install", packages, apivfs=apivfs) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: Dnf.invoke(context, "remove", packages, apivfs=True) @classmethod @listify def repositories(cls, context: Context) -> Iterable[RpmRepository]: gpgurls = find_fedora_rpm_gpgkeys(context) if context.config.local_mirror: yield RpmRepository("fedora", f"baseurl={context.config.local_mirror}", gpgurls) return if context.config.release == "eln": mirror = context.config.mirror or "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose" for repo in ("Appstream", "BaseOS", "Extras", "CRB"): url = f"baseurl={join_mirror(mirror, repo)}" yield RpmRepository(repo.lower(), f"{url}/$basearch/os", gpgurls) yield RpmRepository(repo.lower(), f"{url}/$basearch/debug/tree", gpgurls, enabled=False) yield RpmRepository(repo.lower(), f"{url}/source/tree", gpgurls, enabled=False) elif (m := context.config.mirror): directory = "development" if context.config.release == "rawhide" else "releases" url = f"baseurl={join_mirror(m, f'linux/{directory}/$releasever/Everything')}" yield RpmRepository("fedora", f"{url}/$basearch/os", gpgurls) yield RpmRepository("fedora-debuginfo", f"{url}/$basearch/debug/tree", gpgurls, enabled=False) yield RpmRepository("fedora-source", f"{url}/source/tree", gpgurls, enabled=False) if context.config.release != "rawhide": url = f"baseurl={join_mirror(m, 'linux/updates/$releasever/Everything')}" yield RpmRepository("updates", f"{url}/$basearch", gpgurls) yield RpmRepository("updates-debuginfo", f"{url}/$basearch/debug", gpgurls, enabled=False) yield RpmRepository("updates-source", f"{url}/source/tree", gpgurls, enabled=False) url = f"baseurl={join_mirror(m, 'linux/updates/testing/$releasever/Everything')}" yield RpmRepository("updates-testing", f"{url}/$basearch", gpgurls, enabled=False) yield RpmRepository("updates-testing-debuginfo", f"{url}/$basearch/debug", gpgurls, enabled=False) yield RpmRepository("updates-testing-source", f"{url}/source/tree", gpgurls, enabled=False) else: url = "metalink=https://mirrors.fedoraproject.org/metalink?arch=$basearch" yield RpmRepository("fedora", f"{url}&repo=fedora-$releasever", gpgurls) yield RpmRepository("fedora-debuginfo", f"{url}&repo=fedora-debug-$releasever", gpgurls, enabled=False) yield RpmRepository("fedora-source", f"{url}&repo=fedora-source-$releasever", gpgurls, enabled=False) if context.config.release != "rawhide": yield RpmRepository("updates", f"{url}&repo=updates-released-f$releasever", gpgurls) yield RpmRepository( "updates-debuginfo", 
f"{url}&repo=updates-released-debug-f$releasever", gpgurls, enabled=False, ) yield RpmRepository( "updates-source", f"{url}&repo=updates-released-source-f$releasever", gpgurls, enabled=False ) yield RpmRepository( "updates-testing", f"{url}&repo=updates-testing-f$releasever", gpgurls, enabled=False ) yield RpmRepository( "updates-testing-debuginfo", f"{url}&repo=updates-testing-debug-f$releasever", gpgurls, enabled=False, ) yield RpmRepository( "updates-testing-source", f"{url}&repo=updates-testing-source-f$releasever", gpgurls, enabled=False, ) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.arm64 : "aarch64", Architecture.mips64_le : "mips64el", Architecture.mips_le : "mipsel", Architecture.ppc64_le : "ppc64le", Architecture.riscv64 : "riscv64", Architecture.s390x : "s390x", Architecture.x86_64 : "x86_64", }.get(arch) if not a: die(f"Architecture {a} is not supported by Fedora") return a mkosi-24.3/mkosi/distributions/mageia.py000066400000000000000000000041001465176501400204110ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import fedora, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die from mkosi.util import listify class Installer(fedora.Installer): @classmethod def pretty_name(cls) -> str: return "Mageia" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def default_release(cls) -> str: return "cauldron" @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod @listify def repositories(cls, context: Context) -> Iterable[RpmRepository]: gpgurls = ( find_rpm_gpgkey( context, "RPM-GPG-KEY-Mageia", ) or "https://mirrors.kernel.org/mageia/distrib/$releasever/$basearch/media/core/release/media_info/pubkey", ) if context.config.local_mirror: yield RpmRepository("core-release", f"baseurl={context.config.local_mirror}", gpgurls) return if context.config.mirror: url = f"baseurl={join_mirror(context.config.mirror, 'distrib/$releasever/$basearch/media/core/')}" yield RpmRepository("core-release", f"{url}/release", gpgurls) yield RpmRepository("core-updates", f"{url}/updates/", gpgurls) else: url = "mirrorlist=https://www.mageia.org/mirrorlist/?release=$releasever&arch=$basearch§ion=core" yield RpmRepository("core-release", f"{url}&repo=release", gpgurls) yield RpmRepository("core-updates", f"{url}&repo=updates", gpgurls) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64 : "x86_64", Architecture.arm64 : "aarch64", }.get(arch) if not a: die(f"Architecture {a} is not supported by Mageia") return a mkosi-24.3/mkosi/distributions/openmandriva.py000066400000000000000000000035621465176501400216640ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import fedora, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die from mkosi.util import listify class Installer(fedora.Installer): @classmethod def pretty_name(cls) -> str: return "OpenMandriva" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def default_release(cls) -> str: return "cooker" @classmethod def install(cls, context: Context) -> None: 
cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod @listify def repositories(cls, context: Context) -> Iterable[RpmRepository]: mirror = context.config.mirror or "http://mirror.openmandriva.org" gpgurls = ( find_rpm_gpgkey( context, "RPM-GPG-KEY-OpenMandriva", ) or "https://raw.githubusercontent.com/OpenMandrivaAssociation/openmandriva-repos/master/RPM-GPG-KEY-OpenMandriva", ) if context.config.local_mirror: yield RpmRepository("main-release", f"baseurl={context.config.local_mirror}", gpgurls) return url = f"baseurl={join_mirror(mirror, '$releasever/repository/$basearch/main')}" yield RpmRepository("main-release", f"{url}/release", gpgurls) yield RpmRepository("main-updates", f"{url}/updates", gpgurls) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64 : "x86_64", Architecture.arm64 : "aarch64", Architecture.riscv64 : "riscv64", }.get(arch) if not a: die(f"Architecture {a} is not supported by OpenMandriva") return a mkosi-24.3/mkosi/distributions/opensuse.py000066400000000000000000000240241465176501400210360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import tempfile from collections.abc import Iterable, Sequence from pathlib import Path from xml.etree import ElementTree from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.distributions import DistributionInstaller, PackageType, join_mirror from mkosi.installer import PackageManager from mkosi.installer.dnf import Dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey, setup_rpm from mkosi.installer.zypper import Zypper from mkosi.log import die from mkosi.mounts import finalize_crypto_mounts from mkosi.run import run from mkosi.sandbox import Mount from mkosi.util import listify, sort_packages class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "openSUSE" @classmethod def filesystem(cls) -> str: return "btrfs" @classmethod def package_type(cls) -> PackageType: return PackageType.rpm @classmethod def default_release(cls) -> str: return "tumbleweed" @classmethod def grub_prefix(cls) -> str: return "grub2" @classmethod def package_manager(cls, config: Config) -> type[PackageManager]: if config.find_binary("zypper"): return Zypper else: return Dnf @classmethod def setup(cls, context: Context) -> None: zypper = context.config.find_binary("zypper") if zypper: Zypper.setup(context, cls.repositories(context)) else: Dnf.setup(context, cls.repositories(context)) setup_rpm(context) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: if context.config.find_binary("zypper"): Zypper.invoke( context, "install", [ "--download", "in-advance", "--recommends" if context.config.with_recommends else "--no-recommends", *sort_packages(packages), ], apivfs=apivfs) else: Dnf.invoke(context, "install", sort_packages(packages), apivfs=apivfs) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: if context.config.find_binary("zypper"): Zypper.invoke(context, "remove", ["--clean-deps", *sort_packages(packages)], apivfs=True) else: Dnf.invoke(context, "remove", packages, apivfs=True) @classmethod @listify def repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.local_mirror: yield RpmRepository(id="local-mirror", 
url=f"baseurl={context.config.local_mirror}", gpgurls=()) return zypper = context.config.find_binary("zypper") mirror = context.config.mirror or "https://download.opensuse.org" if context.config.release == "tumbleweed" or context.config.release.isdigit(): gpgurls = ( *([p] if (p := find_rpm_gpgkey(context, key="RPM-GPG-KEY-openSUSE-Tumbleweed")) else []), *([p] if (p := find_rpm_gpgkey(context, key="RPM-GPG-KEY-openSUSE")) else []), ) if context.config.release == "tumbleweed": if context.config.architecture == Architecture.x86_64: subdir = "" else: subdir = f"ports/{cls.architecture(context.config.architecture)}" else: if context.config.architecture != Architecture.x86_64: die(f"Old snapshots are only supported for x86-64 on {cls.pretty_name()}") subdir = f"history/{context.config.release}" for repo in ("oss", "non-oss"): url = join_mirror(mirror, f"{subdir}/tumbleweed/repo/{repo}") yield RpmRepository( id=repo, url=f"baseurl={url}", gpgurls=gpgurls or (fetch_gpgurls(context, url) if not zypper else ()), enabled=repo == "oss", ) if context.config.release == "tumbleweed": for d in ("debug", "source"): url = join_mirror(mirror, f"{subdir}/{d}/tumbleweed/repo/{repo}") yield RpmRepository( id=f"{repo}-{d}", url=f"baseurl={url}", gpgurls=gpgurls or (fetch_gpgurls(context, url) if not zypper else ()), enabled=False, ) if context.config.release == "tumbleweed": url = join_mirror(mirror, f"{subdir}/update/tumbleweed") yield RpmRepository( id="oss-update", url=f"baseurl={url}", gpgurls=gpgurls or (fetch_gpgurls(context, url) if not zypper else ()), ) url = join_mirror(mirror, f"{subdir}/update/tumbleweed-non-oss") yield RpmRepository( id="non-oss-update", url=f"baseurl={url}", gpgurls=gpgurls or (fetch_gpgurls(context, url) if not zypper else ()), enabled=False, ) else: if ( context.config.release in ("current", "stable", "leap") and context.config.architecture != Architecture.x86_64 ): die(f"{cls.pretty_name()} only supports current and stable releases for the x86-64 architecture", hint="Specify either tumbleweed or a specific leap release such as 15.6") if context.config.release in ("current", "stable", "leap"): release = "openSUSE-current" else: release = f"leap/{context.config.release}" if context.config.architecture == Architecture.x86_64: subdir = "" else: subdir = f"ports/{cls.architecture(context.config.architecture)}" for repo in ("oss", "non-oss"): url = join_mirror(mirror, f"{subdir}/distribution/{release}/repo/{repo}") yield RpmRepository( id=repo, url=f"baseurl={url}", gpgurls=fetch_gpgurls(context, url) if not zypper else (), enabled=repo == "oss", ) for d in ("debug", "source"): for repo in ("oss", "non-oss"): url = join_mirror(mirror, f"{subdir}/{d}/distribution/{release}/repo/{repo}") yield RpmRepository( id=f"{repo}-{d}", url=f"baseurl={url}", gpgurls=fetch_gpgurls(context, url) if not zypper else (), enabled=False, ) if context.config.release in ("current", "stable", "leap"): url = join_mirror(mirror, f"{subdir}/update/openSUSE-current") yield RpmRepository( id="oss-update", url=f"baseurl={url}", gpgurls=fetch_gpgurls(context, url) if not zypper else (), ) url = join_mirror(mirror, f"{subdir}/update/openSUSE-non-oss-current") yield RpmRepository( id="non-oss-update", url=f"baseurl={url}", gpgurls=fetch_gpgurls(context, url) if not zypper else (), enabled=False, ) else: for repo in ("oss", "non-oss"): url = join_mirror(mirror, f"{subdir}/update/{release}/{repo}") yield RpmRepository( id=f"{repo}-update", url=f"baseurl={url}", gpgurls=fetch_gpgurls(context, url) if not zypper else 
(), enabled=repo == "oss", ) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64 : "x86_64", Architecture.arm64 : "aarch64", }.get(arch) if not a: die(f"Architecture {a} is not supported by OpenSUSE") return a def fetch_gpgurls(context: Context, repourl: str) -> tuple[str, ...]: gpgurls = [f"{repourl}/repodata/repomd.xml.key"] with tempfile.TemporaryDirectory() as d: run( [ "curl", "--location", "--output-dir", d, "--remote-name", "--no-progress-meter", "--fail", *(["--proxy", context.config.proxy_url] if context.config.proxy_url else []), *(["--noproxy", ",".join(context.config.proxy_exclude)] if context.config.proxy_exclude else []), *(["--proxy-capath", "/proxy.cacert"] if context.config.proxy_peer_certificate else []), *(["--proxy-cert", "/proxy.clientcert"] if context.config.proxy_client_certificate else []), *(["--proxy-key", "/proxy.clientkey"] if context.config.proxy_client_key else []), f"{repourl}/repodata/repomd.xml", ], sandbox=context.sandbox( binary="curl", network=True, mounts=[Mount(d, d), *finalize_crypto_mounts(context.config)], ), ) xml = (Path(d) / "repomd.xml").read_text() root = ElementTree.fromstring(xml) tags = root.find("{http://linux.duke.edu/metadata/repo}tags") if not tags: die("repomd.xml missing element") for child in tags.iter("{http://linux.duke.edu/metadata/repo}content"): if child.text and child.text.startswith("gpg-pubkey"): gpgkey = child.text.partition("?")[0] gpgurls += [f"{repourl}{gpgkey}"] return tuple(gpgurls) mkosi-24.3/mkosi/distributions/rhel.py000066400000000000000000000076351465176501400201400ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from pathlib import Path from typing import Any, Optional from mkosi.context import Context from mkosi.distributions import centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die from mkosi.util import listify class Installer(centos.Installer): @classmethod def pretty_name(cls) -> str: return "RHEL" @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: major = int(float(context.config.release)) return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-redhat{major}-release", ) or "https://access.redhat.com/security/data/fd431d51.txt", ) @staticmethod def sslcacert(context: Context) -> Optional[Path]: if context.config.mirror: return None p = Path("etc/rhsm/ca/redhat-uep.pem") if (context.pkgmngr / p).exists(): p = context.pkgmngr / p elif (Path("/") / p).exists(): p = Path("/") / p else: die("redhat-uep.pem certificate not found in host system or package manager tree") return p @staticmethod def sslclientkey(context: Context) -> Optional[Path]: if context.config.mirror: return None pattern = "etc/pki/entitlement/*-key.pem" p = next((p for p in sorted(context.pkgmngr.glob(pattern))), None) if not p: p = next((p for p in Path("/").glob(pattern)), None) if not p: die("Entitlement key not found in host system or package manager tree") return p @staticmethod def sslclientcert(context: Context) -> Optional[Path]: if context.config.mirror: return None pattern = "etc/pki/entitlement/*.pem" p = next((p for p in sorted(context.pkgmngr.glob(pattern)) if "key" not in p.name), None) if not p: p = next((p for p in sorted(Path("/").glob(pattern)) if "key" not in p.name), None) if not p: die("Entitlement certificate not found in host system or package manager tree") return p @classmethod def repository_variants(cls, context: Context, repo: str) -> Iterable[RpmRepository]: 
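# Yields the os/debug/source variants of one RHEL repository (e.g. "baseos"):
# the local mirror wins when configured, otherwise the Red Hat CDN is used
# together with the entitlement certificate and key resolved by the helpers
# above.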
if context.config.local_mirror: yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", cls.gpgurls(context)) else: mirror = context.config.mirror or "https://cdn.redhat.com/content/dist/" common: dict[str, Any] = dict( gpgurls=cls.gpgurls(context), sslcacert=cls.sslcacert(context), sslclientcert=cls.sslclientcert(context), sslclientkey=cls.sslclientkey(context), ) v = context.config.release major = int(float(v)) yield RpmRepository( f"rhel-{v}-{repo}-rpms", f"baseurl={join_mirror(mirror, f'rhel{major}/{v}/$basearch/{repo}/os')}", enabled=True, **common, ) yield RpmRepository( f"rhel-{v}-{repo}-debug-rpms", f"baseurl={join_mirror(mirror, f'rhel{major}/{v}/$basearch/{repo}/debug')}", enabled=False, **common, ) yield RpmRepository( f"rhel-{v}-{repo}-source", f"baseurl={join_mirror(mirror, f'rhel{major}/{v}/$basearch/{repo}/source')}", enabled=False, **common, ) @classmethod @listify def repositories(cls, context: Context) -> Iterable[RpmRepository]: yield from cls.repository_variants(context, "baseos") yield from cls.repository_variants(context, "appstream") yield from cls.repository_variants(context, "codeready-builder") yield from cls.epel_repositories(context) mkosi-24.3/mkosi/distributions/rhel_ubi.py000066400000000000000000000041761465176501400207740ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from mkosi.context import Context from mkosi.distributions import centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.util import listify class Installer(centos.Installer): @classmethod def pretty_name(cls) -> str: return "RHEL UBI" @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: major = int(float(context.config.release)) return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-redhat{major}-release", ) or "https://access.redhat.com/security/data/fd431d51.txt", ) @classmethod def repository_variants(cls, context: Context, repo: str) -> Iterable[RpmRepository]: if context.config.local_mirror: yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", cls.gpgurls(context)) else: mirror = context.config.mirror or "https://cdn-ubi.redhat.com/content/public/ubi/dist/" v = context.config.release yield RpmRepository( f"ubi-{v}-{repo}-rpms", f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/os')}", cls.gpgurls(context), ) yield RpmRepository( f"ubi-{v}-{repo}-debug-rpms", f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/debug')}", cls.gpgurls(context), enabled=False, ) yield RpmRepository( f"ubi-{v}-{repo}-source", f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/source')}", cls.gpgurls(context), enabled=False, ) @classmethod @listify def repositories(cls, context: Context) -> Iterable[RpmRepository]: yield from cls.repository_variants(context, "baseos") yield from cls.repository_variants(context, "appstream") yield from cls.repository_variants(context, "codeready-builder") yield from cls.epel_repositories(context) mkosi-24.3/mkosi/distributions/rocky.py000066400000000000000000000022261465176501400203240ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from mkosi.context import Context from mkosi.distributions import centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey class Installer(centos.Installer): @classmethod def pretty_name(cls) -> str: return "Rocky Linux" @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: return ( find_rpm_gpgkey( context, 
f"RPM-GPG-KEY-Rocky-{context.config.release}", ) or f"https://download.rockylinux.org/pub/rocky/RPM-GPG-KEY-Rocky-{context.config.release}", ) @classmethod def repository_variants(cls, context: Context, repo: str) -> list[RpmRepository]: if context.config.mirror: url = f"baseurl={join_mirror(context.config.mirror, f'$releasever/{repo}/$basearch/os')}" else: url = f"mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo={repo}-$releasever" return [RpmRepository(repo, url, cls.gpgurls(context))] @classmethod def sig_repositories(cls, context: Context) -> list[RpmRepository]: return [] mkosi-24.3/mkosi/distributions/ubuntu.py000066400000000000000000000046541465176501400205260ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from pathlib import Path from mkosi.context import Context from mkosi.distributions import debian from mkosi.installer.apt import AptRepository from mkosi.util import listify class Installer(debian.Installer): @classmethod def pretty_name(cls) -> str: return "Ubuntu" @classmethod def default_release(cls) -> str: return "noble" @staticmethod @listify def repositories(context: Context, local: bool = True) -> Iterable[AptRepository]: types = ("deb", "deb-src") # From kinetic onwards, the usr-is-merged package is available in universe and is required by # mkosi to set up a proper usr-merged system so we add the universe repository unconditionally. components = ["main"] + (["universe"] if context.config.release not in ("focal", "jammy") else []) components = (*components, *context.config.repositories) if context.config.local_mirror and local: yield AptRepository( types=("deb",), url=context.config.local_mirror, suite=context.config.release, components=("main",), signedby=None, ) return if context.config.architecture.is_x86_variant(): mirror = context.config.mirror or "http://archive.ubuntu.com/ubuntu" else: mirror = context.config.mirror or "http://ports.ubuntu.com" signedby = Path("/usr/share/keyrings/ubuntu-archive-keyring.gpg") yield AptRepository( types=types, url=mirror, suite=context.config.release, components=components, signedby=signedby, ) yield AptRepository( types=types, url=mirror, suite=f"{context.config.release}-updates", components=components, signedby=signedby, ) # Security updates repos are never mirrored. But !x86 are on the ports server. 
if context.config.architecture.is_x86_variant(): mirror = "http://security.ubuntu.com/ubuntu" else: mirror = "http://ports.ubuntu.com" yield AptRepository( types=types, url=mirror, suite=f"{context.config.release}-security", components=components, signedby=signedby, ) mkosi-24.3/mkosi/initrd/000077500000000000000000000000001465176501400152105ustar00rootroot00000000000000mkosi-24.3/mkosi/initrd/__main__.py000066400000000000000000000075771465176501400173220ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import argparse import os import platform import shutil import tempfile from pathlib import Path from mkosi.config import OutputFormat, __version__ from mkosi.log import log_setup from mkosi.run import find_binary, run, uncaught_exception_handler from mkosi.types import PathString @uncaught_exception_handler() def main() -> None: log_setup() parser = argparse.ArgumentParser( prog="mkosi-initrd", description="Build initrds or unified kernel images for the current system using mkosi", allow_abbrev=False, usage="mkosi-initrd [options...]", ) parser.add_argument( "--kernel-version", metavar="KERNEL_VERSION", help="Kernel version string", default=platform.uname().release, ) parser.add_argument( "-t", "--format", choices=[str(OutputFormat.cpio), str(OutputFormat.uki), str(OutputFormat.directory)], help="Output format (CPIO archive, UKI or local directory)", default="cpio", ) parser.add_argument( "-o", "--output", metavar="NAME", help="Output name", default="initrd", ) parser.add_argument( "-O", "--output-dir", metavar="DIR", help="Output directory", default="", ) parser.add_argument( "--debug", help="Turn on debugging output", action="store_true", default=False, ) parser.add_argument( "--version", action="version", version=f"mkosi {__version__}", ) args = parser.parse_args() cmdline: list[PathString] = [ "mkosi", "--force", "--directory", "", "--format", str(args.format), "--output", args.output, "--output-dir", args.output_dir, "--cache-only=metadata", "--extra-tree", f"/usr/lib/modules/{args.kernel_version}:/usr/lib/modules/{args.kernel_version}", "--extra-tree=/usr/lib/firmware:/usr/lib/firmware", "--remove-files=/usr/lib/firmware/*-ucode", "--kernel-modules-exclude=.*", "--kernel-modules-include=host", "--include=mkosi-initrd", ] if args.debug: cmdline += ["--debug"] if os.getuid() == 0: cmdline += [ "--workspace-dir=/var/tmp", "--package-cache-dir=/var", ] for d in ("/usr/lib/mkosi-initrd", "/etc/mkosi-initrd"): if Path(d).exists(): cmdline += ["--include", d] with tempfile.TemporaryDirectory() as d: # Make sure we don't use any of mkosi's default repositories. for p in ( "yum.repos.d/mkosi.repo", "apt/sources.list.d/mkosi.sources", "zypp/repos.d/mkosi.repo", "pacman.conf", ): (Path(d) / "etc" / p).parent.mkdir(parents=True, exist_ok=True) (Path(d) / "etc" / p).touch() # Copy in the host's package manager configuration. for p in ( "dnf", "yum.repos.d/", "apt", "zypp", "pacman.conf", "pacman.d/", ): if not (Path("/etc") / p).exists(): continue (Path(d) / "etc" / p).parent.mkdir(parents=True, exist_ok=True) if (Path("/etc") / p).resolve().is_file(): shutil.copy2(Path("/etc") / p, Path(d) / "etc" / p) else: shutil.copytree(Path("/etc") / p, Path(d) / "etc" / p, ignore=shutil.ignore_patterns("S.*"), dirs_exist_ok=True) cmdline += ["--package-manager-tree", d] # Prefer dnf as dnf5 has not yet officially replaced it and there's a much bigger chance that there will be a # populated dnf cache directory. 
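# Equivalent long-hand for the call below (purely illustrative):
#   dnf = find_binary("dnf", "dnf5")   # first name found wins, so dnf is preferred
#   run(cmdline, env={"MKOSI_DNF": dnf.name} if dnf else {})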
run(cmdline, env={"MKOSI_DNF": dnf.name} if (dnf := find_binary("dnf", "dnf5")) else {}) if __name__ == "__main__": main() mkosi-24.3/mkosi/initrd/resources/000077500000000000000000000000001465176501400172225ustar00rootroot00000000000000mkosi-24.3/mkosi/initrd/resources/__init__.py000066400000000000000000000000001465176501400213210ustar00rootroot00000000000000mkosi-24.3/mkosi/initrd/resources/mkosi-initrd.md000066400000000000000000000024421465176501400221570ustar00rootroot00000000000000% mkosi-initrd(1) % % # NAME mkosi-initrd — Build initrds or unified kernel images for the current system using mkosi # SYNOPSIS `mkosi-initrd [options…]` # DESCRIPTION `mkosi-initrd` is wrapper on top of `mkosi` to simplify the generation of initrds and Unified Kernel Images for the current running system. # OPTIONS `--kernel-version=` : Kernel version where to look for the kernel modules to include. Defaults to the kernel version of the running system (`uname -r`). `--format=`, `-t` : Output format. One of `cpio` (CPIO archive), `uki` (a unified kernel image with the image in the `.initrd` PE section) or `directory` (for generating an image directly in a local directory). Defaults to `cpio`. `--output=`, `-o` : Name to use for the generated output image file or directory. Defaults to `initrd`. Note that this only specifies the output prefix, depending on the specific output format and compression used, the full output name might be `initrd.cpio.zst`. `--output-dir=`, `-O` : Path to a directory where to place all generated artifacts. Defaults to the current working directory. `--debug=` : Enable additional debugging output. `--version` : Show package version. `--help`, `-h` : Show brief usage information. # SEE ALSO `mkosi(1)` mkosi-24.3/mkosi/installer/000077500000000000000000000000001465176501400157145ustar00rootroot00000000000000mkosi-24.3/mkosi/installer/__init__.py000066400000000000000000000131151465176501400200260ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from pathlib import Path from mkosi.config import Config, ConfigFeature, OutputFormat from mkosi.context import Context from mkosi.mounts import finalize_crypto_mounts from mkosi.run import find_binary from mkosi.sandbox import Mount from mkosi.tree import copy_tree, rmtree from mkosi.types import PathString from mkosi.util import startswith class PackageManager: @classmethod def executable(cls, config: Config) -> str: return "custom" @classmethod def subdir(cls, config: Config) -> Path: return Path("custom") @classmethod def cache_subdirs(cls, cache: Path) -> list[Path]: return [] @classmethod def state_subdirs(cls, state: Path) -> list[Path]: return [] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: return {} @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: env = { "HOME": "/", # Make sure rpm doesn't pick up ~/.rpmmacros and ~/.rpmrc. 
} if "SYSTEMD_HWDB_UPDATE_BYPASS" not in context.config.environment: env["SYSTEMD_HWDB_UPDATE_BYPASS"] = "1" if ( "KERNEL_INSTALL_BYPASS" not in context.config.environment and context.config.bootable != ConfigFeature.disabled ): env["KERNEL_INSTALL_BYPASS"] = "1" return env @classmethod def env_cmd(cls, context: Context) -> list[PathString]: return ["env", *([f"{k}={v}" for k, v in cls.finalize_environment(context).items()])] @classmethod def mounts(cls, context: Context) -> list[Mount]: mounts = [ *finalize_crypto_mounts(context.config), Mount(context.repository, "/repository"), ] if context.config.local_mirror and (mirror := startswith(context.config.local_mirror, "file://")): mounts += [Mount(mirror, mirror, ro=True)] subdir = context.config.distribution.package_manager(context.config).subdir(context.config) for d in ("cache", "lib"): src = context.package_cache_dir / d / subdir mounts += [Mount(src, Path("/var") / d / subdir)] # If we're not operating on the configured package cache directory, we're operating on a snapshot of the # repository metadata in the image root directory. To make sure any downloaded packages are still cached in # the configured package cache directory in this scenario, we mount in the relevant directories from the # configured package cache directory. if d == "cache" and context.package_cache_dir != context.config.package_cache_dir_or_default(): caches = context.config.distribution.package_manager(context.config).cache_subdirs(src) mounts += [ Mount( context.config.package_cache_dir_or_default() / d / subdir / p.relative_to(src), Path("/var") / d / subdir / p.relative_to(src), ) for p in caches if (context.config.package_cache_dir_or_default() / d / subdir / p.relative_to(src)).exists() ] return mounts @classmethod def sync(cls, context: Context, force: bool) -> None: pass @classmethod def createrepo(cls, context: Context) -> None: pass def clean_package_manager_metadata(context: Context) -> None: """ Remove package manager metadata Try them all regardless of the distro: metadata is only removed if the package manager is not present in the image. """ subdir = context.config.distribution.package_manager(context.config).subdir(context.config) if context.package_cache_dir.is_relative_to(context.root): # Copy the package manager repository metadata to the workspace so it stays available for later steps even if # it is removed from the image by a later step. for d in ("cache", "lib"): src = context.package_cache_dir / d / subdir if not src.exists(): continue dst = context.workspace / "package-cache-dir" / d / subdir dst.mkdir(parents=True, exist_ok=True) copy_tree(src, dst, sandbox=context.sandbox) context.package_cache_dir = context.workspace / "package-cache-dir" if context.config.overlay: return if context.config.clean_package_metadata == ConfigFeature.disabled: return if ( context.config.clean_package_metadata == ConfigFeature.auto and context.config.output_format in (OutputFormat.directory, OutputFormat.tar) ): return # If cleaning is not explicitly requested, keep the repository metadata if we're building a directory or tar image # (which are often used as a base tree for extension images and thus should retain package manager metadata) or if # the corresponding package manager is installed in the image. 
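# Decision sketch (restating the checks above and below, not new behaviour):
#   clean_package_metadata=disabled -> never remove metadata
#   auto + directory/tar output     -> keep it (base-tree use case)
#   auto + other output formats     -> remove it only for package managers
#                                      whose binary is absent from the image
#   clean_package_metadata=enabled  -> always remove it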
executable = context.config.distribution.package_manager(context.config).executable(context.config) for tool, paths in (("rpm", ["var/lib/rpm", "usr/lib/sysimage/rpm"]), ("dnf5", ["usr/lib/sysimage/libdnf5"]), ("dpkg", ["var/lib/dpkg"]), (executable, [f"var/lib/{subdir}", f"var/cache/{subdir}"])): if context.config.clean_package_metadata == ConfigFeature.enabled or not find_binary(tool, root=context.root): rmtree(*(context.root / p for p in paths if (context.root / p).exists()), sandbox=context.sandbox) mkosi-24.3/mkosi/installer/apt.py000066400000000000000000000242451465176501400170610ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import textwrap from collections.abc import Iterable, Sequence from pathlib import Path from typing import Optional from mkosi.config import PACKAGE_GLOBS, Config, ConfigFeature from mkosi.context import Context from mkosi.installer import PackageManager from mkosi.log import die from mkosi.run import run from mkosi.sandbox import Mount, apivfs_cmd from mkosi.types import _FILE, CompletedProcess, PathString from mkosi.util import umask @dataclasses.dataclass(frozen=True) class AptRepository: types: tuple[str, ...] url: str suite: str components: tuple[str, ...] signedby: Optional[Path] def __str__(self) -> str: return textwrap.dedent( f"""\ Types: {" ".join(self.types)} URIs: {self.url} Suites: {self.suite} Components: {" ".join(self.components)} {"Signed-By" if self.signedby else "Trusted"}: {self.signedby or "yes"} """ ) class Apt(PackageManager): documentation_exclude_globs = [ "usr/share/doc/*", "usr/share/man/*", "usr/share/groff/*", "usr/share/gtk-doc/*", "usr/share/info/*", ] @classmethod def executable(cls, config: Config) -> str: return "apt" @classmethod def subdir(cls, config: Config) -> Path: return Path("apt") @classmethod def cache_subdirs(cls, cache: Path) -> list[Path]: return [cache / "archives"] @classmethod def dpkg_cmd(cls, command: str) -> list[PathString]: return [ command, "--admindir=/buildroot/var/lib/dpkg", "--root=/buildroot", ] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: return { **{ command: apivfs_cmd() + cls.env_cmd(context) + cls.cmd(context, command) for command in ( "apt", "apt-cache", "apt-cdrom", "apt-config", "apt-extracttemplates", "apt-get", "apt-key", "apt-mark", "apt-sortpkgs", ) }, **{ command: apivfs_cmd() + cls.dpkg_cmd(command) for command in( "dpkg", "dpkg-query", ) }, "mkosi-install" : ["apt-get", "install"], "mkosi-upgrade" : ["apt-get", "upgrade"], "mkosi-remove" : ["apt-get", "purge"], "mkosi-reinstall": ["apt-get", "install", "--reinstall"], } @classmethod def setup(cls, context: Context, repos: Iterable[AptRepository]) -> None: (context.pkgmngr / "etc/apt").mkdir(exist_ok=True, parents=True) (context.pkgmngr / "etc/apt/apt.conf.d").mkdir(exist_ok=True, parents=True) (context.pkgmngr / "etc/apt/preferences.d").mkdir(exist_ok=True, parents=True) (context.pkgmngr / "etc/apt/sources.list.d").mkdir(exist_ok=True, parents=True) with umask(~0o755): # TODO: Drop once apt 2.5.4 is widely available. (context.root / "var/lib/dpkg").mkdir(parents=True, exist_ok=True) (context.root / "var/lib/dpkg/status").touch() (context.root / "var/lib/dpkg/available").touch() # We have a special apt.conf outside of pkgmngr dir that only configures "Dir::Etc" that we pass to APT_CONFIG # to tell apt it should read config files from /etc/apt in case this is overridden by distributions. 
This is # required because apt parses CLI configuration options after parsing its configuration files and as such we # can't use CLI options to tell apt where to look for configuration files. config = context.pkgmngr / "etc/apt.conf" if not config.exists(): config.write_text( textwrap.dedent( """\ Dir::Etc "/etc/apt"; """ ) ) sources = context.pkgmngr / "etc/apt/sources.list.d/mkosi.sources" if not sources.exists(): for repo in repos: if repo.signedby and not repo.signedby.exists(): die( f"Keyring for repo {repo.url} not found at {repo.signedby}", hint="Make sure the right keyring package (e.g. debian-archive-keyring or ubuntu-keyring) is " "installed", ) with sources.open("w") as f: for repo in repos: f.write(str(repo)) @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: env = { "APT_CONFIG": "/etc/apt.conf", "DEBIAN_FRONTEND" : "noninteractive", "DEBCONF_INTERACTIVE_SEEN": "true", } if "INITRD" not in context.config.environment and context.config.bootable != ConfigFeature.disabled: env["INITRD"] = "No" return super().finalize_environment(context) | env @classmethod def cmd(cls, context: Context, command: str) -> list[PathString]: debarch = context.config.distribution.architecture(context.config.architecture) cmdline: list[PathString] = [ command, "-o", f"APT::Architecture={debarch}", "-o", f"APT::Architectures={debarch}", "-o", f"APT::Install-Recommends={str(context.config.with_recommends).lower()}", "-o", "APT::Immediate-Configure=off", "-o", "APT::Get::Assume-Yes=true", "-o", "APT::Get::AutomaticRemove=true", "-o", "APT::Get::Allow-Change-Held-Packages=true", "-o", "APT::Get::Allow-Remove-Essential=true", "-o", "APT::Sandbox::User=root", "-o", "Acquire::AllowReleaseInfoChange=true", "-o", "Dir::Cache=/var/cache/apt", "-o", "Dir::State=/var/lib/apt", "-o", "Dir::Log=/var/log/apt", "-o", "Dir::State::Status=/buildroot/var/lib/dpkg/status", "-o", f"Dir::Bin::DPkg={context.config.find_binary('dpkg')}", "-o", "Debug::NoLocking=true", "-o", "DPkg::Options::=--root=/buildroot", "-o", "DPkg::Options::=--force-unsafe-io", "-o", "DPkg::Options::=--force-architecture", "-o", "DPkg::Options::=--force-depends", "-o", "DPkg::Options::=--no-debsig", "-o", "DPkg::Use-Pty=false", "-o", "DPkg::Install::Recursive::Minimum=1000", "-o", "pkgCacheGen::ForceEssential=,", ] if not context.config.repository_key_check: cmdline += [ "-o", "Acquire::AllowInsecureRepositories=true", "-o", "Acquire::AllowDowngradeToInsecureRepositories=true", "-o", "APT::Get::AllowUnauthenticated=true", ] if not context.config.with_docs: cmdline += [f"--option=DPkg::Options::=--path-exclude=/{glob}" for glob in cls.documentation_exclude_globs] cmdline += ["--option=DPkg::Options::=--path-include=/usr/share/doc/*/copyright"] if context.config.proxy_url: cmdline += [ "-o", f"Acquire::http::Proxy={context.config.proxy_url}", "-o", f"Acquire::https::Proxy={context.config.proxy_url}", ] return cmdline @classmethod def invoke( cls, context: Context, operation: str, arguments: Sequence[str] = (), *, apivfs: bool = False, mounts: Sequence[Mount] = (), stdout: _FILE = None, ) -> CompletedProcess: return run( cls.cmd(context, "apt-get") + [operation, *arguments], sandbox=( context.sandbox( binary="apt-get", network=True, vartmp=True, mounts=[Mount(context.root, "/buildroot"), *cls.mounts(context), *mounts], extra=apivfs_cmd() if apivfs else [] ) ), env=context.config.environment | cls.finalize_environment(context), stdout=stdout, ) @classmethod def sync(cls, context: Context, force: bool) -> None: 
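# (The force flag is accepted for interface parity with the other package
# managers but ignored here: the "apt-get update" below always refreshes the
# package lists.)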
cls.invoke(context, "update") @classmethod def createrepo(cls, context: Context) -> None: if not (conf := context.repository / "conf/distributions").exists(): conf.parent.mkdir(exist_ok=True) conf.write_text( textwrap.dedent( f"""\ Origin: mkosi Label: mkosi Architectures: {context.config.distribution.architecture(context.config.architecture)} Codename: mkosi Components: main Description: mkosi local repository """ ) ) run( [ "reprepro", "--ignore=extension", "includedeb", "mkosi", *(d.name for glob in PACKAGE_GLOBS for d in context.repository.glob(glob) if "deb" in glob), ], sandbox=context.sandbox( binary="reprepro", mounts=[Mount(context.repository, context.repository)], options=["--chdir", context.repository], ), ) (context.pkgmngr / "etc/apt/sources.list.d").mkdir(parents=True, exist_ok=True) (context.pkgmngr / "etc/apt/sources.list.d/mkosi-local.sources").write_text( textwrap.dedent( """\ Enabled: yes Types: deb URIs: file:///repository Suites: mkosi Components: main Trusted: yes """ ) ) cls.invoke( context, "update", arguments=[ "-o", "Dir::Etc::sourcelist=sources.list.d/mkosi-local.sources", "-o", "Dir::Etc::sourceparts=-", "-o", "APT::Get::List-Cleanup=0", ], ) mkosi-24.3/mkosi/installer/dnf.py000066400000000000000000000225151465176501400170420ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import textwrap from collections.abc import Iterable, Sequence from pathlib import Path from mkosi.config import Cacheonly, Config from mkosi.context import Context from mkosi.installer import PackageManager from mkosi.installer.rpm import RpmRepository, rpm_cmd from mkosi.log import ARG_DEBUG from mkosi.run import run from mkosi.sandbox import Mount, apivfs_cmd from mkosi.types import _FILE, CompletedProcess, PathString class Dnf(PackageManager): @classmethod def executable(cls, config: Config) -> str: # Allow the user to override autodetection with an environment variable dnf = config.environment.get("MKOSI_DNF") return Path(dnf or config.find_binary("dnf5") or config.find_binary("dnf") or "yum").name @classmethod def subdir(cls, config: Config) -> Path: return Path("libdnf5" if cls.executable(config) == "dnf5" else "dnf") @classmethod def cache_subdirs(cls, cache: Path) -> list[Path]: return [ p / "packages" for p in cache.iterdir() if p.is_dir() and "-" in p.name and "mkosi" not in p.name ] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: return { "dnf": apivfs_cmd() + cls.env_cmd(context) + cls.cmd(context), "rpm": apivfs_cmd() + rpm_cmd(), "mkosi-install" : ["dnf", "install"], "mkosi-upgrade" : ["dnf", "upgrade"], "mkosi-remove" : ["dnf", "remove"], "mkosi-reinstall": ["dnf", "reinstall"], } @classmethod def setup(cls, context: Context, repositories: Iterable[RpmRepository], filelists: bool = True) -> None: (context.pkgmngr / "etc/dnf/vars").mkdir(parents=True, exist_ok=True) (context.pkgmngr / "etc/yum.repos.d").mkdir(parents=True, exist_ok=True) config = context.pkgmngr / "etc/dnf/dnf.conf" if not config.exists(): config.parent.mkdir(exist_ok=True, parents=True) with config.open("w") as f: # Make sure we download filelists so all dependencies can be resolved. # See https://bugzilla.redhat.com/show_bug.cgi?id=2180842 if cls.executable(context.config).endswith("dnf5") and filelists: f.write("[main]\noptional_metadata_types=filelists\n") # The versionlock plugin will fail if enabled without a configuration file so lets' write a noop configuration # file to make it happy which can be overridden by users. 
versionlock = context.pkgmngr / "etc/dnf/plugins/versionlock.conf" if not versionlock.exists(): versionlock.parent.mkdir(parents=True, exist_ok=True) versionlock.write_text( textwrap.dedent( """\ [main] enabled=0 locklist=/dev/null """ ) ) repofile = context.pkgmngr / "etc/yum.repos.d/mkosi.repo" if not repofile.exists(): repofile.parent.mkdir(exist_ok=True, parents=True) with repofile.open("w") as f: for repo in repositories: f.write( textwrap.dedent( f"""\ [{repo.id}] name={repo.id} {repo.url} gpgcheck=1 enabled={int(repo.enabled)} """ ) ) if repo.sslcacert: f.write(f"sslcacert={repo.sslcacert}\n") if repo.sslclientcert: f.write(f"sslclientcert={repo.sslclientcert}\n") if repo.sslclientkey: f.write(f"sslclientkey={repo.sslclientkey}\n") if repo.priority: f.write(f"priority={repo.priority}\n") for i, url in enumerate(repo.gpgurls): f.write("gpgkey=" if i == 0 else len("gpgkey=") * " ") f.write(f"{url}\n") f.write("\n") @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: return super().finalize_environment(context) | { "RPM_FORCE_DEBIAN": "1", } @classmethod def cmd( cls, context: Context, cached_metadata: bool = True, ) -> list[PathString]: dnf = cls.executable(context.config) cmdline: list[PathString] = [ dnf, "--assumeyes", "--best", f"--releasever={context.config.release}", "--installroot=/buildroot", "--setopt=keepcache=1", "--setopt=logdir=/var/log", f"--setopt=cachedir=/var/cache/{cls.subdir(context.config)}", f"--setopt=persistdir=/var/lib/{cls.subdir(context.config)}", f"--setopt=install_weak_deps={int(context.config.with_recommends)}", "--setopt=check_config_file_age=0", "--disable-plugin=*" if dnf.endswith("dnf5") else "--disableplugin=*", ] for plugin in ("builddep", "versionlock"): cmdline += ["--enable-plugin", plugin] if dnf.endswith("dnf5") else ["--enableplugin", plugin] if ARG_DEBUG.get(): cmdline += ["--setopt=debuglevel=10"] if not context.config.repository_key_check: cmdline += ["--nogpgcheck"] if context.config.repositories: opt = "--enable-repo" if dnf.endswith("dnf5") else "--enablerepo" cmdline += [f"{opt}={repo}" for repo in context.config.repositories] if context.config.cacheonly == Cacheonly.always: cmdline += ["--cacheonly"] elif cached_metadata: cmdline += ["--setopt=metadata_expire=never"] if dnf == "dnf5": cmdline += ["--setopt=cacheonly=metadata"] if not context.config.architecture.is_native(): cmdline += [f"--forcearch={context.config.distribution.architecture(context.config.architecture)}"] if not context.config.with_docs: cmdline += ["--no-docs" if dnf.endswith("dnf5") else "--nodocs"] if dnf.endswith("dnf5"): cmdline += ["--use-host-config"] else: cmdline += [ "--config=/etc/dnf/dnf.conf", "--setopt=reposdir=/etc/yum.repos.d", "--setopt=varsdir=/etc/dnf/vars", ] if context.config.proxy_url: cmdline += [f"--setopt=proxy={context.config.proxy_url}"] if context.config.proxy_peer_certificate: cmdline += ["--setopt=proxy_sslcacert=/proxy.cacert"] if context.config.proxy_client_certificate: cmdline += ["--setopt=proxy_sslclientcert=/proxy.clientcert"] if context.config.proxy_client_key: cmdline += ["--setopt=proxy_sslclientkey=/proxy.clientkey"] return cmdline @classmethod def invoke( cls, context: Context, operation: str, arguments: Sequence[str] = (), *, apivfs: bool = False, stdout: _FILE = None, cached_metadata: bool = True, ) -> CompletedProcess: try: return run( cls.cmd(context, cached_metadata=cached_metadata) + [operation, *arguments], sandbox=( context.sandbox( binary=cls.executable(context.config), network=True, 
vartmp=True, mounts=[Mount(context.root, "/buildroot"), *cls.mounts(context)], extra=apivfs_cmd() if apivfs else [], ) ), env=context.config.environment | cls.finalize_environment(context), stdout=stdout, ) finally: # dnf interprets the log directory relative to the install root so there's nothing we can do but to remove # the log files from the install root afterwards. if (context.root / "var/log").exists(): for p in (context.root / "var/log").iterdir(): if any(p.name.startswith(prefix) for prefix in ("dnf", "hawkey", "yum")): p.unlink() @classmethod def sync(cls, context: Context, force: bool, arguments: Sequence[str] = ()) -> None: cls.invoke( context, "makecache", arguments=[*(["--refresh"] if force else []), *arguments], cached_metadata=False, ) @classmethod def createrepo(cls, context: Context) -> None: run(["createrepo_c", context.repository], sandbox=context.sandbox(binary="createrepo_c", mounts=[Mount(context.repository, context.repository)])) (context.pkgmngr / "etc/yum.repos.d/mkosi-local.repo").write_text( textwrap.dedent( """\ [mkosi] name=mkosi baseurl=file:///repository gpgcheck=0 metadata_expire=never priority=10 """ ) ) cls.sync(context, force=True, arguments=["--disablerepo=*", "--enablerepo=mkosi"]) mkosi-24.3/mkosi/installer/pacman.py000066400000000000000000000164041465176501400175320ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import shutil import textwrap from collections.abc import Iterable, Sequence from pathlib import Path from mkosi.config import Config from mkosi.context import Context from mkosi.installer import PackageManager from mkosi.run import run from mkosi.sandbox import Mount, apivfs_cmd from mkosi.types import _FILE, CompletedProcess, PathString from mkosi.util import umask from mkosi.versioncomp import GenericVersion @dataclasses.dataclass(frozen=True) class PacmanRepository: id: str url: str class Pacman(PackageManager): @classmethod def executable(cls, config: Config) -> str: return "pacman" @classmethod def subdir(cls, config: Config) -> Path: return Path("pacman") @classmethod def cache_subdirs(cls, cache: Path) -> list[Path]: return [cache / "pkg"] @classmethod def state_subdirs(cls, state: Path) -> list[Path]: return [state / "local"] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: return { "pacman": apivfs_cmd() + cls.env_cmd(context) + cls.cmd(context), "mkosi-install" : ["pacman", "--sync", "--needed"], "mkosi-upgrade" : ["pacman", "--sync", "--sysupgrade", "--needed"], "mkosi-remove" : ["pacman", "--remove", "--recursive", "--nosave"], "mkosi-reinstall": ["pacman", "--sync"], } @classmethod def mounts(cls, context: Context) -> list[Mount]: mounts = [ *super().mounts(context), # pacman writes downloaded packages to the first writable cache directory. We don't want it to write to our # local repository directory so we expose it as a read-only directory to pacman. Mount(context.repository, "/var/cache/pacman/mkosi", ro=True), ] if (context.root / "var/lib/pacman/local").exists(): # pacman reuses the same directory for the sync databases and the local database containing the list of # installed packages. The former should go in the cache directory, the latter should go in the image, so we # bind mount the local directory from the image to make sure that happens. 
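# Resulting layout inside the sandbox (derived from the mounts above/below):
#   /var/cache/pacman/mkosi -> local repository (read-only)
#   /var/lib/pacman         -> package cache dir (sync databases)
#   /var/lib/pacman/local   -> <image root>/var/lib/pacman/local (installed-package db)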
mounts += [Mount(context.root / "var/lib/pacman/local", "/var/lib/pacman/local")] return mounts @classmethod def setup(cls, context: Context, repositories: Iterable[PacmanRepository]) -> None: if context.config.repository_key_check: sig_level = "Required DatabaseOptional" else: # If we are using a single local mirror built on the fly there # will be no signatures sig_level = "Never" with umask(~0o755): (context.root / "var/lib/pacman/local").mkdir(parents=True, exist_ok=True) (context.pkgmngr / "etc/mkosi-local.conf").touch() config = context.pkgmngr / "etc/pacman.conf" if config.exists(): return config.parent.mkdir(exist_ok=True, parents=True) with config.open("w") as f: f.write( textwrap.dedent( f"""\ [options] SigLevel = {sig_level} LocalFileSigLevel = Optional ParallelDownloads = 5 Architecture = {context.config.distribution.architecture(context.config.architecture)} """ ) ) if not context.config.with_docs: f.write( textwrap.dedent( """\ NoExtract = usr/share/doc/* NoExtract = usr/share/man/* NoExtract = usr/share/groff/* NoExtract = usr/share/gtk-doc/* NoExtract = usr/share/info/* """ ) ) # This has to go first so that our local repository always takes precedence over any other ones. f.write("Include = /etc/mkosi-local.conf\n") for repo in repositories: f.write( textwrap.dedent( f"""\ [{repo.id}] Server = {repo.url} """ ) ) if any((context.pkgmngr / "etc/pacman.d/").glob("*.conf")): f.write( textwrap.dedent( """\ Include = /etc/pacman.d/*.conf """ ) ) @classmethod def cmd(cls, context: Context) -> list[PathString]: return [ "pacman", "--root=/buildroot", "--logfile=/dev/null", "--dbpath=/var/lib/pacman", # Make sure pacman looks at our local repository first by putting it as the first cache directory. We mount # it read-only so the second directory will still be used for writing new cache entries. "--cachedir=/var/cache/pacman/mkosi", "--cachedir=/var/cache/pacman/pkg", "--hookdir=/buildroot/etc/pacman.d/hooks", "--arch", context.config.distribution.architecture(context.config.architecture), "--color", "auto", "--noconfirm", ] @classmethod def invoke( cls, context: Context, operation: str, arguments: Sequence[str] = (), *, apivfs: bool = False, stdout: _FILE = None, ) -> CompletedProcess: return run( cls.cmd(context) + [operation, *arguments], sandbox=( context.sandbox( binary="pacman", network=True, vartmp=True, mounts=[Mount(context.root, "/buildroot"), *cls.mounts(context)], extra=apivfs_cmd() if apivfs else [], ) ), env=context.config.environment | cls.finalize_environment(context), stdout=stdout, ) @classmethod def sync(cls, context: Context, force: bool) -> None: cls.invoke(context, "--sync", ["--refresh", *(["--refresh"] if force else [])]) @classmethod def createrepo(cls, context: Context) -> None: run( [ "repo-add", "--quiet", context.repository / "mkosi.db.tar", *sorted(context.repository.glob("*.pkg.tar*"), key=lambda p: GenericVersion(Path(p).name)) ], sandbox=context.sandbox(binary="repo-add", mounts=[Mount(context.repository, context.repository)]), ) (context.pkgmngr / "etc/mkosi-local.conf").write_text( textwrap.dedent( """\ [mkosi] Server = file:///i/dont/exist SigLevel = Never Usage = Install Search Upgrade """ ) ) # pacman can't sync a single repository, so we go behind its back and do it ourselves. 
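# That is: the freshly built database is dropped directly into the location
# where pacman would have placed a synced copy of the [mkosi] repository, so
# the dummy Server = file:///i/dont/exist URL above should never need to be
# contacted.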
shutil.move( context.repository / "mkosi.db.tar", context.package_cache_dir / "lib/pacman/sync/mkosi.db" ) mkosi-24.3/mkosi/installer/rpm.py000066400000000000000000000053111465176501400170640ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import subprocess import textwrap from pathlib import Path from typing import Optional from mkosi.context import Context from mkosi.run import run from mkosi.types import PathString @dataclasses.dataclass(frozen=True) class RpmRepository: id: str url: str gpgurls: tuple[str, ...] enabled: bool = True sslcacert: Optional[Path] = None sslclientkey: Optional[Path] = None sslclientcert: Optional[Path] = None priority: Optional[int] = None def find_rpm_gpgkey(context: Context, key: str) -> Optional[str]: root = context.config.tools() if context.config.tools_tree_certificates else Path("/") if gpgpath := next((root / "usr/share/distribution-gpg-keys").rglob(key), None): return (Path("/") / gpgpath.relative_to(root)).as_uri() if gpgpath := next(Path(context.pkgmngr / "etc/pki/rpm-gpg").rglob(key), None): return (Path("/") / gpgpath.relative_to(context.pkgmngr)).as_uri() return None def setup_rpm(context: Context, *, dbpath: str = "/usr/lib/sysimage/rpm") -> None: confdir = context.pkgmngr / "etc/rpm" confdir.mkdir(parents=True, exist_ok=True) if not (confdir / "macros.lang").exists() and context.config.locale: (confdir / "macros.lang").write_text(f"%_install_langs {context.config.locale}") if not (confdir / "macros.dbpath").exists(): (confdir / "macros.dbpath").write_text(f"%_dbpath {dbpath}") plugindir = Path(run(["rpm", "--eval", "%{__plugindir}"], sandbox=context.sandbox(binary="rpm"), stdout=subprocess.PIPE).stdout.strip()) if (plugindir := context.config.tools() / plugindir.relative_to("/")).exists(): with (confdir / "macros.disable-plugins").open("w") as f: for plugin in plugindir.iterdir(): f.write(f"%__transaction_{plugin.stem} %{{nil}}\n") # Write an rpm sequoia policy that allows SHA1 as various distribution GPG keys (OpenSUSE) still use SHA1 for # various things. # TODO: Remove when all rpm distribution GPG keys have stopped using SHA1. 
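# (Assumed reading of the policy written below: the SHA-2 family stays fully
# allowed, sha1.second_preimage_resistance = "always" keeps SHA1-bound keys
# verifiable, and default_disposition = "never" rejects anything not listed
# explicitly.)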
if not (p := context.pkgmngr / "etc/crypto-policies/back-ends/rpm-sequoia.config").exists(): p.parent.mkdir(parents=True, exist_ok=True) p.write_text( textwrap.dedent( """ [hash_algorithms] sha1.second_preimage_resistance = "always" sha224 = "always" sha256 = "always" sha384 = "always" sha512 = "always" default_disposition = "never" """ ) ) def rpm_cmd() -> list[PathString]: return ["env", "HOME=/", "rpm", "--root=/buildroot"] mkosi-24.3/mkosi/installer/zypper.py000066400000000000000000000134671465176501400176320ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import hashlib import textwrap from collections.abc import Iterable, Sequence from pathlib import Path from mkosi.config import Config, yes_no from mkosi.context import Context from mkosi.installer import PackageManager from mkosi.installer.rpm import RpmRepository, rpm_cmd from mkosi.run import run from mkosi.sandbox import Mount, apivfs_cmd from mkosi.types import _FILE, CompletedProcess, PathString class Zypper(PackageManager): @classmethod def executable(cls, config: Config) -> str: return "zypper" @classmethod def subdir(cls, config: Config) -> Path: return Path("zypp") @classmethod def cache_subdirs(cls, cache: Path) -> list[Path]: return [cache / "packages"] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: install: list[PathString] = [ "zypper", "install", "--download", "in-advance", "--recommends" if context.config.with_recommends else "--no-recommends", ] return { "zypper": apivfs_cmd() + cls.env_cmd(context) + cls.cmd(context), "rpm" : apivfs_cmd() + rpm_cmd(), "mkosi-install" : install, "mkosi-upgrade" : ["zypper", "update"], "mkosi-remove" : ["zypper", "remove", "--clean-deps"], "mkosi-reinstall": install + ["--force"], } @classmethod def setup(cls, context: Context, repos: Iterable[RpmRepository]) -> None: config = context.pkgmngr / "etc/zypp/zypp.conf" config.parent.mkdir(exist_ok=True, parents=True) # rpm.install.excludedocs can only be configured in zypp.conf so we append # to any user provided config file. Let's also bump the refresh delay to # the same default as dnf which is 48 hours. with config.open("a") as f: f.write( textwrap.dedent( f""" [main] rpm.install.excludedocs = {yes_no(not context.config.with_docs)} repo.refresh.delay = {48 * 60} """ ) ) repofile = context.pkgmngr / "etc/zypp/repos.d/mkosi.repo" if not repofile.exists(): repofile.parent.mkdir(exist_ok=True, parents=True) with repofile.open("w") as f: for repo in repos: # zypper uses the repo ID as its cache key which is unsafe so add a hash of the url used to it to # make sure a unique cache is used for each repository. We use roughly the same algorithm here that # dnf uses as well. 
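# Illustrative example (hypothetical repository): for
#   url = "baseurl=https://download.opensuse.org/tumbleweed/repo/oss"
# the section header below becomes "[oss-" + hashlib.sha256(url.encode()).hexdigest()[:16] + "]",
# i.e. a stable 16-hex-character suffix per repository URL.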
key = hashlib.sha256(repo.url.encode()).hexdigest()[:16] f.write( textwrap.dedent( f"""\ [{repo.id}-{key}] name={repo.id} {repo.url} gpgcheck=1 enabled={int(repo.enabled)} autorefresh=0 keeppackages=1 """ ) ) if repo.priority: f.write(f"priority={repo.priority}\n") for i, url in enumerate(repo.gpgurls): f.write("gpgkey=" if i == 0 else len("gpgkey=") * " ") f.write(f"{url}\n") f.write("\n") @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: return super().finalize_environment(context) | { "ZYPP_CONF": "/etc/zypp/zypp.conf", "RPM_FORCE_DEBIAN": "1", } @classmethod def cmd(cls, context: Context) -> list[PathString]: return [ "zypper", "--installroot=/buildroot", "--cache-dir=/var/cache/zypp", "--gpg-auto-import-keys" if context.config.repository_key_check else "--no-gpg-checks", "--non-interactive", "--no-refresh", *([f"--plus-content={repo}" for repo in context.config.repositories]), ] @classmethod def invoke( cls, context: Context, operation: str, arguments: Sequence[str] = (), *, apivfs: bool = False, stdout: _FILE = None, ) -> CompletedProcess: return run( cls.cmd(context) + [operation, *arguments], sandbox=( context.sandbox( binary="zypper", network=True, vartmp=True, mounts=[Mount(context.root, "/buildroot"), *cls.mounts(context)], extra=apivfs_cmd() if apivfs else [], ) ), env=context.config.environment | cls.finalize_environment(context), stdout=stdout, ) @classmethod def sync(cls, context: Context, force: bool, arguments: Sequence[str] = ()) -> None: cls.invoke(context, "refresh", [*(["--force"] if force else []), *arguments]) @classmethod def createrepo(cls, context: Context) -> None: run(["createrepo_c", context.repository], sandbox=context.sandbox(binary="createrepo_c", mounts=[Mount(context.repository, context.repository)])) (context.pkgmngr / "etc/zypp/repos.d/mkosi-local.repo").write_text( textwrap.dedent( """\ [mkosi] name=mkosi baseurl=file:///repository gpgcheck=0 autorefresh=0 keeppackages=0 priority=10 """ ) ) cls.sync(context, force=True, arguments=["mkosi"]) mkosi-24.3/mkosi/kmod.py000066400000000000000000000175721465176501400152370ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import itertools import logging import os import re import subprocess from collections.abc import Iterable, Iterator from pathlib import Path from mkosi.log import complete_step, log_step from mkosi.run import run from mkosi.sandbox import Mount, SandboxProtocol, chroot_cmd, nosandbox from mkosi.util import chdir, parents_below def loaded_modules() -> list[str]: return [fr"{line.split()[0]}\.ko" for line in Path("/proc/modules").read_text().splitlines()] def filter_kernel_modules(root: Path, kver: str, *, include: Iterable[str], exclude: Iterable[str]) -> list[Path]: modulesd = Path("usr/lib/modules") / kver with chdir(root): modules = set(modulesd.rglob("*.ko*")) keep = set() if include: regex = re.compile("|".join(include)) for m in modules: rel = os.fspath(Path(*m.parts[1:])) if regex.search(rel): keep.add(rel) if exclude: remove = set() regex = re.compile("|".join(exclude)) for m in modules: rel = os.fspath(Path(*m.parts[1:])) if rel not in keep and regex.search(rel): remove.add(m) modules -= remove return sorted(modules) def normalize_module_name(name: str) -> str: return name.replace("_", "-") def module_path_to_name(path: Path) -> str: return normalize_module_name(path.name.partition(".")[0]) def resolve_module_dependencies( root: Path, kver: str, modules: Iterable[str], *, sandbox: SandboxProtocol = nosandbox, ) -> tuple[set[Path], 
set[Path]]:
    """
    Returns a tuple of sets containing the paths to the module and firmware dependencies of the given list
    of module names (including the given module paths themselves). The paths are returned relative to the
    root directory.
    """
    modulesd = Path("usr/lib/modules") / kver

    if (p := root / modulesd / "modules.builtin").exists():
        builtin = set(module_path_to_name(Path(m)) for m in p.read_text().splitlines())
    else:
        builtin = set()

    with chdir(root):
        allmodules = set(modulesd.rglob("*.ko*"))

    nametofile = {module_path_to_name(m): m for m in allmodules}

    log_step("Running modinfo to fetch kernel module dependencies")

    # We could run modinfo once for each module but that's slow. Luckily we can pass multiple modules to
    # modinfo and it'll process them all in a single go. We get the modinfo for all modules to build two maps
    # that map the path of the module to its module dependencies and its firmware dependencies respectively.
    # Because there are more kernel modules than the max number of accepted CLI arguments for bwrap, we split the
    # modules list up into chunks.
    info = ""
    for i in range(0, len(nametofile.keys()), 8500):
        chunk = list(nametofile.keys())[i:i+8500]
        info += run(
            ["modinfo", "--set-version", kver, "--null", *chunk],
            stdout=subprocess.PIPE,
            sandbox=sandbox(binary="modinfo", mounts=[Mount(root, "/buildroot", ro=True)], extra=chroot_cmd()),
            cwd=root,
        ).stdout.strip()

    log_step("Calculating required kernel modules and firmware")

    moddep = {}
    firmwaredep = {}

    depends = []
    firmware = []
    with chdir(root):
        for line in info.split("\0"):
            key, sep, value = line.partition(":")
            if not sep:
                key, sep, value = line.partition("=")

            if key == "depends":
                depends += [normalize_module_name(d) for d in value.strip().split(",") if d]

            elif key == "softdep":
                # softdep is delimited by spaces and can contain strings like pre: and post: so discard anything that
                # ends with a colon.
                depends += [normalize_module_name(d) for d in value.strip().split() if not d.endswith(":")]

            elif key == "firmware":
                fw = [f for f in Path("usr/lib/firmware").glob(f"{value.strip()}*")]
                if not fw:
                    logging.debug(f"Not including missing firmware /usr/lib/firmware/{value} in the initrd")

                firmware += fw

            elif key == "name":
                # The file names use dashes, but the module names use underscores. We track the names
                # in terms of the file names, since the depends use dashes and therefore filenames as
                # well.
                name = normalize_module_name(value.strip())

                moddep[name] = depends
                firmwaredep[name] = firmware

                depends = []
                firmware = []

    todo = [*builtin, *modules]
    mods = set()
    firmware = set()

    while todo:
        m = todo.pop()
        if m in mods:
            continue

        depends = moddep.get(m, [])
        for d in depends:
            if d not in nametofile and d not in builtin:
                logging.warning(f"{d} is a dependency of {m} but is not installed, ignoring")

        mods.add(m)
        todo += depends
        firmware.update(firmwaredep.get(m, []))

    return set(nametofile[m] for m in mods if m in nametofile), set(firmware)


def gen_required_kernel_modules(
    root: Path,
    kver: str,
    *,
    include: Iterable[str],
    exclude: Iterable[str],
    sandbox: SandboxProtocol = nosandbox,
) -> Iterator[Path]:
    modulesd = Path("usr/lib/modules") / kver

    # There is firmware in /usr/lib/firmware that is not depended on by any modules so if any firmware was installed
    # we have to take the slow path to make sure we don't copy firmware into the initrd that is not depended on by any
    # kernel modules.
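    # Sketch of the two paths below (module and firmware names are hypothetical):
    # the fast path simply takes every usr/lib/modules/<kver>/**/*.ko* file, while
    # the slow path resolves the dependency closure with modinfo so that e.g.
    # usr/lib/firmware/iwlwifi-*.ucode is only kept if the iwlwifi module is kept.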
    if exclude or any((root / "usr/lib/firmware").glob("*")):
        modules = filter_kernel_modules(root, kver, include=include, exclude=exclude)
        names = [module_path_to_name(m) for m in modules]
        mods, firmware = resolve_module_dependencies(root, kver, names, sandbox=sandbox)
    else:
        logging.debug("No modules excluded and no firmware installed, using kernel modules generation fast path")
        with chdir(root):
            mods = set(modulesd.rglob("*.ko*"))
        firmware = set()

    yield from sorted(
        itertools.chain(
            {p.relative_to(root) for f in mods | firmware for p in parents_below(root / f, root / "usr/lib")},
            mods,
            firmware,
            (p.relative_to(root) for p in (root / modulesd).glob("modules*")),
        )
    )

    if (modulesd / "vdso").exists():
        if not mods:
            yield from (p.relative_to(root) for p in parents_below(root / modulesd / "vdso", root / "usr/lib"))

        yield modulesd / "vdso"
        yield from sorted(p.relative_to(root) for p in (root / modulesd / "vdso").iterdir())


def process_kernel_modules(
    root: Path,
    kver: str,
    *,
    include: Iterable[str],
    exclude: Iterable[str],
    sandbox: SandboxProtocol = nosandbox,
) -> None:
    if not exclude:
        return

    modulesd = Path("usr/lib/modules") / kver
    firmwared = Path("usr/lib/firmware")

    with complete_step("Applying kernel module filters"):
        required = set(gen_required_kernel_modules(root, kver, include=include, exclude=exclude, sandbox=sandbox))

        with chdir(root):
            modules = sorted(modulesd.rglob("*.ko*"), reverse=True)
            firmware = sorted(firmwared.rglob("*"), reverse=True)

        for m in modules:
            if m in required:
                continue

            p = root / m
            if p.is_file() or p.is_symlink():
                p.unlink()
            else:
                p.rmdir()

        for fw in firmware:
            if fw in required:
                continue

            if any(fw.is_relative_to(Path("usr/lib/firmware") / d) for d in ("amd-ucode", "intel-ucode")):
                continue

            p = root / fw
            if p.is_file() or p.is_symlink():
                p.unlink()
            else:
                p.rmdir()
mkosi-24.3/mkosi/log.py000066400000000000000000000054171465176501400150570ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later
import contextlib
import contextvars
import logging
import os
import sys
from collections.abc import Iterator
from typing import Any, NoReturn, Optional

# This global should be initialized after parsing arguments
ARG_DEBUG = contextvars.ContextVar("debug", default=False)
ARG_DEBUG_SHELL = contextvars.ContextVar("debug-shell", default=False)
LEVEL = 0


class Style:
    bold = "\033[0;1;39m" if sys.stderr.isatty() else ""
    gray = "\033[0;38;5;245m" if sys.stderr.isatty() else ""
    red = "\033[31;1m" if sys.stderr.isatty() else ""
    yellow = "\033[33;1m" if sys.stderr.isatty() else ""
    reset = "\033[0m" if sys.stderr.isatty() else ""


def die(message: str, *, hint: Optional[str] = None) -> NoReturn:
    logging.error(f"{message}")
    if hint:
        logging.info(f"({hint})")
    sys.exit(1)


def log_step(text: str) -> None:
    prefix = " " * LEVEL

    if sys.exc_info()[0]:
        # We are falling through exception handling blocks.
        # De-emphasize this step here, so the user can tell more
        # easily which step generated the exception. The exception
        # or error will only be printed after we finish cleanup.
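        # Illustrative example (hypothetical step name): a cleanup step logged
        # while an exception is in flight appears as "(Removing build root)"
        # instead of the usual bold "Removing build root".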
logging.info(f"{prefix}({text})") else: logging.info(f"{prefix}{Style.bold}{text}{Style.reset}") def log_notice(text: str) -> None: logging.info(f"{Style.bold}{text}{Style.reset}") @contextlib.contextmanager def complete_step(text: str, text2: Optional[str] = None) -> Iterator[list[Any]]: global LEVEL log_step(text) LEVEL += 1 try: args: list[Any] = [] yield args finally: LEVEL -= 1 assert LEVEL >= 0 if text2 is not None: log_step(text2.format(*args)) class Formatter(logging.Formatter): def __init__(self, fmt: Optional[str] = None, *args: Any, **kwargs: Any) -> None: fmt = fmt or "%(message)s" self.formatters = { logging.DEBUG: logging.Formatter(f"‣ {Style.gray}{fmt}{Style.reset}"), logging.INFO: logging.Formatter(f"‣ {fmt}"), logging.WARNING: logging.Formatter(f"‣ {Style.yellow}{fmt}{Style.reset}"), logging.ERROR: logging.Formatter(f"‣ {Style.red}{fmt}{Style.reset}"), logging.CRITICAL: logging.Formatter(f"‣ {Style.red}{Style.bold}{fmt}{Style.reset}"), } super().__init__(fmt, *args, **kwargs) def format(self, record: logging.LogRecord) -> str: return self.formatters[record.levelno].format(record) def log_setup() -> None: handler = logging.StreamHandler(stream=sys.stderr) handler.setFormatter(Formatter()) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(logging.getLevelName(os.getenv("SYSTEMD_LOG_LEVEL", "info").upper())) mkosi-24.3/mkosi/manifest.py000066400000000000000000000251041465176501400161010ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import datetime import json import subprocess import textwrap from pathlib import Path from typing import IO, Any, Optional from mkosi.config import ManifestFormat from mkosi.context import Context from mkosi.distributions import PackageType from mkosi.installer.apt import Apt from mkosi.log import complete_step from mkosi.run import run from mkosi.sandbox import Mount @dataclasses.dataclass class PackageManifest: """A description of a package The fields used here must match https://systemd.io/COREDUMP_PACKAGE_METADATA/#well-known-keys. 
""" type: str name: str version: str architecture: str size: int def as_dict(self) -> dict[str, str]: return { "type": self.type, "name": self.name, "version": self.version, "architecture": self.architecture, } @dataclasses.dataclass class SourcePackageManifest: name: str changelog: Optional[str] packages: list[PackageManifest] = dataclasses.field(default_factory=list) def add(self, package: PackageManifest) -> None: self.packages.append(package) def report(self) -> str: size = sum(p.size for p in self.packages) t = textwrap.dedent( f"""\ SourcePackage: {self.name} Packages: {" ".join(p.name for p in self.packages)} Size: {size} """ ) if self.changelog: t += f"""\nChangelog:\n{self.changelog}\n""" return t def parse_pkg_desc(f: Path) -> tuple[str, str, str, str]: name = version = base = arch = "" with f.open() as desc: for line in desc: line = line.strip() if line == "%NAME%": name = next(desc).strip() elif line == "%VERSION%": version = next(desc).strip() elif line == "%BASE%": base = next(desc).strip() elif line == "%ARCH%": arch = next(desc).strip() break return name, version, base, arch @dataclasses.dataclass class Manifest: context: Context packages: list[PackageManifest] = dataclasses.field(default_factory=list) source_packages: dict[str, SourcePackageManifest] = dataclasses.field(default_factory=dict) _init_timestamp: datetime.datetime = dataclasses.field(init=False, default_factory=datetime.datetime.now) def need_source_info(self) -> bool: return ManifestFormat.changelog in self.context.config.manifest_format def record_packages(self) -> None: with complete_step("Recording packages in manifest…"): if self.context.config.distribution.package_type() == PackageType.rpm: self.record_rpm_packages() if self.context.config.distribution.package_type() == PackageType.deb: self.record_deb_packages() if self.context.config.distribution.package_type() == PackageType.pkg: self.record_pkg_packages() def record_rpm_packages(self) -> None: c = run( [ "rpm", "--root=/buildroot", "--query", "--all", "--queryformat", r"%{NEVRA}\t%{SOURCERPM}\t%{NAME}\t%{ARCH}\t%{LONGSIZE}\t%{INSTALLTIME}\n", ], stdout=subprocess.PIPE, sandbox=self.context.sandbox(binary="rpm", mounts=[Mount(self.context.root, "/buildroot")]), ) packages = sorted(c.stdout.splitlines()) for package in packages: nevra, srpm, name, arch, size, installtime = package.split("\t") assert nevra.startswith(f"{name}-") evra = nevra.removeprefix(f"{name}-") # Some packages have architecture '(none)', and it's not part of NEVRA, e.g.: # gpg-pubkey-45719a39-5f2c0192 gpg-pubkey (none) 0 1635985199 if arch != "(none)": assert nevra.endswith(f".{arch}") evr = evra.removesuffix(f".{arch}") else: evr = evra arch = "" size = int(size) installtime = datetime.datetime.fromtimestamp(int(installtime)) # If we are creating a layer based on a BaseImage=, e.g. a sysext, filter by # packages that were installed in this execution of mkosi. We assume that the # upper layer is put together in one go, which currently is always true. 
            if self.context.config.base_trees and installtime < self._init_timestamp:
                continue

            manifest = PackageManifest("rpm", name, evr, arch, size)
            self.packages.append(manifest)

            if not self.need_source_info():
                continue

            source = self.source_packages.get(srpm)
            if source is None:
                c = run(
                    [
                        "rpm",
                        "--root=/buildroot",
                        "--query",
                        "--changelog",
                        nevra,
                    ],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.DEVNULL,
                    sandbox=self.context.sandbox(
                        binary="rpm",
                        mounts=[Mount(self.context.root, "/buildroot", ro=True)]
                    ),
                )
                changelog = c.stdout.strip()
                source = SourcePackageManifest(srpm, changelog)
                self.source_packages[srpm] = source

            source.add(manifest)

    def record_deb_packages(self) -> None:
        c = run(
            [
                "dpkg-query",
                "--admindir=/buildroot/var/lib/dpkg",
                "--show",
                "--showformat",
                r'${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n',
            ],
            stdout=subprocess.PIPE,
            sandbox=self.context.sandbox(
                binary="dpkg-query",
                mounts=[Mount(self.context.root, "/buildroot", ro=True)],
            ),
        )

        packages = sorted(c.stdout.splitlines())

        for package in packages:
            name, source, version, arch, size, installtime = package.split("\t")

            # dpkg records the size in KBs, the field is optional
            # db-fsys:Last-Modified is not available in very old dpkg, so just skip creating
            # the manifest for sysext when building on very old distributions by setting the
            # timestamp to epoch. This only affects Ubuntu Bionic which is nearing EOL.
            size = int(size) * 1024 if size else 0
            installtime = datetime.datetime.fromtimestamp(int(installtime) if installtime else 0)

            # If we are creating a layer based on a BaseImage=, e.g. a sysext, filter by
            # packages that were installed in this execution of mkosi. We assume that the
            # upper layer is put together in one go, which currently is always true.
            if self.context.config.base_trees and installtime < self._init_timestamp:
                continue

            manifest = PackageManifest("deb", name, version, arch, size)
            self.packages.append(manifest)

            if not self.need_source_info():
                continue

            source_package = self.source_packages.get(source)
            if source_package is None:
                # Yes, --quiet is specified twice, to avoid output about download stats. Note that the argument of the
                # 'changelog' verb is the binary package name, not the source package name. We also have to set "Dir"
                # explicitly because apt has no separate option to configure the changelog directory. Apt.invoke()
                # sets all options that are interpreted relative to Dir to absolute paths by default so this is safe.
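                # The resulting invocation is roughly (a sketch; the exact global
                # options are supplied by Apt.invoke()):
                #   apt-get <options> changelog --quiet --quiet -o Dir=/buildroot <package>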
result = Apt.invoke( self.context, "changelog", ["--quiet", "--quiet", "-o", "Dir=/buildroot", name], stdout=subprocess.PIPE, ) source_package = SourcePackageManifest(source, result.stdout.strip()) self.source_packages[source] = source_package source_package.add(manifest) def record_pkg_packages(self) -> None: packages = sorted((self.context.root / "var/lib/pacman/local").glob("*/desc")) for desc in packages: name, version, source, arch = parse_pkg_desc(desc) package = PackageManifest("pkg", name, version, arch, 0) self.packages.append(package) source_package = self.source_packages.get(source) if source_package is None: source_package = SourcePackageManifest(source, None) self.source_packages[source] = source_package source_package.add(package) def has_data(self) -> bool: # We might add more data in the future return len(self.packages) > 0 def as_dict(self) -> dict[str, Any]: config = { "name": self.context.config.image_id or "image", "distribution": str(self.context.config.distribution), "architecture": str(self.context.config.architecture), } if self.context.config.image_version is not None: config["version"] = self.context.config.image_version if self.context.config.release is not None: config["release"] = self.context.config.release return { # Bump this when incompatible changes are made to the manifest format. "manifest_version": 1, # Describe the image itself. "config": config, # Describe the image content in terms of packages. "packages": [package.as_dict() for package in self.packages], } def write_json(self, out: IO[str]) -> None: json.dump(self.as_dict(), out, indent=2) def write_package_report(self, out: IO[str]) -> None: """Create a human-readable report about packages This is modelled after "Fedora compose reports" that are sent to fedora-devel. The format describes added and removed packages, and includes the changelogs. A diff between two such reports shows what changed *in* the packages quite nicely. """ out.write(f"Packages: {len(self.packages)}\n") out.write(f"Size: {sum(p.size for p in self.packages)}") for package in self.source_packages.values(): out.write(f"\n{80*'-'}\n") out.write(package.report()) mkosi-24.3/mkosi/mounts.py000066400000000000000000000116311465176501400156200ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import os import platform import stat import tempfile from collections.abc import Iterator, Sequence from pathlib import Path from typing import Optional from mkosi.config import Config from mkosi.run import run from mkosi.sandbox import Mount from mkosi.types import PathString from mkosi.util import umask from mkosi.versioncomp import GenericVersion def stat_is_whiteout(st: os.stat_result) -> bool: return stat.S_ISCHR(st.st_mode) and st.st_rdev == 0 def delete_whiteout_files(path: Path) -> None: """Delete any char(0,0) device nodes underneath @path Overlayfs uses such files to mark "whiteouts" (files present in the lower layers, but removed in the upper one). """ for entry in path.rglob("*"): # TODO: Use Path.stat() once we depend on Python 3.10+. 
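        # With Python >= 3.10 this could be written as (sketch):
        #   if stat_is_whiteout(entry.stat(follow_symlinks=False)):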
if stat_is_whiteout(os.stat(entry, follow_symlinks=False)): entry.unlink() @contextlib.contextmanager def mount( what: PathString, where: Path, operation: Optional[str] = None, options: Sequence[str] = (), type: Optional[str] = None, read_only: bool = False, lazy: bool = False, umount: bool = True, ) -> Iterator[Path]: if not where.exists(): with umask(~0o755): where.mkdir(parents=True) if read_only: options = ["ro", *options] cmd: list[PathString] = ["mount", "--no-mtab"] if operation: cmd += [operation] cmd += [what, where] if type: cmd += ["--types", type] if options: cmd += ["--options", ",".join(options)] try: run(cmd) yield where finally: if umount: run(["umount", "--no-mtab", *(["--lazy"] if lazy else []), where]) @contextlib.contextmanager def mount_overlay( lowerdirs: Sequence[Path], upperdir: Optional[Path] = None, where: Optional[Path] = None, lazy: bool = False, ) -> Iterator[Path]: with contextlib.ExitStack() as stack: if upperdir is None: upperdir = Path(stack.enter_context(tempfile.TemporaryDirectory(prefix="volatile-overlay"))) st = lowerdirs[-1].stat() os.chmod(upperdir, st.st_mode) os.chown(upperdir, st.st_uid, st.st_gid) workdir = Path( stack.enter_context(tempfile.TemporaryDirectory(dir=upperdir.parent, prefix=f"{upperdir.name}-workdir")) ) if where is None: where = Path( stack.enter_context( tempfile.TemporaryDirectory(dir=upperdir.parent, prefix=f"{upperdir.name}-mountpoint") ) ) options = [ f"lowerdir={':'.join(os.fspath(p) for p in reversed(lowerdirs))}", f"upperdir={upperdir}", f"workdir={workdir}", # Disable the inodes index and metacopy (only copy metadata upwards if possible) # options. If these are enabled (e.g., if the kernel enables them by default), # the mount will fail if the upper directory has been earlier used with a different # lower directory, such as with a build overlay that was generated on top of a # different temporary root. # See https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html#sharing-and-copying-layers # and https://github.com/systemd/mkosi/issues/1841. 
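            # The assembled option string then looks roughly like (illustrative paths):
            #   lowerdir=/lower,upperdir=/tmp/volatile-overlayXY,workdir=/tmp/volatile-overlayXY-workdirAB,index=off,metacopy=off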
"index=off", "metacopy=off" ] # userxattr is only supported on overlayfs since kernel 5.11 if GenericVersion(platform.release()) >= GenericVersion("5.11"): options.append("userxattr") try: with mount("overlay", where, options=options, type="overlay", lazy=lazy): yield where finally: delete_whiteout_files(upperdir) @contextlib.contextmanager def finalize_source_mounts(config: Config, *, ephemeral: bool) -> Iterator[list[Mount]]: with contextlib.ExitStack() as stack: sources = ( (stack.enter_context(mount_overlay([source])) if ephemeral else source, target) for source, target in {t.with_prefix(Path("/work/src")) for t in config.build_sources} ) yield [Mount(src, target) for src, target in sorted(sources, key=lambda s: s[1])] def finalize_crypto_mounts(config: Config) -> list[Mount]: root = config.tools() if config.tools_tree_certificates else Path("/") mounts = [ (root / subdir, Path("/") / subdir) for subdir in ( Path("usr/share/keyrings"), Path("usr/share/distribution-gpg-keys"), Path("etc/pki"), Path("etc/ssl"), Path("etc/ca-certificates"), Path("etc/pacman.d/gnupg"), Path("var/lib/ca-certificates"), ) if (root / subdir).exists() ] return [ Mount(src, target, ro=True) for src, target in sorted(set(mounts), key=lambda s: s[1]) ] mkosi-24.3/mkosi/pager.py000066400000000000000000000010201465176501400153600ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os import pydoc from typing import Optional def page(text: str, enabled: Optional[bool]) -> None: if enabled: # Initialize less options from $MKOSI_LESS or provide a suitable fallback. # F: don't page if one screen # X: do not clear screen # M: verbose prompt # K: quit on ^C # R: allow rich formatting os.environ["LESS"] = os.getenv("MKOSI_LESS", "FXMKR") pydoc.pager(text) else: print(text) mkosi-24.3/mkosi/partition.py000066400000000000000000000044551465176501400163120ustar00rootroot00000000000000import dataclasses import json import subprocess from collections.abc import Mapping, Sequence from pathlib import Path from typing import Any, Optional from mkosi.log import die from mkosi.run import run from mkosi.sandbox import Mount, SandboxProtocol, nosandbox @dataclasses.dataclass(frozen=True) class Partition: type: str uuid: str partno: Optional[int] split_path: Optional[Path] roothash: Optional[str] @classmethod def from_dict(cls, dict: Mapping[str, Any]) -> "Partition": return cls( type=dict["type"], uuid=dict["uuid"], partno=int(partno) if (partno := dict.get("partno")) else None, split_path=Path(p) if ((p := dict.get("split_path")) and p != "-") else None, roothash=dict.get("roothash"), ) GRUB_BOOT_PARTITION_UUID = "21686148-6449-6e6f-744e-656564454649" def find_partitions(image: Path, *, sandbox: SandboxProtocol = nosandbox) -> list[Partition]: output = json.loads( run( ["systemd-repart", "--json=short", image], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, sandbox=sandbox(binary="systemd-repart", mounts=[Mount(image, image, ro=True)]), ).stdout ) return [Partition.from_dict(d) for d in output] def finalize_roothash(partitions: Sequence[Partition]) -> Optional[str]: roothash = usrhash = None for p in partitions: if (h := p.roothash) is None: continue if not (p.type.startswith("usr") or p.type.startswith("root")): die(f"Found roothash property on unexpected partition type {p.type}") # When there's multiple verity enabled root or usr partitions, the first one wins. 
if p.type.startswith("usr"): usrhash = usrhash or h else: roothash = roothash or h return f"roothash={roothash}" if roothash else f"usrhash={usrhash}" if usrhash else None def finalize_root(partitions: Sequence[Partition]) -> Optional[str]: root = finalize_roothash(partitions) if not root: root = next((f"root=PARTUUID={p.uuid}" for p in partitions if p.type.startswith("root")), None) if not root: root = next((f"mount.usr=PARTUUID={p.uuid}" for p in partitions if p.type.startswith("usr")), None) return root mkosi-24.3/mkosi/qemu.py000066400000000000000000001403231465176501400152430ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import asyncio import base64 import contextlib import dataclasses import enum import errno import fcntl import hashlib import json import logging import os import random import resource import shutil import signal import socket import struct import subprocess import sys import tempfile import textwrap import uuid from collections.abc import Iterator, Sequence from pathlib import Path from typing import Optional from mkosi.config import ( Args, Config, ConfigFeature, Network, OutputFormat, QemuDrive, QemuFirmware, QemuVsockCID, format_bytes, systemd_tool_version, want_selinux_relabel, yes_no, ) from mkosi.log import ARG_DEBUG, die from mkosi.mounts import finalize_source_mounts from mkosi.partition import finalize_root, find_partitions from mkosi.run import SD_LISTEN_FDS_START, AsyncioThread, find_binary, fork_and_wait, kill, run, spawn from mkosi.sandbox import Mount from mkosi.tree import copy_tree, rmtree from mkosi.types import PathString from mkosi.user import INVOKING_USER, become_root, become_root_cmd from mkosi.util import StrEnum, flock, flock_or_die, groupby, round_up, try_or from mkosi.versioncomp import GenericVersion QEMU_KVM_DEVICE_VERSION = GenericVersion("9.0") VHOST_VSOCK_SET_GUEST_CID = 0x4008af60 class QemuDeviceNode(StrEnum): kvm = enum.auto() vhost_vsock = enum.auto() def device(self) -> Path: return Path("/dev") / str(self) def description(self) -> str: return { QemuDeviceNode.kvm: "KVM acceleration", QemuDeviceNode.vhost_vsock: "a VSock device", }[self] def feature(self, config: Config) -> ConfigFeature: return { QemuDeviceNode.kvm: config.qemu_kvm, QemuDeviceNode.vhost_vsock: config.qemu_vsock, }[self] def open(self) -> int: return os.open(self.device(), os.O_RDWR|os.O_CLOEXEC|os.O_NONBLOCK) def available(self, log: bool = False) -> bool: try: os.close(self.open()) except OSError as e: if e.errno not in (errno.ENOENT, errno.ENODEV, errno.EPERM, errno.EACCES): raise e if log and e.errno in (errno.ENOENT, errno.ENODEV): logging.warning(f"{self.device()} not found. Not adding {self.description()} to the virtual machine.") if log and e.errno in (errno.EPERM, errno.EACCES): logging.warning( f"Permission denied to access {self.device()}. " f"Not adding {self.description()} to the virtual machine. " "(Maybe a kernel module could not be loaded?)" ) return False return True def hash_output(config: Config) -> "hashlib._Hash": p = os.fspath(config.output_dir_or_cwd() / config.output) return hashlib.sha256(p.encode()) def hash_to_vsock_cid(hash: "hashlib._Hash") -> int: cid = int.from_bytes(hash.digest()[:4], byteorder='little') # Make sure we don't return any of the well-known CIDs. 
return max(3, min(cid, 0xFFFFFFFF - 1)) def vsock_cid_in_use(vfd: int, cid: int) -> bool: try: fcntl.ioctl(vfd, VHOST_VSOCK_SET_GUEST_CID, struct.pack("=Q", cid)) except OSError as e: if e.errno != errno.EADDRINUSE: raise return True return False def find_unused_vsock_cid(config: Config, vfd: int) -> int: hash = hash_output(config) for i in range(64): cid = hash_to_vsock_cid(hash) if not vsock_cid_in_use(vfd, cid): return cid hash.update(i.to_bytes(length=4, byteorder='little')) for i in range(64): cid = random.randint(0, 0xFFFFFFFF - 1) if not vsock_cid_in_use(vfd, cid): return cid die("Failed to find an unused VSock connection ID") class KernelType(StrEnum): pe = enum.auto() uki = enum.auto() unknown = enum.auto() @classmethod def identify(cls, config: Config, path: Path) -> "KernelType": if not config.find_binary("bootctl"): logging.warning("bootctl is not installed, assuming 'unknown' kernel type") return KernelType.unknown if (v := systemd_tool_version("bootctl", sandbox=config.sandbox)) < 253: logging.warning(f"bootctl {v} doesn't know kernel-identify verb, assuming 'unknown' kernel type") return KernelType.unknown type = run( ["bootctl", "kernel-identify", path], stdout=subprocess.PIPE, sandbox=config.sandbox(binary="bootctl", mounts=[Mount(path, path, ro=True)]), ).stdout.strip() try: return cls(type) except ValueError: logging.warning(f"Unknown kernel type '{type}', assuming 'unknown'") return KernelType.unknown @dataclasses.dataclass(frozen=True) class OvmfConfig: description: Path firmware: Path format: str vars: Path vars_format: str def find_ovmf_firmware(config: Config, qemu: Path, firmware: QemuFirmware) -> Optional[OvmfConfig]: if not firmware.is_uefi(): return None tools = Path("/") if any(qemu.is_relative_to(d) for d in config.extra_search_paths) else config.tools() desc = list((tools / "usr/share/qemu/firmware").glob("*")) if tools == Path("/"): desc += list((tools / "etc/qemu/firmware").glob("*")) arch = config.architecture.to_qemu() machine = config.architecture.default_qemu_machine() for p in sorted(desc): if p.is_dir(): continue j = json.loads(p.read_text()) if "uefi" not in j["interface-types"]: logging.debug(f"{p.name} firmware description does not target UEFI, skipping") continue for target in j["targets"]: if target["architecture"] != arch: continue # We cannot use fnmatch as for example our default machine for x86-64 is q35 and the firmware description # lists "pc-q35-*" so we use a substring check instead. 
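            # For example: the default x86-64 machine "q35" is a substring of the
            # glob "pc-q35-*" published by the firmware description, whereas
            # fnmatch.fnmatch("q35", "pc-q35-*") would be False.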
            if any(machine in glob for glob in target["machines"]):
                break
        else:
            logging.debug(
                f"{p.name} firmware description does not target architecture {arch} or machine {machine}, skipping"
            )
            continue

        if firmware == QemuFirmware.uefi_secure_boot and "secure-boot" not in j["features"]:
            logging.debug(f"{p.name} firmware description does not include secure boot, skipping")
            continue

        if firmware != QemuFirmware.uefi_secure_boot and "secure-boot" in j["features"]:
            logging.debug(f"{p.name} firmware description includes secure boot, skipping")
            continue

        if config.qemu_firmware_variables == Path("microsoft") and "enrolled-keys" not in j["features"]:
            logging.debug(f"{p.name} firmware description does not have enrolled Microsoft keys, skipping")
            continue

        if config.qemu_firmware_variables != Path("microsoft") and "enrolled-keys" in j["features"]:
            logging.debug(f"{p.name} firmware description has enrolled Microsoft keys, skipping")
            continue

        logging.debug(f"Using {p.name} firmware description")

        return OvmfConfig(
            description=Path("/") / p.relative_to(tools),
            firmware=Path(j["mapping"]["executable"]["filename"]),
            format=j["mapping"]["executable"]["format"],
            vars=Path(j["mapping"]["nvram-template"]["filename"]),
            vars_format=j["mapping"]["nvram-template"]["format"],
        )

    die("Couldn't find matching OVMF UEFI firmware description")


@contextlib.contextmanager
def start_swtpm(config: Config) -> Iterator[Path]:
    with tempfile.TemporaryDirectory(prefix="mkosi-swtpm-") as state:
        # swtpm_setup is noisy and doesn't have a --quiet option so we pipe its stdout to /dev/null.
        run(
            ["swtpm_setup", "--tpm-state", state, "--tpm2", "--pcr-banks", "sha256", "--config", "/dev/null"],
            sandbox=config.sandbox(
                binary="swtpm_setup",
                mounts=[Mount(state, state)],
            ),
            scope=scope_cmd(
                name=f"mkosi-swtpm-{config.machine_or_name()}",
                description=f"swtpm for {config.machine_or_name()}",
            ),
            env=scope_env(),
            stdout=None if ARG_DEBUG.get() else subprocess.DEVNULL,
        )

        cmdline = ["swtpm", "socket", "--tpm2", "--tpmstate", f"dir={state}"]

        # We create the socket ourselves and pass the fd to swtpm to avoid race conditions where we start qemu before
        # swtpm has had the chance to create the socket (or where we try to chown it first).
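        # Sketch of the handoff: we bind() and listen() below, then swtpm inherits
        # the listening socket as fd 3 (SD_LISTEN_FDS_START), so qemu can connect
        # even if it starts before swtpm is fully up.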
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock: path = Path(state) / Path("sock") sock.bind(os.fspath(path)) sock.listen() cmdline += ["--ctrl", f"type=unixio,fd={SD_LISTEN_FDS_START}"] with spawn( cmdline, pass_fds=(sock.fileno(),), sandbox=config.sandbox(binary="swtpm", mounts=[Mount(state, state)]), ) as (proc, innerpid): yield path kill(proc, innerpid, signal.SIGTERM) def find_virtiofsd(*, root: Path = Path("/"), extra: Sequence[Path] = ()) -> Optional[Path]: if p := find_binary("virtiofsd", root=root, extra=extra): return p if (p := root / "usr/libexec/virtiofsd").exists(): return Path("/") / p.relative_to(root) if (p := root / "usr/lib/virtiofsd").exists(): return Path("/") / p.relative_to(root) return None def unshare_version() -> str: return run(["unshare", "--version"], stdout=subprocess.PIPE).stdout.strip().split()[-1] @contextlib.contextmanager def start_virtiofsd(config: Config, directory: PathString, *, name: str, selinux: bool = False) -> Iterator[Path]: uidmap = Path(directory).stat().st_uid == INVOKING_USER.uid virtiofsd = find_virtiofsd(root=config.tools(), extra=config.extra_search_paths) if virtiofsd is None: die("virtiofsd must be installed to boot directory images or use RuntimeTrees= with mkosi qemu") cmdline: list[PathString] = [ virtiofsd, "--shared-dir", directory, "--xattr", # qemu's client doesn't seem to support announcing submounts so disable the feature to avoid the warning. "--no-announce-submounts", "--sandbox=chroot", f"--inode-file-handles={'prefer' if os.getuid() == 0 and not uidmap else 'never'}", ] if selinux: cmdline += ["--security-label"] # We create the socket ourselves and pass the fd to virtiofsd to avoid race conditions where we start qemu # before virtiofsd has had the chance to create the socket (or where we try to chown it first). with ( tempfile.TemporaryDirectory(prefix="mkosi-virtiofsd-") as context, socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock, ): # Make sure virtiofsd can access the socket in this directory. os.chown(context, INVOKING_USER.uid, INVOKING_USER.gid) # Make sure we can use the socket name as a unique identifier for the fs as well but make sure it's not too # long as virtiofs tag names are limited to 36 bytes. path = Path(context) / f"sock-{uuid.uuid4().hex}"[:35] sock.bind(os.fspath(path)) sock.listen() # Make sure virtiofsd can connect to the socket. os.chown(path, INVOKING_USER.uid, INVOKING_USER.gid) cmdline += ["--fd", str(SD_LISTEN_FDS_START)] name = f"mkosi-virtiofsd-{name}" description = f"virtiofsd for {directory}" uid = gid = None runas = [] scope = [] if uidmap: uid = INVOKING_USER.uid if os.getuid() != INVOKING_USER.uid else None gid = INVOKING_USER.gid if os.getgid() != INVOKING_USER.gid else None scope = scope_cmd(name=name, description=description, user=uid, group=gid) elif not uidmap and (os.getuid() == 0 or unshare_version() >= "2.38"): runas = become_root_cmd() scope = scope_cmd(name=name, description=description) with spawn( cmdline, pass_fds=(sock.fileno(),), # When not invoked as root, bubblewrap will automatically map the current uid/gid to the requested uid/gid # in the user namespace it spawns, so by specifying --uid 0 --gid 0 we'll get a userns with the current # uid/gid mapped to root in the userns. --cap-add=all is required to make virtiofsd work. Since it drops # capabilities itself, we don't bother figuring out the exact set of capabilities it needs. 
user=uid if not scope else None, group=gid if not scope else None, preexec_fn=become_root if not scope and not uidmap else None, env=scope_env() if scope else {}, sandbox=config.sandbox( binary=virtiofsd, mounts=[Mount(directory, directory)], options=["--uid", "0", "--gid", "0", "--cap-add", "all"], setup=runas, ), scope=scope, ) as (proc, innerpid): yield path kill(proc, innerpid, signal.SIGTERM) @contextlib.contextmanager def vsock_notify_handler() -> Iterator[tuple[str, dict[str, str]]]: """ This yields a vsock address and a dict that will be filled in with the notifications from the VM. The dict should only be accessed after the context manager has been finalized. """ with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as vsock: vsock.bind((socket.VMADDR_CID_ANY, socket.VMADDR_PORT_ANY)) vsock.listen() vsock.setblocking(False) num_messages = 0 num_bytes = 0 messages = {} async def notify() -> None: nonlocal num_messages nonlocal num_bytes loop = asyncio.get_running_loop() while True: s, _ = await loop.sock_accept(vsock) num_messages += 1 with s: data = [] try: while (buf := await loop.sock_recv(s, 4096)): data.append(buf) except ConnectionResetError: logging.debug("vsock notify listener connection reset by peer") for msg in b"".join(data).decode().split("\n"): if not msg: continue num_bytes += len(msg) k, _, v = msg.partition("=") messages[k] = v with AsyncioThread(notify()): try: yield f"vsock-stream:{socket.VMADDR_CID_HOST}:{vsock.getsockname()[1]}", messages finally: logging.debug(f"Received {num_messages} notify messages totalling {format_bytes(num_bytes)} bytes") for k, v in messages.items(): logging.debug(f"- {k}={v}") @contextlib.contextmanager def start_journal_remote(config: Config, sockfd: int) -> Iterator[None]: assert config.forward_journal bin = config.find_binary("systemd-journal-remote", "/usr/lib/systemd/systemd-journal-remote") if not bin: die("systemd-journal-remote must be installed to forward logs from the virtual machine") d = config.forward_journal.parent if config.forward_journal.suffix == ".journal" else config.forward_journal if not d.exists(): # Pass exist_ok=True because multiple mkosi processes might be trying to create the parent directory at the # same time. d.mkdir(exist_ok=True, parents=True) # Make sure COW is disabled so systemd-journal-remote doesn't complain on btrfs filesystems. run(["chattr", "+C", d], check=False, stderr=subprocess.DEVNULL if not ARG_DEBUG.get() else None) INVOKING_USER.chown(d) with tempfile.NamedTemporaryFile(mode="w", prefix="mkosi-journal-remote-config-") as f: os.chmod(f.name, 0o644) # Make sure we capture all the logs by bumping the limits. We set MaxFileSize=4G because with the compact mode # enabled the files cannot grow any larger anyway. 
f.write( textwrap.dedent( f"""\ [Remote] MaxUse=1T KeepFree=1G MaxFileSize=4G MaxFiles={1 if config.forward_journal.suffix == ".journal" else 100} """ ) ) f.flush() user = config.forward_journal.parent.stat().st_uid if INVOKING_USER.invoked_as_root else None group = config.forward_journal.parent.stat().st_gid if INVOKING_USER.invoked_as_root else None scope = scope_cmd( name=f"mkosi-journal-remote-{config.machine_or_name()}", description=f"mkosi systemd-journal-remote for {config.machine_or_name()}", user=user, group=group, ) with spawn( [ bin, "--output", config.forward_journal, "--split-mode", "none" if config.forward_journal.suffix == ".journal" else "host", ], pass_fds=(sockfd,), sandbox=config.sandbox( binary=bin, mounts=[ Mount(config.forward_journal.parent, config.forward_journal.parent), Mount(f.name, "/etc/systemd/journal-remote.conf"), ], ), user=user if not scope else None, group=group if not scope else None, scope=scope, env=scope_env(), foreground=False, ) as (proc, innerpid): yield kill(proc, innerpid, signal.SIGTERM) @contextlib.contextmanager def start_journal_remote_vsock(config: Config) -> Iterator[str]: with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as sock: sock.bind((socket.VMADDR_CID_ANY, socket.VMADDR_PORT_ANY)) sock.listen() with start_journal_remote(config, sock.fileno()): yield f"vsock-stream:{socket.VMADDR_CID_HOST}:{sock.getsockname()[1]}" @contextlib.contextmanager def copy_ephemeral(config: Config, src: Path) -> Iterator[Path]: if not config.ephemeral or config.output_format in (OutputFormat.cpio, OutputFormat.uki): with flock_or_die(src): yield src return src = src.resolve() # tempfile doesn't provide an API to get a random filename in an arbitrary directory so we do this # instead. Limit the size to 16 characters as the output name might be used in a unix socket path by vmspawn and # needs to fit in 108 characters. 
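    # For example (hypothetical suffix): "image.raw" might become
    # "image.raw-1f2e3d4c5b6a7988"; the 16 character suffix keeps the name short
    # enough for vmspawn to embed it in a 108 byte AF_UNIX socket path.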
tmp = src.parent / f"{src.name}-{uuid.uuid4().hex[:16]}" try: def copy() -> None: if config.output_format == OutputFormat.directory: become_root() elif config.output_format in (OutputFormat.disk, OutputFormat.esp): attr = run( ["lsattr", "-l", src], sandbox=config.sandbox(binary="lsattr", mounts=[Mount(src, src, ro=True)]), stdout=subprocess.PIPE, ).stdout if "No_COW" in attr: tmp.touch() run( ["chattr", "+C", tmp], sandbox=config.sandbox(binary="chattr", mounts=[Mount(tmp, tmp)]), ) copy_tree( src, tmp, preserve=config.output_format == OutputFormat.directory, use_subvolumes=config.use_subvolumes, sandbox=config.sandbox, ) with flock(src): fork_and_wait(copy) yield tmp finally: def rm() -> None: if config.output_format == OutputFormat.directory: become_root() rmtree(tmp, sandbox=config.sandbox) fork_and_wait(rm) def qemu_version(config: Config, binary: Path) -> GenericVersion: return GenericVersion( run( [binary, "--version"], stdout=subprocess.PIPE, sandbox=config.sandbox(binary=binary), ).stdout.split()[3] ) def want_scratch(config: Config) -> bool: return config.runtime_scratch == ConfigFeature.enabled or ( config.runtime_scratch == ConfigFeature.auto and config.find_binary(f"mkfs.{config.distribution.filesystem()}") is not None ) @contextlib.contextmanager def generate_scratch_fs(config: Config) -> Iterator[Path]: with tempfile.NamedTemporaryFile(dir="/var/tmp", prefix="mkosi-scratch-") as scratch: scratch.truncate(1024**4) fs = config.distribution.filesystem() extra = config.environment.get(f"SYSTEMD_REPART_MKFS_OPTIONS_{fs.upper()}", "") run( [f"mkfs.{fs}", "-L", "scratch", *extra.split(), scratch.name], stdout=subprocess.DEVNULL, sandbox=config.sandbox(binary= f"mkfs.{fs}", mounts=[Mount(scratch.name, scratch.name)]), ) yield Path(scratch.name) def finalize_qemu_firmware(config: Config, kernel: Optional[Path]) -> QemuFirmware: if config.qemu_firmware == QemuFirmware.auto: if kernel: return ( QemuFirmware.uefi_secure_boot if KernelType.identify(config, kernel) != KernelType.unknown else QemuFirmware.linux ) elif ( config.output_format in (OutputFormat.cpio, OutputFormat.directory) or config.architecture.to_efi() is None ): return QemuFirmware.linux else: # At the moment there are no qemu firmware descriptions for non-x86 architectures that advertise # secure-boot support so let's default to no secure boot for non-x86 architectures. 
return QemuFirmware.uefi_secure_boot if config.architecture.is_x86_variant() else QemuFirmware.uefi else: return config.qemu_firmware def finalize_firmware_variables( config: Config, qemu: Path, ovmf: OvmfConfig, stack: contextlib.ExitStack, ) -> tuple[Path, str]: ovmf_vars = stack.enter_context(tempfile.NamedTemporaryFile(prefix="mkosi-ovmf-vars-")) if config.qemu_firmware_variables in (None, Path("custom"), Path("microsoft")): ovmf_vars_format = ovmf.vars_format else: ovmf_vars_format = "raw" if config.qemu_firmware_variables == Path("custom"): assert config.secure_boot_certificate run( [ "virt-fw-vars", "--input", ovmf.vars, "--output", ovmf_vars.name, "--enroll-cert", config.secure_boot_certificate, "--add-db", "OvmfEnrollDefaultKeys", config.secure_boot_certificate, "--no-microsoft", "--secure-boot", "--loglevel", "WARNING", ], sandbox=config.sandbox( binary=qemu, mounts=[ Mount(ovmf_vars.name, ovmf_vars.name), Mount(config.secure_boot_certificate, config.secure_boot_certificate, ro=True), ], ), ) else: tools = Path("/") if any(qemu.is_relative_to(d) for d in config.extra_search_paths) else config.tools() vars = ( tools / ovmf.vars.relative_to("/") if config.qemu_firmware_variables == Path("microsoft") or not config.qemu_firmware_variables else config.qemu_firmware_variables ) shutil.copy2(vars, Path(ovmf_vars.name)) return Path(ovmf_vars.name), ovmf_vars_format def apply_runtime_size(config: Config, image: Path) -> None: if config.output_format != OutputFormat.disk or not config.runtime_size: return run( [ "systemd-repart", "--definitions", "", "--no-pager", # To use qemu's cache.direct option, the drive size has to be a multiple of the page size. f"--size={round_up(config.runtime_size, resource.getpagesize())}", "--pretty=no", "--offline=yes", image, ], sandbox=config.sandbox(binary="systemd-repart", mounts=[Mount(image, image)]), ) @contextlib.contextmanager def finalize_drive(drive: QemuDrive) -> Iterator[Path]: with tempfile.NamedTemporaryFile(dir=drive.directory or "/var/tmp", prefix=f"mkosi-drive-{drive.id}") as file: file.truncate(drive.size) yield Path(file.name) @contextlib.contextmanager def finalize_state(config: Config, cid: int) -> Iterator[None]: (INVOKING_USER.runtime_dir() / "machine").mkdir(parents=True, exist_ok=True) if INVOKING_USER.is_regular_user(): os.chown(INVOKING_USER.runtime_dir(), INVOKING_USER.uid, INVOKING_USER.gid) os.chown(INVOKING_USER.runtime_dir() / "machine", INVOKING_USER.uid, INVOKING_USER.gid) with flock(INVOKING_USER.runtime_dir() / "machine"): if (p := INVOKING_USER.runtime_dir() / "machine" / f"{config.machine_or_name()}.json").exists(): die(f"Another virtual machine named {config.machine_or_name()} is already running", hint="Use --machine to specify a different virtual machine name") p.write_text( json.dumps( { "Machine": config.machine_or_name(), "ProxyCommand": f"socat - VSOCK-CONNECT:{cid}:%p", "SshKey": os.fspath(config.ssh_key) if config.ssh_key else None, }, sort_keys=True, indent=4, ) ) if INVOKING_USER.is_regular_user(): os.chown(p, INVOKING_USER.uid, INVOKING_USER.gid) try: yield finally: with flock(INVOKING_USER.runtime_dir() / "machine"): p.unlink(missing_ok=True) def scope_env() -> dict[str, str]: if not find_binary("systemd-run"): return {} elif os.getuid() != 0 and "DBUS_SESSION_BUS_ADDRESS" in os.environ and "XDG_RUNTIME_DIR" in os.environ: return { "DBUS_SESSION_BUS_ADDRESS": os.environ["DBUS_SESSION_BUS_ADDRESS"], "XDG_RUNTIME_DIR": os.environ["XDG_RUNTIME_DIR"] } elif os.getuid() == 0: if "DBUS_SYSTEM_ADDRESS" in 
os.environ: return {"DBUS_SYSTEM_ADDRESS": os.environ["DBUS_SYSTEM_ADDRESS"]} elif Path("/run/dbus/system_bus_socket").exists(): return {"DBUS_SYSTEM_ADDRESS": "/run/dbus/system_bus_socket"} else: return {} else: return {} def scope_cmd( name: str, description: str, user: Optional[int] = None, group: Optional[int] = None, properties: Sequence[str] = (), ) -> list[str]: if not scope_env(): return [] return [ "systemd-run", "--system" if os.getuid() == 0 else "--user", *(["--quiet"] if not ARG_DEBUG.get() else []), "--unit", name, "--description", description, "--scope", "--collect", *(["--expand-environment=no"] if systemd_tool_version("systemd-run") >= 254 else []), *(["--uid", str(user)] if user is not None else []), *(["--gid", str(group)] if group is not None else []), *([f"--property={p}" for p in properties]), ] def register_machine(config: Config, pid: int, fname: Path) -> None: if ( os.getuid() != 0 or ("DBUS_SYSTEM_ADDRESS" not in os.environ and not Path("/run/dbus/system_bus_socket").exists()) ): return run( [ "busctl", "call", "--quiet", "org.freedesktop.machine1", "/org/freedesktop/machine1", "org.freedesktop.machine1.Manager", "RegisterMachine", "sayssus", config.machine_or_name().replace("_", "-"), "0", "mkosi", "vm", str(pid), fname if fname.is_dir() else "", ], foreground=False, env=os.environ | config.environment, sandbox=config.sandbox(binary="busctl", relaxed=True), # systemd-machined might not be installed so let's ignore any failures unless running in debug mode. check=ARG_DEBUG.get(), stderr=None if ARG_DEBUG.get() else subprocess.DEVNULL, ) def run_qemu(args: Args, config: Config) -> None: if config.output_format not in ( OutputFormat.disk, OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp, OutputFormat.directory, ): die(f"{config.output_format} images cannot be booted in qemu") if ( config.output_format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp) and config.qemu_firmware not in (QemuFirmware.auto, QemuFirmware.linux) and not config.qemu_firmware.is_uefi() ): die(f"{config.output_format} images cannot be booted with the '{config.qemu_firmware}' firmware") if config.runtime_trees and config.qemu_firmware == QemuFirmware.bios: die("RuntimeTrees= cannot be used when booting in BIOS firmware") if config.qemu_kvm == ConfigFeature.enabled and not config.architecture.is_native(): die(f"KVM acceleration requested but {config.architecture} does not match the native host architecture") if config.qemu_firmware_variables == Path("custom") and not config.secure_boot_certificate: die("SecureBootCertificate= must be configured to use QemuFirmwareVariables=custom") # After we unshare the user namespace to sandbox qemu, we might not have access to /dev/kvm or related device nodes # anymore as access to these might be gated behind the kvm group and we won't be part of the kvm group anymore # after unsharing the user namespace. To get around this, open all those device nodes early can pass them as file # descriptors to qemu later. Note that we can't pass the kvm file descriptor to qemu until version 9.0. 
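    # The result is a mapping like (illustrative fd numbers):
    #   {QemuDeviceNode.kvm: 5, QemuDeviceNode.vhost_vsock: 6}
    # with entries missing for devices that are disabled or inaccessible.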
qemu_device_fds = { d: d.open() for d in QemuDeviceNode if d.feature(config) != ConfigFeature.disabled and d.available(log=True) } if not (qemu := config.find_binary(f"qemu-system-{config.architecture.to_qemu()}")): die("qemu not found.", hint=f"Is qemu-system-{config.architecture.to_qemu()} installed on the host system?") have_kvm = ((qemu_version(config, qemu) < QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm.available()) or (qemu_version(config, qemu) >= QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm in qemu_device_fds)) if config.qemu_kvm == ConfigFeature.enabled and not have_kvm: die("KVM acceleration requested but cannot access /dev/kvm") if config.qemu_vsock == ConfigFeature.enabled and QemuDeviceNode.vhost_vsock not in qemu_device_fds: die("VSock requested but cannot access /dev/vhost-vsock") if config.qemu_kernel: kernel = config.qemu_kernel elif "-kernel" in args.cmdline: kernel = Path(args.cmdline[args.cmdline.index("-kernel") + 1]) else: kernel = None if config.output_format in (OutputFormat.uki, OutputFormat.esp) and kernel: logging.warning( f"Booting UKI output, kernel {kernel} configured with QemuKernel= or passed with -kernel will not be used" ) kernel = None if kernel and not kernel.exists(): die(f"Kernel not found at {kernel}") firmware = finalize_qemu_firmware(config, kernel) if ( not kernel and ( firmware == QemuFirmware.linux or config.output_format in (OutputFormat.cpio, OutputFormat.directory, OutputFormat.uki) ) ): if firmware.is_uefi(): name = config.output if config.output_format == OutputFormat.uki else config.output_split_uki kernel = config.output_dir_or_cwd() / name else: kernel = config.output_dir_or_cwd() / config.output_split_kernel if not kernel.exists(): die( f"Kernel or UKI not found at {kernel}, please install a kernel in the image " "or provide a -kernel argument to mkosi qemu" ) ovmf = find_ovmf_firmware(config, qemu, firmware) # A shared memory backend might increase ram usage so only add one if actually necessary for virtiofsd. 
shm = [] if config.runtime_trees or config.runtime_build_sources or config.output_format == OutputFormat.directory: shm = ["-object", f"memory-backend-memfd,id=mem,size={config.qemu_mem // 1024**2}M,share=on"] machine = f"type={config.architecture.default_qemu_machine()}" if firmware.is_uefi() and config.architecture.supports_smm(): machine += f",smm={'on' if firmware == QemuFirmware.uefi_secure_boot else 'off'}" if shm: machine += ",memory-backend=mem" cmdline: list[PathString] = [ qemu, "-machine", machine, "-smp", str(config.qemu_smp or os.cpu_count()), "-m", f"{config.qemu_mem // 1024**2}M", "-object", "rng-random,filename=/dev/urandom,id=rng0", "-device", "virtio-rng-pci,rng=rng0,id=rng-device0", "-device", "virtio-balloon,free-page-reporting=on", "-no-user-config", *shm, ] if config.runtime_network == Network.user: cmdline += ["-nic", f"user,model={config.architecture.default_qemu_nic_model()}"] elif config.runtime_network == Network.interface: if os.getuid() != 0: die("RuntimeNetwork=interface requires root privileges") cmdline += ["-nic", "tap,script=no,model=virtio-net-pci"] elif config.runtime_network == Network.none: cmdline += ["-nic", "none"] if config.qemu_kvm != ConfigFeature.disabled and have_kvm and config.architecture.can_kvm(): accel = "kvm" if qemu_version(config, qemu) >= QEMU_KVM_DEVICE_VERSION: index = list(qemu_device_fds.keys()).index(QemuDeviceNode.kvm) cmdline += ["--add-fd", f"fd={SD_LISTEN_FDS_START + index},set=1,opaque=/dev/kvm"] accel += ",device=/dev/fdset/1" else: accel = "tcg" cmdline += ["-accel", accel] cid: Optional[int] = None if QemuDeviceNode.vhost_vsock in qemu_device_fds: if config.qemu_vsock_cid == QemuVsockCID.auto: cid = find_unused_vsock_cid(config, qemu_device_fds[QemuDeviceNode.vhost_vsock]) elif config.qemu_vsock_cid == QemuVsockCID.hash: cid = hash_to_vsock_cid(hash_output(config)) else: cid = config.qemu_vsock_cid if vsock_cid_in_use(qemu_device_fds[QemuDeviceNode.vhost_vsock], cid): die(f"VSock connection ID {cid} is already in use by another virtual machine", hint="Use QemuVsockConnectionId=auto to have mkosi automatically find a free vsock connection ID") index = list(qemu_device_fds.keys()).index(QemuDeviceNode.vhost_vsock) cmdline += [ "-device", f"vhost-vsock-pci,guest-cid={cid},vhostfd={SD_LISTEN_FDS_START + index}" ] cmdline += ["-cpu", "max"] if config.qemu_gui: cmdline += ["-vga", "virtio"] else: # -nodefaults removes the default CDROM device which avoids an error message during boot # -serial mon:stdio adds back the serial device removed by -nodefaults. cmdline += [ "-nographic", "-nodefaults", "-chardev", "stdio,mux=on,id=console,signal=off", "-device", "virtio-serial-pci,id=mkosi-virtio-serial-pci", "-device", "virtconsole,chardev=console", "-mon", "console", ] # QEMU has built-in logic to look for the BIOS firmware so we don't need to do anything special for that. 
if firmware.is_uefi(): assert ovmf cmdline += ["-drive", f"if=pflash,format={ovmf.format},readonly=on,file={ovmf.firmware}"] notifications: dict[str, str] = {} with contextlib.ExitStack() as stack: if firmware.is_uefi(): assert ovmf ovmf_vars, ovmf_vars_format = finalize_firmware_variables(config, qemu, ovmf, stack) cmdline += ["-drive", f"file={ovmf_vars},if=pflash,format={ovmf_vars_format}"] if firmware == QemuFirmware.uefi_secure_boot: cmdline += [ "-global", "ICH9-LPC.disable_s3=1", "-global", "driver=cfi.pflash01,property=secure,value=on", ] if config.qemu_cdrom and config.output_format in (OutputFormat.disk, OutputFormat.esp): # CD-ROM devices have sector size 2048 so we transform disk images into ones with sector size 2048. src = (config.output_dir_or_cwd() / config.output_with_compression).resolve() fname = src.parent / f"{src.name}-{uuid.uuid4().hex}" run( [ "systemd-repart", "--definitions", "", "--no-pager", "--pretty=no", "--offline=yes", "--empty=create", "--size=auto", "--sector-size=2048", "--copy-from", src, fname, ], sandbox=config.sandbox( binary="systemd-repart", vartmp=True, mounts=[Mount(fname.parent, fname.parent), Mount(src, src, ro=True)], ), ) stack.callback(lambda: fname.unlink()) else: fname = stack.enter_context( copy_ephemeral(config, config.output_dir_or_cwd() / config.output_with_compression) ) apply_runtime_size(config, fname) if ( kernel and ( KernelType.identify(config, kernel) != KernelType.uki or not config.architecture.supports_smbios(firmware) ) ): kcl = config.kernel_command_line + config.kernel_command_line_extra else: kcl = config.kernel_command_line_extra if kernel: cmdline += ["-kernel", kernel] if any(s.startswith("root=") for s in kcl): pass elif config.output_format == OutputFormat.disk: # We can't rely on gpt-auto-generator when direct kernel booting so synthesize a root= # kernel argument instead. 
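                # For example (hypothetical UUID): this synthesizes a parameter such as
                #   root=PARTUUID=0a1b2c3d-4e5f-6071-8293-a4b5c6d7e8f9
                # or roothash=... when a verity-enabled root/usr partition is found.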
root = finalize_root(find_partitions(fname, sandbox=config.sandbox)) if not root: die("Cannot perform a direct kernel boot without a root or usr partition") kcl += [root] elif config.output_format == OutputFormat.directory: sock = stack.enter_context( start_virtiofsd( config, fname, name=config.machine_or_name(), selinux=bool(want_selinux_relabel(config, fname, fatal=False))), ) cmdline += [ "-chardev", f"socket,id={sock.name},path={sock}", "-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag=root", ] kcl += ["root=root", "rootfstype=virtiofs"] credentials = dict(config.credentials) def add_virtiofs_mount( sock: Path, dst: PathString, cmdline: list[PathString], credentials: dict[str, str], *, tag: str ) -> None: cmdline += [ "-chardev", f"socket,id={sock.name},path={sock}", "-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag={tag}", ] if "fstab.extra" not in credentials: credentials["fstab.extra"] = "" if credentials["fstab.extra"] and not credentials["fstab.extra"][-1] == "\n": credentials["fstab.extra"] += "\n" credentials["fstab.extra"] += f"{tag} {dst} virtiofs x-initrd.mount\n" if config.runtime_build_sources: with finalize_source_mounts(config, ephemeral=False) as mounts: for mount in mounts: sock = stack.enter_context(start_virtiofsd(config, mount.src, name=os.fspath(mount.src))) add_virtiofs_mount(sock, mount.dst, cmdline, credentials, tag=Path(mount.src).name) if config.build_dir: sock = stack.enter_context(start_virtiofsd(config, config.build_dir, name=os.fspath(config.build_dir))) add_virtiofs_mount(sock, "/work/build", cmdline, credentials, tag="build") for tree in config.runtime_trees: sock = stack.enter_context(start_virtiofsd(config, tree.source, name=os.fspath(tree.source))) add_virtiofs_mount( sock, Path("/root/src") / (tree.target or ""), cmdline, credentials, tag=tree.target.name if tree.target else tree.source.name, ) if want_scratch(config) or config.output_format in (OutputFormat.disk, OutputFormat.esp): cmdline += ["-device", "virtio-scsi-pci,id=mkosi"] if want_scratch(config): scratch = stack.enter_context(generate_scratch_fs(config)) cache = "cache.writeback=on,cache.direct=on,cache.no-flush=yes,aio=io_uring" cmdline += [ "-drive", f"if=none,id=scratch,file={scratch},format=raw,discard=on,{cache}", "-device", "scsi-hd,drive=scratch", ] kcl += [f"systemd.mount-extra=LABEL=scratch:/var/tmp:{config.distribution.filesystem()}"] if config.output_format == OutputFormat.cpio: cmdline += ["-initrd", fname] elif ( kernel and KernelType.identify(config, kernel) != KernelType.uki and "-initrd" not in args.cmdline and (config.output_dir_or_cwd() / config.output_split_initrd).exists() ): cmdline += ["-initrd", config.output_dir_or_cwd() / config.output_split_initrd] if config.output_format in (OutputFormat.disk, OutputFormat.esp): direct = fname.stat().st_size % resource.getpagesize() == 0 ephemeral = config.ephemeral cache = f"cache.writeback=on,cache.direct={yes_no(direct)},cache.no-flush={yes_no(ephemeral)},aio=io_uring" cmdline += ["-drive", f"if=none,id=mkosi,file={fname},format=raw,discard=on,{cache}", "-device", f"scsi-{'cd' if config.qemu_cdrom else 'hd'},drive=mkosi,bootindex=1"] if ( config.qemu_swtpm == ConfigFeature.enabled or ( config.qemu_swtpm == ConfigFeature.auto and firmware.is_uefi() and config.find_binary("swtpm") is not None ) ): sock = stack.enter_context(start_swtpm(config)) cmdline += ["-chardev", f"socket,id=chrtpm,path={sock}", "-tpmdev", "emulator,id=tpm0,chardev=chrtpm"] if config.architecture.is_x86_variant(): 
cmdline += ["-device", "tpm-tis,tpmdev=tpm0"] elif config.architecture.is_arm_variant(): cmdline += ["-device", "tpm-tis-device,tpmdev=tpm0"] if QemuDeviceNode.vhost_vsock in qemu_device_fds: addr, notifications = stack.enter_context(vsock_notify_handler()) credentials["vmm.notify_socket"] = addr if config.forward_journal: credentials["journal.forward_to_socket"] = stack.enter_context(start_journal_remote_vsock(config)) for k, v in credentials.items(): payload = base64.b64encode(v.encode()).decode() if config.architecture.supports_smbios(firmware): cmdline += ["-smbios", f"type=11,value=io.systemd.credential.binary:{k}={payload}"] # qemu's fw_cfg device only supports keys up to 55 characters long. elif config.architecture.supports_fw_cfg() and len(k) <= 55 - len("opt/io.systemd.credentials/"): f = stack.enter_context(tempfile.NamedTemporaryFile(prefix="mkosi-fw-cfg-", mode="w")) f.write(v) f.flush() cmdline += ["-fw_cfg", f"name=opt/io.systemd.credentials/{k},file={f.name}"] elif kernel: kcl += [f"systemd.set_credential_binary={k}:{payload}"] if ( kernel and ( KernelType.identify(config, kernel) != KernelType.uki or not config.architecture.supports_smbios(firmware) ) ): cmdline += ["-append", " ".join(kcl)] elif config.architecture.supports_smbios(firmware): cmdline += [ "-smbios", f"type=11,value=io.systemd.stub.kernel-cmdline-extra={' '.join(kcl).replace(',', ',,')}", "-smbios", f"type=11,value=io.systemd.boot.kernel-cmdline-extra={' '.join(kcl).replace(',', ',,')}", ] for _, drives in groupby(config.qemu_drives, key=lambda d: d.file_id): file = stack.enter_context(finalize_drive(drives[0])) for drive in drives: arg = f"if=none,id={drive.id},file={file},format=raw,file.locking=off" if drive.options: arg += f",{drive.options}" cmdline += ["-drive", arg] cmdline += config.qemu_args cmdline += args.cmdline if cid is not None: stack.enter_context(finalize_state(config, cid)) # Reopen stdin, stdout and stderr to give qemu a private copy of them. # This is a mitigation for the case when running mkosi under meson and # one or two of the three are redirected and their pipe might block, # but qemu opens all of them non-blocking because at least one of them # is opened this way. stdin = try_or( lambda: os.open(f"/proc/self/fd/{sys.stdin.fileno()}", os.O_RDONLY), OSError, sys.stdin.fileno(), ) stdout = try_or( lambda: os.open(f"/proc/self/fd/{sys.stdout.fileno()}", os.O_WRONLY), OSError, sys.stdout.fileno(), ) stderr = try_or( lambda: os.open(f"/proc/self/fd/{sys.stderr.fileno()}", os.O_WRONLY), OSError, sys.stderr.fileno(), ) name = f"mkosi-{config.machine_or_name().replace('_', '-')}" with spawn( cmdline, stdin=stdin, stdout=stdout, stderr=stderr, pass_fds=qemu_device_fds.values(), env=os.environ | config.environment, log=False, foreground=True, sandbox=config.sandbox(binary=qemu, network=True, devices=True, relaxed=True), scope=scope_cmd( name=name, description=f"mkosi Virtual Machine {name}", properties=config.unit_properties, ), ) as (proc, innerpid): # We have to close these before we wait for qemu otherwise we'll deadlock as qemu will never exit. 
for fd in qemu_device_fds.values(): os.close(fd) register_machine(config, innerpid, fname) if proc.wait() == 0 and (status := int(notifications.get("EXIT_STATUS", 0))): raise subprocess.CalledProcessError(status, cmdline) def run_ssh(args: Args, config: Config) -> None: with flock(INVOKING_USER.runtime_dir() / "machine"): if not (p := INVOKING_USER.runtime_dir() / "machine" / f"{config.machine_or_name()}.json").exists(): die(f"{p} not found, cannot SSH into virtual machine {config.machine_or_name()}", hint="Is the machine running and was it built with Ssh=yes and QemuVsock=yes?") state = json.loads(p.read_text()) if not state["SshKey"]: die("An SSH key must be configured when booting the image to use 'mkosi ssh'", hint="Use 'mkosi genkey' to generate a new SSH key and certificate") cmd: list[PathString] = [ "ssh", "-i", state["SshKey"], "-F", "none", # Silence known hosts file errors/warnings. "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", "-o", "LogLevel=ERROR", "-o", f"ProxyCommand={state['ProxyCommand']}", "root@mkosi", ] cmd += args.cmdline run( cmd, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.environment, log=False, sandbox=config.sandbox(binary="ssh", network=True, devices=True, relaxed=True), ) mkosi-24.3/mkosi/resources/000077500000000000000000000000001465176501400157315ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/__init__.py000066400000000000000000000000001465176501400200300ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/completion.bash000066400000000000000000000034101465176501400207370ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # shellcheck shell=bash _mkosi_compgen_files() { compgen -f -- "$1" } _mkosi_compgen_dirs() { compgen -d -- "$1" } _mkosi_completion() { local -a _mkosi_options _mkosi_verbs local -A _mkosi_nargs _mkosi_choices _mkosi_compgen ##VARIABLEDEFINITIONS## # completing_program="$1" local completing_word="$2" local completing_word_preceding="$3" if [[ "$completing_word" =~ ^- ]] # completing an option then readarray -t COMPREPLY < <(compgen -W "${_mkosi_options[*]}" -- "${completing_word}") elif [[ "$completing_word_preceding" =~ ^- ]] # the previous word was an option then current_option="${completing_word_preceding}" current_option_nargs="${_mkosi_nargs[${current_option}]}" current_option_choices="${_mkosi_choices[${current_option}]}" current_option_compgen="${_mkosi_compgen[${current_option}]}" if [[ -n "${current_option_compgen}" ]] then readarray -t COMPREPLY < <("${current_option_compgen}" "${completing_word}") fi readarray -t COMPREPLY -O "${#COMPREPLY[@]}" \ < <(compgen -W "${current_option_choices}" -- "${completing_word}") if [[ "${current_option_nargs}" == "?" 
]] then readarray -t COMPREPLY -O "${#COMPREPLY[@]}" \ < <(compgen -W "${_mkosi_verbs[*]}" -- "${completing_word}") fi else # the preceding word wasn't an option, so we are doing position # arguments now and all of them are verbs readarray -t COMPREPLY < <(compgen -W "${_mkosi_verbs[*]}" -- "${completing_word}") fi } complete -o filenames -F _mkosi_completion mkosi complete -o filenames -F _mkosi_completion python -m mkosi mkosi-24.3/mkosi/resources/completion.zsh000066400000000000000000000010561465176501400206320ustar00rootroot00000000000000#compdef mkosi # SPDX-License-Identifier: LGPL-2.1-or-later # shellcheck shell=zsh _mkosi_verb(){ if (( CURRENT == 1 )); then _describe -t commands 'mkosi verb' _mkosi_verbs else local curcontext="$curcontext" cmd="${${_mkosi_verbs[(r)$words[1]:*]%%:*}}" if (( $#cmd )); then if (( $+functions[_mkosi_$cmd] )); then _mkosi_$cmd else _message "no more options" fi else _message "unknown mkosi verb: $words[1]" fi fi } mkosi-24.3/mkosi/resources/mkosi-initrd/000077500000000000000000000000001465176501400203425ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.conf000066400000000000000000000050111465176501400223300ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Output] Output=initrd Format=cpio ManifestFormat= [Content] BuildSources= Bootable=no MakeInitrd=yes CleanPackageMetadata=yes Packages= systemd # sine qua non udev bash # for emergency logins less # this makes 'systemctl' much nicer to use ;) p11-kit # dl-opened by systemd lvm2 RemoveFiles= # we don't need this after the binary catalogs have been built /usr/lib/systemd/catalog /etc/udev/hwdb.d /usr/lib/udev/hwdb.d # this is not needed by anything updated in the last 20 years /etc/services # Including kernel images in the initrd is generally not useful. # This also stops mkosi from extracting the kernel image out of the image as a separate output. /usr/lib/modules/*/vmlinuz* /usr/lib/modules/*/vmlinux* /usr/lib/modules/*/System.map # Configure locale explicitly so that all other locale data is stripped on distros whose package manager supports it. Locale=C.UTF-8 WithDocs=no # Make sure various core modules are always included in the initrd. KernelModulesInclude= /ahci.ko /autofs4.ko /binfmt_misc.ko /btrfs.ko /configfs.ko /dm-crypt.ko /dm-integrity.ko /dm-mod.ko /dm-multipath.ko /dm-raid.ko /dm-verity.ko /dmi-sysfs.ko /efi-pstore.ko /efivarfs.ko /erofs.ko /ext4.ko /loop.ko /nvme.ko /overlay.ko /qemu_fw_cfg.ko /raid[0-9]*.ko /scsi_mod.ko /sd_mod.ko /sg.ko /squashfs.ko /vfat.ko /virtio_balloon.ko /virtio_console.ko /virtio_mmio.ko /virtio_net.ko /virtio_pci.ko /virtio_scsi.ko /virtio-rng.ko /virtiofs.ko /vmw_vsock_virtio_transport.ko /vsock.ko /x_tables.ko /xfs.ko ^fs/nls/ crypto/ mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/000077500000000000000000000000001465176501400226325ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-arch.conf000066400000000000000000000015601465176501400246360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=arch [Content] Packages= gzip # For compressed keymap unpacking by loadkeys e2fsprogs xfsprogs # Various libraries that are dlopen'ed by systemd libfido2 tpm2-tss util-linux RemoveFiles= # Arch Linux doesn't split their gcc-libs package so we manually remove # unneeded stuff here to make sure it doesn't end up in the initrd. 
/usr/lib/libgfortran.so* /usr/lib/libgo.so* /usr/lib/libgomp.so* /usr/lib/libgphobos.so* /usr/lib/libobjc.so* /usr/lib/libgdruntime.so* # Remove all files that are only required for development. /usr/lib/*.a /usr/include/* /usr/share/i18n/* /usr/share/hwdata/* /usr/share/iana-etc/* /usr/share/locale/* mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-centos-fedora.conf000066400000000000000000000010631465176501400264500ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|fedora Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|rhel [Content] Packages= # Various libraries that are dlopen'ed by systemd libfido2 tpm2-tss # File system checkers for supported root file systems e2fsprogs xfsprogs # fsck.btrfs is a dummy, checking is done in the kernel. RemovePackages= # Various packages pull in shadow-utils to create users, we can remove it afterwards shadow-utils mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-centos.conf000066400000000000000000000002551465176501400252140ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|rhel [Content] Packages= util-linux mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-debian-ubuntu/000077500000000000000000000000001465176501400256125ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-debian-ubuntu/mkosi.conf000066400000000000000000000013441465176501400276050ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|ubuntu [Content] Packages= kmod # Not pulled in as a dependency on Debian/Ubuntu dmsetup # Not pulled in as a dependency on Debian/Ubuntu ?exact-name(systemd-cryptsetup) ?exact-name(systemd-repart) libcryptsetup12 # xfsprogs pulls in python on Debian (???) and XFS generally # isn't used on Debian so we don't install xfsprogs. e2fsprogs util-linux # Various libraries that are dlopen'ed by systemd libfido2-1 RemovePackages= # TODO: Remove dpkg if dash ever loses its dependency on it. # dpkg RemoveFiles= /usr/share/locale/* mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-debian-ubuntu/mkosi.conf.d/000077500000000000000000000000001465176501400301025ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-debian-ubuntu/mkosi.conf.d/10-libtss.conf000066400000000000000000000004101465176501400324620ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian [TriggerMatch] Distribution=ubuntu Release=!focal [Content] Packages= ^libtss2-esys-[0-9.]+-0$ ^libtss2-mu[0-9.-]+$ libtss2-rc0 libtss2-tcti-device0 mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-fedora.conf000066400000000000000000000001671465176501400251630ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Content] Packages= util-linux-core mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-opensuse.conf000066400000000000000000000010521465176501400255560ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Content] Packages= patterns-base-minimal_base # Various libraries that are dlopen'ed by systemd libfido2-1 libtss2-esys0 libtss2-mu0 libtss2-rc0 libtss2-tcti-device0 # File system checkers for supported root file systems e2fsprogs xfsprogs # fsck.btrfs is a dummy, checking is done in the kernel. 
util-linux RemoveFiles= /usr/share/locale/* /usr/etc/services mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/20-stub.conf000066400000000000000000000001651465176501400246770ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Format=uki Distribution=!arch [Content] Packages=systemd-boot mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.extra/000077500000000000000000000000001465176501400226065ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/000077500000000000000000000000001465176501400234175ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/000077500000000000000000000000001465176501400241655ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/000077500000000000000000000000001465176501400256555ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system-preset/000077500000000000000000000000001465176501400305015ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system-preset/99-mkosi.preset000066400000000000000000000002111465176501400333000ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # Make sure that services are disabled by default (primarily for Debian/Ubuntu). disable * mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system/000077500000000000000000000000001465176501400272015ustar00rootroot00000000000000systemd-cryptsetup@.service.d/000077500000000000000000000000001465176501400350135ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/systemcredential.conf000066400000000000000000000004501465176501400377730ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system/systemd-cryptsetup@.service.d[Service] ImportCredential=cryptsetup.* # Compat with older systemd versions that don't support ImportCredential=. 
LoadCredential=cryptsetup.passphrase LoadCredential=cryptsetup.fido2-pin LoadCredential=cryptsetup.tpm2-pin LoadCredential=cryptsetup.luks2-pin LoadCredential=cryptsetup.pkcs11-pin mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/000077500000000000000000000000001465176501400251305ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/000077500000000000000000000000001465176501400265045ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/10-mkosi-initrd-dm.rules000066400000000000000000000004161465176501400330060ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0-only # Copied from https://github.com/dracutdevs/dracut/blob/059/modules.d/90dm/11-dm.rules SUBSYSTEM!="block", GOTO="dm_end" KERNEL!="dm-[0-9]*", GOTO="dm_end" ACTION!="add|change", GOTO="dm_end" OPTIONS+="db_persist" LABEL="dm_end" mkosi-24.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/10-mkosi-initrd-md.rules000066400000000000000000000017531465176501400330130ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0-only # Copied from https://github.com/dracutdevs/dracut/blob/059/modules.d/90mdraid/59-persistent-storage-md.rules SUBSYSTEM!="block", GOTO="md_end" ACTION!="add|change", GOTO="md_end" # Also don't process disks that are slated to be a multipath device ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="md_end" KERNEL!="md[0-9]*|md_d[0-9]*|md/*", KERNEL!="md*", GOTO="md_end" # partitions have no md/{array_state,metadata_version} ENV{DEVTYPE}=="partition", GOTO="md_ignore_state" # container devices have a metadata version of e.g. 'external:ddf' and # never leave state 'inactive' ATTR{md/metadata_version}=="external:[A-Za-z]*", ATTR{md/array_state}=="inactive", GOTO="md_ignore_state" TEST!="md/array_state", GOTO="md_end" ATTR{md/array_state}=="|clear|inactive", GOTO="md_end" LABEL="md_ignore_state" IMPORT{program}="/sbin/mdadm --detail --export $devnode" IMPORT{builtin}="blkid" OPTIONS+="link_priority=100" OPTIONS+="watch" OPTIONS+="db_persist" LABEL="md_end" mkosi-24.3/mkosi/resources/mkosi-tools/000077500000000000000000000000001465176501400202115ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf000066400000000000000000000011401465176501400221760ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Output] Format=directory Output=mkosi.tools ManifestFormat= [Content] BuildSources= Bootable=no Packages= acl attr bash bubblewrap ca-certificates coreutils cpio curl diffutils dnf dosfstools e2fsprogs findutils grep jq kmod less mtools nano openssl sed socat strace swtpm systemd tar util-linux xfsprogs zstd SELinuxRelabel=no mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/000077500000000000000000000000001465176501400225015ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-arch.conf000066400000000000000000000011421465176501400245010ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=arch [Content] Packages= apt archlinux-keyring base btrfs-progs dbus-broker dbus-broker-units debian-archive-keyring distribution-gpg-keys dpkg edk2-ovmf erofs-utils git grub openssh pacman pesign python-cryptography qemu-base reprepro sbsigntools shadow squashfs-tools systemd-ukify ubuntu-keyring virt-firmware virtiofsd xz 
mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos-fedora/000077500000000000000000000000001465176501400254505ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos-fedora/mkosi.conf000066400000000000000000000010671465176501400274450ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|rhel Distribution=|fedora [Content] Packages= createrepo_c dnf-plugins-core git-core grub2-tools openssh-clients policycoreutils python3-cryptography qemu-img qemu-kvm-core shadow-utils squashfs-tools swtpm-tools systemd-container systemd-journal-remote systemd-udev virt-firmware virtiofsd xz mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos-fedora/mkosi.conf.d/000077500000000000000000000000001465176501400277405ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos-fedora/mkosi.conf.d/10-uefi.conf000066400000000000000000000002351465176501400317550ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] HostArchitecture=|x86-64 HostArchitecture=|arm64 [Content] Packages= edk2-ovmf pesign mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/000077500000000000000000000000001465176501400242125ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/mkosi.conf000066400000000000000000000001121465176501400261750ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=centos mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/mkosi.conf.d/000077500000000000000000000000001465176501400265025ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/mkosi.conf.d/10-epel.conf000066400000000000000000000001541465176501400305140ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Release=9 [Distribution] Repositories=epel,epel-next mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/mkosi.conf.d/20-epel-packages.conf000066400000000000000000000003571465176501400322760ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Repositories=epel [Content] Packages= apt archlinux-keyring debian-keyring distribution-gpg-keys pacman sbsigntools ubu-keyring mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-ubuntu/000077500000000000000000000000001465176501400254615ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-ubuntu/mkosi.conf000066400000000000000000000015231465176501400274530ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|ubuntu [Content] Packages= ?exact-name(systemd-repart) ?exact-name(systemd-ukify) apt archlinux-keyring btrfs-progs createrepo-c debian-archive-keyring erofs-utils git-core grub-common libarchive-tools libcryptsetup12 libtss2-dev makepkg openssh-client ovmf pacman-package-manager pesign policycoreutils python3-cryptography python3-pefile qemu-efi-aarch64 qemu-system reprepro sbsigntool squashfs-tools swtpm-tools systemd-container systemd-coredump systemd-journal-remote uidmap xz-utils zypper 
mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-ubuntu/mkosi.conf.d/000077500000000000000000000000001465176501400277515ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-ubuntu/mkosi.conf.d/grub.conf000066400000000000000000000001631465176501400315570ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= grub-pc-bin mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-ubuntu/mkosi.conf.d/systemd-boot.conf000066400000000000000000000002061465176501400332470ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|!ubuntu Release=|!jammy [Content] Packages= systemd-boot ubuntu-keyring.conf000066400000000000000000000002131465176501400335250ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|!debian Release=|!bookworm [Content] Packages= ubuntu-keyring mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-ubuntu/mkosi.conf.d/virtiofsd.conf000066400000000000000000000003661465176501400326360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # Install virtiofsd except on Ubuntu Jammy and Debian Bookworm. [Match] Distribution=|!ubuntu Release=|!jammy [Match] Distribution=|!debian Release=|!bookworm [Content] Packages= virtiofsd mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-fedora/000077500000000000000000000000001465176501400241575ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-fedora/mkosi.conf000066400000000000000000000006441465176501400261540ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Content] Packages= apt archlinux-keyring btrfs-progs debian-keyring distribution-gpg-keys dnf5 dnf5-plugins erofs-utils pacman qemu-system-aarch64-core qemu-system-ppc-core qemu-system-s390x-core reprepro ubu-keyring zypper mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-fedora/mkosi.conf.d/000077500000000000000000000000001465176501400264475ustar00rootroot00000000000000mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-fedora/mkosi.conf.d/10-uefi.conf000066400000000000000000000002461465176501400304660ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] HostArchitecture=|x86-64 HostArchitecture=|arm64 [Content] Packages= sbsigntools systemd-ukify mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-opensuse.conf000066400000000000000000000014551465176501400254340ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Content] Packages= btrfs-progs ca-certificates-mozilla createrepo_c distribution-gpg-keys dnf-plugins-core dnf5 dnf5-plugins erofs-utils git-core glibc-gconv-modules-extra grep openssh-clients ovmf patterns-base-minimal_base pesign policycoreutils python3-pefile qemu-headless qemu-ipxe qemu-ovmf-x86_64 qemu-uefi-aarch64 reprepro sbsigntools shadow squashfs systemd-boot systemd-container systemd-coredump systemd-experimental systemd-journal-remote virtiofsd xz zypper mkosi-24.3/mkosi/resources/mkosi-tools/mkosi.prepare.chroot000077500000000000000000000002521465176501400242120ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later if [ "$1" = "final" ] && command -v pacman-key; then pacman-key --init pacman-key --populate archlinux fi 
mkosi-24.3/mkosi/resources/mkosi.md000066400000000000000000004027621465176501400174060ustar00rootroot00000000000000% mkosi(1)
%
%

# NAME

mkosi — Build Bespoke OS Images

# SYNOPSIS

`mkosi [options…] summary`

`mkosi [options…] build [command line…]`

`mkosi [options…] shell [command line…]`

`mkosi [options…] boot [nspawn settings…]`

`mkosi [options…] qemu [qemu parameters…]`

`mkosi [options…] ssh [command line…]`

`mkosi [options…] journalctl [command line…]`

`mkosi [options…] coredumpctl [command line…]`

`mkosi [options…] clean`

`mkosi [options…] serve`

`mkosi [options…] burn <device>`

`mkosi [options…] bump`

`mkosi [options…] genkey`

`mkosi [options…] documentation`

`mkosi [options…] dependencies`

`mkosi [options…] help`

# DESCRIPTION

`mkosi` is a tool for easily building customized OS images. It's a fancy wrapper around `dnf --installroot`, `apt`, `pacman` and `zypper` that may generate disk images with a number of bells and whistles.

## Command Line Verbs

The following command line verbs are known:

`summary`
:   Outputs a human-readable summary of all options used for building an image. This will parse the command line and `mkosi.conf` file as it would do on `build`, but only output what it is configured for and not actually build anything.

`build`
:   This builds the image based on the settings passed in on the command line or read from configuration files. This command is the default if no verb is explicitly specified. If any command line arguments are specified, these are passed directly to the build script if one is defined.

`shell`
:   This builds the image if it is not built yet, and then invokes `systemd-nspawn` to acquire an interactive shell prompt in it. An optional command line may be specified after the `shell` verb, to be invoked in place of the shell in the container. Use `-f` in order to rebuild the image unconditionally before acquiring the shell, see below. This command must be executed as `root`.

`boot`
:   Similar to `shell`, but boots the image using `systemd-nspawn`. An optional command line may be specified after the `boot` verb, which can contain extra nspawn options as well as arguments which are passed as the *kernel command line* to the init system in the image.

`qemu`
:   Similar to `boot`, but uses the configured virtual machine monitor (by default `qemu`) to boot up the image, i.e. instead of container virtualization, virtual machine virtualization is used. How extra command line arguments are interpreted depends on the configured virtual machine monitor. See `VirtualMachineMonitor=` for more information.

`ssh`
:   When the image is built with the `Ssh=yes` option, this command connects to a booted virtual machine (`qemu`) via SSH. Make sure to run `mkosi ssh` with the same config as `mkosi build` so that it has the necessary information available to connect to the running virtual machine via SSH. Specifically, the SSH private key from the `SshKey=` setting is used to connect to the virtual machine. Use `mkosi genkey` to automatically generate a key and certificate that will be picked up by mkosi. Any arguments passed after the `ssh` verb are passed as arguments to the `ssh` invocation. To connect to a container, use `machinectl login` or `machinectl shell`.

    The `Machine=` option can be used to give the machine a custom hostname when booting it which can later be used to ssh into the image (e.g. `mkosi --machine=mymachine qemu` followed by `mkosi --machine=mymachine ssh`).

`journalctl`
:   Uses `journalctl` to inspect the journal inside the image.

    Any arguments specified after the `journalctl` verb are appended to the `journalctl` invocation.

    If `ForwardJournal=` is specified, this verb will operate on the forwarded journal instead of the journal inside the image.

`coredumpctl`
:   Uses `coredumpctl` to look for coredumps inside the image.

    Any arguments specified after the `coredumpctl` verb are appended to the `coredumpctl` invocation.

    If `ForwardJournal=` is specified, this verb will operate on the forwarded journal instead of the image. Note that this requires configuring systemd-coredump to store coredumps in the journal.

`clean`
:   Remove build artifacts generated on a previous build. If combined with `-f`, also removes incremental build cache images. If `-f` is specified twice, also removes any package cache.

`serve`
:   This builds the image if it is not built yet, and then serves the output directory (i.e. usually `mkosi.output/`, see below) via a small embedded HTTP server, listening on port 8081. Combine with `-f` in order to rebuild the image unconditionally before serving it. This command is useful for testing network based acquisition of OS images, for example via `machinectl pull-raw …` and `machinectl pull-tar …`.

`burn <device>`
:   This builds the image if it is not built yet, and then writes it to the specified block device. The partition contents are written as-is, but the GPT partition table is corrected to match sector and disk size of the specified medium.

`bump`
:   Bumps the image version from `mkosi.version` and writes the resulting version string to `mkosi.version`. This is useful for implementing a simple versioning scheme: each time this verb is called the version is bumped in preparation for the subsequent build. Note that `--auto-bump`/`-B` may be used to automatically bump the version after each successful build.

`genkey`
:   Generate a pair of SecureBoot keys for usage with the `SecureBootKey=`/`--secure-boot-key=` and `SecureBootCertificate=`/`--secure-boot-certificate=` options.

`documentation`
:   Show mkosi's documentation. By default this verb will try several ways to output the documentation, but a specific option can be chosen with the `--doc-format` option. Distro packagers are encouraged to add a file `mkosi.1` into the `mkosi/resources` directory of the Python package, if it is missing, as well as to install it in the appropriate search path for man pages. The man page can be generated from the markdown file `mkosi/resources/mkosi.md` e.g. via `pandoc -t man -s -o mkosi.1 mkosi.md`.

`dependencies`
:   Output the list of packages required by mkosi to build and boot images. This list can be piped directly to a package manager to install the packages. For example, if the host system uses the dnf package manager, the packages could be installed as follows:

    ```sh
    mkosi dependencies | xargs -d '\n' dnf install
    ```

`help`
:   This verb is equivalent to the `--help` switch documented below: it shows a brief usage explanation.

## Commandline-only Options

Those settings cannot be configured in the configuration files.

`--force`, `-f`
:   Replace the output file if it already exists, when building an image. By default when building an image and an output artifact already exists `mkosi` will refuse operation. Specify this option once to delete all build artifacts from a previous run before re-building the image. If incremental builds are enabled, specifying this option twice will ensure the intermediary cache files are removed, too, before the re-build is initiated.

    If a package cache is used (also see the **Files** section below), specifying this option thrice will ensure the package cache is removed too, before the re-build is initiated.

    For the `clean` operation this option has a slightly different effect: by default the verb will only remove build artifacts from a previous run, when specified once the incremental cache files are deleted too, and when specified twice the package cache is also removed.

`--directory=`, `-C`
:   Takes a path to a directory. `mkosi` switches to this directory before doing anything. Note that the various configuration files are searched for in this directory, hence using this option is an effective way to build a project located in a specific directory.

`--debug=`
:   Enable additional debugging output.

`--debug-shell`
:   When executing a command in the image fails, mkosi will start an interactive shell in the image allowing further debugging.

`--debug-workspace=`
:   When an error occurs, the workspace directory will not be deleted.

`--version`
:   Show package version.

`--help`, `-h`
:   Show brief usage information.

`--genkey-common-name=`
:   Common name to be used when generating keys via mkosi's `genkey` command. Defaults to `mkosi of %u`, where `%u` expands to the username of the user invoking mkosi.

`--genkey-valid-days=`
:   Number of days that the keys should remain valid when generating keys via mkosi's `genkey` command. Defaults to two years (730 days).

`--auto-bump=`, `-B`
:   If specified, after each successful build the version is bumped in a fashion equivalent to the `bump` verb, in preparation for the next build. This is useful for simple, linear version management: each build in a series will have a version number one higher than the previous one.

`--doc-format`
:   The format to show the documentation in. Supports the values `markdown`, `man`, `pandoc`, `system` and `auto`. In the case of `markdown` the documentation is shown in the original Markdown format. `man` shows the documentation in man page format, if it is available. `pandoc` will generate the man page format on the fly, if `pandoc` is available. `system` will show the system-wide man page for mkosi, which may or may not correspond to the version you are using, depending on how you installed mkosi. `auto`, which is the default, will try all methods in the order `man`, `pandoc`, `markdown`, `system`.

`--json`
:   Show the summary output as JSON-SEQ.

## Supported output formats

The following output formats are supported:

* Raw *GPT* disk image, created using systemd-repart (*disk*)
* Plain directory, containing the OS tree (*directory*)
* Tar archive (*tar*)
* CPIO archive (*cpio*)

The output format may also be set to *none* to have mkosi produce no image at all. This can be useful if you only want to use the image to produce another output in the build scripts (e.g. build an rpm).

When a *GPT* disk image is created, repart partition definition files may be placed in `mkosi.repart/` to configure the generated disk image.

It is highly recommended to run `mkosi` on a file system that supports reflinks such as XFS and btrfs and to keep all related directories on the same file system. This allows mkosi to create images very quickly by using reflinks to perform copying via copy-on-write operations.

## Configuration Settings

The following settings can be set through configuration files (the syntax with `SomeSetting=value`) and on the command line (the syntax with `--some-setting=value`). For some command line parameters, a single-letter shortcut is also allowed.
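
For instance, the distribution to build (see the `[Distribution]` section below) can be selected either on the command line with `mkosi --distribution=fedora …` or with an equivalent snippet in `mkosi.conf`:

```ini
[Distribution]
Distribution=fedora
```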
In the configuration files, the setting must be in the appropriate section, so the settings are grouped by section below. Configuration is parsed in the following order: * The command line arguments are parsed * `mkosi.local.conf` is parsed if it exists. This file should be in the gitignore (or equivalent) and is intended for local configuration. * Any default paths (depending on the option) are configured if the corresponding path exists. * `mkosi.conf` is parsed if it exists in the directory configured with `--directory=` or the current working directory if `--directory=` is not used. * If a profile is defined, its configuration is parsed from the `mkosi.profiles/` directory. * `mkosi.conf.d/` is parsed in the same directory if it exists. Each directory and each file with the `.conf` extension in `mkosi.conf.d/` is parsed. Any directory in `mkosi.conf.d` is parsed as if it were a regular top level directory. * Subimages are parsed from the `mkosi.images` directory if it exists. Note that settings configured via the command line always override settings configured via configuration files. If the same setting is configured more than once via configuration files, later assignments override earlier assignments except for settings that take a collection of values. Also, settings read from `mkosi.local.conf` will override settings from configuration files that are parsed later but not settings specified on the CLI. Settings that take a collection of values are merged by appending the new values to the previously configured values. Assigning the empty string to such a setting removes all previously assigned values, and overrides any configured default values as well. The values specified on the CLI are appended after all the values from configuration files. To conditionally include configuration files, the `[Match]` section can be used. A `[Match]` section consists of individual conditions. Conditions can use a pipe symbol (`|`) after the equals sign (`…=|…`), which causes the condition to become a triggering condition. The config file will be included if the logical AND of all non-triggering conditions and the logical OR of all triggering conditions is satisfied. To negate the result of a condition, prefix the argument with an exclamation mark. If an argument is prefixed with the pipe symbol and an exclamation mark, the pipe symbol must be passed first, and the exclamation second. Note that `[Match]` conditions compare against the current values of specific settings, and do not take into account changes made to the setting in configuration files that have not been parsed yet (settings specified on the CLI are taken into account). Also note that matching against a setting and then changing its value afterwards in a different config file may lead to unexpected results. The `[Match]` section of a `mkosi.conf` file in a directory applies to the entire directory. If the conditions are not satisfied, the entire directory is skipped. The `[Match]` sections of files in `mkosi.conf.d/` and `mkosi.local.conf` only apply to the file itself. If there are multiple `[Match]` sections in the same configuration file, each of them has to be satisfied in order for the configuration file to be included. Specifically, triggering conditions only apply to the current `[Match]` section and are reset between multiple `[Match]` sections. 

As an example, the following will only match if the output format is one of `disk` or `directory` and the architecture is one of `x86-64` or `arm64`:

```ini
[Match]
Format=|disk
Format=|directory

[Match]
Architecture=|x86-64
Architecture=|arm64
```

The `[TriggerMatch]` section can be used to indicate triggering match sections. These are identical to triggering conditions except they apply to the entire match section instead of just a single condition. As an example, the following will match if the distribution is `debian` and the release is `bookworm` or if the distribution is `ubuntu` and the release is `focal`.

```ini
[TriggerMatch]
Distribution=debian
Release=bookworm

[TriggerMatch]
Distribution=ubuntu
Release=focal
```

The semantics of conditions in `[TriggerMatch]` sections is the same as in `[Match]`, i.e. all normal conditions are joined by a logical AND and all triggering conditions are joined by a logical OR. When mixing `[Match]` and `[TriggerMatch]` sections, a match is achieved when all `[Match]` sections match and at least one `[TriggerMatch]` section matches. The absence of any match sections is valued as true. Logically this means:

```
(⋀ᵢ Matchᵢ) ∧ (⋁ᵢ TriggerMatchᵢ)
```

Command line options that take no argument are shown without `=` in their long version. In the config files, they should be specified with a boolean argument: either `1`, `yes`, or `true` to enable, or `0`, `no`, `false` to disable.

### [Distribution] Section

`Distribution=`, `--distribution=`, `-d`
:   The distribution to install in the image. Takes one of the following arguments: `fedora`, `debian`, `ubuntu`, `arch`, `opensuse`, `mageia`, `centos`, `rhel`, `rhel-ubi`, `openmandriva`, `rocky`, `alma`, `custom`. If not specified, defaults to the distribution of the host or `custom` if the distribution of the host is not a supported distribution.

`Release=`, `--release=`, `-r`
:   The release of the distribution to install in the image. The precise syntax of the argument this takes depends on the distribution used, and is either a numeric string (in case of Fedora Linux, CentOS, …, e.g. `29`), or a distribution version name (in case of Debian, Ubuntu, …, e.g. `artful`). Defaults to a recent version of the chosen distribution, or the version of the distribution running on the host if it matches the configured distribution.

`Architecture=`, `--architecture=`
:   The architecture to build the image for. The architectures that are actually supported depend on the distribution used and whether a bootable image is requested or not. When building for a foreign architecture, you'll also need to install and register a user mode emulator for that architecture.

    One of the following architectures can be specified per image built: `alpha`, `arc`, `arm`, `arm64`, `ia64`, `loongarch64`, `mips64-le`, `mips-le`, `parisc`, `ppc`, `ppc64`, `ppc64-le`, `riscv32`, `riscv64`, `s390`, `s390x`, `tilegx`, `x86`, `x86-64`.

`Mirror=`, `--mirror=`, `-m`
:   The mirror to use for downloading the distribution packages. Expects a mirror URL as argument. If not provided, the default mirror for the distribution is used.

    The default mirrors for each distribution are as follows (unless specified, the same mirror is used for all architectures):

    |                | x86-64                            | aarch64                        |
    |----------------|-----------------------------------|--------------------------------|
    | `debian`       | http://deb.debian.org/debian      |                                |
    | `arch`         | https://geo.mirror.pkgbuild.com   | http://mirror.archlinuxarm.org |
    | `opensuse`     | http://download.opensuse.org      |                                |
    | `ubuntu`       | http://archive.ubuntu.com         | http://ports.ubuntu.com        |
    | `centos`       | https://mirrors.centos.org        |                                |
    | `rocky`        | https://mirrors.rockylinux.org    |                                |
    | `alma`         | https://mirrors.almalinux.org     |                                |
    | `fedora`       | https://mirrors.fedoraproject.org |                                |
    | `rhel-ubi`     | https://cdn-ubi.redhat.com        |                                |
    | `mageia`       | https://www.mageia.org            |                                |
    | `openmandriva` | http://mirrors.openmandriva.org   |                                |

`LocalMirror=`, `--local-mirror=`
:   The mirror will be used as a local, plain and direct mirror instead of using it as a prefix for the full set of repositories normally supported by distributions. Useful for fully offline builds with a single repository. Supported on deb/rpm/arch based distributions. Overrides `--mirror=`, but only for the local mkosi build; it will not be configured inside the final image. `--mirror=` (or the default repository) will be configured inside the final image instead.

`RepositoryKeyCheck=`, `--repository-key-check=`
:   Controls signature/key checks when using repositories, enabled by default. Useful to disable checks when combined with `--local-mirror=` and using only a repository from a local filesystem. Not used for DNF-based distros yet.

`Repositories=`, `--repositories=`
:   Enable package repositories that are disabled by default. This can be used to enable the EPEL repos for CentOS or different components of the Debian/Ubuntu repositories.

`CacheOnly=`, `--cache-only=`
:   Takes one of `auto`, `metadata`, `always` or `never`. Defaults to `auto`. If `always`, the package manager is instructed not to contact the network. This provides a minimal level of reproducibility, as long as the package cache is already fully populated. If set to `metadata`, the package manager can still download packages, but we won't sync the repository metadata. If set to `auto`, the repository metadata is synced unless we have a cached image (see `Incremental=`) and packages can be downloaded during the build. If set to `never`, repository metadata is always synced and packages can be downloaded during the build.

`PackageManagerTrees=`, `--package-manager-tree=`
:   Takes a comma separated list of colon separated path pairs. The first path of each pair refers to a directory to copy into the OS tree before invoking the package manager. This option is similar to the `SkeletonTrees=` option, but installs the files to a subdirectory of the workspace directory instead of the OS tree. This subdirectory of the workspace is used to configure the package manager.

    If the `mkosi.pkgmngr/` directory is found in the local directory it is used for this purpose with the root directory as target (also see the **Files** section below). If not configured in any way this value will default to the same value as `SkeletonTrees=`.

    `mkosi` will look for the package manager configuration and related files in the configured package manager trees. Unless specified otherwise, it will use the configuration files from their canonical locations in `/usr` or `/etc` in the package manager trees. For example, it will look for `etc/dnf/dnf.conf` in the package manager trees if `dnf` is used to install packages.
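
    **Example**: assuming `dnf` is the package manager in use, a package manager tree that pins a custom `dnf` configuration and an extra repository definition might be laid out as follows (the `extra.repo` file name is purely illustrative):

    ```
    mkosi.pkgmngr
    └── etc
        ├── dnf
        │   └── dnf.conf
        └── yum.repos.d
            └── extra.repo
    ```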

    `SkeletonTrees=` and `PackageManagerTrees=` fulfill similar roles. Use `SkeletonTrees=` if you want the files to be present in the final image. Use `PackageManagerTrees=` if you don't want the files to be present in the final image, e.g. when building an initrd or if you want to refer to paths outside of the image in your repository configuration.

### [Output] Section

`Format=`, `--format=`, `-t`
:   The image format type to generate. One of `directory` (for generating an OS image directly in a local directory), `tar` (similar, but a tarball of the OS image is generated), `cpio` (similar, but a cpio archive is generated), `disk` (a block device OS image with a GPT partition table), `uki` (a unified kernel image with the OS image in the `.initrd` PE section), `esp` (`uki` but wrapped in a disk image with only an ESP partition), `oci` (a directory compatible with the OCI image specification), `sysext`, `confext`, `portable` or `none` (the OS image is solely intended as a build image to produce another artifact).

    If the `disk` output format is used, the disk image is generated using `systemd-repart`. The repart partition definition files to use can be configured using the `RepartDirectories=` setting or via `mkosi.repart/`. When verity partitions are configured using systemd-repart's `Verity=` setting, mkosi will automatically parse the verity hash partition's roothash from systemd-repart's JSON output and include it in the kernel command line of every unified kernel image built by mkosi.

    If the `none` output format is used, the outputs from a previous build are not removed, but clean scripts (see `CleanScripts=`) are still executed. This allows rerunning a build script (see `BuildScripts=`) without removing the results of a previous build.

`ManifestFormat=`, `--manifest-format=`
:   The manifest format type or types to generate. A comma-delimited list consisting of `json` (the standard JSON output format that describes the packages installed), `changelog` (a human-readable text format designed for diffing). By default no manifest is generated.

`Output=`, `--output=`, `-o`
:   Name to use for the generated output image file or directory. Defaults to `image` or, if `ImageId=` is specified, that value is used as the default output name, optionally suffixed with the version set with `ImageVersion=`. If a specific image is built from `mkosi.images`, the name of that image is preferred over `ImageId=`. Note that this option does not allow configuring the output directory, use `OutputDirectory=` for that.

    Note that this only specifies the output prefix, depending on the specific output format, compression and image version used, the full output name might be `image_7.8.raw.xz`.

`CompressOutput=`, `--compress-output=`
:   Configure compression for the resulting image or archive. The argument can be either a boolean or a compression algorithm (`xz`, `zstd`). `zstd` compression is used by default, except CentOS and derivatives up to version 8, which default to `xz`, and OCI images, which default to `gzip`. Note that when applied to block device image types, compression means the image cannot be started directly but needs to be decompressed first. This also means that the `shell`, `boot`, `qemu` verbs are not available when this option is used. Implied for `tar`, `cpio`, `uki`, `esp`, and `oci`.

`CompressLevel=`, `--compress-level=`
:   Configure the compression level to use. Takes an integer. The possible values depend on the compression being used.
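
    **Example**: a minimal `[Output]` sketch that produces a zstd-compressed disk image; the level `19` is just an illustrative value, valid levels depend on the chosen algorithm:

    ```ini
    [Output]
    Format=disk
    CompressOutput=zstd
    CompressLevel=19
    ```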

`OutputDirectory=`, `--output-dir=`, `-O`
:   Path to a directory where to place all generated artifacts. If this is not specified and the directory `mkosi.output/` exists in the local directory, it is automatically used for this purpose.

`WorkspaceDirectory=`, `--workspace-dir=`
:   Path to a directory where to store data required temporarily while building the image. This directory should have enough space to store the full OS image, though in most modes the actually used disk space is smaller. If not specified, a subdirectory of `$XDG_CACHE_HOME` (if set), `$HOME/.cache` (if set) or `/var/tmp` is used.

    The data in this directory is removed automatically after each build. It's safe to manually remove the contents of this directory should an `mkosi` invocation be aborted abnormally (for example, due to reboot/power failure).

`CacheDirectory=`, `--cache-dir=`
:   Takes a path to a directory to use as the incremental cache directory for the incremental images produced when the `Incremental=` option is enabled. If this option is not used, but a `mkosi.cache/` directory is found in the local directory it is automatically used for this purpose.

`PackageCacheDirectory=`, `--package-cache-dir=`
:   Takes a path to a directory to use as the package cache directory for the distribution package manager used. If unset, a suitable directory in the user's home directory or system is used.

`BuildDirectory=`, `--build-dir=`
:   Takes a path to a directory to use as the build directory for build systems that support out-of-tree builds (such as Meson). The directory used this way is shared between repeated builds, and allows the build system to reuse artifacts (such as object files, executables, …) generated on previous invocations. The build scripts can find the path to this directory in the `$BUILDDIR` environment variable. This directory is mounted into the image's root directory when `mkosi-chroot` is invoked during execution of the build scripts. If this option is not specified, but a directory `mkosi.builddir/` exists in the local directory it is automatically used for this purpose (also see the **Files** section below).

`ImageVersion=`, `--image-version=`
:   Configure the image version. This accepts any string, but it is recommended to specify a series of dot separated components. The version may also be configured in a file `mkosi.version` in which case it may be conveniently managed via the `bump` verb or the `--auto-bump` option. When specified the image version is included in the default output file name, i.e. instead of `image.raw` the default will be `image_0.1.raw` for version `0.1` of the image, and similar. The version is also passed via the `$IMAGE_VERSION` environment variable to any build scripts invoked (which may be useful to patch it into `/usr/lib/os-release` or similar, in particular the `IMAGE_VERSION=` field of it).

`ImageId=`, `--image-id=`
:   Configure the image identifier. This accepts a freeform string that shall be used to identify the image with. If set the default output file will be named after it (possibly suffixed with the version). The identifier is also passed via the `$IMAGE_ID` environment variable to any build scripts invoked. The image ID is automatically added to `/usr/lib/os-release`.

`SplitArtifacts=`, `--split-artifacts`
:   If specified and building a disk image, pass `--split=yes` to systemd-repart to have it write out split partition files for each configured partition. Read the [man](https://www.freedesktop.org/software/systemd/man/systemd-repart.html#--split=BOOL) page for more information.

    This is useful in A/B update scenarios where an existing disk image shall be augmented with a new version of a root or `/usr` partition along with its Verity partition and unified kernel.

`RepartDirectories=`, `--repart-dir=`
:   Paths to directories containing systemd-repart partition definition files that are used when mkosi invokes systemd-repart when building a disk image. If `mkosi.repart/` exists in the local directory, it will be used for this purpose as well. Note that mkosi invokes repart with `--root=` set to the root of the image root, so any `CopyFiles=` source paths in partition definition files will be relative to the image root directory.

`SectorSize=`, `--sector-size=`
:   Override the default sector size that systemd-repart uses when building a disk image.

`RepartOffline=`, `--repart-offline=`
:   Specifies whether to build disk images using loopback devices. Enabled by default. When enabled, `systemd-repart` will not use loopback devices to build disk images. When disabled, `systemd-repart` will always use loopback devices to build disk images.

    Note that when using `RepartOffline=no` mkosi cannot run unprivileged and the image build has to be done as the root user outside of any containers and with loopback devices available on the host system.

    There are currently two known scenarios where `RepartOffline=no` has to be used. The first is when using `Subvolumes=` in a repart partition definition file, as subvolumes cannot be created without using loopback devices. The second is when creating a system with SELinux and an XFS root partition. Because `mkfs.xfs` does not support populating an XFS filesystem with extended attributes, loopback devices have to be used to ensure the SELinux extended attributes end up in the generated XFS filesystem.

`Overlay=`, `--overlay`
:   When used together with `BaseTrees=`, the output will consist only of changes to the specified base trees. Each base tree is attached as a lower layer in an overlayfs structure, and the output becomes the upper layer, initially empty. Thus files that are not modified compared to the base trees will not be present in the final output.

    This option may be used to create [systemd *system extensions* or *portable services*](https://uapi-group.org/specifications/specs/extension_image).

`UseSubvolumes=`, `--use-subvolumes=`
:   Takes a boolean or `auto`. Enables or disables use of btrfs subvolumes for directory tree outputs. If enabled, mkosi will create the root directory as a btrfs subvolume and use btrfs subvolume snapshots where possible to copy base or cached trees, which is much faster than doing a recursive copy. If explicitly enabled and `btrfs` is not installed or subvolumes cannot be created, an error is raised. If `auto`, missing `btrfs` or failures to create subvolumes are ignored.

`Seed=`, `--seed=`
:   Takes a UUID as argument or the special value `random`. Overrides the seed that [`systemd-repart(8)`](https://www.freedesktop.org/software/systemd/man/systemd-repart.service.html) uses when building a disk image. This is useful to achieve reproducible builds, where deterministic UUIDs and other partition metadata should be derived on each build.

`SourceDateEpoch=`, `--source-date-epoch=`
:   Takes a timestamp in seconds since the UNIX epoch as argument. File modification times of all files will be clamped to this value. The variable is also propagated to systemd-repart and scripts executed by mkosi.
If not set explicitly, `SOURCE_DATE_EPOCH` from `--environment` and from the host environment are tried in that order. This is useful to make builds reproducible. See [SOURCE_DATE_EPOCH](https://reproducible-builds.org/specs/source-date-epoch/) for more information. `CleanScripts=`, `--clean-script=` : Takes a comma-separated list of paths to executables that are used as the clean scripts for this image. See the **Scripts** section for more information. ### [Content] Section `Packages=`, `--package=`, `-p` : Install the specified distribution packages (i.e. RPM, DEB, …) in the image. Takes a comma separated list of package specifications. This option may be used multiple times in which case the specified package lists are combined. Use `BuildPackages=` to specify packages that shall only be installed in an overlay that is mounted when the prepare scripts are executed with the `build` argument and when the build scripts are executed. The types and syntax of *package specifications* that are allowed depend on the package installer (e.g. `dnf` for `rpm`-based distros or `apt` for `deb`-based distros), but may include package names, package names with version and/or architecture, package name globs, package groups, and virtual provides, including file paths. See `PackageDirectories=` for information on how to make local packages available for installation with `Packages=`. **Example**: when using a distro that uses `dnf`, the following configuration would install the `meson` package (in the latest version), the 32-bit version of the `libfdisk-devel` package, all available packages that start with the `git-` prefix, a `systemd` rpm from the local file system, one of the packages that provides `/usr/bin/ld`, the packages in the *Development Tools* group, and the package that contains the `mypy` python module. ```ini Packages=meson libfdisk-devel.i686 git-* /usr/bin/ld @development-tools python3dist(mypy) ``` `BuildPackages=`, `--build-package=` : Similar to `Packages=`, but configures packages to install only in an overlay that is made available on top of the image to the prepare scripts when executed with the `build` argument and the build scripts. This option should be used to list packages containing header files, compilers, build systems, linkers and other build tools the `mkosi.build` scripts require to operate. Note that packages listed here will be absent in the final image. `VolatilePackages=`, `--volatile-package=` : Similar to `Packages=`, but packages configured with this setting are not cached when `Incremental=` is enabled and are installed after executing any build scripts. Specifically, this setting can be used to install packages that change often or which are built by a build script. `PackageDirectories=`, `--package-directory=` : Specify directories containing extra packages to be made available during the build. `mkosi` will create a local repository containing all packages in these directories and make it available when installing packages or running scripts. If the `mkosi.packages/` directory is found in the local directory it is also used for this purpose. `VolatilePackageDirectories=`, `--volatile-package-directory=` : Like `PackageDirectories=`, but any changes to the packages in these directories will not invalidate the cached images if `Incremental=` is enabled. Additionally, build scripts can add more packages to the local repository by placing the built packages in `$PACKAGEDIR`. 
`WithRecommends=`, `--with-recommends=`
:   Configures whether to install recommended packages or weak dependencies, depending on how they are named by the used package manager. By default, recommended packages are not installed. This is only used for package managers that support the concept, which are currently apt, dnf and zypper.

`WithDocs=`, `--with-docs`
:   Include documentation in the image. Enabled by default. When disabled, if the underlying distribution package manager supports it, documentation is not included in the image. The `$WITH_DOCS` environment variable passed to the `mkosi.build` scripts is set to `0` or `1` depending on whether this option is enabled or disabled.

`BaseTrees=`, `--base-tree=`
:   Takes a comma separated list of paths to use as base trees. When used, these base trees are each copied into the OS tree and form the base distribution instead of installing the distribution from scratch. Only extra packages are installed on top of the ones already installed in the base trees. Note that for this to work properly, the base image still needs to contain the package manager metadata by setting `CleanPackageMetadata=no` (see `CleanPackageMetadata=`).

    Instead of a directory, a tar file or a disk image may be provided. In this case it is unpacked into the OS tree. This mode of operation allows setting permissions and file ownership explicitly, which is useful in particular for projects stored in a version control system such as `git`, since a tar file retains full file ownership and access mode metadata for committed files, while a plain checkout does not.

`SkeletonTrees=`, `--skeleton-tree=`
:   Takes a comma separated list of colon separated path pairs. The first path of each pair refers to a directory to copy into the OS tree before invoking the package manager. The second path of each pair refers to the target directory inside the image. If the second path is not provided, the directory is copied on top of the root directory of the image. The second path is always interpreted as an absolute path. Use this to insert files and directories into the OS tree before the package manager installs any packages. If the `mkosi.skeleton/` directory is found in the local directory it is also used for this purpose with the root directory as target (also see the **Files** section below).

    Note that skeleton trees are cached and any changes to skeleton trees after a cached image has been built (when using `Incremental=`) are only applied when the cached image is rebuilt (by using `-ff` or running `mkosi -f clean`).

    As with the base tree logic above, instead of a directory, a tar file may be provided too. `mkosi.skeleton.tar` will be automatically used if found in the local directory.

`ExtraTrees=`, `--extra-tree=`
:   Takes a comma separated list of colon separated path pairs. The first path of each pair refers to a directory to copy from the host into the image. The second path of each pair refers to the target directory inside the image. If the second path is not provided, the directory is copied on top of the root directory of the image. The second path is always interpreted as an absolute path. Use this to override any default configuration files shipped with the distribution. If the `mkosi.extra/` directory is found in the local directory it is also used for this purpose with the root directory as target (also see the **Files** section below).
    As with the base tree logic above, instead of a directory, a tar file may be provided too. `mkosi.extra.tar` will be automatically used if found in the local directory.

`RemovePackages=`, `--remove-package=`
:   Takes a comma-separated list of package specifications for removal, in the same format as `Packages=`. The removal will be performed as one of the last steps. This step is skipped if `CleanPackageMetadata=no` is used.

`RemoveFiles=`, `--remove-files=`
:   Takes a comma-separated list of globs. Files in the image matching the globs will be purged at the end.

`CleanPackageMetadata=`, `--clean-package-metadata=`
:   Enable/disable removal of package manager databases and repository metadata at the end of installation. Can be specified as `true`, `false`, or `auto` (the default). With `auto`, package manager databases and repository metadata will be removed if the respective package manager executable is *not* present at the end of the installation.

`SyncScripts=`, `--sync-script=`
:   Takes a comma-separated list of paths to executables that are used as the sync scripts for this image. See the **Scripts** section for more information.

`PrepareScripts=`, `--prepare-script=`
:   Takes a comma-separated list of paths to executables that are used as the prepare scripts for this image. See the **Scripts** section for more information.

`BuildScripts=`, `--build-script=`
:   Takes a comma-separated list of paths to executables that are used as the build scripts for this image. See the **Scripts** section for more information.

`PostInstallationScripts=`, `--postinst-script=`
:   Takes a comma-separated list of paths to executables that are used as the post-installation scripts for this image. See the **Scripts** section for more information.

`FinalizeScripts=`, `--finalize-script=`
:   Takes a comma-separated list of paths to executables that are used as the finalize scripts for this image. See the **Scripts** section for more information.

`PostOutputScripts=`, `--postoutput-script=`
:   Takes a comma-separated list of paths to executables that are used as the post output scripts for this image. See the **Scripts** section for more information.

`BuildSources=`, `--build-sources=`
:   Takes a comma separated list of colon separated path pairs. The first path of each pair refers to a directory to mount from the host. The second path of each pair refers to the directory where the source directory should be mounted when running scripts. Every target path is prefixed with `/work/src` and all build sources are sorted lexicographically by their target before mounting, so that top level paths are mounted first. If not configured explicitly, the current working directory is mounted to `/work/src`.

`BuildSourcesEphemeral=`, `--build-sources-ephemeral=`
:   Takes a boolean. Disabled by default. Configures whether changes to source directories (the working directory and the directories configured with `BuildSources=`) are persisted. If enabled, all source directories will be reset to their original state every time after running all scripts of a specific type (except sync scripts).

`Environment=`, `--environment=`
:   Adds variables to the environment that package managers and the prepare/build/postinstall/finalize scripts are executed with. Takes a space-separated list of variable assignments or just variable names. In the latter case, the values of those variables will be passed through from the environment in which `mkosi` was invoked. This option may be specified more than once, in which case all listed variables will be set. If the same variable is set twice, the later setting overrides the earlier one.
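    **Example** (the first variable name and value are hypothetical): set one variable explicitly and pass another through from the environment `mkosi` was invoked in:

    ```ini
    [Content]
    # MY_APP_VERSION is set explicitly; SOURCE_DATE_EPOCH is passed through
    # from the invoking environment.
    Environment=MY_APP_VERSION=1.2.3
                SOURCE_DATE_EPOCH
    ```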
`EnvironmentFiles=`, `--env-file=`
:   Takes a comma-separated list of paths to files that contain environment variable definitions to be added to the scripting environment. Uses `mkosi.env` if it is found in the local directory. The variables are first read from `mkosi.env` if it exists, then from the given list of files and then from the `Environment=` settings.

`WithTests=`, `--without-tests`, `-T`
:   If set to false (or when the command-line option is used), the `$WITH_TESTS` environment variable is set to `0` when the `mkosi.build` scripts are invoked. This is supposed to be used by the build scripts to bypass any unit or integration tests that are normally run during the source build process. Note that this option has no effect unless the `mkosi.build` build scripts honor it.

`WithNetwork=`, `--with-network=`
:   When true, enables network connectivity while the build scripts `mkosi.build` are invoked. By default, the build scripts run with networking turned off. The `$WITH_NETWORK` environment variable is passed to the `mkosi.build` build scripts indicating whether the build is done with or without network.

`Bootable=`, `--bootable=`
:   Takes a boolean or `auto`. Enables or disables generation of a bootable image. If enabled, mkosi will install an EFI bootloader, and add an ESP partition when the disk image output is used. If the selected EFI bootloader (see `Bootloader=`) is not installed or no kernel images can be found, the build will fail. `auto` behaves as if the option was enabled, but the build won't fail if either no kernel images or the selected EFI bootloader can't be found. If disabled, no bootloader will be installed even if found inside the image, no unified kernel images will be generated and no ESP partition will be added to the image if the disk output format is used.

`Bootloader=`, `--bootloader=`
:   Takes one of `none`, `systemd-boot`, `uki` or `grub`. Defaults to `systemd-boot`. If set to `none`, no EFI bootloader will be installed into the image. If set to `systemd-boot`, systemd-boot will be installed and for each installed kernel, a UKI will be generated and stored in `EFI/Linux` in the ESP. If set to `uki`, a single UKI will be generated for the latest installed kernel (the one with the highest version) which is installed to `EFI/BOOT/BOOTX64.EFI` in the ESP. If set to `grub`, for each installed kernel, a UKI will be generated and stored in `EFI/Linux` in the ESP. For each generated UKI, a menu entry is appended to the grub configuration in `grub/grub.cfg` in the ESP which chainloads into the UKI. A shim grub.cfg is also written to `EFI/<distribution>/grub.cfg` in the ESP which loads `grub/grub.cfg` in the ESP for compatibility with signed versions of grub which load the grub configuration from this location.

    Note that we do not yet install grub to the ESP when `Bootloader=` is set to `grub`. This has to be done manually in a postinst or finalize script. The grub EFI binary should be installed to `/efi/EFI/BOOT/BOOTX64.EFI` (or similar depending on the architecture) and should be configured to load its configuration from `EFI/<distribution>/grub.cfg` in the ESP. Signed versions of grub shipped by distributions will load their configuration from this location by default.
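    **Example** (illustrative): build a UEFI-bootable image that boots via a single UKI instead of a systemd-boot menu:

    ```ini
    [Content]
    Bootable=yes
    Bootloader=uki
    ```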
`BiosBootloader=`, `--bios-bootloader=`
:   Takes one of `none` or `grub`. Defaults to `none`. If set to `none`, no BIOS bootloader will be installed. If set to `grub`, grub is installed as the BIOS boot loader if a bootable image is requested with the `Bootable=` option. If no repart partition definition files are configured, mkosi will add a grub BIOS boot partition and an EFI system partition to the default partition definition files.

    Note that this option is not mutually exclusive with `Bootloader=`. It is possible to have an image that is both bootable on UEFI and BIOS by configuring both `Bootloader=` and `BiosBootloader=`.

    The grub BIOS boot partition should have UUID `21686148-6449-6e6f-744e-656564454649` and should be at least 1MB.

    Even if no EFI bootloader is installed, we still need an ESP for BIOS boot as that's where we store the kernel, initrd and grub modules.

`ShimBootloader=`, `--shim-bootloader=`
:   Takes one of `none`, `unsigned`, or `signed`. Defaults to `none`. If set to `none`, shim and MokManager will not be installed to the ESP. If set to `unsigned`, mkosi will search for unsigned shim and MokManager EFI binaries and install them. If `SecureBoot=` is enabled, mkosi will sign the unsigned EFI binaries before installing them. If set to `signed`, mkosi will search for signed EFI binaries and install those. Even if `SecureBoot=` is enabled, mkosi won't sign these binaries again.

    Note that this option only takes effect when an image that is bootable on UEFI firmware is requested using other options (`Bootable=`, `Bootloader=`).

    Note that when this option is enabled, mkosi will only install already signed bootloader binaries, kernel image files and unified kernel images as self-signed binaries would not be accepted by the signed version of shim.

`UnifiedKernelImages=`, `--unified-kernel-images=`
:   Specifies whether to use unified kernel images or not when `Bootloader=` is set to `systemd-boot` or `grub`. Takes a boolean value or `auto`. Defaults to `auto`. If enabled, unified kernel images are always used and the build will fail if any components required to build unified kernel images are missing. If set to `auto`, unified kernel images will be used if all necessary components are available. Otherwise Type 1 entries as defined by the Boot Loader Specification will be used instead. If disabled, Type 1 entries will always be used.

`UnifiedKernelImageFormat=`, `--unified-kernel-image-format=`
:   Takes a filename without any path components to specify the format that unified kernel images should be installed as. This may include both the regular specifiers (see **Specifiers**) and special delayed specifiers, that are expanded during the installation of the files, which are described below. The default format for this parameter is `&e-&k` with `-&h` being appended if `roothash=` or `usrhash=` is found on the kernel command line and `+&c` if `/etc/kernel/tries` is found in the image.

    The following specifiers may be used:

    | Specifier | Value                                              |
    |-----------|----------------------------------------------------|
    | `&&`      | `&` character                                      |
    | `&e`      | Entry Token                                        |
    | `&k`      | Kernel version                                     |
    | `&h`      | `roothash=` or `usrhash=` value of kernel argument |
    | `&c`      | Number of tries used for boot attempt counting     |

`Initrds=`, `--initrd=`
:   Use user-provided initrd(s). Takes a comma separated list of paths to initrd files. This option may be used multiple times in which case the initrd lists are combined. If no initrds are specified and a bootable image is requested, mkosi will look for initrds in a subdirectory `io.mkosi.initrd` of the artifact directory (see `$ARTIFACTDIR` in the section **ENVIRONMENT VARIABLES**); if none are found there, mkosi will automatically build a default initrd.
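    **Example** (the path is hypothetical): use a prebuilt initrd produced by another initrd generator:

    ```ini
    [Content]
    # Hypothetical path to an initrd built outside of mkosi.
    Initrds=prebuilt/initrd.cpio.zst
    ```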
`InitrdPackages=`, `--initrd-package=`
:   Extra packages to install into the default initrd. Takes a comma separated list of package specifications. This option may be used multiple times in which case the specified package lists are combined.

`InitrdVolatilePackages=`, `--initrd-volatile-package=`
:   Similar to `VolatilePackages=`, except it applies to the default initrd.

`MicrocodeHost=`, `--microcode-host=`
:   When set to true, only include microcode for the host's CPU in the image.

`KernelCommandLine=`, `--kernel-command-line=`
:   Use the specified kernel command line when building images. If the value of this setting contains the literals `root=PARTUUID` or `mount.usr=PARTUUID`, these are replaced with the partition UUID of the root or usr partition respectively. For example, `root=PARTUUID` would be replaced with `root=PARTUUID=58c7d0b2-d224-4834-a16f-e036322e88f7` where `58c7d0b2-d224-4834-a16f-e036322e88f7` is the partition UUID of the root partition.

`KernelModulesInclude=`, `--kernel-modules-include=`
:   Takes a list of regex patterns that specify kernel modules to include in the image. Patterns should be relative to the `/usr/lib/modules/<kernel-version>/kernel` directory. mkosi checks for a match anywhere in the module path (e.g. `i915` will match against `drivers/gpu/drm/i915.ko`). All modules that match any of the specified patterns are included in the image. All module and firmware dependencies of the matched modules are included in the image as well.

    If the special value `default` is used, the default kernel modules defined in the `mkosi-initrd` configuration are included as well.

    If the special value `host` is used, the currently loaded modules on the host system are included as well.

    This setting takes priority over `KernelModulesExclude=` and only makes sense when used in combination with it, because all kernel modules are included in the image by default.

`KernelModulesExclude=`, `--kernel-modules-exclude=`
:   Takes a list of regex patterns that specify modules to exclude from the image. Behaves the same as `KernelModulesInclude=` except that all modules that match any of the specified patterns are excluded from the image.

`KernelModulesInitrd=`, `--kernel-modules-initrd=`
:   Enable/Disable generation of the kernel modules initrd when building a bootable image. Enabled by default. If enabled, when building a bootable image, for each kernel that we assemble a unified kernel image for we generate an extra initrd containing only the kernel modules for that kernel version and append it to the prebuilt initrd. This allows generating kernel independent initrds which are augmented with the necessary kernel modules when the UKI is assembled.

`KernelModulesInitrdInclude=`, `--kernel-modules-initrd-include=`
:   Like `KernelModulesInclude=`, but applies to the kernel modules included in the kernel modules initrd.

`KernelModulesInitrdExclude=`, `--kernel-modules-initrd-exclude=`
:   Like `KernelModulesExclude=`, but applies to the kernel modules included in the kernel modules initrd.
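**Example** (the patterns are illustrative): because `KernelModulesInclude=` takes priority over `KernelModulesExclude=`, the two can be combined to strip an image down to a whitelist of modules:

```ini
[Content]
# Exclude every kernel module ...
KernelModulesExclude=.*
# ... except virtio drivers and btrfs (plus their dependencies).
KernelModulesInclude=virtio
                     fs/btrfs/
```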
`Locale=`, `--locale=`, `LocaleMessages=`, `--locale-messages=`, `Keymap=`, `--keymap=`, `Timezone=`, `--timezone=`, `Hostname=`, `--hostname=`, `RootShell=`, `--root-shell=`
:   These settings correspond to the identically named systemd-firstboot options. See the systemd firstboot [manpage](https://www.freedesktop.org/software/systemd/man/systemd-firstboot.html) for more information. Additionally, where applicable, the corresponding systemd credentials for these settings are written to `/usr/lib/credstore`, so that they apply even if only `/usr` is shipped in the image.

`RootPassword=`, `--root-password=`
:   Set the system root password. If this option is not used, but a `mkosi.rootpw` file is found in the local directory, the password is automatically read from it. If the password starts with `hashed:`, it is treated as an already hashed root password. The root password is also stored in `/usr/lib/credstore` under the appropriate systemd credential so that it applies even if only `/usr` is shipped in the image. To create an unlocked account without any password, use `hashed:` without a hash.

`Autologin=`, `--autologin`
:   Enable autologin for the `root` user on `/dev/pts/0` (nspawn), `/dev/tty1` and `/dev/hvc0`.

`MakeInitrd=`, `--make-initrd`
:   Add `/etc/initrd-release` and `/init` to the image so that it can be used as an initramfs.

`Ssh=`, `--ssh`
:   If specified, an sshd socket unit and matching service are installed in the final image that expose SSH over VSock. When building with this option and running the image using `mkosi qemu`, the `mkosi ssh` command can be used to connect to the container/VM via SSH. Note that you still have to make sure openssh is installed in the image to make this option behave correctly. Run `mkosi genkey` to automatically generate an X509 certificate and private key to be used by mkosi to enable SSH access to any virtual machines via `mkosi ssh`. To access images booted using `mkosi boot`, use `machinectl`.

`SELinuxRelabel=`, `--selinux-relabel=`
:   Specifies whether to relabel files to match the image's SELinux policy. Takes a boolean value or `auto`. Defaults to `auto`. If disabled, files will not be relabeled. If enabled, an SELinux policy has to be installed in the image and `setfiles` has to be available to relabel files. If any errors occur during `setfiles`, the build will fail. If set to `auto`, files will be relabeled if an SELinux policy is installed in the image and if `setfiles` is available; any errors that occur during `setfiles` are then ignored.

    Note that when running unprivileged, `setfiles` will fail to set any labels that are not in the host's SELinux policy. To ensure `setfiles` succeeds without errors, make sure to run mkosi as root or build from a host system with the same SELinux policy as the image you're building.

### [Validation] Section

`SecureBoot=`, `--secure-boot`
:   Sign systemd-boot (if it is not signed yet) and any generated unified kernel images for UEFI SecureBoot.

`SecureBootAutoEnroll=`, `--secure-boot-auto-enroll=`
:   Set up automatic enrollment of the secure boot keys in virtual machines as documented in the systemd-boot [man page](https://www.freedesktop.org/software/systemd/man/systemd-boot.html) if `SecureBoot=` is used. Note that systemd-boot will only do automatic secure boot key enrollment in virtual machines starting from systemd v253. To do auto enrollment on systemd v252 or on bare metal machines, write a systemd-boot configuration file to `/efi/loader/loader.conf` using an extra tree with `secure-boot-enroll force` or `secure-boot-enroll manual` in it. Auto enrollment is not supported on systemd versions older than v252. Defaults to `yes`.
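**Example** (the key and certificate paths are illustrative; `mkosi genkey` can generate a matching pair): a minimal secure boot signing setup using the settings described below:

```ini
[Validation]
SecureBoot=yes
SecureBootKey=mkosi.key
SecureBootCertificate=mkosi.crt
SignExpectedPcr=yes
```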
`SecureBootKey=`, `--secure-boot-key=`
:   Path to the PEM file containing the secret key for signing the UEFI kernel image if `SecureBoot=` is used, and PCR signatures when `SignExpectedPcr=` is also used. When `SecureBootKeySource=` is specified, the input type depends on the source.

`SecureBootKeySource=`, `--secure-boot-key-source=`
:   Source of `SecureBootKey=`, to support OpenSSL engines. E.g.: `--secure-boot-key-source=engine:pkcs11`

`SecureBootCertificate=`, `--secure-boot-certificate=`
:   Path to the X.509 file containing the certificate for the signed UEFI kernel image, if `SecureBoot=` is used.

`SecureBootSignTool=`, `--secure-boot-sign-tool=`
:   Tool to use to sign secure boot PE binaries. Takes one of `sbsign`, `pesign` or `auto`. Defaults to `auto`. If set to `auto`, either sbsign or pesign are used if available, with sbsign being preferred if both are installed.

`VerityKey=`, `--verity-key=`
:   Path to the PEM file containing the secret key for signing the verity signature, if a verity signature partition is added with systemd-repart. When `VerityKeySource=` is specified, the input type depends on the source.

`VerityKeySource=`, `--verity-key-source=`
:   Source of `VerityKey=`, to support OpenSSL engines. E.g.: `--verity-key-source=engine:pkcs11`

`VerityCertificate=`, `--verity-certificate=`
:   Path to the X.509 file containing the certificate for signing the verity signature, if a verity signature partition is added with systemd-repart.

`SignExpectedPcr=`, `--sign-expected-pcr`
:   Measure the components of the unified kernel image (UKI) using `systemd-measure` and embed the PCR signature into the unified kernel image. This option takes a boolean value or the special value `auto`, which is the default, and is equal to a true value if the `systemd-measure` binary is in `PATH`. Depends on `SecureBoot=` being enabled and a key from `SecureBootKey=`.

`Passphrase=`, `--passphrase`
:   Specify the path to a file containing the passphrase to use for LUKS encryption. It should contain the passphrase literally, and not end in a newline character (i.e. in the same format as cryptsetup and `/etc/crypttab` expect the passphrase files). The file must have an access mode of 0600 or less.

`Checksum=`, `--checksum`
:   Generate a `SHA256SUMS` file of all generated artifacts after the build is complete.

`Sign=`, `--sign`
:   Sign the generated `SHA256SUMS` using `gpg` after completion.

`Key=`, `--key=`
:   Select the `gpg` key to use for signing `SHA256SUMS`. This key must be already present in the `gpg` keyring.

### [Host] Section

`ProxyUrl=`, `--proxy-url=`
:   Configure a proxy to be used for all outgoing network connections. Where possible, the tools that mkosi invokes are configured to use this proxy. mkosi also sets various well-known environment variables to specify the proxy to use for any programs it invokes that may need internet access.

`ProxyExclude=`, `--proxy-exclude=`
:   Configure hostnames for which requests should not go through the proxy. Takes a comma separated list of hostnames.

`ProxyPeerCertificate=`, `--proxy-peer-certificate=`
:   Configure a file containing certificates used to verify the proxy. Defaults to the system-wide certificate store.
    Currently, setting a proxy peer certificate is only supported when `dnf` or `dnf5` is used to build the image.

`ProxyClientCertificate=`, `--proxy-client-certificate=`
:   Configure a file containing the certificate used to authenticate the client with the proxy.

    Currently, setting a proxy client certificate is only supported when `dnf` or `dnf5` is used to build the image.

`ProxyClientKey=`, `--proxy-client-key=`
:   Configure a file containing the private key used to authenticate the client with the proxy. Defaults to the proxy client certificate if one is provided.

    Currently, setting a proxy client key is only supported when `dnf` or `dnf5` is used to build the image.

`Incremental=`, `--incremental=`, `-i`
:   Enable incremental build mode. In this mode, a copy of the OS image is created immediately after all OS packages are installed and the prepare scripts have executed but before the `mkosi.build` scripts are invoked (or anything that happens after it). On subsequent invocations of `mkosi` with the `-i` switch this cached image may be used to skip the OS package installation, thus drastically speeding up repetitive build times. Note that while there is some rudimentary cache invalidation, it is definitely not perfect. In order to force rebuilding of the cached image, combine `-i` with `-ff` to ensure the cached image is first removed and then re-created.

`NSpawnSettings=`, `--settings=`
:   Specifies a `.nspawn` settings file for `systemd-nspawn` to use in the `boot` and `shell` verbs, and to place next to the generated image file. This is useful to configure the `systemd-nspawn` environment when the image is run. If this setting is not used but an `mkosi.nspawn` file is found in the local directory, it is automatically used for this purpose.

`ExtraSearchPaths=`, `--extra-search-path=`
:   List of colon-separated paths to look for tools in, before using the regular `$PATH` search path.

`VirtualMachineMonitor=`, `--vmm=`
:   Configures the virtual machine monitor to use. Takes one of `qemu` or `vmspawn`. Defaults to `qemu`.

    When set to `qemu`, the image is booted with `qemu`. Most output formats can be booted in `qemu`. Any arguments specified after the verb are appended to the `qemu` invocation and are interpreted as extra qemu command line arguments.

    When set to `vmspawn`, `systemd-vmspawn` is used to boot up the image; `vmspawn` only supports disk and directory type images. Any arguments specified after the verb are appended to the `systemd-vmspawn` invocation and are interpreted as extra vmspawn options and extra kernel command line arguments.

`QemuGui=`, `--qemu-gui=`
:   If enabled, qemu is executed with its graphical interface instead of with a serial console.

`QemuSmp=`, `--qemu-smp=`
:   When used with the `qemu` verb, this option sets `qemu`'s `-smp` argument which controls the number of the guest's CPUs. Defaults to `2`. When set to `0`, the number of CPUs available to the mkosi process will be used.

`QemuMem=`, `--qemu-mem=`
:   When used with the `qemu` verb, this option sets `qemu`'s `-m` argument which controls the amount of the guest's RAM. Defaults to `2G`.

`QemuKvm=`, `--qemu-kvm=`
:   When used with the `qemu` verb, this option specifies whether QEMU should use KVM acceleration. Takes a boolean value or `auto`. Defaults to `auto`.

`QemuVsock=`, `--qemu-vsock=`
:   When used with the `qemu` verb, this option specifies whether QEMU should be configured with a vsock. Takes a boolean value or `auto`. Defaults to `auto`.
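**Example** (illustrative values): the qemu settings above can be combined to give test virtual machines more resources than the defaults:

```ini
[Host]
QemuSmp=4
QemuMem=4G
QemuKvm=yes
```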
`QemuVsockConnectionId=`, `--qemu-vsock-cid=`
:   When used with the `qemu` verb, this option specifies the vsock connection ID to use. Takes a number in the interval `[3, 0xFFFFFFFF)` or `hash` or `auto`. Defaults to `auto`. When set to `hash`, the connection ID will be derived from the full path to the image. When set to `auto`, `mkosi` will try to find a free connection ID automatically. Otherwise, the provided number will be used as is.

`QemuSwtpm=`, `--qemu-swtpm=`
:   When used with the `qemu` verb, this option specifies whether to start an instance of swtpm to be used as a TPM with qemu. This requires swtpm to be installed on the host. Takes a boolean value or `auto`. Defaults to `auto`.

`QemuCdrom=`, `--qemu-cdrom=`
:   When used with the `qemu` verb, this option specifies whether to attach the image to the virtual machine as a CD-ROM device. Takes a boolean. Defaults to `no`.

`QemuFirmware=`, `--qemu-firmware=`
:   When used with the `qemu` verb, this option specifies which firmware to use. Takes one of `uefi`, `uefi-secure-boot`, `bios`, `linux`, or `auto`. Defaults to `auto`. When set to `uefi`, the OVMF firmware without secure boot support is used. When set to `uefi-secure-boot`, the OVMF firmware with secure boot support is used. When set to `bios`, the default SeaBIOS firmware is used. When set to `linux`, direct kernel boot is used. See the `QemuKernel=` option for more details on which kernel image is used with direct kernel boot. When set to `auto`, `uefi-secure-boot` is used if possible and `linux` otherwise.

`QemuFirmwareVariables=`, `--qemu-firmware-variables=`
:   When used with the `qemu` verb, this option specifies the path to the firmware variables file to use. Currently, this option is only taken into account when the `uefi` or `uefi-secure-boot` firmware is used. If not specified, mkosi will search for the default variables file and use that instead.

    When set to `microsoft`, a firmware variables file with the Microsoft secure boot certificates already enrolled will be used.

    When set to `custom`, the secure boot certificate from `SecureBootCertificate=` will be enrolled into the default firmware variables file.

    `virt-fw-vars` from the [virt-firmware](https://gitlab.com/kraxel/virt-firmware) project can be used to customize OVMF variable files.

`QemuKernel=`, `--qemu-kernel=`
:   Set the kernel image to use for qemu direct kernel boot. If not specified, mkosi will use the kernel provided via the command line (`-kernel` option) or the latest kernel that was installed into the image (or fail if no kernel was installed into the image).

    Note that when the `cpio` output format is used, direct kernel boot is used regardless of the configured firmware. Depending on the configured firmware, qemu might boot the kernel itself or use the configured firmware to boot it.

`QemuDrives=`, `--qemu-drive=`
:   Add a qemu drive. Takes a colon-delimited string of format `<id>:<size>[:<directory>[:<options>[:<file-id>]]]`. `id` specifies the qemu ID assigned to the drive. This can be used as the `drive=` property in various qemu devices. `size` specifies the size of the drive. This takes a size in bytes. Additionally, the suffixes `K`, `M` and `G` can be used to specify a size in kilobytes, megabytes and gigabytes respectively. `directory` optionally specifies the directory in which to create the file backing the drive. `options` optionally specifies extra comma-delimited properties which are passed verbatim to qemu's `-drive` option. `file-id` specifies the ID of the file backing the drive.
    Drives with the same file ID will share the backing file. The directory and size of the file will be determined from the first drive with a given file ID.

    **Example usage:**

    ```ini
    [Host]
    QemuDrives=btrfs:10G ext4:20G
    QemuArgs=-device nvme,serial=btrfs,drive=btrfs -device nvme,serial=ext4,drive=ext4
    ```

`QemuArgs=`
:   Space-delimited list of additional arguments to pass when invoking qemu.

`Ephemeral=`, `--ephemeral`
:   When used with the `shell`, `boot`, or `qemu` verbs, this option runs the specified verb on a temporary snapshot of the output image that is removed immediately when the container terminates. Taking the temporary snapshot is more efficient on file systems that support reflinks natively (btrfs or xfs) than on more traditional file systems that do not (ext4).

`Credentials=`, `--credential=`
:   Set credentials to be passed to systemd-nspawn or qemu respectively when `mkosi shell/boot` or `mkosi qemu` are used. This option takes a space separated list of values which can be either key=value pairs or paths. If a path is provided and it is a file, the credential name will be the name of the file. If the file is executable, the credential value will be the output of executing the file. Otherwise, the credential value will be the contents of the file. If the path is a directory, the same logic applies to each file in the directory.

    Note that values will only be treated as paths if they do not contain the delimiter (`=`).

`KernelCommandLineExtra=`, `--kernel-command-line-extra=`
:   Set extra kernel command line entries that are appended to the kernel command line at runtime when booting the image. When booting in a container, these are passed as extra arguments to systemd. When booting in a VM, these are appended to the kernel command line via the SMBIOS io.systemd.stub.kernel-cmdline-extra OEM string. This will only be picked up by systemd-boot/systemd-stub versions newer than or equal to v254.

`Acl=`, `--acl=`
:   If specified, ACLs will be set on any generated root filesystem directories that allow the user running mkosi to remove them without needing privileges.

`ToolsTree=`, `--tools-tree=`
:   If specified, programs executed by mkosi to build and boot an image are looked up inside the given tree instead of in the host system. Use this option to make image builds more reproducible by always using the same versions of programs to build the final image instead of whatever version is installed on the host system. If this option is not used, but the `mkosi.tools/` directory is found in the local directory, it is automatically used for this purpose with the root directory as target.

    Note that if a binary is found in any of the paths configured with `ExtraSearchPaths=`, the binary will be executed on the host.

    If set to `default`, mkosi will automatically add an extra tools tree image and use it as the tools tree.

    Note that mkosi will only build a single default tools tree per build, even if multiple images are defined in `mkosi.images` with `ToolsTree=default`. The settings of the "last" image will apply to the default tools tree (usually the image defined last in `mkosi.images` and without any dependencies on other images).
    The following table shows for which distributions default tools tree packages are defined and which packages are included in those default tools trees:

    | | Fedora | CentOS | Debian | Ubuntu | Arch | openSUSE |
    |---|:---:|:---:|:---:|:---:|:---:|:---:|
    | `acl` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `apt` | ✓ | ✓ | ✓ | ✓ | ✓ | |
    | `archlinux-keyring` | ✓ | ✓ | ✓ | ✓ | ✓ | |
    | `attr` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `bash` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `btrfs-progs` | ✓ | | ✓ | ✓ | ✓ | ✓ |
    | `bubblewrap` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `ca-certificates` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `coreutils` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `cpio` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `curl` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `debian-keyring` | ✓ | ✓ | ✓ | ✓ | ✓ | |
    | `diffutils` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `distribution-gpg-keys` | ✓ | ✓ | | | ✓ | ✓ |
    | `dnf` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `dnf-plugins-core` | ✓ | ✓ | | | | ✓ |
    | `dnf5` | ✓ | | | | | |
    | `dnf5-plugins` | ✓ | | | | | |
    | `dosfstools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `e2fsprogs` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `edk2-ovmf` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `erofs-utils` | ✓ | | ✓ | ✓ | ✓ | ✓ |
    | `findutils` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `git` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `grep` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `grub-tools` | ✓ | ✓ | ✓ | ✓ | ✓ | |
    | `jq` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `kmod` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `less` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `mtools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `nano` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `openssh` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `openssl` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `sed` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `pacman` | ✓ | ✓ | ✓ | ✓ | ✓ | |
    | `pesign` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `policycoreutils` | ✓ | ✓ | ✓ | ✓ | | ✓ |
    | `qemu` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `sbsigntools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `socat` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `squashfs-tools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `strace` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `swtpm` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `systemd` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `ukify` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `tar` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `ubuntu-keyring` | ✓ | ✓ | ✓ | ✓ | ✓ | |
    | `util-linux` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `virtiofsd` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `virt-firmware` | ✓ | ✓ | | | ✓ | |
    | `xfsprogs` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `xz` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `zstd` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
    | `zypper` | ✓ | | ✓ | ✓ | ✓ | |

`ToolsTreeDistribution=`, `--tools-tree-distribution=`
:   Set the distribution to use for the default tools tree. By default, the same distribution as the image that's being built is used, except for CentOS and Ubuntu images, in which case Fedora and Debian are used respectively.

`ToolsTreeRelease=`, `--tools-tree-release=`
:   Set the distribution release to use for the default tools tree. By default, the hardcoded default release in mkosi for the distribution is used.

`ToolsTreeMirror=`, `--tools-tree-mirror=`
:   Set the mirror to use for the default tools tree. By default, the default mirror for the tools tree distribution is used.

`ToolsTreeRepositories=`, `--tools-tree-repository=`
:   Same as `Repositories=` but for the default tools tree.

`ToolsTreePackageManagerTrees=`, `--tools-tree-package-manager-tree=`
:   Same as `PackageManagerTrees=` but for the default tools tree.

`ToolsTreePackages=`, `--tools-tree-packages=`
:   Extra packages to install into the default tools tree. Takes a comma separated list of package specifications. This option may be used multiple times in which case the specified package lists are combined.
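**Example** (illustrative): build with a Fedora-based default tools tree and install an extra debugging tool into it:

```ini
[Host]
ToolsTree=default
ToolsTreeDistribution=fedora
ToolsTreePackages=strace
```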
`ToolsTreeCertificates=`, `--tools-tree-certificates=`
:   Specify whether to use certificates and keys from the tools tree. If enabled, `/usr/share/keyrings`, `/usr/share/distribution-gpg-keys`, `/etc/pki`, `/etc/ssl`, `/etc/ca-certificates`, `/etc/pacman.d/gnupg` and `/var/lib/ca-certificates` from the tools tree are used. Otherwise, these directories are picked up from the host.

`RuntimeTrees=`, `--runtime-tree=`
:   Takes a colon separated pair of paths. The first path refers to a directory to mount into any machine (container or VM) started by mkosi. The second path refers to the target directory inside the machine. If the second path is not provided, the directory is mounted at `/root/src` in the machine. If the second path is relative, it is interpreted relative to `/root/src` in the machine.

    For each mounted directory, the uid and gid of the user running mkosi are mapped to the root user in the machine. This means that all the files and directories will appear as if they're owned by root in the machine, and all new files and directories created by root in the machine in these directories will be owned by the user running mkosi on the host.

    Note that when using `mkosi qemu` with this feature, systemd v254 or newer has to be installed in the image.

`RuntimeSize=`, `--runtime-size=`
:   If specified, disk images are grown to the specified size when they're booted with `mkosi boot` or `mkosi qemu`. Takes a size in bytes. Additionally, the suffixes `K`, `M` and `G` can be used to specify a size in kilobytes, megabytes and gigabytes respectively.

`RuntimeScratch=`, `--runtime-scratch=`
:   Takes a boolean value or `auto`. Specifies whether to mount extra scratch space to `/var/tmp`. If enabled, practically unlimited scratch space is made available under `/var/tmp` when booting the image with `mkosi qemu`, `mkosi boot` or `mkosi shell`.

    Note that using this feature with `mkosi qemu` requires systemd v254 or newer in the guest.

`RuntimeNetwork=`, `--runtime-network=`
:   Takes one of `user`, `interface` or `none`. Defaults to `user`. Specifies the networking to set up when booting the image. `user` sets up usermode networking. `interface` sets up a virtual network connection between the host and the image. This translates to a veth interface for `mkosi shell` and `mkosi boot` and a tap interface for `mkosi qemu` and `mkosi vmspawn`.

    Note that when using `interface`, mkosi does not automatically configure the host interface. It is expected that a recent version of `systemd-networkd` is running on the host which will automatically configure the host interface of the link.

`RuntimeBuildSources=`, `--runtime-build-sources=`
:   Mount the build sources configured with `BuildSources=` and the build directory (if one is configured) to the same locations in `/work` that they were mounted to when running the build script when using `mkosi boot` or `mkosi qemu`.

`UnitProperties=`, `--unit-property=`
:   Configure systemd unit properties to add to the systemd scopes allocated when using `mkosi boot` or `mkosi qemu`. These are passed directly to the `--property` options of `systemd-nspawn` and `systemd-run` respectively.

`SshKey=`, `--ssh-key=`
:   Path to the X509 private key in PEM format to use to connect to a virtual machine started with `mkosi qemu` and built with the `Ssh=` option enabled via the `mkosi ssh` command. If not configured and `mkosi.key` exists in the working directory, it will automatically be used for this purpose. Run `mkosi genkey` to automatically generate a key in `mkosi.key`.
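**Example** (the target name and size are illustrative): mount the current project into booted machines and grow the disk image at runtime:

```ini
[Host]
# Mounts the current directory at /root/src/myproject in the machine.
RuntimeTrees=.:myproject
RuntimeSize=8G
```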
`SshCertificate=`, `--ssh-certificate=`
:   Path to the X509 certificate in PEM format to provision as the SSH public key in virtual machines started with `mkosi qemu`. If not configured and `mkosi.crt` exists in the working directory, it will automatically be used for this purpose. Run `mkosi genkey` to automatically generate a certificate in `mkosi.crt`.

`Machine=`, `--machine=`
:   Specify the machine name to use when booting the image. Can also be used to refer to a specific image when SSH-ing into an image (e.g. `mkosi --image=myimage ssh`).

    Note that `Ephemeral=` has to be enabled to start multiple instances of the same image.

`ForwardJournal=`, `--forward-journal=`
:   Specify the path to which journal logs from containers and virtual machines should be forwarded. If the path has the `.journal` extension, it is interpreted as a file to which the journal should be written. Otherwise, the path is interpreted as a directory to which the journal should be written. Note that systemd v256 or newer is required in the virtual machine for log forwarding to work.

    Note that if a path with the `.journal` extension is given, the journal size is limited to `4G`. Configure an output directory instead of a file if your workload produces more than `4G` worth of journal data.

### [Match] Section

`Profile=`
:   Matches against the configured profile.

`Distribution=`
:   Matches against the configured distribution.

`Release=`
:   Matches against the configured distribution release. If this condition is used and no distribution has been explicitly configured yet, the host distribution and release are used.

`Architecture=`
:   Matches against the configured architecture. If this condition is used and no architecture has been explicitly configured yet, the host architecture is used.

`Repositories=`
:   Matches against repositories enabled with the `Repositories=` setting. Takes a single repository name.

`PathExists=`
:   This condition is satisfied if the given path exists. Relative paths are interpreted relative to the parent directory of the config file that the condition is read from.

`ImageId=`
:   Matches against the configured image ID, supporting globs. If this condition is used and no image ID has been explicitly configured yet, this condition fails.

`ImageVersion=`
:   Matches against the configured image version. Image versions can be prepended by the operators `==`, `!=`, `>=`, `<=`, `<`, `>` for rich version comparisons according to the UAPI group version format specification. If no operator is prepended, the equality operator is assumed by default. If this condition is used and no image version has been explicitly configured yet, this condition fails.

`Bootable=`
:   Matches against the configured value for the `Bootable=` feature. Takes a boolean value or `auto`.

`Format=`
:   Matches against the configured value for the `Format=` option. Takes an output format (see the `Format=` option).

`SystemdVersion=`
:   Matches against the systemd version on the host (as reported by `systemctl --version`). Values can be prepended by the operators `==`, `!=`, `>=`, `<=`, `<`, `>` for rich version comparisons according to the UAPI group version format specification. If no operator is prepended, the equality operator is assumed by default.

`BuildSources=`
:   Takes a build source target path (see `BuildSources=`). This match is satisfied if any of the configured build sources uses this target path.
    For example, if we have a `mkosi.conf` file containing:

    ```ini
    [Content]
    BuildSources=../abc/qed:kernel
    ```

    and a drop-in containing:

    ```ini
    [Match]
    BuildSources=kernel
    ```

    the drop-in will be included.

    Any absolute paths passed to this setting are interpreted relative to the current working directory.

`HostArchitecture=`
:   Matches against the host's native architecture. See the `Architecture=` setting for a list of possible values.

`ToolsTreeDistribution=`
:   Matches against the configured tools tree distribution.

`Environment=`
:   Matches against a specific key/value pair configured with `Environment=`. If no value is provided, the match checks whether the given key is present in the environment, regardless of its value.

This table shows which matchers support globs, rich comparisons and the default value that is matched against if no value has been configured at the time the config file is read:

| Matcher                  | Globs | Rich Comparisons | Default                                |
|--------------------------|-------|------------------|----------------------------------------|
| `Profile=`               | no    | no               | match fails                            |
| `Distribution=`          | no    | no               | match host distribution                |
| `Release=`               | no    | no               | match host release                     |
| `Architecture=`          | no    | no               | match host architecture                |
| `PathExists=`            | no    | no               | n/a                                    |
| `ImageId=`               | yes   | no               | match fails                            |
| `ImageVersion=`          | no    | yes              | match fails                            |
| `Bootable=`              | no    | no               | match auto feature                     |
| `Format=`                | no    | no               | match default format                   |
| `SystemdVersion=`        | no    | yes              | n/a                                    |
| `BuildSources=`          | no    | no               | match fails                            |
| `HostArchitecture=`      | no    | no               | n/a                                    |
| `ToolsTreeDistribution=` | no    | no               | match default tools tree distribution  |
| `Environment=`           | no    | no               | n/a                                    |

### [Config] Section

`Profile=`, `--profile=`
:   Select the given profile. A profile is a configuration file or directory in the `mkosi.profiles/` directory. When selected, this configuration file or directory is included after parsing the `mkosi.conf` file, but before any `mkosi.conf.d/*.conf` drop in configuration.

`Include=`, `--include=`, `-I`
:   Include extra configuration from the given file or directory. The extra configuration is included immediately after parsing the setting, except when used on the command line, in which case the extra configuration is included after parsing all command line arguments.

    Note that each path containing extra configuration is only parsed once, even if included more than once with `Include=`.

    The builtin configs for the mkosi default initrd and default tools tree can be included by including the literal value `mkosi-initrd` and `mkosi-tools` respectively.

    Note: Include names starting with either of the literals `mkosi-` or `contrib-` are reserved for use by mkosi itself.

`InitrdInclude=`, `--initrd-include=`
:   Same as `Include=`, but the extra configuration files or directories are included when building the default initrd.

`Dependencies=`, `--dependency=`
:   The images that this image depends on, specified as a comma-separated list. All images configured in this option will be built before this image. When this setting is specified for the "main" image, it specifies which subimages should be built. See the **Building multiple images** section for more information.

`MinimumVersion=`, `--minimum-version=`
:   The minimum mkosi version required to build this configuration. If specified multiple times, the highest specified version is used.

`ConfigureScripts=`, `--configure-script=`
:   Takes a comma-separated list of paths to executables that are used as the configure scripts for this image.
    See the **Scripts** section for more information.

`PassEnvironment=`, `--pass-environment=`
:   Takes a list of environment variable names separated by spaces. When building multiple images, pass the listed environment variables to each individual subimage as if they were "universal" settings. See the **Building multiple images** section for more information.

## Specifiers

The current value of various settings can be accessed when parsing configuration files by using specifiers. To write a literal `%` character in a configuration file without treating it as a specifier, use `%%`. The following specifiers are understood:

| Setting            | Specifier |
|--------------------|-----------|
| `Distribution=`    | `%d`      |
| `Release=`         | `%r`      |
| `Architecture=`    | `%a`      |
| `Format=`          | `%t`      |
| `Output=`          | `%o`      |
| `OutputDirectory=` | `%O`      |
| `ImageId=`         | `%i`      |
| `ImageVersion=`    | `%v`      |
| `Profile=`         | `%p`      |

There are also specifiers that are independent of settings:

| Specifier | Value                                   |
|-----------|-----------------------------------------|
| `%C`      | Parent directory of current config file |
| `%P`      | Current working directory               |
| `%D`      | Directory that mkosi was invoked in     |

Finally, there are specifiers that are derived from a setting:

| Specifier | Value                                                 |
|-----------|-------------------------------------------------------|
| `%F`      | The default filesystem of the configured distribution |

Note that the current working directory changes as mkosi parses its configuration. Specifically, each time mkosi parses a directory containing a `mkosi.conf` file, mkosi changes its working directory to that directory.

Note that the directory that mkosi was invoked in is influenced by the `--directory=` command line argument.

The following table shows example values for the directory specifiers listed above:

|      | `$D/mkosi.conf` | `$D/mkosi.conf.d/abc/abc.conf` | `$D/mkosi.conf.d/abc/mkosi.conf` |
|------|-----------------|--------------------------------|----------------------------------|
| `%C` | `$D`            | `$D/mkosi.conf.d`              | `$D/mkosi.conf.d/abc`            |
| `%P` | `$D`            | `$D`                           | `$D/mkosi.conf.d/abc`            |
| `%D` | `$D`            | `$D`                           | `$D`                             |

## Supported distributions

Images may be created containing installations of the following distributions:

* *Fedora Linux*
* *Debian*
* *Ubuntu*
* *Arch Linux*
* *openSUSE*
* *Mageia*
* *CentOS*
* *RHEL*
* *RHEL UBI*
* *OpenMandriva*
* *Rocky Linux*
* *Alma Linux*
* *None* (**Requires the user to provide a pre-built rootfs**)

In theory, any distribution may be used on the host for building images containing any other distribution, as long as the necessary tools are available. Specifically, any distribution that packages `apt` may be used to build *Debian* or *Ubuntu* images. Any distribution that packages `dnf` may be used to build images for any of the rpm-based distributions. Any distro that packages `pacman` may be used to build *Arch Linux* images. Any distribution that packages `zypper` may be used to build *openSUSE* images. Other distributions and build automation tools for embedded Linux systems such as Buildroot, OpenEmbedded and Yocto Project may be used by selecting the `custom` distribution, and populating the rootfs via a combination of base trees, skeleton trees, and prepare scripts.

Currently, *Fedora Linux* packages all relevant tools as of Fedora 28.

Note that when not using a custom mirror, `RHEL` images can only be built from a host system with a `RHEL` subscription (established using e.g. `subscription-manager`).

# Execution Flow

Execution flow for `mkosi build`.
Default values/calls are shown in parentheses.

When building with `--incremental` mkosi creates a cache of the distribution installation if not already existing and replaces the distribution installation in consecutive runs with data from the cached one.

1. Parse CLI options
1. Parse configuration files
1. Run configure scripts (`mkosi.configure`)
1. If we're not running as root, unshare the user namespace and map the subuid range configured in `/etc/subuid` and `/etc/subgid` into it.
1. Unshare the mount namespace
1. Remount the following directories read-only if they exist:
   - `/usr`
   - `/etc`
   - `/opt`
   - `/srv`
   - `/boot`
   - `/efi`
   - `/media`
   - `/mnt`

Then, for each image, we execute the following steps:

1. Copy package manager trees into the workspace
1. Sync the package manager repository metadata
1. Run sync scripts (`mkosi.sync`)
1. Copy base trees (`--base-tree=`) into the image
1. Reuse a cached image if one is available
1. Copy a snapshot of the package manager repository metadata into the image
1. Copy skeleton trees (`mkosi.skeleton`) into image
1. Install distribution and packages into image
1. Run prepare scripts on image with the `final` argument (`mkosi.prepare`)
1. Install build packages in overlay if any build scripts are configured
1. Run prepare scripts on overlay with the `build` argument if any build scripts are configured (`mkosi.prepare`)
1. Cache the image if configured (`--incremental`)
1. Run build scripts on image + overlay if any build scripts are configured (`mkosi.build`)
1. Finalize the build if the output format `none` is configured
1. Copy the build scripts outputs into the image
1. Copy the extra trees into the image (`mkosi.extra`)
1. Run post-install scripts (`mkosi.postinst`)
1. Write config files required for `Ssh=`, `Autologin=` and `MakeInitrd=`
1. Install systemd-boot and configure secure boot if configured (`--secure-boot`)
1. Run `systemd-sysusers`
1. Run `systemd-tmpfiles`
1. Run `systemctl preset-all`
1. Run `depmod`
1. Run `systemd-firstboot`
1. Run `systemd-hwdb`
1. Remove packages and files (`RemovePackages=`, `RemoveFiles=`)
1. Run SELinux relabel if a SELinux policy is installed
1. Run finalize scripts (`mkosi.finalize`)
1. Generate unified kernel image if configured to do so
1. Generate final output format
1. Run post-output scripts (`mkosi.postoutput`)

# Scripts

To allow for image customization that cannot be implemented using mkosi's builtin features, mkosi supports running scripts at various points during the image build process that can customize the image as needed. Scripts are executed on the host system as root (either real root or root within the user namespace that mkosi created when running unprivileged) with a customized environment to simplify modifying the image. For each script, the configured build sources (`BuildSources=`) are mounted into the current working directory before running the script in the current working directory. `$SRCDIR` is set to point to the current working directory. The following scripts are supported:

* If **`mkosi.configure`** (`ConfigureScripts=`) exists, it is executed before building the image. This script may be used to dynamically modify the configuration. It receives the configuration serialized as JSON on stdin and should output the modified configuration serialized as JSON on stdout. Note that this script only runs when building or booting the image (`build`, `qemu`, `boot` and `shell` verbs).
  If a default tools tree is configured, it will be built before running the configure scripts and the configure scripts will run with the tools tree available. This also means that the modifications made by configure scripts will not be visible in the `summary` output.

* If **`mkosi.sync`** (`SyncScripts=`) exists, it is executed before the image is built. This script may be used to update various sources that are used to build the image. One use case is to run `git pull` on various source repositories before building the image. Specifically, the `BuildSourcesEphemeral=` setting does not apply to sync scripts, which means sync scripts can be used to update build sources even if `BuildSourcesEphemeral=` is enabled.

* If **`mkosi.prepare`** (`PrepareScripts=`) exists, it is first called with the `final` argument, right after the software packages are installed. It is called a second time with the `build` command line parameter, right after the build packages are installed and the build overlay mounted on top of the image's root directory. This script has network access and may be used to install packages from other sources than the distro's package manager (e.g. `pip`, `npm`, ...), after all software packages are installed but before the image is cached (if incremental mode is enabled). In contrast to a general purpose installation, it is safe to install packages to the system (`pip install`, `npm install -g`) instead of in `$SRCDIR` itself because the build image is only used for a single project and can easily be thrown away and rebuilt, so there's no risk of conflicting dependencies and no risk of polluting the host system.

* If **`mkosi.build`** (`BuildScripts=`) exists, it is executed with the build overlay mounted on top of the image's root directory. When running the build script, `$DESTDIR` points to a directory where the script should place any generated files it would like to end up in the image. Note that `make`/`automake`/`meson` based build systems generally honor `$DESTDIR`, thus making it very natural to build *source* trees from the build script. After running the build script, the contents of `$DESTDIR` are copied into the image.

* If **`mkosi.postinst`** (`PostInstallationScripts=`) exists, it is executed after the (optional) build tree and extra trees have been installed. This script may be used to alter the images without any restrictions, after all software packages and built sources have been installed.

* If **`mkosi.finalize`** (`FinalizeScripts=`) exists, it is executed as the last step of preparing an image.

* If **`mkosi.postoutput`** (`PostOutputScripts=`) exists, it is executed right after all the output files have been generated, before they are finally moved into the output directory. This can be used to generate additional or alternative outputs, e.g. `SHA256FILES` or SBOM manifests.

* If **`mkosi.clean`** (`CleanScripts=`) exists, it is executed right after the outputs of a previous build have been cleaned up. A clean script can clean up any outputs that mkosi does not know about (e.g. artifacts from `SplitArtifacts=yes` or RPMs built in a build script). Note that this script does not use the tools tree even if one is configured.

If a script uses the `.chroot` extension, mkosi will chroot into the image using `mkosi-chroot` (see below) before executing the script. For example, if `mkosi.postinst.chroot` exists, mkosi will chroot into the image and execute it as the post-installation script.
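Scripts can also be configured explicitly instead of relying on the default file names. **Example** (the paths are hypothetical):

```ini
[Content]
# Runs chrooted into the image because of the .chroot extension.
PostInstallationScripts=scripts/10-tweak.chroot
# Runs on the host as one of the last steps of preparing the image.
FinalizeScripts=scripts/20-finalize
```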
Scripts executed by mkosi receive the following environment variables:

* `$ARCHITECTURE` contains the architecture from the `Architecture=` setting. If `Architecture=` is not set, it will contain the native architecture of the host machine. See the documentation of `Architecture=` for possible values for this variable.

* `$QEMU_ARCHITECTURE` contains the architecture from `$ARCHITECTURE` in the format used by `qemu`. Useful for finding the qemu binary (`qemu-system-$QEMU_ARCHITECTURE`).

* `$DISTRIBUTION` contains the distribution from the `Distribution=` setting.

* `$RELEASE` contains the release from the `Release=` setting.

* `$DISTRIBUTION_ARCHITECTURE` contains the architecture from `$ARCHITECTURE` in the format used by the configured distribution.

* `$PROFILE` contains the profile from the `Profile=` setting.

* `$CACHED` is set to `1` if a cached image is available, `0` otherwise.

* `$CHROOT_SCRIPT` contains the path to the running script relative to the image root directory. The primary use case for this variable is in combination with the `mkosi-chroot` script. See the description of `mkosi-chroot` below for more information.

* `$SRCDIR` contains the path to the directory mkosi was invoked from, with any configured build sources mounted on top. `$CHROOT_SRCDIR` contains the value that `$SRCDIR` will have after invoking `mkosi-chroot`.

* `$BUILDDIR` is only defined if `mkosi.builddir` exists and points to the build directory to use. This is useful for all build systems that support out-of-tree builds to reuse already built artifacts from previous runs. `$CHROOT_BUILDDIR` contains the value that `$BUILDDIR` will have after invoking `mkosi-chroot`.

* `$DESTDIR` is a directory into which any installed software generated by a build script may be placed. This variable is only set when executing a build script. `$CHROOT_DESTDIR` contains the value that `$DESTDIR` will have after invoking `mkosi-chroot`.

* `$OUTPUTDIR` points to the staging directory used to store build artifacts generated during the build. `$CHROOT_OUTPUTDIR` contains the value that `$OUTPUTDIR` will have after invoking `mkosi-chroot`.

* `$PACKAGEDIR` points to the directory containing the local package repository. Build scripts can add more packages to the local repository by writing the packages to `$PACKAGEDIR`.

* `$ARTIFACTDIR` points to the directory that is used to pass around build artifacts generated during the build and make them available for use by mkosi. This is similar to `PACKAGEDIR`, but is meant for artifacts that may not be packages understood by the package manager, e.g. initrds created by initrd generators other than mkosi. Build scripts can add more artifacts to the directory by placing them in `$ARTIFACTDIR`. Files in this directory are only available for the current build and are not copied out like the contents of `$OUTPUTDIR`.

  `mkosi` also inspects certain subdirectories of the artifact directory and automatically uses their contents at certain steps. Currently, the following two subdirectories in the artifact directory are used by mkosi:

  - `io.mkosi.microcode`: All files in this directory are used as microcode files, i.e. they are prepended to the initrds in lexicographical order.
  - `io.mkosi.initrd`: All files in this directory are used as initrds and joined in lexicographical order.

  It is recommended that users of `$ARTIFACTDIR` put things for their own use in a similarly namespaced directory, e.g. `local.my.namespace`.
* `$BUILDROOT` is the root directory of the image being built, optionally with the build overlay mounted on top depending on the script that's being executed.

* `$WITH_DOCS` is either `0` or `1` depending on whether a build without or with installed documentation was requested (`WithDocs=yes`). A build script should suppress installation of any package documentation to `$DESTDIR` in case `$WITH_DOCS` is set to `0`.

* `$WITH_TESTS` is either `0` or `1` depending on whether a build without or with running the test suite was requested (`WithTests=no`). A build script should avoid running any unit or integration tests in case `$WITH_TESTS` is `0`.

* `$WITH_NETWORK` is either `0` or `1` depending on whether a build without or with networking is being executed (`WithNetwork=no`). A build script should avoid any network communication in case `$WITH_NETWORK` is `0`.

* `$SOURCE_DATE_EPOCH` is defined if requested (`SourceDateEpoch=TIMESTAMP`, `Environment=SOURCE_DATE_EPOCH=TIMESTAMP` or the host environment variable `$SOURCE_DATE_EPOCH`). This is useful to make builds reproducible. See [SOURCE_DATE_EPOCH](https://reproducible-builds.org/specs/source-date-epoch/) for more information.

* `$MKOSI_UID` and `$MKOSI_GID` are, respectively, the uid and gid of the user that invoked mkosi, potentially translated to a uid in the user namespace that mkosi is running in. These can be used in combination with `setpriv` to run commands as the user that invoked mkosi (e.g. `setpriv --reuid=$MKOSI_UID --regid=$MKOSI_GID --clear-groups`).

* `$MKOSI_CONFIG` is a file containing a JSON summary of the settings of the current image. This file can be parsed inside scripts to gain access to all settings for the current image (see the sketch after this list).

* `$IMAGE_ID` contains the identifier from the `ImageId=` or `--image-id=` setting.

* `$IMAGE_VERSION` contains the version from the `ImageVersion=` or `--image-version=` setting.
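For example, a hypothetical build or post-installation script could look up individual settings in `$MKOSI_CONFIG` with `jq`; the `ImageId` key name below is an assumption about how the setting is serialized:

```sh
#!/bin/sh
# Minimal sketch: query the JSON settings summary of the current image.
# Assumes the "ImageId" key matches the ImageId= setting's serialized name.
image_id=$(jq -r '.ImageId' "$MKOSI_CONFIG")
echo "Building image: $image_id" >&2
```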
Consult this table for which script receives which environment variables:

| Variable                    | `configure` | `sync` | `prepare` | `build` | `postinst` | `finalize` | `postoutput` | `clean` |
|-----------------------------|:-----------:|:------:|:---------:|:-------:|:----------:|:----------:|:------------:|:-------:|
| `ARCHITECTURE`              | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `QEMU_ARCHITECTURE`         | ✓           |        |           |         |            |            |              |         |
| `DISTRIBUTION`              | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `DISTRIBUTION_ARCHITECTURE` | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `RELEASE`                   | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `PROFILE`                   | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          |              | ✓       |
| `CACHED`                    |             | ✓      |           |         |            |            |              |         |
| `CHROOT_SCRIPT`             |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `SRCDIR`                    | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `CHROOT_SRCDIR`             |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `BUILDDIR`                  |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `CHROOT_BUILDDIR`           |             |        |           | ✓       |            |            |              |         |
| `DESTDIR`                   |             |        |           | ✓       |            |            |              |         |
| `CHROOT_DESTDIR`            |             |        |           | ✓       |            |            |              |         |
| `OUTPUTDIR`                 |             |        |           | ✓       | ✓          | ✓          | ✓            | ✓       |
| `CHROOT_OUTPUTDIR`          |             |        |           | ✓       | ✓          | ✓          |              |         |
| `BUILDROOT`                 |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `PACKAGEDIR`                |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `ARTIFACTDIR`               |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `WITH_DOCS`                 |             |        | ✓         | ✓       |            |            |              |         |
| `WITH_TESTS`                |             |        | ✓         | ✓       |            |            |              |         |
| `WITH_NETWORK`              |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `SOURCE_DATE_EPOCH`         |             |        | ✓         | ✓       | ✓          | ✓          |              | ✓       |
| `MKOSI_UID`                 | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `MKOSI_GID`                 | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `MKOSI_CONFIG`              |             | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `IMAGE_ID`                  | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `IMAGE_VERSION`             | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |

Additionally, when a script is executed, a few scripts are made available via `$PATH` to simplify common use cases.

* `mkosi-chroot`: This script will chroot into the image and execute the given command. On top of chrooting into the image, it will also mount various files and directories (`$SRCDIR`, `$DESTDIR`, `$BUILDDIR`, `$OUTPUTDIR`, `$CHROOT_SCRIPT`) into the image and modify the corresponding environment variables to point to the locations inside the image. It will also mount APIVFS filesystems (`/proc`, `/dev`, ...) to make sure scripts and tools executed inside the chroot work properly. It also propagates `/etc/resolv.conf` from the host into the chroot if requested so that DNS resolution works inside the chroot. After the mkosi-chroot command exits, various mount points are cleaned up.

  For example, to invoke `ls` inside of the image, use the following:

  ```sh
  mkosi-chroot ls ...
  ```

  To execute the entire script inside the image, add a ".chroot" suffix to the name (`mkosi.build.chroot` instead of `mkosi.build`, etc.).

* For all of the supported package managers (`dnf`, `rpm`, `apt`, `dpkg`, `pacman`, `zypper`), scripts of the same name are put into `$PATH` that make sure these commands operate on the image's root directory with the configuration supplied by the user instead of on the host system. This means that from a script, you can do e.g. `dnf install vim` to install vim into the image. Additionally, `mkosi-install`, `mkosi-reinstall`, `mkosi-upgrade` and `mkosi-remove` will invoke the corresponding operation of the package manager being used to build the image.

* `mkosi-as-caller`: This script uses `setpriv` to switch from the user `root` in the user namespace used for various build steps back to the original user that called mkosi. This is useful when we want to invoke build steps which will write to `$BUILDDIR` and we want to have the files owned by the calling user.
For example, a complete `mkosi.build` script might be the following: ```sh set -ex mkosi-as-caller meson setup "$BUILDDIR/build" "$SRCDIR" mkosi-as-caller meson compile -C "$BUILDDIR/build" meson install -C "$BUILDDIR/build" --no-rebuild ``` * `git` is automatically invoked with `safe.directory=*` to avoid permissions errors when running as the root user in a user namespace. * `useradd` and `groupadd` are automatically invoked with `--root=$BUILDROOT` when executed outside of the image. When scripts are executed, any directories that are still writable are also made read-only (`/home`, `/var`, `/root`, ...) and only the minimal set of directories that need to be writable remain writable. This is to ensure that scripts can't mess with the host system when mkosi is running as root. Note that when executing scripts, all source directories are made ephemeral which means all changes made to source directories while running scripts are thrown away after the scripts finish executing. Use the output, build or cache directories if you need to persist data between builds. # Files To make it easy to build images for development versions of your projects, mkosi can read configuration data from the local directory, under the assumption that it is invoked from a *source* tree. Specifically, the following files are used if they exist in the local directory: * The **`mkosi.skeleton/`** directory or **`mkosi.skeleton.tar`** archive may be used to insert files into the image. The files are copied *before* the distribution packages are installed into the image. This allows creation of files that need to be provided early, for example to configure the package manager or set systemd presets. When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive. * The **`mkosi.extra/`** directory or **`mkosi.extra.tar`** archive may be used to insert additional files into the image, on top of what the distribution includes in its packages. They are similar to `mkosi.skeleton/` and `mkosi.skeleton.tar`, but the files are copied into the directory tree of the image *after* the OS was installed. When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive. * The **`mkosi.pkgmngr/`** directory or **`mkosi.pkgmngr.tar`** archive may be used to configure the package manager without the files being inserted into the image. If the files should be included in the image `mkosi.skeleton/` and `mkosi.skeleton.tar` should be used instead. When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive. * The **`mkosi.nspawn`** nspawn settings file will be copied into the same place as the output image file, if it exists. This is useful since nspawn looks for settings files next to image files it boots, for additional container runtime settings. * The **`mkosi.cache/`** directory, if it exists, is automatically used as package download cache, in order to speed repeated runs of the tool. * The **`mkosi.builddir/`** directory, if it exists, is automatically used as out-of-tree build directory, if the build commands in the `mkosi.build` scripts support it. Specifically, this directory will be mounted into the build container, and the `$BUILDDIR` environment variable will be set to it when the build scripts are invoked. 
A build script may then use this directory as build directory, for automake-style or ninja-style out-of-tree builds. This speeds up builds considerably, in particular when `mkosi` is used in incremental mode (`-i`): not only the image and build overlay, but also the build tree is reused between subsequent invocations. Note that if this directory does not exist the `$BUILDDIR` environment variable is not set, and it is up to the build scripts to decide whether to do an in-tree or an out-of-tree build, and which build directory to use.

* The **`mkosi.rootpw`** file can be used to provide the password for the root user of the image. If the password is prefixed with `hashed:` it is treated as an already hashed root password. The password may optionally be followed by a newline character which is implicitly removed. The file must have an access mode of 0600 or less. If this file does not exist, the distribution's default root password is set (which usually means access to the root user is blocked).

* The **`mkosi.passphrase`** file provides the passphrase to use when LUKS encryption is selected. It should contain the passphrase literally, and not end in a newline character (i.e. in the same format as cryptsetup and `/etc/crypttab` expect the passphrase files). The file must have an access mode of 0600 or less.

* The **`mkosi.crt`** and **`mkosi.key`** files contain an X.509 certificate and PEM private key to use when signing is required (UEFI SecureBoot, verity, ...).

* The **`mkosi.output/`** directory is used to store all build artifacts.

* The **`mkosi.credentials/`** directory is used as a source of extra credentials similar to the `Credentials=` option. For each file in the directory, the filename will be used as the credential name and the file contents become the credential value, or, if the file is executable, mkosi will execute the file and the command's output to stdout will be used as the credential value. Output to stderr will be ignored. Credentials configured with `Credentials=` take precedence over files in `mkosi.credentials`. A sketch of an executable credential is shown at the end of this section.

* The **`mkosi.repart/`** directory is used as the source for systemd-repart partition definition files which are passed to systemd-repart when building a disk image. If it does not exist and the `RepartDirectories=` setting is not configured, mkosi will default to the following partition definition files:

  `00-esp.conf` (if we're building a bootable image):

  ```ini
  [Partition]
  Type=esp
  Format=vfat
  CopyFiles=/boot:/
  CopyFiles=/efi:/
  SizeMinBytes=512M
  SizeMaxBytes=512M
  ```

  `05-bios.conf` (if we're building a BIOS bootable image):

  ```ini
  [Partition]
  # UUID of the grub BIOS boot partition which grub needs on GPT to
  # embed itself into.
  Type=21686148-6449-6e6f-744e-656564454649
  SizeMinBytes=1M
  SizeMaxBytes=1M
  ```

  `10-root.conf`:

  ```ini
  [Partition]
  Type=root
  Format=
  CopyFiles=/
  Minimize=guess
  ```

  Note that if either `mkosi.repart/` is found or `RepartDirectories=` is used, we will not use any of the default partition definitions.

All these files are optional. Note that the location of all these files may also be configured during invocation via command line switches, and as settings in `mkosi.conf`, in case the default settings are not acceptable for a project.
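As a sketch, an executable credential can compute its value at build time; the file name below is an assumption based on the `passwd.plaintext-password.root` credential consumed by systemd-firstboot:

```sh
#!/bin/sh
# Hypothetical executable credential stored as
# mkosi.credentials/passwd.plaintext-password.root: the file name becomes
# the credential name and stdout becomes the credential value.
openssl rand -base64 12
```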
# CACHING

`mkosi` supports three different caches for speeding up repetitive re-building of images. Specifically:

1. The package cache of the distribution package manager may be preserved between builds. This is configured with the `--cache-dir=` option or the `mkosi.cache/` directory. This form of caching relies on the distribution's package manager, and caches distribution packages (RPM, DEB, …) after they are downloaded, but before they are unpacked.

2. If the incremental build mode is enabled with `--incremental`, cached copies of the final image and build overlay are made immediately before the build sources are copied in (for the build overlay) or the artifacts generated by `mkosi.build` are copied in (in case of the final image). This form of caching allows bypassing the time-consuming package unpacking step of the distribution package managers, but is only effective if the list of packages to use remains stable while the build sources and scripts change regularly. Note that this cache requires manual flushing: whenever the package list is modified, the cached images need to be explicitly removed before the next re-build, using the `-f` switch.

3. Finally, between multiple builds the build artifact directory may be shared, using the `mkosi.builddir/` directory. This directory allows build systems such as Meson to reuse already compiled sources from a previous build, thus speeding up the build process of a `mkosi.build` build script.

The package cache and incremental mode are unconditionally useful. The final cache only applies to uses of `mkosi` with a source tree and build script. When all three are enabled together, turn-around times for complete image builds are minimal, as only changed source files need to be recompiled.

# Building multiple images

If the `mkosi.images/` directory exists, mkosi will load individual subimage configurations from it and build each of them. Image configurations can be either directories containing mkosi configuration files or regular files with the `.conf` extension.

When image configurations are found in `mkosi.images/`, mkosi will build the images specified in the `Dependencies=` setting of the main image and all of their dependencies (or all of them if no images were explicitly configured using `Dependencies=` in the main image configuration). To add dependencies between subimages, the `Dependencies=` setting can be used as well. Subimages are always built before the main image.

When images are defined, mkosi will first read the main image configuration (configuration outside of the `mkosi.images/` directory), followed by the image specific configuration. Several "universal" settings apply to the main image and all its subimages and cannot be configured separately in subimages. The following settings are universal and cannot be configured in subimages (except for settings which take a collection of values, which can be extended in subimages but not overridden):

- `Profile=`
- `Distribution=`
- `Release=`
- `Architecture=`
- `Mirror=`
- `LocalMirror=`
- `RepositoryKeyCheck=`
- `Repositories=`
- `CacheOnly=`
- `PackageManagerTrees=`
- `OutputDirectory=`
- `WorkspaceDirectory=`
- `CacheDirectory=`
- `PackageCacheDirectory=`
- `BuildDirectory=`
- `ImageId=`
- `ImageVersion=`
- `SectorSize=`
- `RepartOffline=`
- `UseSubvolumes=`
- `PackageDirectories=`
- `VolatilePackageDirectories=`
- `SourceDateEpoch=`
- `BuildSources=`
- `BuildSourcesEphemeral=`
- `WithTests=`
- `WithNetwork=`
- `VerityKey=`
- `VerityKeySource=`
- `VerityCertificate=`
- `ProxyUrl=`
- `ProxyExclude=`
- `ProxyPeerCertificate=`
- `ProxyClientCertificate=`
- `ProxyClientKey=`
- `Incremental=`
- `ExtraSearchPaths=`
- `Acl=`
- `ToolsTree=`
- `ToolsTreeCertificates=`

Images can refer to outputs of images they depend on. Specifically, for the following options, mkosi will only check whether the inputs exist just before building the image:

- `BaseTrees=`
- `PackageManagerTrees=`
- `SkeletonTrees=`
- `ExtraTrees=`
- `ToolsTree=`
- `Initrds=`

To refer to outputs of an image's dependencies, simply configure any of these options with a relative path to the output to use in the output directory of the dependency, or use the `%O` specifier to refer to the output directory.
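As a sketch, a hypothetical subimage configuration `mkosi.images/system.conf` could consume the output of another subimage named `base` (both names are illustrative):

```ini
# Hypothetical subimage that is built after the "base" subimage and uses
# its output, referenced via the %O output directory specifier, as a base
# tree.
[Config]
Dependencies=base

[Content]
BaseTrees=%O/base
```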
A good example of how to build multiple images can be found in the [systemd](https://github.com/systemd/systemd/tree/main/mkosi.images) repository.

# ENVIRONMENT VARIABLES

* `$MKOSI_LESS` overrides options for `less` when it is invoked by `mkosi` to page output.

* `$MKOSI_DNF` can be used to override the executable used as `dnf`. This is particularly useful to select between `dnf` and `dnf5`.

* `$EPEL_MIRROR` can be used to override the default mirror location used for the epel repositories when `Mirror=` is used. By default, mkosi looks for the epel repositories in the `fedora` subdirectory of the parent directory of the mirror specified in `Mirror=`. For example, if the mirror is set to `https://mirror.net/centos-stream`, mkosi will look for the epel repositories in `https://mirror.net/fedora/epel`.

# EXAMPLES

Create and run a raw *GPT* image with *ext4*, as `image.raw`:

```console
# mkosi -p systemd --incremental boot
```

Create and run a bootable *GPT* image, as `foobar.raw`:

```console
$ mkosi -d fedora -p kernel-core -p systemd -p systemd-boot -p udev -o foobar.raw
# mkosi --output foobar.raw boot
$ mkosi --output foobar.raw qemu
```

Create and run a *Fedora Linux* image in a plain directory:

```console
# mkosi --distribution fedora --format directory boot
```

Create a compressed image `image.raw.xz` with *SSH* installed and add a checksum file:

```console
$ mkosi --distribution fedora --format disk --checksum --compress-output --package=openssh-clients
```

Inside the source directory of an `automake`-based project, configure *mkosi* so that simply invoking `mkosi` without any parameters builds an OS image containing a built version of the project in its current state:

```console $ cat >mkosi.conf <mkosi.build < None: """ If we're connected to a terminal, put the process in a new process group and make that the foreground process group so that only this process receives SIGINT. """ STDERR_FILENO = 2 if os.isatty(STDERR_FILENO): if new_process_group: os.setpgrp() old = signal.signal(signal.SIGTTOU, signal.SIG_IGN) try: os.tcsetpgrp(STDERR_FILENO, os.getpgrp()) except OSError as e: if e.errno != errno.ENOTTY: raise e signal.signal(signal.SIGTTOU, old) def ensure_exc_info() -> tuple[type[BaseException], BaseException, TracebackType]: exctype, exc, tb = sys.exc_info() assert exctype assert exc assert tb return (exctype, exc, tb) @contextlib.contextmanager def uncaught_exception_handler(exit: Callable[[int], NoReturn] = sys.exit) -> Iterator[None]: rc = 0 try: yield except SystemExit as e: rc = e.code if isinstance(e.code, int) else 1 if ARG_DEBUG.get(): sys.excepthook(*ensure_exc_info()) except KeyboardInterrupt: rc = 1 if ARG_DEBUG.get(): sys.excepthook(*ensure_exc_info()) else: logging.error("Interrupted") except subprocess.CalledProcessError as e: # We always log when subprocess.CalledProcessError is raised, so we don't log again here. rc = e.returncode # Failures from qemu, ssh and systemd-nspawn are expected and we won't log stacktraces for those. # Failures from self come from the forks we spawn to build images in a user namespace.
We've already done all # the logging for those failures so we don't log stacktraces for those either. if ( ARG_DEBUG.get() and e.cmd and str(e.cmd[0]) not in ("self", "ssh", "systemd-nspawn") and "qemu-system" not in str(e.cmd[0]) ): sys.excepthook(*ensure_exc_info()) except BaseException: sys.excepthook(*ensure_exc_info()) rc = 1 finally: sys.stdout.flush() sys.stderr.flush() exit(rc) def fork_and_wait(target: Callable[..., None], *args: Any, **kwargs: Any) -> None: pid = os.fork() if pid == 0: with uncaught_exception_handler(exit=os._exit): make_foreground_process() target(*args, **kwargs) try: _, status = os.waitpid(pid, 0) except BaseException: os.kill(pid, signal.SIGTERM) _, status = os.waitpid(pid, 0) finally: make_foreground_process(new_process_group=False) rc = os.waitstatus_to_exitcode(status) if rc != 0: raise subprocess.CalledProcessError(rc, ["self"]) def log_process_failure(sandbox: Sequence[str], cmdline: Sequence[str], returncode: int) -> None: if returncode < 0: logging.error(f"Interrupted by {signal.Signals(-returncode).name} signal") else: logging.error( f"\"{shlex.join([*sandbox, *cmdline] if ARG_DEBUG.get() else cmdline)}\" returned non-zero exit code " f"{returncode}." ) def run( cmdline: Sequence[PathString], check: bool = True, stdin: _FILE = None, stdout: _FILE = None, stderr: _FILE = None, input: Optional[str] = None, user: Optional[int] = None, group: Optional[int] = None, env: Mapping[str, str] = {}, cwd: Optional[Path] = None, log: bool = True, foreground: bool = True, preexec_fn: Optional[Callable[[], None]] = None, success_exit_status: Sequence[int] = (0,), sandbox: AbstractContextManager[Sequence[PathString]] = contextlib.nullcontext([]), scope: Sequence[str] = (), ) -> CompletedProcess: if input is not None: assert stdin is None # stdin and input cannot be specified together stdin = subprocess.PIPE try: with spawn( cmdline, check=check, stdin=stdin, stdout=stdout, stderr=stderr, user=user, group=group, env=env, cwd=cwd, log=log, foreground=foreground, preexec_fn=preexec_fn, success_exit_status=success_exit_status, sandbox=sandbox, scope=scope, innerpid=False, ) as (process, _): out, err = process.communicate(input) except FileNotFoundError: return CompletedProcess(cmdline, 1, "", "") return CompletedProcess(cmdline, process.returncode, out, err) @contextlib.contextmanager def spawn( cmdline: Sequence[PathString], check: bool = True, stdin: _FILE = None, stdout: _FILE = None, stderr: _FILE = None, user: Optional[int] = None, group: Optional[int] = None, pass_fds: Collection[int] = (), env: Mapping[str, str] = {}, cwd: Optional[Path] = None, log: bool = True, foreground: bool = False, preexec_fn: Optional[Callable[[], None]] = None, success_exit_status: Sequence[int] = (0,), sandbox: AbstractContextManager[Sequence[PathString]] = contextlib.nullcontext([]), scope: Sequence[str] = (), innerpid: bool = True, ) -> Iterator[tuple[Popen, int]]: assert sorted(set(pass_fds)) == list(pass_fds) cmdline = [os.fspath(x) for x in cmdline] if ARG_DEBUG.get(): logging.info(f"+ {shlex.join(cmdline)}") if not stdout and not stderr: # Unless explicit redirection is done, print all subprocess # output on stderr, since we do so as well for mkosi's own # output. 
stdout = sys.stderr if stdin is None: stdin = subprocess.DEVNULL env = { "PATH": os.environ["PATH"], "TERM": os.getenv("TERM", "vt220"), "LANG": "C.UTF-8", **env, } if "TMPDIR" in os.environ: env["TMPDIR"] = os.environ["TMPDIR"] for e in ("SYSTEMD_LOG_LEVEL", "SYSTEMD_LOG_LOCATION"): if e in os.environ: env[e] = os.environ[e] if "HOME" not in env: env["HOME"] = "/" def preexec() -> None: if foreground: make_foreground_process() if preexec_fn: preexec_fn() if not pass_fds: return # The systemd socket activation interface requires any passed file descriptors to start at '3' and # incrementally increase from there. The file descriptors we got from the caller might be arbitrary, so we need # to move them around to make sure they start at '3' and incrementally increase. for i, fd in enumerate(pass_fds): # Don't do anything if the file descriptor is already what we need it to be. if fd == SD_LISTEN_FDS_START + i: continue # Close any existing file descriptor that occupies the id that we want to move to. This is safe because # using pass_fds implies using close_fds as well, except that file descriptors are closed by python after # running the preexec function, so we have to close a few of those manually here to make room if needed. try: os.close(SD_LISTEN_FDS_START + i) except OSError as e: if e.errno != errno.EBADF: raise nfd = fcntl.fcntl(fd, fcntl.F_DUPFD, SD_LISTEN_FDS_START + i) # fcntl.F_DUPFD uses the closest available file descriptor ID, so make sure it actually picked the ID we # expect it to pick. assert nfd == SD_LISTEN_FDS_START + i with sandbox as sbx: prefix = [os.fspath(x) for x in sbx] # First, check if the sandbox works at all before executing the command. if prefix and (rc := subprocess.run(prefix + ["true"]).returncode) != 0: log_process_failure(prefix, cmdline, rc) raise subprocess.CalledProcessError(rc, prefix + cmdline) if subprocess.run( prefix + ["sh", "-c", f"command -v {cmdline[0]}"], stdout=subprocess.DEVNULL, ).returncode != 0: if check: die(f"{cmdline[0]} not found.", hint=f"Is {cmdline[0]} installed on the host system?") # We can't really return anything in this case, so we raise a specific exception that we can catch in # run(). logging.debug(f"{cmdline[0]} not found, not running {shlex.join(cmdline)}") raise FileNotFoundError(cmdline[0]) if ( foreground and prefix and subprocess.run(prefix + ["sh", "-c", "command -v setpgid"], stdout=subprocess.DEVNULL).returncode == 0 ): prefix += ["setpgid", "--foreground", "--"] if pass_fds: # We don't know the PID before we start the process and we can't modify the environment in preexec_fn so we # have to spawn a temporary shell to set the necessary environment variables before spawning the actual # command. prefix += ["sh", "-c", f"LISTEN_FDS={len(pass_fds)} LISTEN_PID=$$ exec $0 \"$@\""] if prefix and innerpid: r, w = os.pipe2(os.O_CLOEXEC) # Make sure that the write end won't be overridden in preexec() when we're moving fds forward. q = fcntl.fcntl(w, fcntl.F_DUPFD_CLOEXEC, SD_LISTEN_FDS_START + len(pass_fds) + 1) os.close(w) w = q # dash doesn't support working with file descriptors higher than 9 so make sure we use bash. 
innerpidcmd = ["bash", "-c", f"echo $$ >&{w} && exec {w}>&- && exec $0 \"$@\""] else: innerpidcmd = [] r, w = (None, None) try: with subprocess.Popen( [*scope, *prefix, *innerpidcmd, *cmdline], stdin=stdin, stdout=stdout, stderr=stderr, text=True, user=user, group=group, # pass_fds only comes into effect after python has invoked the preexec function, so we make sure that # pass_fds contains the file descriptors to keep open after we've done our transformation in preexec(). pass_fds=[SD_LISTEN_FDS_START + i for i in range(len(pass_fds))] + ([w] if w else []), env=env, cwd=cwd, preexec_fn=preexec, ) as proc: if w: os.close(w) pid = proc.pid try: if r: with open(r) as f: s = f.read() if s: pid = int(s) yield proc, pid except BaseException: kill(proc, pid, signal.SIGTERM) raise finally: returncode = proc.wait() if check and returncode not in success_exit_status: if log: log_process_failure(prefix, cmdline, returncode) if ARG_DEBUG_SHELL.get(): subprocess.run( [*scope, *prefix, "bash"], check=False, stdin=sys.stdin, text=True, user=user, group=group, env=env, cwd=cwd, preexec_fn=preexec, ) raise subprocess.CalledProcessError(returncode, cmdline) except FileNotFoundError as e: die(f"{e.filename} not found.") finally: if foreground: make_foreground_process(new_process_group=False) def find_binary(*names: PathString, root: Path = Path("/"), extra: Sequence[Path] = ()) -> Optional[Path]: if root != Path("/"): path = ":".join( itertools.chain( (os.fspath(p) for p in extra), (os.fspath(p) for p in (root / "usr/bin", root / "usr/sbin")), ) ) else: path = os.environ["PATH"] for name in names: if any(Path(name).is_relative_to(d) for d in extra): pass elif Path(name).is_absolute(): name = root / Path(name).relative_to("/") elif "/" in str(name): name = root / name if binary := shutil.which(name, path=path): if root != Path("/") and not Path(binary).is_relative_to(root): return Path(binary) else: return Path("/") / Path(binary).relative_to(root) return None def kill(process: Popen, innerpid: int, signal: int) -> None: process.poll() if process.returncode is not None: return try: os.kill(innerpid, signal) # Handle the race condition where the process might exit between us calling poll() and us calling os.kill(). except ProcessLookupError: pass class AsyncioThread(threading.Thread): """ The default threading.Thread() is not interruptable, so we make our own version by using the concurrency feature in python that is interruptable, namely asyncio. Additionally, we store any exception that the coroutine raises and re-raise it in join() if no other exception was raised before. 
""" def __init__(self, target: Awaitable[Any], *args: Any, **kwargs: Any) -> None: self.target = target self.loop: queue.SimpleQueue[asyncio.AbstractEventLoop] = queue.SimpleQueue() self.exc: queue.SimpleQueue[BaseException] = queue.SimpleQueue() super().__init__(*args, **kwargs) def run(self) -> None: async def wrapper() -> None: self.loop.put(asyncio.get_running_loop()) await self.target try: asyncio.run(wrapper()) except asyncio.CancelledError: pass except BaseException as e: self.exc.put(e) def cancel(self) -> None: loop = self.loop.get() for task in asyncio.tasks.all_tasks(loop): loop.call_soon_threadsafe(task.cancel) def __enter__(self) -> "AsyncioThread": self.start() return self def __exit__( self, type: Optional[type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: self.cancel() self.join() if type is None: try: raise self.exc.get_nowait() except queue.Empty: pass mkosi-24.3/mkosi/sandbox.py000066400000000000000000000256371465176501400157440ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import dataclasses import enum import logging import os import shutil import uuid from collections.abc import Iterator, Sequence from contextlib import AbstractContextManager from pathlib import Path from typing import Optional, Protocol from mkosi.types import PathString from mkosi.user import INVOKING_USER from mkosi.util import flatten, one_zero, startswith @dataclasses.dataclass(frozen=True) class Mount: src: PathString dst: PathString devices: bool = False ro: bool = False required: bool = True def __hash__(self) -> int: return hash((Path(self.src), Path(self.dst), self.devices, self.ro, self.required)) def __eq__(self, other: object) -> bool: if not isinstance(other, Mount): return False return self.__hash__() == other.__hash__() def options(self) -> list[str]: if self.devices: opt = "--dev-bind" if self.required else "--dev-bind-try" elif self.ro: opt = "--ro-bind" if self.required else "--ro-bind-try" else: opt = "--bind" if self.required else "--bind-try" return [opt, os.fspath(self.src), os.fspath(self.dst)] class SandboxProtocol(Protocol): def __call__( self, *, binary: Optional[PathString], vartmp: bool = False, mounts: Sequence[Mount] = (), extra: Sequence[PathString] = (), ) -> AbstractContextManager[list[PathString]]: ... def nosandbox( *, binary: Optional[PathString], vartmp: bool = False, mounts: Sequence[Mount] = (), extra: Sequence[PathString] = (), ) -> AbstractContextManager[list[PathString]]: return contextlib.nullcontext([]) # https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h class Capability(enum.Enum): CAP_NET_ADMIN = 12 def have_effective_cap(capability: Capability) -> bool: for line in Path("/proc/self/status").read_text().splitlines(): if rhs := startswith(line, "CapEff:"): hexcap = rhs.strip() break else: logging.warning(f"\"CapEff:\" not found in /proc/self/status, assuming we don't have {capability}") return False return (int(hexcap, 16) & (1 << capability.value)) != 0 def finalize_passwd_mounts(root: PathString) -> list[Mount]: """ If passwd or a related file exists in the apivfs directory, bind mount it over the host files while we run the command, to make sure that the command we run uses user/group information from the apivfs directory instead of from the host. 
""" return [ Mount(Path(root) / "etc" / f, f"/etc/{f}", ro=True, required=False) for f in ("passwd", "group", "shadow", "gshadow") ] def finalize_mounts(mounts: Sequence[Mount]) -> list[PathString]: mounts = list(set(mounts)) mounts = [ m for m in mounts if not any( m != n and m.devices == n.devices and m.ro == n.ro and m.required == n.required and Path(m.src).is_relative_to(n.src) and Path(m.dst).is_relative_to(n.dst) and Path(m.src).relative_to(n.src) == Path(m.dst).relative_to(n.dst) for n in mounts ) ] mounts = sorted(mounts, key=lambda m: (Path(m.dst), m.devices, not m.ro, m.required, Path(m.src))) return flatten(m.options() for m in mounts) @contextlib.contextmanager def sandbox_cmd( *, network: bool = False, devices: bool = False, vartmp: bool = False, scripts: Optional[Path] = None, tools: Path = Path("/"), relaxed: bool = False, mounts: Sequence[Mount] = (), options: Sequence[PathString] = (), setup: Sequence[PathString] = (), extra: Sequence[PathString] = (), ) -> Iterator[list[PathString]]: cmdline: list[PathString] = [] mounts = list(mounts) if vartmp and not relaxed: # We want to use an empty subdirectory in the host's temporary directory as the sandbox's /var/tmp. vartmpdir = Path(os.getenv("TMPDIR", "/var/tmp")) / f"mkosi-var-tmp-{uuid.uuid4().hex[:16]}" else: vartmpdir = None cmdline += [ *setup, "bwrap", *( ["--unshare-net"] if not network and (os.getuid() != 0 or have_effective_cap(Capability.CAP_NET_ADMIN)) else [] ), "--die-with-parent", "--proc", "/proc", "--setenv", "SYSTEMD_OFFLINE", one_zero(network), # We mounted a subdirectory of TMPDIR to /var/tmp so we unset TMPDIR so that /tmp or /var/tmp are used instead. "--unsetenv", "TMPDIR", ] mounts += [Mount(tools / "usr", "/usr", ro=True)] if relaxed: mounts += [Mount("/tmp", "/tmp")] else: cmdline += ["--dir", "/tmp", "--dir", "/var/tmp", "--unshare-ipc"] if (tools / "nix/store").exists(): mounts += [Mount(tools / "nix/store", "/nix/store")] if devices or relaxed: mounts += [ Mount("/sys", "/sys"), Mount("/run", "/run"), Mount("/dev", "/dev", devices=True), ] else: cmdline += ["--dev", "/dev"] if relaxed: dirs = ("/etc", "/opt", "/srv", "/media", "/mnt", "/var", os.fspath(INVOKING_USER.home())) for d in dirs: if Path(d).exists(): mounts += [Mount(d, d)] if len(Path.cwd().parents) >= 2: # `Path.parents` only supports slices and negative indexing from Python 3.10 onwards. # TODO: Remove list() when we depend on Python 3.10 or newer. d = os.fspath(list(Path.cwd().parents)[-2]) elif len(Path.cwd().parents) == 1: d = os.fspath(Path.cwd()) else: d = "" if d and d not in (*dirs, "/home", "/usr", "/nix", "/tmp"): mounts += [Mount(d, d)] if vartmpdir: mounts += [Mount(vartmpdir, "/var/tmp")] for d in ("bin", "sbin", "lib", "lib32", "lib64"): if (p := tools / d).is_symlink(): cmdline += ["--symlink", p.readlink(), Path("/") / p.relative_to(tools)] elif p.is_dir(): mounts += [Mount(p, Path("/") / p.relative_to(tools), ro=True)] path = "/usr/bin:/usr/sbin" if tools != Path("/") else os.environ["PATH"] cmdline += ["--setenv", "PATH", f"/scripts:{path}", *options] # If we're using /usr from a tools tree, we have to use /etc/alternatives from the tools tree as well if it # exists since that points directly back to /usr. Apply this after the options so the caller can mount # something else to /etc without overriding this mount. In relaxed mode, we only do this if /etc/alternatives # already exists on the host as otherwise we'd modify the host's /etc by creating the mountpoint ourselves (or # fail when trying to create it). 
if (tools / "etc/alternatives").exists() and (not relaxed or Path("/etc/alternatives").exists()): mounts += [Mount(tools / "etc/alternatives", "/etc/alternatives", ro=True)] if scripts: mounts += [Mount(scripts, "/scripts", ro=True)] if network and not relaxed and Path("/etc/resolv.conf").exists(): mounts += [Mount("/etc/resolv.conf", "/etc/resolv.conf")] cmdline += finalize_mounts(mounts) if not any(Path(m.dst) == Path("/etc") for m in mounts): cmdline += ["--symlink", "../proc/self/mounts", "/etc/mtab"] # bubblewrap creates everything with a restricted mode so relax stuff as needed. ops = [] if not relaxed: ops += ["chmod 1777 /tmp"] if not devices: ops += ["chmod 1777 /dev/shm"] if vartmpdir: ops += ["chmod 1777 /var/tmp"] if relaxed and INVOKING_USER.home().exists() and len(INVOKING_USER.home().parents) > 1: # We might mount a subdirectory of /home so /home will be created with the wrong permissions by bubblewrap so # we need to fix up the permissions. ops += [f"chmod 755 {list(INVOKING_USER.home().parents)[-1]}"] else: ops += ["chmod 755 /etc"] ops += ["exec $0 \"$@\""] cmdline += ["sh", "-c", " && ".join(ops), *extra] if vartmpdir: vartmpdir.mkdir(mode=0o1777) try: yield cmdline finally: if vartmpdir: shutil.rmtree(vartmpdir) def apivfs_cmd() -> list[PathString]: return [ "bwrap", "--dev-bind", "/", "/", "--tmpfs", "/buildroot/run", "--tmpfs", "/buildroot/tmp", "--bind", "/var/tmp", "/buildroot/var/tmp", "--proc", "/buildroot/proc", "--dev", "/buildroot/dev", # Make sure /etc/machine-id is not overwritten by any package manager post install scripts. "--ro-bind-try", "/buildroot/etc/machine-id", "/buildroot/etc/machine-id", # Nudge gpg to create its sockets in /run by making sure /run/user/0 exists. "--dir", "/buildroot/run/user/0", *flatten(mount.options() for mount in finalize_passwd_mounts("/buildroot")), "sh", "-c", " && ".join( [ "chmod 1777 /buildroot/tmp /buildroot/var/tmp /buildroot/dev/shm", "chmod 755 /buildroot/run", # Make sure anything running in the root directory thinks it's in a container. $container can't always # be accessed so we write /run/host/container-manager as well which is always accessible. "mkdir -m 755 /buildroot/run/host", "echo mkosi >/buildroot/run/host/container-manager", "exec $0 \"$@\"", ] ), ] def chroot_cmd(*, resolve: bool = False, work: bool = False) -> list[PathString]: workdir = "/buildroot/work" if work else "" return apivfs_cmd() + [ "sh", "-c", " && ".join( [ *([f"trap 'rm -rf {workdir}' EXIT"] if work else []), # /etc/resolv.conf can be a dangling symlink to /run/systemd/resolve/stub-resolv.conf. Bubblewrap tries # to call mkdir() on each component of the path which means it will try to call # mkdir(/run/systemd/resolve/stub-resolv.conf) which will fail unless /run/systemd/resolve exists # already so we make sure that it already exists. f"mkdir -p -m 755 {workdir} /buildroot/run/systemd /buildroot/run/systemd/resolve", # No exec here because we need to clean up the /work directory afterwards. "$0 \"$@\"", ] ), "bwrap", "--dev-bind", "/buildroot", "/", "--setenv", "container", "mkosi", "--setenv", "HOME", "/", "--setenv", "PATH", "/work/scripts:/usr/bin:/usr/sbin", *(["--ro-bind-try", "/etc/resolv.conf", "/etc/resolv.conf"] if resolve else []), *(["--bind", "/work", "/work", "--chdir", "/work/src"] if work else []), "--setenv", "BUILDROOT", "/", # Start an interactive bash shell if we're not given any arguments. 
"sh", "-c", '[ "$0" = "sh" ] && [ $# -eq 0 ] && exec bash -i || exec $0 "$@"', ] mkosi-24.3/mkosi/tree.py000066400000000000000000000137241465176501400152370ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import errno import logging import shutil import subprocess import tempfile from collections.abc import Iterator from pathlib import Path from mkosi.config import ConfigFeature from mkosi.log import ARG_DEBUG, die from mkosi.run import run from mkosi.sandbox import Mount, SandboxProtocol, nosandbox from mkosi.types import PathString from mkosi.versioncomp import GenericVersion def statfs(path: Path, *, sandbox: SandboxProtocol = nosandbox) -> str: return run( ["stat", "--file-system", "--format", "%T", path], stdout=subprocess.PIPE, sandbox=sandbox(binary="stat", mounts=[Mount(path, path, ro=True)]), ).stdout.strip() def is_subvolume(path: Path, *, sandbox: SandboxProtocol = nosandbox) -> bool: return path.is_dir() and path.stat().st_ino == 256 and statfs(path, sandbox=sandbox) == "btrfs" def cp_version(*, sandbox: SandboxProtocol = nosandbox) -> GenericVersion: return GenericVersion( run( ["cp", "--version"], sandbox=sandbox(binary="cp"), stdout=subprocess.PIPE, ).stdout.splitlines()[0].split()[3] ) def make_tree( path: Path, *, use_subvolumes: ConfigFeature = ConfigFeature.disabled, sandbox: SandboxProtocol = nosandbox, ) -> Path: if statfs(path.parent, sandbox=sandbox) != "btrfs": if use_subvolumes == ConfigFeature.enabled: die(f"Subvolumes requested but {path} is not located on a btrfs filesystem") path.mkdir() return path if use_subvolumes != ConfigFeature.disabled: result = run(["btrfs", "subvolume", "create", path], sandbox=sandbox(binary="btrfs", mounts=[Mount(path.parent, path.parent)]), check=use_subvolumes == ConfigFeature.enabled).returncode else: result = 1 if result != 0: path.mkdir() return path @contextlib.contextmanager def preserve_target_directories_stat(src: Path, dst: Path) -> Iterator[None]: dirs = [p for d in src.glob("**/") if (dst / (p := d.relative_to(src))).exists()] with tempfile.TemporaryDirectory() as tmp: for d in dirs: (tmp / d).mkdir(exist_ok=True) shutil.copystat(dst / d, tmp / d) yield for d in dirs: shutil.copystat(tmp / d, dst / d) def copy_tree( src: Path, dst: Path, *, preserve: bool = True, dereference: bool = False, use_subvolumes: ConfigFeature = ConfigFeature.disabled, sandbox: SandboxProtocol = nosandbox, ) -> Path: copy: list[PathString] = [ "cp", "--recursive", "--dereference" if dereference else "--no-dereference", f"--preserve=mode,links{',timestamps,ownership,xattr' if preserve else ''}", "--reflink=auto", "--copy-contents", src, dst, ] if cp_version(sandbox=sandbox) >= "9.5": copy += ["--keep-directory-symlink"] mounts = [Mount(src, src, ro=True), Mount(dst.parent, dst.parent)] # If the source and destination are both directories, we want to merge the source directory with the # destination directory. If the source if a file and the destination is a directory, we want to copy # the source inside the directory. if src.is_dir(): copy += ["--no-target-directory"] # Subvolumes always have inode 256 so we can use that to check if a directory is a subvolume. 
if ( use_subvolumes == ConfigFeature.disabled or not preserve or not is_subvolume(src, sandbox=sandbox) or (dst.exists() and any(dst.iterdir())) ): with ( preserve_target_directories_stat(src, dst) if not preserve else contextlib.nullcontext() ): run(copy, sandbox=sandbox(binary="cp", mounts=mounts)) return dst # btrfs can't snapshot to an existing directory so make sure the destination does not exist. if dst.exists(): dst.rmdir() result = run( ["btrfs", "subvolume", "snapshot", src, dst], check=use_subvolumes == ConfigFeature.enabled, sandbox=sandbox(binary="btrfs", mounts=mounts), ).returncode if result != 0: with ( preserve_target_directories_stat(src, dst) if not preserve else contextlib.nullcontext() ): run(copy, sandbox=sandbox(binary="cp", mounts=mounts)) return dst def rmtree(*paths: Path, sandbox: SandboxProtocol = nosandbox) -> None: if not paths: return if subvolumes := sorted({p for p in paths if p.exists() and is_subvolume(p, sandbox=sandbox)}): # Silence and ignore failures since when not running as root, this will fail with a permission error unless the # btrfs filesystem is mounted with user_subvol_rm_allowed. run(["btrfs", "subvolume", "delete", *subvolumes], check=False, sandbox=sandbox(binary="btrfs", mounts=[Mount(p.parent, p.parent) for p in subvolumes]), stdout=subprocess.DEVNULL if not ARG_DEBUG.get() else None, stderr=subprocess.DEVNULL if not ARG_DEBUG.get() else None) filtered = sorted({p for p in paths if p.exists() or p.is_symlink()}) if filtered: run(["rm", "-rf", "--", *filtered], sandbox=sandbox(binary="rm", mounts=[Mount(p.parent, p.parent) for p in filtered])) def move_tree( src: Path, dst: Path, *, use_subvolumes: ConfigFeature = ConfigFeature.disabled, sandbox: SandboxProtocol = nosandbox ) -> Path: if src == dst: return dst if dst.is_dir(): dst = dst / src.name try: src.rename(dst) except OSError as e: if e.errno != errno.EXDEV: raise e logging.info( f"Could not rename {src} to {dst} as they are located on different devices, falling back to copying" ) copy_tree(src, dst, use_subvolumes=use_subvolumes, sandbox=sandbox) rmtree(src, sandbox=sandbox) return dst mkosi-24.3/mkosi/types.py000066400000000000000000000021451465176501400154370ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import subprocess from pathlib import Path from typing import IO, TYPE_CHECKING, Any, Protocol, TypeVar, Union # These types are only generic during type checking and not at runtime, leading # to a TypeError during compilation. # Let's be as strict as we can with the description for the usage we have. if TYPE_CHECKING: CompletedProcess = subprocess.CompletedProcess[str] Popen = subprocess.Popen[str] else: CompletedProcess = subprocess.CompletedProcess Popen = subprocess.Popen # Borrowed from https://github.com/python/typeshed/blob/3d14016085aed8bcf0cf67e9e5a70790ce1ad8ea/stdlib/3/subprocess.pyi#L24 _FILE = Union[None, int, IO[Any]] PathString = Union[Path, str] # Borrowed from # https://github.com/python/typeshed/blob/ec52bf1adde1d3183d0595d2ba982589df48dff1/stdlib/_typeshed/__init__.pyi#L19 # and # https://github.com/python/typeshed/blob/ec52bf1adde1d3183d0595d2ba982589df48dff1/stdlib/_typeshed/__init__.pyi#L224 _T_co = TypeVar("_T_co", covariant=True) class SupportsRead(Protocol[_T_co]): def read(self, __length: int = ...) -> _T_co: ... 
mkosi-24.3/mkosi/user.py000066400000000000000000000207041465176501400152520ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import ctypes import ctypes.util import fcntl import functools import logging import os import pwd import tempfile from collections.abc import Sequence from pathlib import Path from mkosi.log import die from mkosi.run import run, spawn from mkosi.util import flock, parents_below SUBRANGE = 65536 class INVOKING_USER: uid = int(os.getenv("SUDO_UID") or os.getenv("PKEXEC_UID") or os.getuid()) gid = int(os.getenv("SUDO_GID") or os.getgid()) invoked_as_root = os.getuid() == 0 @classmethod def init(cls) -> None: name = cls.name() home = cls.home() extra_groups = cls.extra_groups() logging.debug( f"Running as user '{name}' ({cls.uid}:{cls.gid}) with home {home} " f"and extra groups {extra_groups}." ) @classmethod def is_running_user(cls) -> bool: return cls.uid == os.getuid() @classmethod @functools.lru_cache(maxsize=1) def name(cls) -> str: try: return pwd.getpwuid(cls.uid).pw_name except KeyError: if cls.uid == 0: return "root" if not (user := os.getenv("USER")): die(f"Could not find user name for UID {cls.uid}") return user @classmethod @functools.lru_cache(maxsize=1) def home(cls) -> Path: if cls.invoked_as_root and Path.cwd().is_relative_to("/home") and len(Path.cwd().parents) > 2: return list(Path.cwd().parents)[-3] try: return Path(pwd.getpwuid(cls.uid).pw_dir or "/") except KeyError: if not (home := os.getenv("HOME")): die(f"Could not find home directory for UID {cls.uid}") return Path(home) @classmethod @functools.lru_cache(maxsize=1) def extra_groups(cls) -> Sequence[int]: return os.getgrouplist(cls.name(), cls.gid) @classmethod def is_regular_user(cls) -> bool: return cls.uid >= 1000 @classmethod def cache_dir(cls) -> Path: if (env := os.getenv("XDG_CACHE_HOME")) or (env := os.getenv("CACHE_DIRECTORY")): cache = Path(env) elif ( cls.is_regular_user() and INVOKING_USER.home() != Path("/") and (Path.cwd().is_relative_to(INVOKING_USER.home()) or not cls.invoked_as_root) ): cache = INVOKING_USER.home() / ".cache" else: cache = Path("/var/cache") return cache / "mkosi" @classmethod def runtime_dir(cls) -> Path: if (env := os.getenv("XDG_RUNTIME_DIR")) or (env := os.getenv("RUNTIME_DIRECTORY")): d = Path(env) elif cls.is_regular_user(): d = Path("/run/user") / str(cls.uid) else: d = Path("/run") return d / "mkosi" @classmethod def rchown(cls, path: Path) -> None: if cls.is_regular_user() and any(p.stat().st_uid == cls.uid for p in path.parents) and path.exists(): run(["chown", "--recursive", f"{INVOKING_USER.uid}:{INVOKING_USER.gid}", path]) @classmethod def chown(cls, path: Path) -> None: # If we created a file/directory in a parent directory owned by the invoking user, make sure the path and any # parent directories are owned by the invoking user as well. 
def is_valid_dir(path: Path) -> bool: return path.stat().st_uid == cls.uid or path in (Path("/tmp"), Path("/var/tmp")) if cls.is_regular_user() and (q := next((parent for parent in path.parents if is_valid_dir(parent)), None)): os.chown(path, INVOKING_USER.uid, INVOKING_USER.gid) for parent in parents_below(path, q): os.chown(parent, INVOKING_USER.uid, INVOKING_USER.gid) def read_subrange(path: Path) -> int: uid = str(os.getuid()) try: user = pwd.getpwuid(os.getuid()).pw_name except KeyError: user = None for line in path.read_text().splitlines(): name, start, count = line.split(":") if name == uid or name == user: break else: die(f"No mapping found for {user or uid} in {path}") if int(count) < SUBRANGE: die( f"subuid/subgid range length must be at least {SUBRANGE}, " f"got {count} for {user or uid} from line '{line}'" ) return int(start) CLONE_NEWNS = 0x00020000 CLONE_NEWUSER = 0x10000000 def unshare(flags: int) -> None: libc_name = ctypes.util.find_library("c") if libc_name is None: die("Could not find libc") libc = ctypes.CDLL(libc_name, use_errno=True) if libc.unshare(ctypes.c_int(flags)) != 0: e = ctypes.get_errno() raise OSError(e, os.strerror(e)) def become_root() -> None: """ Set up a new user namespace mapping using /etc/subuid and /etc/subgid. The current user will be mapped to root and 65436 will be mapped to the UID/GID of the invoking user. The other IDs will be mapped through. The function modifies the uid, gid of the INVOKING_USER object to the uid, gid of the invoking user in the user namespace. """ if os.getuid() == 0: return subuid = read_subrange(Path("/etc/subuid")) subgid = read_subrange(Path("/etc/subgid")) pid = os.getpid() with tempfile.NamedTemporaryFile(prefix="mkosi-uidmap-lock-") as lockfile: lock = Path(lockfile.name) # We map the private UID range configured in /etc/subuid and /etc/subgid into the container using # newuidmap and newgidmap. On top of that, we also make sure to map in the user running mkosi so that # we can run still chown stuff to that user or run stuff as that user which will make sure any # generated files are owned by that user. We don't map to the last user in the range as the last user # is sometimes used in tests as a default value and mapping to that user might break those tests. newuidmap = [ "flock", "--exclusive", "--close", lock, "newuidmap", pid, 0, subuid, SUBRANGE - 100, SUBRANGE - 100, os.getuid(), 1, SUBRANGE - 100 + 1, subuid + SUBRANGE - 100 + 1, 99 ] newgidmap = [ "flock", "--exclusive", "--close", lock, "newgidmap", pid, 0, subgid, SUBRANGE - 100, SUBRANGE - 100, os.getgid(), 1, SUBRANGE - 100 + 1, subgid + SUBRANGE - 100 + 1, 99 ] newuidmap = [str(x) for x in newuidmap] newgidmap = [str(x) for x in newgidmap] # newuidmap and newgidmap have to run from outside the user namespace to be able to assign a uid mapping to the # process in the user namespace. The mapping can only be assigned after the user namespace has been unshared. # To make this work, we first lock a temporary file, then spawn the newuidmap and newgidmap processes, which we # execute using flock so they don't execute before they can get a lock on the same temporary file, then we # unshare the user namespace and finally we unlock the temporary file, which allows the newuidmap and newgidmap # processes to execute. we then wait for the processes to finish before continuing. 
with ( flock(lock) as fd, spawn(newuidmap, innerpid=False) as (uidmap, _), spawn(newgidmap, innerpid=False) as (gidmap, _) ): unshare(CLONE_NEWUSER) fcntl.flock(fd, fcntl.LOCK_UN) uidmap.wait() gidmap.wait() # By default, we're root in the user namespace because if we were our current user by default, we # wouldn't be able to chown stuff to be owned by root while the reverse is possible. os.setresuid(0, 0, 0) os.setresgid(0, 0, 0) os.setgroups([0]) INVOKING_USER.uid = SUBRANGE - 100 INVOKING_USER.gid = SUBRANGE - 100 def become_root_cmd() -> list[str]: if os.getuid() == 0: return [] subuid = read_subrange(Path("/etc/subuid")) subgid = read_subrange(Path("/etc/subgid")) cmd = [ "unshare", "--setuid", "0", "--setgid", "0", "--map-users", f"0:{subuid}:{SUBRANGE - 100}", "--map-users", f"{SUBRANGE - 100}:{os.getuid()}:1", "--map-users", f"{SUBRANGE - 100 + 1}:{subuid + SUBRANGE - 100 + 1}:99", "--map-groups", f"0:{subgid}:{SUBRANGE - 100}", "--map-groups", f"{SUBRANGE - 100}:{os.getgid()}:1", "--map-groups", f"{SUBRANGE - 100 + 1}:{subgid + SUBRANGE - 100 + 1}:99", "--keep-caps", ] return [str(x) for x in cmd] mkosi-24.3/mkosi/util.py000066400000000000000000000230261465176501400152510ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import ast import contextlib import copy import enum import errno import fcntl import functools import hashlib import importlib import importlib.resources import itertools import logging import os import re import resource import stat import tempfile from collections.abc import Hashable, Iterable, Iterator, Mapping, Sequence from pathlib import Path from types import ModuleType from typing import Any, Callable, Optional, TypeVar, no_type_check from mkosi.log import die from mkosi.types import PathString T = TypeVar("T") V = TypeVar("V") S = TypeVar("S", bound=Hashable) def dictify(f: Callable[..., Iterator[tuple[T, V]]]) -> Callable[..., dict[T, V]]: def wrapper(*args: Any, **kwargs: Any) -> dict[T, V]: return dict(f(*args, **kwargs)) return functools.update_wrapper(wrapper, f) def listify(f: Callable[..., Iterable[T]]) -> Callable[..., list[T]]: def wrapper(*args: Any, **kwargs: Any) -> list[T]: return list(f(*args, **kwargs)) return functools.update_wrapper(wrapper, f) def tuplify(f: Callable[..., Iterable[T]]) -> Callable[..., tuple[T, ...]]: def wrapper(*args: Any, **kwargs: Any) -> tuple[T, ...]: return tuple(f(*args, **kwargs)) return functools.update_wrapper(wrapper, f) def one_zero(b: bool) -> str: return "1" if b else "0" def is_power_of_2(x: int) -> bool: return x > 0 and (x & x - 1 == 0) def round_up(x: int, blocksize: int = 4096) -> int: return (x + blocksize - 1) // blocksize * blocksize def startswith(s: str, prefix: str) -> Optional[str]: if s.startswith(prefix): return s.removeprefix(prefix) return None @dictify def read_env_file(path: PathString) -> Iterator[tuple[str, str]]: with Path(path).open() as f: for line_number, line in enumerate(f, start=1): line = line.rstrip() if not line or line.startswith("#"): continue if m := re.match(r"([A-Z][A-Z_0-9]+)=(.*)", line): name, val = m.groups() if val and val[0] in "\"'": val = ast.literal_eval(val) yield name, val else: logging.info(f"{path}:{line_number}: bad line {line!r}") def format_rlimit(rlimit: int) -> str: limits = resource.getrlimit(rlimit) soft = "infinity" if limits[0] == resource.RLIM_INFINITY else str(limits[0]) hard = "infinity" if limits[1] == resource.RLIM_INFINITY else str(limits[1]) return f"{soft}:{hard}" def sort_packages(packages: Iterable[str]) -> 
list[str]: """Sorts packages: normal first, paths second, conditional third""" m = {"(": 2, "/": 1} return sorted(packages, key=lambda name: (m.get(name[0], 0), name)) def flatten(lists: Iterable[Iterable[T]]) -> list[T]: """Flatten a sequence of sequences into a single list.""" return list(itertools.chain.from_iterable(lists)) @contextlib.contextmanager def chdir(directory: PathString) -> Iterator[None]: old = Path.cwd() if old == directory: yield return try: os.chdir(directory) yield finally: os.chdir(old) def make_executable(*paths: Path) -> None: for path in paths: st = path.stat() os.chmod(path, st.st_mode | stat.S_IEXEC) @contextlib.contextmanager def flock(path: Path, flags: int = fcntl.LOCK_EX) -> Iterator[int]: fd = os.open(path, os.O_CLOEXEC|os.O_RDONLY) try: fcntl.fcntl(fd, fcntl.FD_CLOEXEC) logging.debug(f"Acquiring lock on {path}") fcntl.flock(fd, flags) logging.debug(f"Acquired lock on {path}") yield fd finally: os.close(fd) @contextlib.contextmanager def flock_or_die(path: Path) -> Iterator[Path]: try: with flock(path, fcntl.LOCK_EX|fcntl.LOCK_NB): yield path except OSError as e: if e.errno != errno.EWOULDBLOCK: raise e die(f"Cannot lock {path} as it is locked by another process", hint="Maybe another mkosi process is still using it? Use Ephemeral=yes to enable booting multiple " "instances of the same image") @contextlib.contextmanager def scopedenv(env: Mapping[str, Any]) -> Iterator[None]: old = copy.deepcopy(os.environ) os.environ |= env # python caches the default temporary directory so when we might modify TMPDIR we have to make sure it # gets recalculated (see https://docs.python.org/3/library/tempfile.html#tempfile.tempdir). tempfile.tempdir = None try: yield finally: os.environ = old tempfile.tempdir = None class StrEnum(enum.Enum): def __str__(self) -> str: assert isinstance(self.value, str) return self.value # Used by enum.auto() to get the next value. @staticmethod def _generate_next_value_(name: str, start: int, count: int, last_values: Sequence[str]) -> str: return name.replace("_", "-") @classmethod def values(cls) -> list[str]: return list(s.replace("_", "-") for s in map(str, cls.__members__)) @classmethod def choices(cls) -> list[str]: return [*cls.values(), ""] @contextlib.contextmanager def umask(mask: int) -> Iterator[None]: old = os.umask(mask) try: yield finally: os.umask(old) def parents_below(path: Path, below: Path) -> list[Path]: parents = list(path.parents) return parents[:parents.index(below)] @contextlib.contextmanager def resource_path(mod: ModuleType) -> Iterator[Path]: # We backport as_file() from python 3.12 here temporarily since it added directory support. # TODO: Remove once minimum python version is 3.12. # SPDX-License-Identifier: PSF-2.0 # Copied from https://github.com/python/cpython/blob/main/Lib/importlib/resources/_common.py @no_type_check @contextlib.contextmanager def _tempfile( reader, suffix='', # gh-93353: Keep a reference to call os.remove() in late Python # finalization. *, _os_remove=os.remove, ): # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try' # blocks due to the need to close the temporary file to work on Windows # properly. 
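        # (Aside, illustration only: the as_file() backport assembled here is what resource_path()
        # uses to materialise packaged resources on the file system. A hypothetical caller, using the
        # mkosi.resources package that pyproject.toml declares:
        #
        #   import mkosi.resources
        #   from mkosi.util import resource_path
        #
        #   with resource_path(mkosi.resources) as p:
        #       print((p / "mkosi.md").read_text())
        #
        # The exact file name is illustrative; see [tool.setuptools.package-data] for what ships in
        # that package.)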
        fd, raw_path = tempfile.mkstemp(suffix=suffix)
        try:
            try:
                os.write(fd, reader())
            finally:
                os.close(fd)
            del reader
            yield Path(raw_path)
        finally:
            try:
                _os_remove(raw_path)
            except FileNotFoundError:
                pass

    @no_type_check
    def _temp_file(path):
        return _tempfile(path.read_bytes, suffix=path.name)

    @no_type_check
    def _is_present_dir(path) -> bool:
        """
        Some Traversables implement ``is_dir()`` to raise an exception (i.e. ``FileNotFoundError``)
        when the directory doesn't exist. This function wraps that call to always return a boolean
        and only return True if there's a dir and it exists.
        """
        with contextlib.suppress(FileNotFoundError):
            return path.is_dir()
        return False

    @no_type_check
    @functools.singledispatch
    def as_file(path):
        """
        Given a Traversable object, return that object as a path on the local file system in a
        context manager.
        """
        return _temp_dir(path) if _is_present_dir(path) else _temp_file(path)

    @no_type_check
    @contextlib.contextmanager
    def _temp_path(dir: tempfile.TemporaryDirectory):
        """
        Wrap tempfile.TemporaryDirectory to return a pathlib object.
        """
        with dir as result:
            yield Path(result)

    @no_type_check
    @contextlib.contextmanager
    def _temp_dir(path):
        """
        Given a traversable dir, recursively replicate the whole tree to the file system in a
        context manager.
        """
        assert path.is_dir()
        with _temp_path(tempfile.TemporaryDirectory()) as temp_dir:
            yield _write_contents(temp_dir, path)

    @no_type_check
    def _write_contents(target, source):
        child = target.joinpath(source.name)
        if source.is_dir():
            child.mkdir()
            for item in source.iterdir():
                _write_contents(child, item)
        else:
            child.write_bytes(source.read_bytes())
        return child

    t = importlib.resources.files(mod)

    with as_file(t) as p:
        # Make sure any temporary directory that the resources are unpacked in is accessible to the invoking user so
        # that any commands executed as the invoking user can access files within it.
        if (
            p.parent.parent == Path(os.getenv("TMPDIR", "/tmp")) and
            stat.S_IMODE(p.parent.stat().st_mode) == 0o700
        ):
            p.parent.chmod(0o755)

        yield p


def hash_file(path: Path) -> str:
    # TODO Replace with hashlib.file_digest after dropping support for Python 3.10.
    h = hashlib.sha256()
    b = bytearray(16 * 1024**2)
    mv = memoryview(b)

    with path.open("rb", buffering=0) as f:
        while n := f.readinto(mv):
            h.update(mv[:n])

    return h.hexdigest()


def try_or(fn: Callable[..., T], exception: type[Exception], default: T) -> T:
    try:
        return fn()
    except exception:
        return default


def groupby(seq: Sequence[T], key: Callable[[T], S]) -> list[tuple[S, list[T]]]:
    grouped: dict[S, list[T]] = {}

    for i in seq:
        k = key(i)

        if k not in grouped:
            grouped[k] = []

        grouped[k].append(i)

    return [(key, group) for key, group in grouped.items()]
mkosi-24.3/mkosi/versioncomp.py000066400000000000000000000154611465176501400166440ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later
import functools
import itertools
import string


@functools.total_ordering
class GenericVersion:
    # These constants follow the convention of the return value of rpmdev-vercmp, which is also
    # followed by systemd-analyze compare-versions when called with only two arguments (without a
    # comparison operator), recreated in the compare_versions method.
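
    # Ordering produced by these rules, illustrated (pairs chosen to exercise the branches
    # implemented below):
    #
    #   GenericVersion("124")     > GenericVersion("123")   # numeric comparison
    #   GenericVersion("123~rc1") < GenericVersion("123")   # "~" sorts lowest, even below the end of the string
    #   GenericVersion("123.1")   > GenericVersion("123")   # the string with characters remaining compares higher
    #   GenericVersion("123A")    < GenericVersion("123a")  # capital letters compare lower than lower-case ones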
    _EQUAL = 0
    _RIGHT_SMALLER = 1
    _LEFT_SMALLER = -1

    def __init__(self, version: str):
        self._version = version

    @classmethod
    def compare_versions(cls, v1: str, v2: str) -> int:
        """Implements comparison according to UAPI Group Version Format Specification"""
        def rstrip_invalid_version_chars(s: str) -> str:
            valid_version_chars = {*string.ascii_letters, *string.digits, "~", "-", "^", "."}

            for i, c in enumerate(s):
                if c in valid_version_chars:
                    return s[i:]

            return ""

        def digit_prefix(s: str) -> str:
            return "".join(itertools.takewhile(lambda c: c in string.digits, s))

        def letter_prefix(s: str) -> str:
            return "".join(itertools.takewhile(lambda c: c in string.ascii_letters, s))

        while True:
            # Any characters which are outside of the set listed above (a-z, A-Z, 0-9, -, ., ~,
            # ^) are skipped in both strings. In particular, this means that non-ASCII characters
            # that are Unicode digits or letters are skipped too.
            v1 = rstrip_invalid_version_chars(v1)
            v2 = rstrip_invalid_version_chars(v2)

            # If the remaining part of one of the strings starts with "~": if the other remaining
            # part does not start with ~, the string with ~ compares lower. Otherwise, both tilde
            # characters are skipped.
            if v1.startswith("~") and v2.startswith("~"):
                v1 = v1.removeprefix("~")
                v2 = v2.removeprefix("~")
            elif v1.startswith("~"):
                return cls._LEFT_SMALLER
            elif v2.startswith("~"):
                return cls._RIGHT_SMALLER

            # If one of the strings has ended: if the other string hasn’t, the string that has
            # remaining characters compares higher. Otherwise, the strings compare equal.
            if not v1 and not v2:
                return cls._EQUAL
            elif not v1 and v2:
                return cls._LEFT_SMALLER
            elif v1 and not v2:
                return cls._RIGHT_SMALLER

            # If the remaining part of one of the strings starts with "-": if the other remaining
            # part does not start with -, the string with - compares lower. Otherwise, both minus
            # characters are skipped.
            if v1.startswith("-") and v2.startswith("-"):
                v1 = v1.removeprefix("-")
                v2 = v2.removeprefix("-")
            elif v1.startswith("-"):
                return cls._LEFT_SMALLER
            elif v2.startswith("-"):
                return cls._RIGHT_SMALLER

            # If the remaining part of one of the strings starts with "^": if the other remaining
            # part does not start with ^, the string with ^ compares higher. Otherwise, both caret
            # characters are skipped.
            if v1.startswith("^") and v2.startswith("^"):
                v1 = v1.removeprefix("^")
                v2 = v2.removeprefix("^")
            elif v1.startswith("^"):
                # TODO: this contradicts the spec text above, which says the string with "^" should
                # compare higher; kept as-is, presumably to match the behaviour of systemd-analyze
                # compare-versions.
                return cls._LEFT_SMALLER  # cls._RIGHT_SMALLER per the spec text
            elif v2.startswith("^"):
                return cls._RIGHT_SMALLER  # cls._LEFT_SMALLER per the spec text

            # If the remaining part of one of the strings starts with ".": if the other remaining
            # part does not start with ., the string with . compares lower. Otherwise, both dot
            # characters are skipped.
            if v1.startswith(".") and v2.startswith("."):
                v1 = v1.removeprefix(".")
                v2 = v2.removeprefix(".")
            elif v1.startswith("."):
                return cls._LEFT_SMALLER
            elif v2.startswith("."):
                return cls._RIGHT_SMALLER

            # If either of the remaining parts starts with a digit: numerical prefixes are compared
            # numerically. Any leading zeroes are skipped. The numerical prefixes (until the first
            # non-digit character) are evaluated as numbers. If one of the prefixes is empty, it
            # evaluates as 0. If the numbers are different, the string with the bigger number
            # compares higher. Otherwise, the comparison continues at the following characters at
            # point 1.
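            # Worked example of this numeric rule (illustration): comparing "1.010" with "1.9" strips
            # the common "1" and ".", then evaluates the digit prefixes "010" and "9" as the numbers
            # 10 and 9 via int(), which is also what implicitly skips the leading zeroes, so "1.010"
            # compares higher. Comparing "1." with "1.2" leaves "" and "2" after the dots are
            # skipped; the empty digit prefix evaluates as 0, so "1." compares lower.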
v1_digit_prefix = digit_prefix(v1) v2_digit_prefix = digit_prefix(v2) if v1_digit_prefix or v2_digit_prefix: v1_digits = int(v1_digit_prefix) if v1_digit_prefix else 0 v2_digits = int(v2_digit_prefix) if v2_digit_prefix else 0 if v1_digits < v2_digits: return cls._LEFT_SMALLER elif v1_digits > v2_digits: return cls._RIGHT_SMALLER v1 = v1.removeprefix(v1_digit_prefix) v2 = v2.removeprefix(v2_digit_prefix) continue # Leading alphabetical prefixes are compared alphabetically. The substrings are # compared letter-by-letter. If both letters are the same, the comparison continues # with the next letter. Capital letters compare lower than lower-case letters (A < # a). When the end of one substring has been reached (a non-letter character or the end # of the whole string), if the other substring has remaining letters, it compares # higher. Otherwise, the comparison continues at the following characters at point 1. v1_letter_prefix = letter_prefix(v1) v2_letter_prefix = letter_prefix(v2) if v1_letter_prefix < v2_letter_prefix: return cls._LEFT_SMALLER elif v1_letter_prefix > v2_letter_prefix: return cls._RIGHT_SMALLER v1 = v1.removeprefix(v1_letter_prefix) v2 = v2.removeprefix(v2_letter_prefix) def __eq__(self, other: object) -> bool: if isinstance(other, (str, int)): other = GenericVersion(str(other)) elif not isinstance(other, GenericVersion): return False return self.compare_versions(self._version, other._version) == self._EQUAL def __lt__(self, other: object) -> bool: if isinstance(other, (str, int)): other = GenericVersion(str(other)) elif not isinstance(other, GenericVersion): return False return self.compare_versions(self._version, other._version) == self._LEFT_SMALLER def __str__(self) -> str: return self._version mkosi-24.3/mkosi/vmspawn.py000066400000000000000000000071621465176501400157720ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import os import sys from pathlib import Path from mkosi.config import ( Args, Config, Network, OutputFormat, QemuFirmware, yes_no, ) from mkosi.log import die from mkosi.mounts import finalize_source_mounts from mkosi.qemu import ( apply_runtime_size, copy_ephemeral, finalize_qemu_firmware, ) from mkosi.run import run from mkosi.types import PathString def run_vmspawn(args: Args, config: Config) -> None: if config.output_format not in (OutputFormat.disk, OutputFormat.esp, OutputFormat.directory): die(f"{config.output_format} images cannot be booted in systemd-vmspawn") if config.qemu_firmware == QemuFirmware.bios: die("systemd-vmspawn cannot boot BIOS firmware images") if config.qemu_cdrom: die("systemd-vmspawn does not support CD-ROM images") if config.qemu_firmware_variables and config.qemu_firmware_variables != Path("microsoft"): die("mkosi vmspawn does not support QemuFirmwareVariables=") kernel = config.qemu_kernel firmware = finalize_qemu_firmware(config, kernel) if not kernel and firmware == QemuFirmware.linux: kernel = config.output_dir_or_cwd() / config.output_split_kernel if not kernel.exists(): die( f"Kernel or UKI not found at {kernel}", hint="Please install a kernel in the image or provide a --qemu-kernel argument to mkosi vmspawn" ) cmdline: list[PathString] = [ "systemd-vmspawn", "--cpus", str(config.qemu_smp or os.cpu_count()), "--ram", str(config.qemu_mem), "--kvm", config.qemu_kvm.to_tristate(), "--vsock", config.qemu_vsock.to_tristate(), "--tpm", config.qemu_swtpm.to_tristate(), "--secure-boot", yes_no(config.secure_boot), ] if config.runtime_network == Network.user: cmdline += 
["--network-user-mode"] elif config.runtime_network == Network.interface: cmdline += ["--network-tap"] if config.qemu_gui: cmdline += ["--console=gui"] cmdline += [f"--set-credential={k}:{v}" for k, v in config.credentials.items()] with contextlib.ExitStack() as stack: fname = stack.enter_context(copy_ephemeral(config, config.output_dir_or_cwd() / config.output)) apply_runtime_size(config, fname) if config.runtime_build_sources: with finalize_source_mounts(config, ephemeral=False) as mounts: for mount in mounts: cmdline += ["--bind", f"{mount.src}:{mount.dst}"] if config.build_dir: cmdline += ["--bind", f"{config.build_dir}:/work/build"] for tree in config.runtime_trees: target = Path("/root/src") / (tree.target or "") cmdline += ["--bind", f"{tree.source}:{target}"] if kernel: cmdline += ["--linux", kernel] if config.output_format == OutputFormat.directory: cmdline += ["--directory", fname] owner = os.stat(fname).st_uid if owner != 0: cmdline += [f"--private-users={str(owner)}"] else: cmdline += ["--image", fname] if config.forward_journal: cmdline += ["--forward-journal", config.forward_journal] cmdline += [*args.cmdline, *config.kernel_command_line_extra] run( cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.environment, log=False, sandbox=config.sandbox(binary=cmdline[0], network=True, devices=True, relaxed=True), ) mkosi-24.3/pyproject.toml000066400000000000000000000034731465176501400155200ustar00rootroot00000000000000[build-system] requires = ["setuptools", "setuptools-scm"] build-backend = "setuptools.build_meta" [project] name = "mkosi" authors = [ {name = "mkosi contributors", email = "systemd-devel@lists.freedesktop.org"}, ] version = "24.3" description = "Build Bespoke OS Images" readme = "README.md" requires-python = ">=3.9" license = {file = "LICENSE"} [project.optional-dependencies] bootable = [ "pefile >= 2021.9.3", ] [project.scripts] mkosi = "mkosi.__main__:main" mkosi-initrd = "mkosi.initrd.__main__:main" [tool.setuptools] packages = [ "mkosi", "mkosi.distributions", "mkosi.initrd", "mkosi.initrd.resources", "mkosi.installer", "mkosi.resources", ] [tool.setuptools.package-data] "mkosi.resources" = ["repart/**/*", "mkosi.md", "mkosi.1", "mkosi-initrd/**/*", "mkosi-tools/**/*"] "mkosi.initrd.resources" = ["mkosi-initrd.md", "mkosi-initrd.1"] [tool.isort] profile = "black" include_trailing_comma = true multi_line_output = 3 py_version = "39" [tool.pyright] pythonVersion = "3.9" [tool.mypy] python_version = 3.9 # belonging to --strict warn_unused_configs = true disallow_any_generics = true disallow_subclassing_any = true disallow_untyped_calls = true disallow_untyped_defs = true disallow_untyped_decorators = true disallow_incomplete_defs = true check_untyped_defs = true no_implicit_optional = true warn_redundant_casts = true warn_unused_ignores = false warn_return_any = true no_implicit_reexport = true # extra options not in --strict pretty = true show_error_codes = true show_column_numbers = true warn_unreachable = true allow_redefinition = true strict_equality = true [tool.ruff] target-version = "py39" line-length = 119 lint.select = ["E", "F", "I", "UP"] [tool.pytest.ini_options] markers = [ "integration: mark a test as an integration test." 
] addopts = "-m \"not integration\"" mkosi-24.3/tests/000077500000000000000000000000001465176501400137375ustar00rootroot00000000000000mkosi-24.3/tests/.gitignore000066400000000000000000000000071465176501400157240ustar00rootroot00000000000000/*.pyc mkosi-24.3/tests/__init__.py000066400000000000000000000146201465176501400160530ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import dataclasses import os import subprocess import sys import uuid from collections.abc import Iterator, Sequence from pathlib import Path from types import TracebackType from typing import Any, Optional import pytest from mkosi.config import finalize_term from mkosi.distributions import Distribution from mkosi.run import run from mkosi.types import _FILE, CompletedProcess, PathString from mkosi.user import INVOKING_USER @dataclasses.dataclass(frozen=True) class ImageConfig: distribution: Distribution release: str tools_tree_distribution: Optional[Distribution] tools_tree_release: Optional[str] debug_shell: bool class Image: def __init__(self, config: ImageConfig, options: Sequence[PathString] = []) -> None: self.options = options self.config = config def __enter__(self) -> "Image": self.output_dir = Path(os.getenv("TMPDIR", "/var/tmp")) / uuid.uuid4().hex[:16] return self def __exit__( self, type: Optional[type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: self.mkosi("clean", user=INVOKING_USER.uid, group=INVOKING_USER.gid) def mkosi( self, verb: str, options: Sequence[PathString] = (), args: Sequence[str] = (), stdin: _FILE = None, user: Optional[int] = None, group: Optional[int] = None, check: bool = True, ) -> CompletedProcess: kcl = [ f"TERM={finalize_term()}", "loglevel=6", "systemd.crash_shell", "systemd.log_level=debug", "udev.log_level=info", "systemd.log_ratelimit_kmsg=0", "systemd.show_status=false", "systemd.journald.forward_to_console", "systemd.journald.max_level_console=info", "printk.devkmsg=on", "systemd.early_core_pattern=/core", "systemd.firstboot=no", ] return run([ "python3", "-m", "mkosi", "--distribution", str(self.config.distribution), "--release", self.config.release, *(["--tools-tree=default"] if self.config.tools_tree_distribution else []), *( ["--tools-tree-distribution", str(self.config.tools_tree_distribution)] if self.config.tools_tree_distribution else [] ), *(["--tools-tree-release", self.config.tools_tree_release] if self.config.tools_tree_release else []), *self.options, *options, "--output-dir", self.output_dir, # Some tests ignore the default image config but we still want them to reuse the cache directory for the # tools tree cache. "--cache-dir", "mkosi.cache", *(f"--kernel-command-line={i}" for i in kcl), "--qemu-vsock=yes", # TODO: Drop once both Hyper-V bugs are fixed in Github Actions. 
"--qemu-args=-cpu max,pcid=off", "--qemu-mem=2G", verb, *args, ], check=check, stdin=stdin, stdout=sys.stdout, user=user, group=group) def build(self, options: Sequence[str] = (), args: Sequence[str] = ()) -> CompletedProcess: return self.mkosi( "build", [*options, "--debug", "--force", *(["--debug-shell"] if self.config.debug_shell else [])], args, stdin=sys.stdin if sys.stdin.isatty() else None, user=INVOKING_USER.uid, group=INVOKING_USER.gid, ) def boot(self, options: Sequence[str] = (), args: Sequence[str] = ()) -> CompletedProcess: result = self.mkosi( "boot", [*options, "--debug"], args, stdin=sys.stdin if sys.stdin.isatty() else None, check=False, ) if result.returncode != 123: raise subprocess.CalledProcessError(result.returncode, result.args, result.stdout, result.stderr) return result def qemu(self, options: Sequence[str] = (), args: Sequence[str] = ()) -> CompletedProcess: result = self.mkosi( "qemu", [*options, "--debug"], args, stdin=sys.stdin if sys.stdin.isatty() else None, user=INVOKING_USER.uid, group=INVOKING_USER.gid, check=False, ) rc = 0 if self.config.distribution.is_centos_variant() else 123 if result.returncode != rc: raise subprocess.CalledProcessError(result.returncode, result.args, result.stdout, result.stderr) return result def vmspawn(self, options: Sequence[str] = (), args: Sequence[str] = ()) -> CompletedProcess: result = self.mkosi( "vmspawn", [*options, "--debug"], args, stdin=sys.stdin if sys.stdin.isatty() else None, check=False, ) rc = 0 if self.config.distribution.is_centos_variant() else 123 if result.returncode != rc: raise subprocess.CalledProcessError(result.returncode, result.args, result.stdout, result.stderr) return result def summary(self, options: Sequence[str] = ()) -> CompletedProcess: return self.mkosi("summary", options, user=INVOKING_USER.uid, group=INVOKING_USER.gid) def genkey(self) -> CompletedProcess: return self.mkosi("genkey", ["--force"], user=INVOKING_USER.uid, group=INVOKING_USER.gid) @pytest.fixture(scope="session", autouse=True) def suspend_capture_stdin(pytestconfig: Any) -> Iterator[None]: """ When --capture=no (or -s) is specified, pytest will still intercept stdin. Let's explicitly make it not capture stdin when --capture=no is specified so we can debug image boot failures by logging into the emergency shell. """ capmanager: Any = pytestconfig.pluginmanager.getplugin("capturemanager") if pytestconfig.getoption("capture") == "no": capmanager.suspend_global_capture(in_=True) yield if pytestconfig.getoption("capture") == "no": capmanager.resume_global_capture() @contextlib.contextmanager def ci_group(s: str) -> Iterator[None]: github_actions = os.getenv("GITHUB_ACTIONS") if github_actions: print(f"\n::group::{s}", flush=True) try: yield finally: if github_actions: print("\n::endgroup::", flush=True) mkosi-24.3/tests/conftest.py000066400000000000000000000041031465176501400161340ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterator from typing import Any, cast import pytest from mkosi.config import parse_config from mkosi.distributions import Distribution, detect_distribution from . 
import ImageConfig, ci_group def pytest_addoption(parser: Any) -> None: parser.addoption( "-D", "--distribution", metavar="DISTRIBUTION", help="Run the integration tests for the given distribution.", default=detect_distribution()[0], type=Distribution, choices=[Distribution(d) for d in Distribution.values()], ) parser.addoption( "-R", "--release", metavar="RELEASE", help="Run the integration tests for the given release.", ) parser.addoption( "-T", "--tools-tree-distribution", metavar="DISTRIBUTION", help="Use the given tools tree distribution to build the integration test images", type=Distribution, choices=[Distribution(d) for d in Distribution.values()], ) parser.addoption( "--tools-tree-release", metavar="RELEASE", help="Use the given tools tree release instead of the default one", ) parser.addoption( "--debug-shell", help="Pass --debug-shell when running mkosi", action="store_true", ) @pytest.fixture(scope="session") def config(request: Any) -> ImageConfig: distribution = cast(Distribution, request.config.getoption("--distribution")) release = cast(str, request.config.getoption("--release") or parse_config(["-d", str(distribution)])[1][0].release) return ImageConfig( distribution=distribution, release=release, tools_tree_distribution=cast(Distribution, request.config.getoption("--tools-tree-distribution")), tools_tree_release=request.config.getoption("--tools-tree-release"), debug_shell=request.config.getoption("--debug-shell"), ) @pytest.fixture(autouse=True) def ci_sections(request: Any) -> Iterator[None]: with ci_group(request.node.name): yield mkosi-24.3/tests/test_boot.py000066400000000000000000000075531465176501400163250ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os import subprocess import pytest from mkosi.config import Bootloader, OutputFormat, QemuFirmware from mkosi.distributions import Distribution from mkosi.qemu import find_virtiofsd from mkosi.run import find_binary, run from mkosi.versioncomp import GenericVersion from . import Image, ImageConfig pytestmark = pytest.mark.integration def have_vmspawn() -> bool: return ( find_binary("systemd-vmspawn") is not None and GenericVersion(run(["systemd-vmspawn", "--version"], stdout=subprocess.PIPE).stdout.strip()) >= 256 ) @pytest.mark.parametrize("format", [f for f in OutputFormat if f not in (OutputFormat.confext, OutputFormat.sysext)]) def test_format(config: ImageConfig, format: OutputFormat) -> None: with Image( config, options=[ "--kernel-command-line=systemd.unit=mkosi-check-and-shutdown.service", "--incremental", "--ephemeral", ], ) as image: if image.config.distribution == Distribution.rhel_ubi and format in (OutputFormat.esp, OutputFormat.uki): pytest.skip("Cannot build RHEL-UBI images with format 'esp' or 'uki'") options = ["--format", str(format)] image.summary(options) image.genkey() image.build(options=options) if format in (OutputFormat.disk, OutputFormat.directory) and os.getuid() == 0: # systemd-resolved is enabled by default in Arch/Debian/Ubuntu (systemd default preset) but fails # to start in a systemd-nspawn container with --private-users so we mask it out here to avoid CI # failures. 
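            # (Illustration: image.boot(args=...) forwards each entry after the mkosi verb - see the
            # mkosi() helper in tests/__init__.py above - so the mask below reaches the container's
            # systemd, which parses such options from its command line just like from the kernel
            # command line.)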
# FIXME: Remove when Arch/Debian/Ubuntu ship systemd v253 args = ["systemd.mask=systemd-resolved.service"] if format == OutputFormat.directory else [] image.boot(options=options, args=args) if format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp): pytest.skip("Default image is too large to be able to boot in CPIO/UKI/ESP format") if image.config.distribution == Distribution.rhel_ubi: return if format in (OutputFormat.tar, OutputFormat.oci, OutputFormat.none) or format.is_extension_image(): return if format == OutputFormat.directory and not find_virtiofsd(): return image.qemu(options=options) if have_vmspawn() and format in (OutputFormat.disk, OutputFormat.directory): image.vmspawn(options=options) # TODO: Remove the opensuse check again when https://bugzilla.opensuse.org/show_bug.cgi?id=1227464 is resolved # and we install the grub tools in the openSUSE tools tree again. if format != OutputFormat.disk or config.tools_tree_distribution == Distribution.opensuse: return image.qemu(options=options + ["--qemu-firmware=bios"]) @pytest.mark.parametrize("bootloader", Bootloader) def test_bootloader(config: ImageConfig, bootloader: Bootloader) -> None: if config.distribution == Distribution.rhel_ubi: return # TODO: Remove this again when https://bugzilla.opensuse.org/show_bug.cgi?id=1227464 is resolved and we install # the grub tools in the openSUSE tools tree again. if bootloader == Bootloader.grub and config.tools_tree_distribution == Distribution.opensuse: return firmware = QemuFirmware.linux if bootloader == Bootloader.none else QemuFirmware.auto with Image( config, options=[ "--kernel-command-line=systemd.unit=mkosi-check-and-shutdown.service", "--incremental", "--ephemeral", "--format=disk", "--bootloader", str(bootloader), "--qemu-firmware", str(firmware) ], ) as image: image.summary() image.genkey() image.build() image.qemu() mkosi-24.3/tests/test_config.py000066400000000000000000001035361465176501400166250ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import argparse import itertools import logging import operator import os from pathlib import Path from typing import Optional import pytest from mkosi import expand_kernel_specifiers from mkosi.config import ( Architecture, Compression, Config, ConfigFeature, ConfigTree, OutputFormat, Verb, config_parse_bytes, parse_config, parse_ini, ) from mkosi.distributions import Distribution, detect_distribution from mkosi.util import chdir def test_compression_enum_creation() -> None: assert Compression["none"] == Compression.none assert Compression["zstd"] == Compression.zstd assert Compression["zst"] == Compression.zstd assert Compression["xz"] == Compression.xz assert Compression["bz2"] == Compression.bz2 assert Compression["gz"] == Compression.gz assert Compression["lz4"] == Compression.lz4 assert Compression["lzma"] == Compression.lzma def test_compression_enum_bool() -> None: assert not bool(Compression.none) assert bool(Compression.zstd) assert bool(Compression.xz) assert bool(Compression.bz2) assert bool(Compression.gz) assert bool(Compression.lz4) assert bool(Compression.lzma) def test_compression_enum_str() -> None: assert str(Compression.none) == "none" assert str(Compression.zstd) == "zstd" assert str(Compression.zst) == "zstd" assert str(Compression.xz) == "xz" assert str(Compression.bz2) == "bz2" assert str(Compression.gz) == "gz" assert str(Compression.lz4) == "lz4" assert str(Compression.lzma) == "lzma" def test_parse_ini(tmp_path: Path) -> None: p = tmp_path / "ini" p.write_text( """\ [MySection] 
Value=abc Other=def ALLCAPS=txt # Comment ; Another comment [EmptySection] [AnotherSection] EmptyValue= Multiline=abc def qed ord """ ) g = parse_ini(p) assert next(g) == ("MySection", "Value", "abc") assert next(g) == ("MySection", "Other", "def") assert next(g) == ("MySection", "ALLCAPS", "txt") assert next(g) == ("MySection", "", "") assert next(g) == ("EmptySection", "", "") assert next(g) == ("AnotherSection", "EmptyValue", "") assert next(g) == ("AnotherSection", "Multiline", "abc\ndef\nqed\nord") def test_parse_config(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Distribution] Distribution=ubuntu Architecture=arm64 Repositories=epel,epel-next [Content] Packages=abc Environment=MY_KEY=MY_VALUE [Output] Format=cpio ImageId=base [Host] Credentials=my.cred=my.value """ ) with chdir(d): _, [config] = parse_config() assert config.distribution == Distribution.ubuntu assert config.architecture == Architecture.arm64 assert config.packages == ["abc"] assert config.output_format == OutputFormat.cpio assert config.image_id == "base" with chdir(d): _, [config] = parse_config( [ "--distribution", "fedora", "--environment", "MY_KEY=CLI_VALUE", "--credential", "my.cred=cli.value", "--repositories", "universe", ] ) # Values from the CLI should take priority. assert config.distribution == Distribution.fedora assert config.environment["MY_KEY"] == "CLI_VALUE" assert config.credentials["my.cred"] == "cli.value" assert config.repositories == ["epel", "epel-next", "universe"] with chdir(d): _, [config] = parse_config( [ "--distribution", "", "--environment", "", "--credential", "", "--repositories", "", ] ) # Empty values on the CLIs resets non-collection based settings to their defaults and collection based settings to # empty collections. assert config.distribution == (detect_distribution()[0] or Distribution.custom) assert "MY_KEY" not in config.environment assert "my.cred" not in config.credentials assert config.repositories == [] (d / "mkosi.conf.d").mkdir() (d / "mkosi.conf.d/d1.conf").write_text( """\ [Distribution] Distribution=debian [Content] Packages=qed def [Output] ImageId=00-dropin ImageVersion=0 @Output=abc """ ) with chdir(d): _, [config] = parse_config(["--package", "last"]) # Setting a value explicitly in a dropin should override the default from mkosi.conf. assert config.distribution == Distribution.debian # Lists should be merged by appending the new values to the existing values. Any values from the CLI should be # appended to the values from the configuration files. assert config.packages == ["abc", "qed", "def", "last"] assert config.output_format == OutputFormat.cpio assert config.image_id == "00-dropin" assert config.image_version == "0" # '@' specifier should be automatically dropped. assert config.output == "abc" (d / "mkosi.version").write_text("1.2.3") (d / "mkosi.conf.d/d2.conf").write_text( """\ [Content] Packages= [Output] ImageId= """ ) with chdir(d): _, [config] = parse_config() # Test that empty assignment resets settings. assert config.packages == [] assert config.image_id is None # mkosi.version should only be used if no version is set explicitly. assert config.image_version == "0" (d / "mkosi.conf.d/d1.conf").unlink() with chdir(d): _, [config] = parse_config() # ImageVersion= is not set explicitly anymore, so now the version from mkosi.version should be used. 
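        # (Summary of the precedence pinned down so far, for illustration: CLI arguments override
        # mkosi.conf.d/ drop-ins, drop-ins override mkosi.conf, list-valued settings append rather
        # than override, an explicit empty assignment resets a setting, and mkosi.version is only
        # consulted when ImageVersion= is not set anywhere else. Hypothetical extra check:
        #
        #   _, [config] = parse_config(["--image-version", "9.9"])
        #   assert config.image_version == "9.9"  # CLI still beats mkosi.version
        # )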
assert config.image_version == "1.2.3" (d / "abc").mkdir() (d / "abc/mkosi.conf").write_text( """\ [Content] Bootable=yes BuildPackages=abc """ ) (d / "abc/mkosi.conf.d").mkdir() (d / "abc/mkosi.conf.d/abc.conf").write_text( """\ [Output] SplitArtifacts=yes """ ) with chdir(d): _, [config] = parse_config() assert config.bootable == ConfigFeature.auto assert config.split_artifacts is False # Passing the directory should include both the main config file and the dropin. _, [config] = parse_config(["--include", os.fspath(d / "abc")] * 2) assert config.bootable == ConfigFeature.enabled assert config.split_artifacts is True # The same extra config should not be parsed more than once. assert config.build_packages == ["abc"] # Passing the main config file should not include the dropin. _, [config] = parse_config(["--include", os.fspath(d / "abc/mkosi.conf")]) assert config.bootable == ConfigFeature.enabled assert config.split_artifacts is False (d / "mkosi.images").mkdir() for n in ("one", "two"): (d / "mkosi.images" / f"{n}.conf").write_text( f"""\ [Distribution] Repositories=append [Content] Packages={n} """ ) with chdir(d): _, [one, two, config] = parse_config(["--package", "qed", "--build-package", "def", "--repositories", "cli"]) # Universal settings should always come from the main image. assert one.distribution == config.distribution assert two.distribution == config.distribution assert one.release == config.release assert two.release == config.release # Non-universal settings should not be passed to the subimages. assert one.packages == ["one"] assert two.packages == ["two"] assert one.build_packages == [] assert two.build_packages == [] # But should apply to the main image of course. assert config.packages == ["qed"] assert config.build_packages == ["def"] # list based settings should be appended to in subimages assert one.repositories == ["append", "epel", "epel-next", "cli"] assert two.repositories == ["append", "epel", "epel-next", "cli"] def test_parse_includes_once(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Content] Bootable=yes BuildPackages=abc """ ) (d / "abc.conf").write_text( """\ [Content] BuildPackages=def """ ) with chdir(d): _, [config] = parse_config(["--include", "abc.conf", "--include", "abc.conf"]) assert config.build_packages == ["def", "abc"] (d / "mkosi.images").mkdir() for n in ("one", "two"): (d / "mkosi.images" / f"{n}.conf").write_text( """\ [Config] Include=abc.conf """ ) with chdir(d): _, [one, two, config] = parse_config([]) assert one.build_packages == ["def"] assert two.build_packages == ["def"] def test_profiles(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.profiles").mkdir() (d / "mkosi.profiles/profile.conf").write_text( """\ [Distribution] Distribution=fedora [Host] QemuKvm=yes """ ) (d / "mkosi.conf").write_text( """\ [Config] Profile=profile """ ) (d / "mkosi.conf.d").mkdir() (d / "mkosi.conf.d/abc.conf").write_text( """\ [Distribution] Distribution=debian """ ) with chdir(d): _, [config] = parse_config() assert config.profile == "profile" # mkosi.conf.d/ should override the profile assert config.distribution == Distribution.debian assert config.qemu_kvm == ConfigFeature.enabled (d / "mkosi.conf").unlink() with chdir(d): _, [config] = parse_config(["--profile", "profile"]) assert config.profile == "profile" # mkosi.conf.d/ should override the profile assert config.distribution == Distribution.debian assert config.qemu_kvm == ConfigFeature.enabled def test_override_default(tmp_path: Path) -> None: d = tmp_path (d / 
"mkosi.conf").write_text( """\ [Content] Environment=MY_KEY=MY_VALUE [Host] ToolsTree=default """ ) with chdir(d): _, [config] = parse_config(["--tools-tree", "", "--environment", ""]) assert config.tools_tree is None assert "MY_KEY" not in config.environment def test_local_config(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.local.conf").write_text( """\ [Distribution] Distribution=debian [Content] WithTests=yes """ ) with chdir(d): _, [config] = parse_config() assert config.distribution == Distribution.debian (d / "mkosi.conf").write_text( """\ [Distribution] Distribution=fedora [Content] WithTests=no """ ) with chdir(d): _, [config] = parse_config() # Local config should take precedence over non-local config. assert config.distribution == Distribution.debian assert config.with_tests with chdir(d): _, [config] = parse_config(["--distribution", "fedora", "-T"]) assert config.distribution == Distribution.fedora assert not config.with_tests def test_parse_load_verb(tmp_path: Path) -> None: with chdir(tmp_path): assert parse_config(["build"])[0].verb == Verb.build assert parse_config(["clean"])[0].verb == Verb.clean assert parse_config(["genkey"])[0].verb == Verb.genkey assert parse_config(["bump"])[0].verb == Verb.bump assert parse_config(["serve"])[0].verb == Verb.serve assert parse_config(["build"])[0].verb == Verb.build assert parse_config(["shell"])[0].verb == Verb.shell assert parse_config(["boot"])[0].verb == Verb.boot assert parse_config(["qemu"])[0].verb == Verb.qemu assert parse_config(["journalctl"])[0].verb == Verb.journalctl assert parse_config(["coredumpctl"])[0].verb == Verb.coredumpctl with pytest.raises(SystemExit): parse_config(["invalid"]) def test_os_distribution(tmp_path: Path) -> None: with chdir(tmp_path): for dist in Distribution: _, [config] = parse_config(["-d", dist.value]) assert config.distribution == dist with pytest.raises(tuple((argparse.ArgumentError, SystemExit))): parse_config(["-d", "invalidDistro"]) with pytest.raises(tuple((argparse.ArgumentError, SystemExit))): parse_config(["-d"]) for dist in Distribution: Path("mkosi.conf").write_text(f"[Distribution]\nDistribution={dist}") _, [config] = parse_config() assert config.distribution == dist def test_parse_config_files_filter(tmp_path: Path) -> None: with chdir(tmp_path): confd = Path("mkosi.conf.d") confd.mkdir() (confd / "10-file.conf").write_text("[Content]\nPackages=yes") (confd / "20-file.noconf").write_text("[Content]\nPackages=nope") _, [config] = parse_config() assert config.packages == ["yes"] def test_compression(tmp_path: Path) -> None: with chdir(tmp_path): _, [config] = parse_config(["--format", "disk", "--compress-output", "False"]) assert config.compress_output == Compression.none def test_match_only(tmp_path: Path) -> None: with chdir(tmp_path): Path("mkosi.conf").write_text( """\ [Match] Format=|directory Format=|disk """ ) Path("mkosi.conf.d").mkdir() Path("mkosi.conf.d/10-abc.conf").write_text( """\ [Output] ImageId=abcde """ ) _, [config] = parse_config(["--format", "tar"]) assert config.image_id != "abcde" def test_match_multiple(tmp_path: Path) -> None: with chdir(tmp_path): Path("mkosi.conf").write_text( """\ [Match] Format=|disk Format=|directory [Match] Architecture=|x86-64 Architecture=|arm64 [Output] ImageId=abcde """ ) # Both sections are not matched, so image ID should not be "abcde". _, [config] = parse_config(["--format", "tar", "--architecture", "s390x"]) assert config.image_id != "abcde" # Only a single section is matched, so image ID should not be "abcde". 
_, [config] = parse_config(["--format", "disk", "--architecture", "s390x"]) assert config.image_id != "abcde" # Both sections are matched, so image ID should be "abcde". _, [config] = parse_config(["--format", "disk", "--architecture", "x86-64"]) assert config.image_id == "abcde" Path("mkosi.conf").write_text( """\ [TriggerMatch] Format=disk Architecture=x86-64 [TriggerMatch] Format=directory Architecture=arm64 [Output] ImageId=abcde """ ) # Both sections are not matched, so image ID should not be "abcde". _, [config] = parse_config(["--format", "tar", "--architecture", "s390x"]) assert config.image_id != "abcde" # The first section is matched, so image ID should be "abcde". _, [config] = parse_config(["--format", "disk", "--architecture", "x86-64"]) assert config.image_id == "abcde" # The second section is matched, so image ID should be "abcde". _, [config] = parse_config(["--format", "directory", "--architecture", "arm64"]) assert config.image_id == "abcde" # Parts of all section are matched, but none is matched fully, so image ID should not be "abcde". _, [config] = parse_config(["--format", "disk", "--architecture", "arm64"]) assert config.image_id != "abcde" Path("mkosi.conf").write_text( """\ [TriggerMatch] Format=|disk Format=|directory [TriggerMatch] Format=directory Architecture=arm64 [Output] ImageId=abcde """ ) # The first section is matched, so image ID should be "abcde". _, [config] = parse_config(["--format", "disk"]) assert config.image_id == "abcde" Path("mkosi.conf").write_text( """\ [TriggerMatch] Format=|disk Format=|directory Architecture=x86-64 [TriggerMatch] Format=directory Architecture=arm64 [Output] ImageId=abcde """ ) # No sections are matched, so image ID should be not "abcde". _, [config] = parse_config(["--format", "disk", "--architecture=arm64"]) assert config.image_id != "abcde" # Mixing both [Match] and [TriggerMatch] Path("mkosi.conf").write_text( """\ [Match] Format=disk [TriggerMatch] Architecture=arm64 [TriggerMatch] Architecture=x86-64 [Output] ImageId=abcde """ ) # Match and first TriggerMatch sections match _, [config] = parse_config(["--format", "disk", "--architecture=arm64"]) assert config.image_id == "abcde" # Match section matches, but no TriggerMatch section matches _, [config] = parse_config(["--format", "disk", "--architecture=s390x"]) assert config.image_id != "abcde" # Second TriggerMatch section matches, but the Match section does not _, [config] = parse_config(["--format", "tar", "--architecture=x86-64"]) assert config.image_id != "abcde" @pytest.mark.parametrize("dist1,dist2", itertools.combinations_with_replacement(Distribution, 2)) def test_match_distribution(tmp_path: Path, dist1: Distribution, dist2: Distribution) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( f"""\ [Distribution] Distribution={dist1} """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] Distribution={dist1} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] Distribution={dist2} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] Distribution=|{dist1} Distribution=|{dist2} [Content] Packages=testpkg3 """ ) _, [conf] = parse_config() assert "testpkg1" in conf.packages if dist1 == dist2: assert "testpkg2" in conf.packages else: assert "testpkg2" not in conf.packages assert "testpkg3" in conf.packages @pytest.mark.parametrize( "release1,release2", 
itertools.combinations_with_replacement([36, 37, 38], 2) ) def test_match_release(tmp_path: Path, release1: int, release2: int) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( f"""\ [Distribution] Distribution=fedora Release={release1} """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] Release={release1} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] Release={release2} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] Release=|{release1} Release=|{release2} [Content] Packages=testpkg3 """ ) _, [conf] = parse_config() assert "testpkg1" in conf.packages if release1 == release2: assert "testpkg2" in conf.packages else: assert "testpkg2" not in conf.packages assert "testpkg3" in conf.packages def test_match_build_sources(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Match] BuildSources=kernel BuildSources=/kernel [Output] Output=abc """ ) with chdir(d): _, [config] = parse_config(["--build-sources", ".:kernel"]) assert config.output == "abc" def test_match_repositories(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Match] Repositories=epel [Content] Output=qed """ ) with chdir(d): _, [config] = parse_config(["--repositories", "epel,epel-next"]) assert config.output == "qed" @pytest.mark.parametrize( "image1,image2", itertools.combinations_with_replacement( ["image_a", "image_b", "image_c"], 2 ) ) def test_match_imageid(tmp_path: Path, image1: str, image2: str) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( f"""\ [Distribution] Distribution=fedora [Output] ImageId={image1} """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] ImageId={image1} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] ImageId={image2} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] ImageId=|{image1} ImageId=|{image2} [Content] Packages=testpkg3 """ ) child4 = Path("mkosi.conf.d/child4.conf") child4.write_text( """\ [Match] ImageId=image* [Content] Packages=testpkg4 """ ) _, [conf] = parse_config() assert "testpkg1" in conf.packages if image1 == image2: assert "testpkg2" in conf.packages else: assert "testpkg2" not in conf.packages assert "testpkg3" in conf.packages assert "testpkg4" in conf.packages @pytest.mark.parametrize( "op,version", itertools.product( ["", "==", "<", ">", "<=", ">="], [122, 123, 124], ) ) def test_match_imageversion(tmp_path: Path, op: str, version: str) -> None: opfunc = { "==": operator.eq, "!=": operator.ne, "<": operator.lt, "<=": operator.le, ">": operator.gt, ">=": operator.ge, }.get(op, operator.eq,) with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( """\ [Output] ImageId=testimage ImageVersion=123 """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] ImageVersion={op}{version} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] ImageVersion=<200 ImageVersion={op}{version} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] ImageVersion=>9000 ImageVersion={op}{version} [Content] Packages=testpkg3 """ ) _, [conf] = parse_config() assert 
("testpkg1" in conf.packages) == opfunc(123, version) assert ("testpkg2" in conf.packages) == opfunc(123, version) assert "testpkg3" not in conf.packages def test_match_environment(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Match] Environment=MYENV=abc [Content] ImageId=matched """ ) with chdir(d): _, [conf] = parse_config(["--environment", "MYENV=abc"]) assert conf.image_id == "matched" _, [conf] = parse_config(["--environment", "MYENV=abd"]) assert conf.image_id != "matched" _, [conf] = parse_config(["--environment", "MYEN=abc"]) assert conf.image_id != "matched" _, [conf] = parse_config(["--environment", "MYEN=abd"]) assert conf.image_id != "matched" (d / "mkosi.conf").write_text( """\ [Match] Environment=MYENV [Content] ImageId=matched """ ) with chdir(d): _, [conf] = parse_config(["--environment", "MYENV=abc"]) assert conf.image_id == "matched" _, [conf] = parse_config(["--environment", "MYENV=abd"]) assert conf.image_id == "matched" _, [conf] = parse_config(["--environment", "MYEN=abc"]) assert conf.image_id != "matched" @pytest.mark.parametrize( "skel,pkgmngr", itertools.product( [None, Path("/foo"), Path("/bar")], [None, Path("/foo"), Path("/bar")], ) ) def test_package_manager_tree(tmp_path: Path, skel: Optional[Path], pkgmngr: Optional[Path]) -> None: with chdir(tmp_path): config = Path("mkosi.conf") with config.open("w") as f: f.write("[Content]\n") if skel is not None: f.write(f"SkeletonTrees={skel}\n") if pkgmngr is not None: f.write(f"PackageManagerTrees={pkgmngr}\n") _, [conf] = parse_config() skel_expected = [ConfigTree(skel, None)] if skel is not None else [] pkgmngr_expected = [ConfigTree(pkgmngr, None)] if pkgmngr is not None else skel_expected assert conf.skeleton_trees == skel_expected assert conf.package_manager_trees == pkgmngr_expected def test_paths_with_default_factory(tmp_path: Path) -> None: """ If both paths= and default_factory= are defined, default_factory= should not be used when at least one of the files/directories from paths= has been found. 
""" with chdir(tmp_path): Path("mkosi.skeleton.tar").touch() _, [config] = parse_config() assert config.package_manager_trees == [ ConfigTree(Path.cwd() / "mkosi.skeleton.tar", None), ] Path("mkosi.pkgmngr.tar").touch() _, [config] = parse_config() assert config.package_manager_trees == [ ConfigTree(Path.cwd() / "mkosi.pkgmngr.tar", None), ] @pytest.mark.parametrize( "sections,args,warning_count", [ (["Output"], [], 0), (["Content"], [], 1), (["Content", "Output"], [], 1), (["Output", "Content"], [], 1), (["Output", "Content", "Distribution"], [], 2), (["Content"], ["--image-id=testimage"], 1), ], ) def test_wrong_section_warning( tmp_path: Path, caplog: pytest.LogCaptureFixture, sections: list[str], args: list[str], warning_count: int, ) -> None: with chdir(tmp_path): # Create a config with ImageId in the wrong section, # and sometimes in the correct section Path("mkosi.conf").write_text( "\n".join( f"""\ [{section}] ImageId=testimage """ for section in sections ) ) with caplog.at_level(logging.WARNING): # Parse the config, with --image-id sometimes given on the command line parse_config(args) assert len(caplog.records) == warning_count def test_config_parse_bytes() -> None: assert config_parse_bytes(None) is None assert config_parse_bytes("1") == 4096 assert config_parse_bytes("8000") == 8192 assert config_parse_bytes("8K") == 8192 assert config_parse_bytes("4097") == 8192 assert config_parse_bytes("1M") == 1024**2 assert config_parse_bytes("1.9M") == 1994752 assert config_parse_bytes("1G") == 1024**3 assert config_parse_bytes("7.3G") == 7838318592 with pytest.raises(SystemExit): config_parse_bytes("-1") with pytest.raises(SystemExit): config_parse_bytes("-2K") with pytest.raises(SystemExit): config_parse_bytes("-3M") with pytest.raises(SystemExit): config_parse_bytes("-4G") def test_specifiers(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Distribution] Distribution=ubuntu Release=lunar Architecture=arm64 [Output] ImageId=my-image-id ImageVersion=1.2.3 OutputDirectory=abcde Output=test [Content] Environment=Distribution=%d Release=%r Architecture=%a ImageId=%i ImageVersion=%v OutputDirectory=%O Output=%o ConfigRootDirectory=%D ConfigRootConfdir=%C ConfigRootPwd=%P Filesystem=%F """ ) (d / "mkosi.conf.d").mkdir() (d / "mkosi.conf.d/abc.conf").write_text( """\ [Content] Environment=ConfigAbcDirectory=%D ConfigAbcConfdir=%C ConfigAbcPwd=%P """ ) (d / "mkosi.conf.d/qed").mkdir() (d / "mkosi.conf.d/qed/mkosi.conf").write_text( """ [Content] Environment=ConfigQedDirectory=%D ConfigQedConfdir=%C ConfigQedPwd=%P """ ) with chdir(d): _, [config] = parse_config() expected = { "Distribution": "ubuntu", "Release": "lunar", "Architecture": "arm64", "ImageId": "my-image-id", "ImageVersion": "1.2.3", "OutputDirectory": str(Path.cwd() / "abcde"), "Output": "test", "ConfigRootDirectory": os.fspath(d), "ConfigRootConfdir": os.fspath(d), "ConfigRootPwd": os.fspath(d), "ConfigAbcDirectory": os.fspath(d), "ConfigAbcConfdir": os.fspath(d / "mkosi.conf.d"), "ConfigAbcPwd": os.fspath(d), "ConfigQedDirectory": os.fspath(d), "ConfigQedConfdir": os.fspath(d / "mkosi.conf.d/qed"), "ConfigQedPwd": os.fspath(d / "mkosi.conf.d/qed"), "Filesystem": "ext4", } assert {k: v for k, v in config.environment.items() if k in expected} == expected def test_kernel_specifiers(tmp_path: Path) -> None: kver = "13.0.8-5.10.0-1057-oem" # taken from reporter of #1638 token = "MySystemImage" roothash = "67e893261799236dcf20529115ba9fae4fd7c2269e1e658d42269503e5760d38" boot_count = "3" def 
test_expand_kernel_specifiers(text: str) -> str: return expand_kernel_specifiers( text, kver=kver, token=token, roothash=roothash, boot_count=boot_count, ) assert test_expand_kernel_specifiers("&&") == "&" assert test_expand_kernel_specifiers("&k") == kver assert test_expand_kernel_specifiers("&e") == token assert test_expand_kernel_specifiers("&h") == roothash assert test_expand_kernel_specifiers("&c") == boot_count assert test_expand_kernel_specifiers("Image_1.0.3") == "Image_1.0.3" assert test_expand_kernel_specifiers("Image~&c+&h-&k-&e") == f"Image~{boot_count}+{roothash}-{kver}-{token}" def test_output_id_version(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """ [Output] ImageId=output ImageVersion=1.2.3 """ ) with chdir(d): _, [config] = parse_config() assert config.output == "output_1.2.3" def test_deterministic() -> None: assert Config.default() == Config.default() def test_environment(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Config] PassEnvironment=PassThisEnv [Content] Environment=TestValue2=300 TestValue3=400 PassThisEnv=abc EnvironmentFiles=other.env """ ) (d / "mkosi.env").write_text( """\ TestValue1=90 TestValue4=99 """ ) (d / "other.env").write_text( """\ TestValue1=100 TestValue2=200 """ ) (d / "mkosi.images").mkdir() (d / "mkosi.images/sub.conf").touch() with chdir(d): _, [sub, config] = parse_config() expected = { "TestValue1": "100", # from other.env "TestValue2": "300", # from mkosi.conf "TestValue3": "400", # from mkosi.conf "TestValue4": "99", # from mkosi.env } # Only check values for keys from expected, as config.environment contains other items as well assert {k: config.environment[k] for k in expected.keys()} == expected assert config.environment_files == [Path.cwd() / "mkosi.env", Path.cwd() / "other.env"] assert sub.environment["PassThisEnv"] == "abc" assert "TestValue2" not in sub.environment mkosi-24.3/tests/test_initrd.py000066400000000000000000000232371465176501400166500ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import os import subprocess import tempfile import textwrap import time from collections.abc import Iterator from pathlib import Path from typing import Any import pytest from mkosi.distributions import Distribution from mkosi.log import die from mkosi.mounts import mount from mkosi.run import find_binary, run from mkosi.tree import copy_tree from mkosi.types import PathString from mkosi.user import INVOKING_USER from mkosi.versioncomp import GenericVersion from . import Image, ImageConfig, ci_group pytestmark = pytest.mark.integration @pytest.fixture(scope="module") def passphrase() -> Iterator[Path]: # We can't use tmp_path fixture because pytest creates it in a nested directory we can't access using our # unprivileged user. # TODO: Use delete_on_close=False and close() instead of flush() when we require Python 3.12 or newer. 
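    # (Sketch of the Python >= 3.12 variant referenced in the TODO, for illustration: with
    # delete_on_close=False the file survives close() and is still removed when the context
    # manager exits, so close() can replace flush() below.)
    #
    #   with tempfile.NamedTemporaryFile(prefix="mkosi.passphrase", mode="w", delete_on_close=False) as f:
    #       f.write("mkosi")
    #       f.close()
    #       yield Path(f.name)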
with tempfile.NamedTemporaryFile(prefix="mkosi.passphrase", mode="w") as passphrase: passphrase.write("mkosi") passphrase.flush() os.fchown(passphrase.fileno(), INVOKING_USER.uid, INVOKING_USER.gid) os.fchmod(passphrase.fileno(), 0o600) yield Path(passphrase.name) @pytest.fixture(scope="module") def initrd(request: Any, config: ImageConfig) -> Iterator[Image]: with ( ci_group(f"Initrd image {config.distribution}/{config.release}"), Image( config, options=[ "--directory", "", "--include=mkosi-initrd/", ], ) as initrd ): if initrd.config.distribution == Distribution.rhel_ubi: pytest.skip("Cannot build RHEL-UBI initrds") initrd.build() yield initrd def test_initrd(initrd: Image) -> None: with Image( initrd.config, options=[ "--initrd", Path(initrd.output_dir) / "initrd", "--kernel-command-line=systemd.unit=mkosi-check-and-shutdown.service", "--incremental", "--ephemeral", "--format=disk", ] ) as image: image.build() image.qemu() def wait_for_device(device: PathString) -> None: if ( find_binary("udevadm") and GenericVersion(run(["udevadm", "--version"], stdout=subprocess.PIPE).stdout.strip()) >= 251 ): run(["udevadm", "wait", "--timeout=30", "/dev/vg_mkosi/lv0"]) return for i in range(30): if Path(device).exists(): return time.sleep(1) die(f"Device {device} did not appear within 30 seconds") @pytest.mark.skipif(os.getuid() != 0, reason="mkosi-initrd LVM test can only be executed as root") def test_initrd_lvm(initrd: Image) -> None: with Image( initrd.config, options=[ "--initrd", Path(initrd.output_dir) / "initrd", "--kernel-command-line=systemd.unit=mkosi-check-and-shutdown.service", # LVM confuses systemd-repart so we mask it for this test. "--kernel-command-line=systemd.mask=systemd-repart.service", "--kernel-command-line=root=LABEL=root", "--kernel-command-line=rw", "--incremental", "--ephemeral", "--qemu-firmware=linux", ] ) as image, contextlib.ExitStack() as stack: image.build(["--format", "directory"]) drive = Path(image.output_dir) / "image.raw" drive.touch() os.truncate(drive, 5000 * 1024**2) lodev = run(["losetup", "--show", "--find", "--partscan", drive], stdout=subprocess.PIPE).stdout.strip() stack.callback(lambda: run(["losetup", "--detach", lodev])) run(["sfdisk", "--label", "gpt", lodev], input="type=E6D6D379-F507-44C2-A23C-238F2A3DF928 bootable") run(["lvm", "pvcreate", f"{lodev}p1"]) run(["lvm", "pvs"]) run(["lvm", "vgcreate", "vg_mkosi", f"{lodev}p1"]) run(["lvm", "vgchange", "-ay", "vg_mkosi"]) run(["lvm", "vgs"]) stack.callback(lambda: run(["vgchange", "-an", "vg_mkosi"])) run(["lvm", "lvcreate", "-l", "100%FREE", "-n", "lv0", "vg_mkosi"]) run(["lvm", "lvs"]) wait_for_device("/dev/vg_mkosi/lv0") run([f"mkfs.{image.config.distribution.filesystem()}", "-L", "root", "/dev/vg_mkosi/lv0"]) with tempfile.TemporaryDirectory() as mnt, mount(Path("/dev/vg_mkosi/lv0"), Path(mnt)): # The image might have been built unprivileged so we need to fix the file ownership. Making all the # files owned by root isn't completely correct but good enough for the purposes of the test. 
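            # (Context, for illustration: the loop-device plumbing above hand-builds what the initrd
            # must assemble at boot - a GPT partition of type E6D6D379-F507-44C2-A23C-238F2A3DF928,
            # the GPT type GUID for Linux LVM, holding a PV in VG "vg_mkosi" whose single LV "lv0"
            # carries the root file system labelled "root", matching the root=LABEL=root kernel
            # argument configured on the image.)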
            copy_tree(Path(image.output_dir) / "image", Path(mnt), preserve=False)

        stack.close()

        image.qemu(["--format=disk"])


def test_initrd_luks(initrd: Image, passphrase: Path) -> None:
    with tempfile.TemporaryDirectory() as repartd:
        os.chown(repartd, INVOKING_USER.uid, INVOKING_USER.gid)

        (Path(repartd) / "00-esp.conf").write_text(
            textwrap.dedent(
                """\
                [Partition]
                Type=esp
                Format=vfat
                CopyFiles=/boot:/
                CopyFiles=/efi:/
                SizeMinBytes=1G
                SizeMaxBytes=1G
                """
            )
        )

        (Path(repartd) / "05-bios.conf").write_text(
            textwrap.dedent(
                """\
                [Partition]
                # UUID of the grub BIOS boot partition which grub needs on GPT to
                # embed itself into.
                Type=21686148-6449-6e6f-744e-656564454649
                SizeMinBytes=1M
                SizeMaxBytes=1M
                """
            )
        )

        (Path(repartd) / "10-root.conf").write_text(
            textwrap.dedent(
                f"""\
                [Partition]
                Type=root
                Format={initrd.config.distribution.filesystem()}
                Minimize=guess
                Encrypt=key-file
                CopyFiles=/
                """
            )
        )

        with Image(
            initrd.config,
            options=[
                "--initrd", Path(initrd.output_dir) / "initrd",
                "--repart-dir", repartd,
                "--passphrase", passphrase,
                "--kernel-command-line=systemd.unit=mkosi-check-and-shutdown.service",
                "--credential=cryptsetup.passphrase=mkosi",
                "--incremental",
                "--ephemeral",
                "--format=disk",
            ]
        ) as image:
            image.build()
            image.qemu()


@pytest.mark.skipif(os.getuid() != 0, reason="mkosi-initrd LUKS+LVM test can only be executed as root")
def test_initrd_luks_lvm(config: ImageConfig, initrd: Image, passphrase: Path) -> None:
    with Image(
        config,
        options=[
            "--initrd", Path(initrd.output_dir) / "initrd",
            "--kernel-command-line=systemd.unit=mkosi-check-and-shutdown.service",
            "--kernel-command-line=root=LABEL=root",
            "--kernel-command-line=rw",
            "--credential=cryptsetup.passphrase=mkosi",
            "--incremental",
            "--ephemeral",
            "--qemu-firmware=linux",
        ]
    ) as image, contextlib.ExitStack() as stack:
        image.build(["--format", "directory"])

        drive = Path(image.output_dir) / "image.raw"
        drive.touch()
        os.truncate(drive, 5000 * 1024**2)

        lodev = run(["losetup", "--show", "--find", "--partscan", drive], stdout=subprocess.PIPE).stdout.strip()
        stack.callback(lambda: run(["losetup", "--detach", lodev]))
        run(["sfdisk", "--label", "gpt", lodev], input="type=E6D6D379-F507-44C2-A23C-238F2A3DF928 bootable")
        run(
            [
                "cryptsetup",
                "--key-file", passphrase,
                "--use-random",
                "--pbkdf", "pbkdf2",
                "--pbkdf-force-iterations", "1000",
                "luksFormat",
                f"{lodev}p1",
            ]
        )
        run(["cryptsetup", "--key-file", passphrase, "luksOpen", f"{lodev}p1", "lvm_root"])
        stack.callback(lambda: run(["cryptsetup", "close", "lvm_root"]))

        luks_uuid = run(["cryptsetup", "luksUUID", f"{lodev}p1"], stdout=subprocess.PIPE).stdout.strip()

        run(["lvm", "pvcreate", "/dev/mapper/lvm_root"])
        run(["lvm", "pvs"])
        run(["lvm", "vgcreate", "vg_mkosi", "/dev/mapper/lvm_root"])
        run(["lvm", "vgchange", "-ay", "vg_mkosi"])
        run(["lvm", "vgs"])
        stack.callback(lambda: run(["lvm", "vgchange", "-an", "vg_mkosi"]))
        run(["lvm", "lvcreate", "-l", "100%FREE", "-n", "lv0", "vg_mkosi"])
        run(["lvm", "lvs"])
        wait_for_device("/dev/vg_mkosi/lv0")

        run([f"mkfs.{image.config.distribution.filesystem()}", "-L", "root", "/dev/vg_mkosi/lv0"])

        with tempfile.TemporaryDirectory() as mnt, mount(Path("/dev/vg_mkosi/lv0"), Path(mnt)):
            # The image might have been built unprivileged so we need to fix the file ownership. Making all
            # the files owned by root isn't completely correct but good enough for the purposes of the test.
            copy_tree(Path(image.output_dir) / "image", Path(mnt), preserve=False)

        stack.close()

        image.qemu([
            "--format=disk",
            f"--kernel-command-line=rd.luks.uuid={luks_uuid}",
        ])


def test_initrd_size(initrd: Image) -> None:
    # The fallback value is for CentOS and related distributions.
    maxsize = 1024**2 * {
        Distribution.fedora: 46,
        Distribution.debian: 40,
        Distribution.ubuntu: 36,
        Distribution.arch: 67,
        Distribution.opensuse: 39,
    }.get(initrd.config.distribution, 48)

    assert (Path(initrd.output_dir) / "initrd").stat().st_size <= maxsize
mkosi-24.3/tests/test_json.py000066400000000000000000000377761465176501400163370ustar00rootroot00000000000000
# SPDX-License-Identifier: LGPL-2.1-or-later
import os
import textwrap
import uuid
from pathlib import Path
from typing import Optional

import pytest

from mkosi.config import (
    Architecture,
    Args,
    BiosBootloader,
    Bootloader,
    Cacheonly,
    Compression,
    Config,
    ConfigFeature,
    ConfigTree,
    DocFormat,
    KeySource,
    ManifestFormat,
    Network,
    OutputFormat,
    QemuDrive,
    QemuFirmware,
    QemuVsockCID,
    SecureBootSignTool,
    ShimBootloader,
    Verb,
    Vmm,
)
from mkosi.distributions import Distribution
from mkosi.versioncomp import GenericVersion


@pytest.mark.parametrize("path", [None, "/baz/qux"])
def test_args(path: Optional[Path]) -> None:
    dump = textwrap.dedent(
        f"""\
        {{
            "AutoBump": false,
            "Cmdline": [
                "foo",
                "bar"
            ],
            "Debug": false,
            "DebugShell": false,
            "DebugWorkspace": false,
            "Directory": {f'"{os.fspath(path)}"' if path is not None else 'null'},
            "DocFormat": "auto",
            "Force": 9001,
            "GenkeyCommonName": "test",
            "GenkeyValidDays": "100",
            "Json": false,
            "Pager": true,
            "Verb": "build"
        }}
        """
    )

    args = Args(
        auto_bump=False,
        cmdline=["foo", "bar"],
        debug=False,
        debug_shell=False,
        debug_workspace=False,
        directory=Path(path) if path is not None else None,
        doc_format=DocFormat.auto,
        force=9001,
        genkey_common_name="test",
        genkey_valid_days="100",
        json=False,
        pager=True,
        verb=Verb.build,
    )

    assert args.to_json(indent=4, sort_keys=True) == dump.rstrip()
    assert Args.from_json(dump) == args


def test_config() -> None:
    dump = textwrap.dedent(
        """\
        {
            "Acl": true,
            "Architecture": "ia64",
            "Autologin": false,
            "BaseTrees": [
                "/hello/world"
            ],
            "BiosBootloader": "none",
            "Bootable": "disabled",
            "Bootloader": "grub",
            "BuildDirectory": null,
            "BuildPackages": [
                "pkg1",
                "pkg2"
            ],
            "BuildScripts": [
                "/path/to/buildscript"
            ],
            "BuildSources": [
                {
                    "Source": "/qux",
                    "Target": "/frob"
                }
            ],
            "BuildSourcesEphemeral": true,
            "CacheDirectory": "/is/this/the/cachedir",
            "CacheOnly": "always",
            "Checksum": false,
            "CleanPackageMetadata": "auto",
            "CleanScripts": [
                "/clean"
            ],
            "CompressLevel": 3,
            "CompressOutput": "bz2",
            "ConfigureScripts": [
                "/configure"
            ],
            "Credentials": {
                "credkey": "credval"
            },
            "Dependencies": [
                "dep1"
            ],
            "Distribution": "fedora",
            "Environment": {
                "BAR": "BAR",
                "Qux": "Qux",
                "foo": "foo"
            },
            "EnvironmentFiles": [],
            "Ephemeral": true,
            "ExtraSearchPaths": [],
            "ExtraTrees": [],
            "FinalizeScripts": [],
            "Format": "uki",
            "ForwardJournal": "/mkosi.journal",
            "Hostname": null,
            "Image": "default",
            "ImageId": "myimage",
            "ImageVersion": "5",
            "Include": [],
            "Incremental": false,
            "InitrdInclude": [
                "/foo/bar"
            ],
            "InitrdPackages": [
                "clevis"
            ],
            "InitrdVolatilePackages": [
                "abc"
            ],
            "Initrds": [
                "/efi/initrd1",
                "/efi/initrd2"
            ],
            "KernelCommandLine": [],
            "KernelCommandLineExtra": [
                "look",
                "im",
                "on",
                "the",
                "kernel",
                "command",
                "line"
            ],
            "KernelModulesExclude": [
                "nvidia"
            ],
            "KernelModulesInclude": [
                "loop"
            ],
            "KernelModulesIncludeHost": true,
            "KernelModulesInitrd": true,
            "KernelModulesInitrdExclude": [],
"KernelModulesInitrdInclude": [], "KernelModulesInitrdIncludeHost": true, "Key": null, "Keymap": "wow, so much keymap", "LocalMirror": null, "Locale": "en_C.UTF-8", "LocaleMessages": "", "Machine": "machine", "MakeInitrd": false, "ManifestFormat": [ "json", "changelog" ], "MicrocodeHost": true, "MinimumVersion": "123", "Mirror": null, "NSpawnSettings": null, "Output": "outfile", "OutputDirectory": "/your/output/here", "Overlay": true, "PackageCacheDirectory": "/a/b/c", "PackageDirectories": [], "PackageManagerTrees": [ { "Source": "/foo/bar", "Target": null } ], "Packages": [], "PassEnvironment": [ "abc" ], "Passphrase": null, "PostInstallationScripts": [ "/bar/qux" ], "PostOutputScripts": [ "/foo/src" ], "PrepareScripts": [ "/run/foo" ], "Profile": "profile", "ProxyClientCertificate": "/my/client/cert", "ProxyClientKey": "/my/client/key", "ProxyExclude": [ "www.example.com" ], "ProxyPeerCertificate": "/my/peer/cert", "ProxyUrl": "https://my/proxy", "QemuArgs": [], "QemuCdrom": false, "QemuDrives": [ { "Directory": "/foo/bar", "FileId": "red", "Id": "abc", "Options": "abc,qed", "Size": 200 }, { "Directory": null, "FileId": "wcd", "Id": "abc", "Options": "", "Size": 200 } ], "QemuFirmware": "linux", "QemuFirmwareVariables": "/foo/bar", "QemuGui": true, "QemuKernel": null, "QemuKvm": "auto", "QemuMem": 123, "QemuSmp": 2, "QemuSwtpm": "auto", "QemuVsock": "enabled", "QemuVsockConnectionId": -2, "Release": "53", "RemoveFiles": [], "RemovePackages": [ "all" ], "RepartDirectories": [], "RepartOffline": true, "Repositories": [], "RepositoryKeyCheck": false, "RootPassword": [ "test1234", false ], "RootShell": "/bin/tcsh", "RuntimeBuildSources": true, "RuntimeNetwork": "interface", "RuntimeScratch": "enabled", "RuntimeSize": 8589934592, "RuntimeTrees": [ { "Source": "/foo/bar", "Target": "/baz" }, { "Source": "/bar/baz", "Target": "/qux" } ], "SELinuxRelabel": "disabled", "SectorSize": null, "SecureBoot": true, "SecureBootAutoEnroll": true, "SecureBootCertificate": null, "SecureBootKey": "/path/to/keyfile", "SecureBootKeySource": { "Source": "", "Type": "file" }, "SecureBootSignTool": "pesign", "Seed": "7496d7d8-7f08-4a2b-96c6-ec8c43791b60", "ShimBootloader": "none", "Sign": false, "SignExpectedPcr": "disabled", "SkeletonTrees": [ { "Source": "/foo/bar", "Target": "/" }, { "Source": "/bar/baz", "Target": "/qux" } ], "SourceDateEpoch": 12345, "SplitArtifacts": true, "Ssh": false, "SshCertificate": "/path/to/cert", "SshKey": null, "SyncScripts": [ "/sync" ], "Timezone": null, "ToolsTree": null, "ToolsTreeCertificates": true, "ToolsTreeDistribution": "fedora", "ToolsTreeMirror": null, "ToolsTreePackageManagerTrees": [ { "Source": "/a/b/c", "Target": "/" } ], "ToolsTreePackages": [], "ToolsTreeRelease": null, "ToolsTreeRepositories": [ "abc" ], "UnifiedKernelImageFormat": "myuki", "UnifiedKernelImages": "auto", "UnitProperties": [ "PROPERTY=VALUE" ], "UseSubvolumes": "auto", "VerityCertificate": "/path/to/cert", "VerityKey": null, "VerityKeySource": { "Source": "", "Type": "file" }, "VirtualMachineMonitor": "qemu", "VolatilePackageDirectories": [ "def" ], "VolatilePackages": [ "abc" ], "WithDocs": true, "WithNetwork": false, "WithRecommends": true, "WithTests": true, "WorkspaceDirectory": "/cwd" } """ ) args = Config( acl=True, architecture=Architecture.ia64, autologin=False, base_trees=[Path("/hello/world")], bios_bootloader=BiosBootloader.none, bootable=ConfigFeature.disabled, bootloader=Bootloader.grub, build_dir=None, build_packages=["pkg1", "pkg2"], build_scripts=[Path("/path/to/buildscript")], 
        build_sources=[ConfigTree(Path("/qux"), Path("/frob"))],
        build_sources_ephemeral=True,
        cache_dir=Path("/is/this/the/cachedir"),
        cacheonly=Cacheonly.always,
        checksum=False,
        clean_package_metadata=ConfigFeature.auto,
        clean_scripts=[Path("/clean")],
        compress_level=3,
        compress_output=Compression.bz2,
        configure_scripts=[Path("/configure")],
        credentials={"credkey": "credval"},
        dependencies=["dep1"],
        distribution=Distribution.fedora,
        environment={"foo": "foo", "BAR": "BAR", "Qux": "Qux"},
        environment_files=[],
        ephemeral=True,
        extra_search_paths=[],
        extra_trees=[],
        finalize_scripts=[],
        forward_journal=Path("/mkosi.journal"),
        hostname=None,
        vmm=Vmm.qemu,
        image="default",
        image_id="myimage",
        image_version="5",
        include=[],
        incremental=False,
        initrd_include=[Path("/foo/bar")],
        initrd_packages=["clevis"],
        initrd_volatile_packages=["abc"],
        initrds=[Path("/efi/initrd1"), Path("/efi/initrd2")],
        microcode_host=True,
        kernel_command_line=[],
        kernel_command_line_extra=["look", "im", "on", "the", "kernel", "command", "line"],
        kernel_modules_exclude=["nvidia"],
        kernel_modules_include=["loop"],
        kernel_modules_include_host=True,
        kernel_modules_initrd=True,
        kernel_modules_initrd_exclude=[],
        kernel_modules_initrd_include=[],
        kernel_modules_initrd_include_host=True,
        key=None,
        keymap="wow, so much keymap",
        local_mirror=None,
        locale="en_C.UTF-8",
        locale_messages="",
        machine="machine",
        make_initrd=False,
        manifest_format=[ManifestFormat.json, ManifestFormat.changelog],
        minimum_version=GenericVersion("123"),
        mirror=None,
        nspawn_settings=None,
        output="outfile",
        output_dir=Path("/your/output/here"),
        output_format=OutputFormat.uki,
        overlay=True,
        package_cache_dir=Path("/a/b/c"),
        package_directories=[],
        package_manager_trees=[ConfigTree(Path("/foo/bar"), None)],
        packages=[],
        pass_environment=["abc"],
        passphrase=None,
        postinst_scripts=[Path("/bar/qux")],
        postoutput_scripts=[Path("/foo/src")],
        prepare_scripts=[Path("/run/foo")],
        profile="profile",
        proxy_client_certificate=Path("/my/client/cert"),
        proxy_client_key=Path("/my/client/key"),
        proxy_exclude=["www.example.com"],
        proxy_peer_certificate=Path("/my/peer/cert"),
        proxy_url="https://my/proxy",
        qemu_args=[],
        qemu_cdrom=False,
        qemu_drives=[
            QemuDrive("abc", 200, Path("/foo/bar"), "abc,qed", "red"),
            QemuDrive("abc", 200, None, "", "wcd"),
        ],
        qemu_firmware=QemuFirmware.linux,
        qemu_firmware_variables=Path("/foo/bar"),
        qemu_gui=True,
        qemu_kernel=None,
        qemu_kvm=ConfigFeature.auto,
        qemu_mem=123,
        qemu_smp=2,
        qemu_swtpm=ConfigFeature.auto,
        qemu_vsock=ConfigFeature.enabled,
        qemu_vsock_cid=QemuVsockCID.hash,
        release="53",
        remove_files=[],
        remove_packages=["all"],
        repart_dirs=[],
        repart_offline=True,
        repositories=[],
        repository_key_check=False,
        root_password=("test1234", False),
        root_shell="/bin/tcsh",
        runtime_build_sources=True,
        runtime_network=Network.interface,
        runtime_scratch=ConfigFeature.enabled,
        runtime_size=8589934592,
        runtime_trees=[ConfigTree(Path("/foo/bar"), Path("/baz")), ConfigTree(Path("/bar/baz"), Path("/qux"))],
        sector_size=None,
        secure_boot=True,
        secure_boot_auto_enroll=True,
        secure_boot_certificate=None,
        secure_boot_key=Path("/path/to/keyfile"),
        secure_boot_key_source=KeySource(type=KeySource.Type.file),
        secure_boot_sign_tool=SecureBootSignTool.pesign,
        seed=uuid.UUID("7496d7d8-7f08-4a2b-96c6-ec8c43791b60"),
        selinux_relabel=ConfigFeature.disabled,
        shim_bootloader=ShimBootloader.none,
        sign=False,
        sign_expected_pcr=ConfigFeature.disabled,
        skeleton_trees=[ConfigTree(Path("/foo/bar"), Path("/")), ConfigTree(Path("/bar/baz"), Path("/qux"))],
        source_date_epoch=12345,
        split_artifacts=True,
        ssh=False,
        ssh_certificate=Path("/path/to/cert"),
        ssh_key=None,
        sync_scripts=[Path("/sync")],
        timezone=None,
        tools_tree=None,
        tools_tree_certificates=True,
        tools_tree_distribution=Distribution.fedora,
        tools_tree_mirror=None,
        tools_tree_package_manager_trees=[ConfigTree(Path("/a/b/c"), Path("/"))],
        tools_tree_packages=[],
        tools_tree_release=None,
        tools_tree_repositories=["abc"],
        unified_kernel_image_format="myuki",
        unified_kernel_images=ConfigFeature.auto,
        unit_properties=["PROPERTY=VALUE"],
        use_subvolumes=ConfigFeature.auto,
        verity_certificate=Path("/path/to/cert"),
        verity_key=None,
        verity_key_source=KeySource(type=KeySource.Type.file),
        volatile_package_directories=[Path("def")],
        volatile_packages=["abc"],
        with_docs=True,
        with_network=False,
        with_recommends=True,
        with_tests=True,
        workspace_dir=Path("/cwd"),
    )

    assert args.to_json() == dump.rstrip()
    assert Config.from_json(dump) == args
mkosi-24.3/tests/test_sysext.py000066400000000000000000000013471465176501400167140ustar00rootroot00000000000000
# SPDX-License-Identifier: LGPL-2.1-or-later
from pathlib import Path

import pytest

from . import Image, ImageConfig

pytestmark = pytest.mark.integration


def test_sysext(config: ImageConfig) -> None:
    with Image(
        config,
        options=[
            "--incremental",
            "--clean-package-metadata=no",
            "--format=directory",
        ],
    ) as image:
        image.build()

        with Image(
            image.config,
            options=[
                "--directory", "",
                "--base-tree", Path(image.output_dir) / "image",
                "--overlay",
                "--package=dnsmasq",
                "--format=disk",
            ],
        ) as sysext:
            sysext.build()
mkosi-24.3/tests/test_versioncomp.py000066400000000000000000000205641465176501400177230ustar00rootroot00000000000000
# SPDX-License-Identifier: LGPL-2.1-or-later
import itertools

import pytest

from mkosi.versioncomp import GenericVersion


def test_conversion() -> None:
    assert GenericVersion("1") < 2
    assert GenericVersion("1") < "2"
    assert GenericVersion("2") > 1
    assert GenericVersion("2") > "1"
    assert GenericVersion("1") == "1"


def test_generic_version_systemd() -> None:
    """Same as the first block of systemd/test/test-compare-versions.sh"""
    assert GenericVersion("1") < GenericVersion("2")
    assert GenericVersion("1") <= GenericVersion("2")
    assert GenericVersion("1") != GenericVersion("2")
    assert not (GenericVersion("1") > GenericVersion("2"))
    assert not (GenericVersion("1") == GenericVersion("2"))
    assert not (GenericVersion("1") >= GenericVersion("2"))
    assert GenericVersion.compare_versions("1", "2") == -1
    assert GenericVersion.compare_versions("2", "2") == 0
    assert GenericVersion.compare_versions("2", "1") == 1


def test_generic_version_spec() -> None:
    """Examples from the uapi group version format spec"""
    assert GenericVersion("11") == GenericVersion("11")
    assert GenericVersion("systemd-123") == GenericVersion("systemd-123")
    assert GenericVersion("bar-123") < GenericVersion("foo-123")
    assert GenericVersion("123a") > GenericVersion("123")
    assert GenericVersion("123.a") > GenericVersion("123")
    assert GenericVersion("123.a") < GenericVersion("123.b")
    assert GenericVersion("123a") > GenericVersion("123.a")
    assert GenericVersion("11α") == GenericVersion("11β")
    assert GenericVersion("A") < GenericVersion("a")
    assert GenericVersion("") < GenericVersion("0")
    assert GenericVersion("0.") > GenericVersion("0")
    assert GenericVersion("0.0") > GenericVersion("0")
    assert GenericVersion("0") > GenericVersion("~")
    assert GenericVersion("") > GenericVersion("~")
    assert GenericVersion("1_") == GenericVersion("1")
    assert GenericVersion("_1") == GenericVersion("1")
    assert GenericVersion("1_") < GenericVersion("1.2")
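    # "_" and "+" are ignored entirely by the comparison, so "1_2_3" below compares like "123".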
GenericVersion("1.2") assert GenericVersion("1_2_3") > GenericVersion("1.3.3") assert GenericVersion("1+") == GenericVersion("1") assert GenericVersion("+1") == GenericVersion("1") assert GenericVersion("1+") < GenericVersion("1.2") assert GenericVersion("1+2+3") > GenericVersion("1.3.3") @pytest.mark.parametrize( "s1,s2", itertools.combinations_with_replacement( enumerate( [ GenericVersion("122.1"), GenericVersion("123~rc1-1"), GenericVersion("123"), GenericVersion("123-a"), GenericVersion("123-a.1"), GenericVersion("123-1"), GenericVersion("123-1.1"), GenericVersion("123^post1"), GenericVersion("123.a-1"), GenericVersion("123.1-1"), GenericVersion("123a-1"), GenericVersion("124-1"), ], ), 2 ) ) def test_generic_version_strverscmp_improved_doc( s1: tuple[int, GenericVersion], s2: tuple[int, GenericVersion], ) -> None: """Example from the doc string of strverscmp_improved. strverscmp_improved can be found in systemd/src/fundamental/string-util-fundamental.c """ i1, v1 = s1 i2, v2 = s2 assert (v1 == v2) == (i1 == i2) assert (v1 < v2) == (i1 < i2) assert (v1 <= v2) == (i1 <= i2) assert (v1 > v2) == (i1 > i2) assert (v1 >= v2) == (i1 >= i2) assert (v1 != v2) == (i1 != i2) def RPMVERCMP(a: str, b: str, expected: int) -> None: assert (GenericVersion(a) > GenericVersion(b)) - (GenericVersion(a) < GenericVersion(b)) == expected def test_generic_version_rpmvercmp() -> None: # Tests copied from rpm's rpmio test suite, under the LGPL license: # https://github.com/rpm-software-management/rpm/blob/master/tests/rpmvercmp.at. # The original form is retained as much as possible for easy comparisons and updates. RPMVERCMP("1.0", "1.0", 0) RPMVERCMP("1.0", "2.0", -1) RPMVERCMP("2.0", "1.0", 1) RPMVERCMP("2.0.1", "2.0.1", 0) RPMVERCMP("2.0", "2.0.1", -1) RPMVERCMP("2.0.1", "2.0", 1) RPMVERCMP("2.0.1a", "2.0.1a", 0) RPMVERCMP("2.0.1a", "2.0.1", 1) RPMVERCMP("2.0.1", "2.0.1a", -1) RPMVERCMP("5.5p1", "5.5p1", 0) RPMVERCMP("5.5p1", "5.5p2", -1) RPMVERCMP("5.5p2", "5.5p1", 1) RPMVERCMP("5.5p10", "5.5p10", 0) RPMVERCMP("5.5p1", "5.5p10", -1) RPMVERCMP("5.5p10", "5.5p1", 1) RPMVERCMP("10xyz", "10.1xyz", 1) # Note: this is reversed from rpm's vercmp */ RPMVERCMP("10.1xyz", "10xyz", -1) # Note: this is reversed from rpm's vercmp */ RPMVERCMP("xyz10", "xyz10", 0) RPMVERCMP("xyz10", "xyz10.1", -1) RPMVERCMP("xyz10.1", "xyz10", 1) RPMVERCMP("xyz.4", "xyz.4", 0) RPMVERCMP("xyz.4", "8", -1) RPMVERCMP("8", "xyz.4", 1) RPMVERCMP("xyz.4", "2", -1) RPMVERCMP("2", "xyz.4", 1) RPMVERCMP("5.5p2", "5.6p1", -1) RPMVERCMP("5.6p1", "5.5p2", 1) RPMVERCMP("5.6p1", "6.5p1", -1) RPMVERCMP("6.5p1", "5.6p1", 1) RPMVERCMP("6.0.rc1", "6.0", 1) RPMVERCMP("6.0", "6.0.rc1", -1) RPMVERCMP("10b2", "10a1", 1) RPMVERCMP("10a2", "10b2", -1) RPMVERCMP("1.0aa", "1.0aa", 0) RPMVERCMP("1.0a", "1.0aa", -1) RPMVERCMP("1.0aa", "1.0a", 1) RPMVERCMP("10.0001", "10.0001", 0) RPMVERCMP("10.0001", "10.1", 0) RPMVERCMP("10.1", "10.0001", 0) RPMVERCMP("10.0001", "10.0039", -1) RPMVERCMP("10.0039", "10.0001", 1) RPMVERCMP("4.999.9", "5.0", -1) RPMVERCMP("5.0", "4.999.9", 1) RPMVERCMP("20101121", "20101121", 0) RPMVERCMP("20101121", "20101122", -1) RPMVERCMP("20101122", "20101121", 1) RPMVERCMP("2_0", "2_0", 0) RPMVERCMP("2.0", "2_0", -1) # Note: in rpm those compare equal RPMVERCMP("2_0", "2.0", 1) # Note: in rpm those compare equal # RhBug:178798 case */ RPMVERCMP("a", "a", 0) RPMVERCMP("a+", "a+", 0) RPMVERCMP("a+", "a_", 0) RPMVERCMP("a_", "a+", 0) RPMVERCMP("+a", "+a", 0) RPMVERCMP("+a", "_a", 0) RPMVERCMP("_a", "+a", 0) RPMVERCMP("+_", "+_", 0) 
RPMVERCMP("_+", "+_", 0) RPMVERCMP("_+", "_+", 0) RPMVERCMP("+", "_", 0) RPMVERCMP("_", "+", 0) # Basic testcases for tilde sorting RPMVERCMP("1.0~rc1", "1.0~rc1", 0) RPMVERCMP("1.0~rc1", "1.0", -1) RPMVERCMP("1.0", "1.0~rc1", 1) RPMVERCMP("1.0~rc1", "1.0~rc2", -1) RPMVERCMP("1.0~rc2", "1.0~rc1", 1) RPMVERCMP("1.0~rc1~git123", "1.0~rc1~git123", 0) RPMVERCMP("1.0~rc1~git123", "1.0~rc1", -1) RPMVERCMP("1.0~rc1", "1.0~rc1~git123", 1) # Basic testcases for caret sorting RPMVERCMP("1.0^", "1.0^", 0) RPMVERCMP("1.0^", "1.0", 1) RPMVERCMP("1.0", "1.0^", -1) RPMVERCMP("1.0^git1", "1.0^git1", 0) RPMVERCMP("1.0^git1", "1.0", 1) RPMVERCMP("1.0", "1.0^git1", -1) RPMVERCMP("1.0^git1", "1.0^git2", -1) RPMVERCMP("1.0^git2", "1.0^git1", 1) RPMVERCMP("1.0^git1", "1.01", -1) RPMVERCMP("1.01", "1.0^git1", 1) RPMVERCMP("1.0^20160101", "1.0^20160101", 0) RPMVERCMP("1.0^20160101", "1.0.1", -1) RPMVERCMP("1.0.1", "1.0^20160101", 1) RPMVERCMP("1.0^20160101^git1", "1.0^20160101^git1", 0) RPMVERCMP("1.0^20160102", "1.0^20160101^git1", 1) RPMVERCMP("1.0^20160101^git1", "1.0^20160102", -1) # Basic testcases for tilde and caret sorting */ RPMVERCMP("1.0~rc1^git1", "1.0~rc1^git1", 0) RPMVERCMP("1.0~rc1^git1", "1.0~rc1", 1) RPMVERCMP("1.0~rc1", "1.0~rc1^git1", -1) RPMVERCMP("1.0^git1~pre", "1.0^git1~pre", 0) RPMVERCMP("1.0^git1", "1.0^git1~pre", 1) RPMVERCMP("1.0^git1~pre", "1.0^git1", -1) # These are included here to document current, arguably buggy behaviors # for reference purposes and for easy checking against unintended # behavior changes. */ print("/* RPM version comparison oddities */") # RhBug:811992 case RPMVERCMP("1b.fc17", "1b.fc17", 0) RPMVERCMP("1b.fc17", "1.fc17", 1) # Note: this is reversed from rpm's vercmp, WAT! */ RPMVERCMP("1.fc17", "1b.fc17", -1) RPMVERCMP("1g.fc17", "1g.fc17", 0) RPMVERCMP("1g.fc17", "1.fc17", 1) RPMVERCMP("1.fc17", "1g.fc17", -1) # Non-ascii characters are considered equal so these are all the same, eh… */ RPMVERCMP("1.1.α", "1.1.α", 0) RPMVERCMP("1.1.α", "1.1.β", 0) RPMVERCMP("1.1.β", "1.1.α", 0) RPMVERCMP("1.1.αα", "1.1.α", 0) RPMVERCMP("1.1.α", "1.1.ββ", 0) RPMVERCMP("1.1.ββ", "1.1.αα", 0) mkosi-24.3/tools/000077500000000000000000000000001465176501400137355ustar00rootroot00000000000000mkosi-24.3/tools/do-a-release.sh000077500000000000000000000013111465176501400165260ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later if [ -z "$1" ] ; then echo "Version number not specified." exit 1 fi VERSION="$1" if ! git diff-index --quiet HEAD; then echo "Repo has modified files." 
    exit 1
fi

sed -r -i "s/^version = \".*\"$/version = \"$VERSION\"/" pyproject.toml
sed -r -i "s/^__version__ = \".*\"$/__version__ = \"$VERSION\"/" mkosi/config.py

git add -p pyproject.toml mkosi
git commit -m "Release $VERSION"
git tag -s "v$VERSION" -m "mkosi $VERSION"

VERSION_MAJOR=${VERSION%%.*}
VERSION="$((VERSION_MAJOR + 1))~devel"

sed -r -i "s/^__version__ = \".*\"$/__version__ = \"$VERSION\"/" mkosi/config.py

git add -p mkosi
git commit -m "Bump version to $VERSION"
mkosi-24.3/tools/generate-zipapp.sh000077500000000000000000000004261465176501400173710ustar00rootroot00000000000000
#!/bin/bash

BUILDDIR=$(mktemp -d -q)

cleanup() {
    rm -rf "$BUILDDIR"
}

trap cleanup EXIT

mkdir -p builddir

cp -r mkosi "${BUILDDIR}/"

python3 -m zipapp \
    -p "/usr/bin/env python3" \
    -o builddir/mkosi \
    -m mkosi.__main__:main \
    "$BUILDDIR"
mkosi-24.3/tools/make-man-page.sh000077500000000000000000000003461465176501400166770ustar00rootroot00000000000000
#!/bin/sh
# SPDX-License-Identifier: LGPL-2.1-or-later
set -ex

pandoc -t man -s -o mkosi/resources/mkosi.1 mkosi/resources/mkosi.md
pandoc -t man -s -o mkosi/initrd/resources/mkosi-initrd.1 mkosi/initrd/resources/mkosi-initrd.md