==> mkosi-20.2/.dir-locals.el <==
; Sets emacs variables based on mode.
; A list of (major-mode . ((var1 . value1) (var2 . value2)))
; Mode can be nil, which gives default values.
; Note that we set a wider line width for source files, but for everything
; else we stick to a more conservative 79 characters.
; NOTE: Keep this file in sync with .editorconfig.
((python-mode . ((indent-tabs-mode . nil)
                 (tab-width . 4)
                 (fill-column . 99)))
 (sh-mode . ((sh-basic-offset . 4)
             (sh-indentation . 4)))
 (nil . ((indent-tabs-mode . nil)
         (tab-width . 4)
         (fill-column . 79))))

==> mkosi-20.2/.editorconfig <==
root = true

[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
charset = utf-8
indent_style = space
indent_size = 4

[*.yaml,*.yml]
indent_size = 2

==> mkosi-20.2/.github/workflows/ci.yml <==
name: CI

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  unit-test:
    runs-on: ubuntu-22.04
    concurrency:
      group: ${{ github.workflow }}-${{ github.ref }}
      cancel-in-progress: true

    steps:
      - uses: actions/checkout@v3

      - name: Install
        run: |
          sudo apt-get update
          sudo apt-get install pandoc python3-pytest
          python3 -m pip install --upgrade setuptools wheel pip
          python3 -m pip install mypy ruff
          npm install -g pyright

      - name: Run ruff
        run: |
          ruff --version
          ruff mkosi/ tests/ kernel-install/50-mkosi.install

      - name: Check that tabs are not used in code
        run: sh -c '! git grep -P "\\t" "*.py"'

      - name: Type Checking (mypy)
        run: |
          python3 -m mypy --version
          python3 -m mypy mkosi/ tests/ kernel-install/50-mkosi.install

      - name: Type Checking (pyright)
        run: |
          pyright --version
          pyright mkosi/ tests/ kernel-install/50-mkosi.install

      - name: Unit Tests
        run: |
          python3 -m pytest --version
          python3 -m pytest -sv tests/

      - name: Test execution from current working directory
        run: python3 -m mkosi -h

      - name: Test execution from current working directory (sudo call)
        run: sudo python3 -m mkosi -h

      - name: Test venv installation
        run: |
          python3 -m venv testvenv
          testvenv/bin/python3 -m pip install --upgrade setuptools wheel pip
          testvenv/bin/python3 -m pip install .
          testvenv/bin/mkosi -h
          rm -rf testvenv

      - name: Test editable venv installation
        run: |
          python3 -m venv testvenv
          testvenv/bin/python3 -m pip install --upgrade setuptools wheel pip
          testvenv/bin/python3 -m pip install --editable .
          testvenv/bin/mkosi -h
          rm -rf testvenv

      - name: Test zipapp creation
        run: |
          ./tools/generate-zipapp.sh
          ./builddir/mkosi -h
          ./builddir/mkosi documentation

      - name: Test shell scripts
        run: |
          sudo apt-get update && sudo apt-get install --no-install-recommends shellcheck
          bash -c 'shopt -s globstar; shellcheck bin/mkosi tools/*.sh'

      - name: Test man page generation
        run: pandoc -s mkosi.md -o mkosi.1

  integration-test:
    runs-on: ubuntu-22.04
    needs: unit-test
    concurrency:
      group: ${{ github.workflow }}-${{ matrix.distro }}-${{ matrix.tools }}-${{ github.ref }}
      cancel-in-progress: true

    strategy:
      fail-fast: false
      matrix:
        distro:
          - arch
          - centos
          - debian
          - fedora
          - opensuse
          - ubuntu
        tools:
          - arch
          - debian
          - fedora
          - opensuse
          # TODO: Add Ubuntu and CentOS once they have systemd v254 or newer.
        exclude:
          # pacman and archlinux-keyring are not packaged in OpenSUSE.
          - distro: arch
            tools: opensuse
          # apt, debian-keyring and ubuntu-keyring are not packaged in OpenSUSE.
          - distro: debian
            tools: opensuse
          - distro: ubuntu
            tools: opensuse
          # rpm in Debian is currently missing
          # https://github.com/rpm-software-management/rpm/commit/ea3187cfcf9cac87e5bc5e7db79b0338da9e355e
          - distro: fedora
            tools: debian
          - distro: centos
            tools: debian
          # This combination results in rpm failing because of SIGPIPE.
          # TODO: Try again once Arch gets a new rpm release.
          - distro: centos
            tools: arch

    steps:
      - uses: actions/checkout@v3
      - uses: ./

      - name: Install
        run: |
          sudo apt-get update
          sudo apt-get install python3-pytest lvm2 cryptsetup-bin
          # Make sure the latest changes from the pull request are used.
          sudo ln -svf $PWD/bin/mkosi /usr/bin/mkosi
        working-directory: ./

      - name: Configure
        run: |
          tee mkosi.local.conf < mkosi/resources/mkosi-tools/mkosi.pkgmngr/etc/dnf/dnf.conf
          # TODO: Remove once all distros have recent enough systemd that knows systemd.default_device_timeout_sec.
          mkdir -p mkosi-initrd/mkosi.extra/usr/lib/systemd/system.conf.d
          tee mkosi-initrd/mkosi.extra/usr/lib/systemd/system.conf.d/device-timeout.conf <

==> mkosi-20.2/LICENSE <==
    Copyright (C) <year>  <name of author>

    This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

Also add information on how to contact you by electronic and paper mail.

You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary.  Here is a sample; alter the names:

  Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker.

  <signature of Ty Coon>, 1 April 1990
  Ty Coon, President of Vice

That's all there is to it!
==> mkosi-20.2/LICENSE.GPL2 <==
                    GNU GENERAL PUBLIC LICENSE
                       Version 2, June 1991

 Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

 Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

                            Preamble

  The licenses for most software are designed to take away your freedom to share and change it.  By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users.  This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it.  (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.)  You can apply it to your programs, too.

  When we speak of free software, we are referring to freedom, not price.  Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.

  To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights.  These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.

  For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have.  You must make sure that they, too, receive or can get the source code.  And you must show them these terms so they know their rights.

  We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.

  Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software.  If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations.

  Finally, any free program is threatened constantly by software patents.  We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary.  To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all.

  The precise terms and conditions for copying, distribution and modification follow.

                    GNU GENERAL PUBLIC LICENSE
   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

  0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License.  The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language.  (Hereinafter, translation is included without limitation in the term "modification".)
Each licensee is addressed as "you".

Activities other than copying, distribution and modification are not covered by this License; they are outside its scope.  The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program).  Whether that is true depends on what the Program does.

  1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program.

You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.

  2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:

    a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change.

    b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License.

    c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License.  (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.)

These requirements apply to the modified work as a whole.  If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works.  But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program.

In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.
  3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following:

    a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,

    b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or,

    c) Accompany it with the information you received as to the offer to distribute corresponding source code.  (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.)

The source code for a work means the preferred form of the work for making modifications to it.  For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable.  However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.

If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code.

  4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License.  Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License.  However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.

  5. You are not required to accept this License, since you have not signed it.  However, nothing else grants you permission to modify or distribute the Program or its derivative works.  These actions are prohibited by law if you do not accept this License.  Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it.

  6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions.  You may not impose any further restrictions on the recipients' exercise of the rights granted herein.  You are not responsible for enforcing compliance by third parties to this License.
  7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License.  If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all.  For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.

If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.

It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices.  Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.

This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.

  8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded.  In such case, this License incorporates the limitation as if written in the body of this License.

  9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time.  Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number.  If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation.  If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.

  10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission.  For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this.  Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.

                            NO WARRANTY

  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

Also add information on how to contact you by electronic and paper mail.

If the program is interactive, make it output a short notice like this when it starts in an interactive mode:

    Gnomovision version 69, Copyright (C) year name of author
    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License.  Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program.

You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary.  Here is a sample; alter the names:

  Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker.

  <signature of Ty Coon>, 1 April 1989
  Ty Coon, President of Vice

This General Public License does not permit incorporating your program into proprietary programs.
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library.  If this is what you want to do, use the GNU Lesser General Public License instead of this License.

==> mkosi-20.2/MANIFEST.in <==
include LICENSE

==> mkosi-20.2/NEWS.md <==
# mkosi Changelog

## v20.2

- Fixed a bug in signing unsigned shim EFI binaries.
- We now build an early microcode initrd in the mkosi kernel-install plugin.
- Added `PackageDirectories=` to allow providing extra packages to be made
  available during the build (see the sketch after this section).
- Fixed issue where `KernelModulesIncludeHost` was including unnecessary
  modules.
- Fixed `--mirror` specification for CentOS (and variants) and Fedora.
  Previously a subdirectory within the mirror had to be specified which
  prevented using CentOS and EPEL repositories from the same mirror. Now only
  the URL has to be specified.
- We now mount package manager cache directories when running scripts on the
  host so that any packages installed in scripts are properly cached.
- We don't download filelists on Fedora anymore.
- Nested build sources don't cause errors anymore when trying to install
  packages.
- We don't try to build the same tools tree more than once anymore when
  building multiple images.
- We now create the `/etc/mtab` compatibility symlink in mkosi's sandbox.
- We now always hash the root password ourselves instead of leaving it to
  `systemd-firstboot`.
- `/srv` and `/mnt` are not mounted read-only anymore during builds.
- Fixed a crash when running mkosi in a directory with fewer than two parent
  directories.
- Implemented `RepositoryKeyCheck=` for apt-based distributions.
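The new `PackageDirectories=` setting can be exercised with something like the
following sketch; the directory name and the `[Content]` section placement are
illustrative assumptions, not taken verbatim from the mkosi documentation:

```sh
# Sketch: make locally built packages available to the package manager during
# the build. "my-packages" is a hypothetical directory of local RPMs/debs, and
# the [Content] placement is an assumption.
mkdir -p my-packages
cat >>mkosi.local.conf <<'EOF'
[Content]
PackageDirectories=my-packages
EOF
mkosi build
```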
## v20.1

- `BuildSources=` are now mounted when we install packages so local packages
  can be made available in the sandbox.
- Fixed check to see if we're running as root which makes sure we don't do
  shared mounts when running as root.
- The extension release file is now actually written when building system or
  configuration extensions.
- The nspawn settings are copied to the output directory again.
- Incremental caching is now skipped when `Overlay=` is enabled as this
  combination isn't supported.
- The SELinux relabel check is more granular and now checks for all required
  files instead of just whether there's a policy configured.
- `qemu-system-xxx` binaries are now preferred over the generic `qemu` and
  `qemu-kvm` binaries.
- Grub tools from the tools tree are now used to install grub instead of grub
  tools from the image itself. The grub tools were added to the default tools
  trees as well.
- The pacman keyring in tools trees is now only populated from the Arch Linux
  keyring (and not the Debian/Ubuntu ones anymore).
- `gpg` is allowed to access `/run/pcscd/pcscd.comm` on the host if it exists
  to allow interaction with smartcards.

## v20

- The current working directory is not mounted unconditionally to `/work/src`
  anymore. Instead, the default value for `BuildSources=` now mounts the
  current working directory to `/work/src`. This means that the current
  working directory is no longer implicitly included when `BuildSources=` is
  explicitly configured.
- Assigning the empty string to a setting that takes a list of values now
  overrides any configured default value as well.
- The github action does not build and install systemd from source anymore.
  Instead, `ToolsTree=default` can be used to make sure a recent version of
  systemd is used to do the image build.
- Added `EnvironmentFiles=` to read environment variables from environment
  files.
- We drastically reduced how much of the host system we expose to scripts.
  Aside from `/usr`, a few directories in `/etc`, `/tmp`, `/var/tmp` and
  various directories configured in mkosi settings, all host directories are
  hidden from scripts, package managers and other tools executed by mkosi.
- Added `RuntimeScratch=` to automatically mount a directory with extra
  scratch space into mkosi-spawned containers and virtual machines.
- Package manager trees can now be used to configure every tool invoked by
  mkosi while building an image that reads config files from `/etc` or `/usr`.
- Added `SELinuxRelabel=` to specify whether to relabel selinux files or not.
- Many fixes to tools trees were made and tools trees are now covered by CI.
  Some combinations aren't possible yet but we're actively working to make
  these possible.
- `mkosi qemu` now supports direct kernel boots of `s390x` and `powerpc`
  images.
- Added `HostArchitecture=` match to match against the host architecture.
- We don't use the user's SSH public/private keypair anymore for `mkosi ssh`
  but instead use a separate key pair which can be generated by
  `mkosi genkey`. Users using `mkosi ssh` will have to run `mkosi genkey` once
  to generate the necessary files to keep `mkosi ssh` working.
- We don't automatically set `--offline=no` anymore when we detect the
  `Subvolumes=` setting is used in a `systemd-repart` partition definition
  file. Instead, use the new `RepartOffline=` option to explicitly disable
  running `systemd-repart` in offline mode.
- During the image build we now install UKIs/kernels/initrds to `/boot`
  instead of `/efi`. While this will generally not be noticeable, users with
  custom systemd-repart ESP partition definitions will need to add
  `CopyFiles=/boot:/` along with the usual `CopyFiles=/efi:/` to their ESP
  partition definitions (see the sketch after this section). By installing
  UKIs/kernels/initrds to `/boot`, it becomes possible to use `/boot` to
  populate an XBOOTLDR partition which wasn't possible before. Note that this
  is also safe to do before `v20` so `CopyFiles=/boot:/` can unconditionally
  be added to any ESP partition definition files.
- Added `QemuFirmwareVariables=` to allow specifying a custom OVMF variables
  file to use.
- Added `MinimumVersion=` to allow specifying the minimum required mkosi
  version to build an image.
- Added support for Arch Linux's debug repositories.
- Merged the mkosi-initrd project into mkosi itself. mkosi-initrd is now used
  to build the default initrd.
- Implemented mkosi-initrd for all supported distributions.
- Added `ShimBootloader=` to support installing shim to the ESP.
- Added sysext, confext and portable output formats. These will produce
  signed disk images that can be used as sysexts, confexts and portable
  services respectively.
- Added `QemuVsockConnectionId=` to configure how to allocate the vsock
  connection ID when `QemuVsock=` is enabled.
- Added documentation on how to build sysexts with mkosi.
- Global systemd user presets are now also configured.
- Implemented `WithDocs=` for `apt`.
- On supported package managers, locale data for other locales is now
  stripped if the locale is explicitly configured using `Locale=`.
- All `rpm` plugins are now disabled when building images.
- Added `KernelModulesIncludeHost=` and `KernelModulesInitrdIncludeHost=` to
  only include modules loaded on the host system in the image/initrd
  respectively.
- Implemented `RemovePackages=` for Arch Linux.
- Added `useradd` and `groupadd` scripts to configure these binaries to
  operate on the image during builds instead of on the host.
- Added microcode support. If installed into the image, an early microcode
  initrd will automatically be built and prepended to the initrd.
- A passwordless root account may now be created by specifying `hashed:`.
- The `Autologin=` feature was extended with support for `arm64`, `s390x` and
  `powerpc` architectures.
- Added `SecureBootAutoEnroll=` to control automatic enrollment of secureboot
  keys separately from signing `systemd-boot` and generated UKIs.
- `ImageVersion=` is no longer automatically appended to the output files;
  instead, it is automatically appended to `Output=` if not specified, which
  results in the `%o` specifier being equivalent to `%i` or `%i_%v` depending
  on whether `ImageVersion=` is specified.
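To illustrate the `/boot` change above, an ESP definition along the following
lines should work both before and after v20; the file name and sizes are
arbitrary, and only the two `CopyFiles=` lines come from the entry itself:

```sh
# Sketch of a systemd-repart ESP definition that picks up both /efi and /boot.
# mkosi reads partition definitions from mkosi.repart/; name and sizes are
# arbitrary examples.
mkdir -p mkosi.repart
cat >mkosi.repart/00-esp.conf <<'EOF'
[Partition]
Type=esp
Format=vfat
CopyFiles=/efi:/
CopyFiles=/boot:/
SizeMinBytes=512M
SizeMaxBytes=512M
EOF
```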
## v19

- Support for RHEL was added!
- Added `journalctl` and `coredumpctl` verbs for running the respective tools
  on built directory or disk images.
- Added a `burn` verb to write the output image to a block device.
- Added a new `esp` output format, which is largely similar to the existing
  `uki` output format but wraps it in a disk image with only an ESP.
- `Presets` were renamed to `Images`. `mkosi.images/` is now used instead of
  `mkosi.presets/`, the `Presets=` setting was renamed to `Images=` and the
  `Presets` section was merged into the `Config` section. The old names can
  still be used for backwards compatibility.
- Added profiles to support building variants of the same image in one
  repository. Profiles can be defined in `mkosi.profiles/` and one can be
  selected using the new `Profile=` setting (see the sketch at the end of
  this section).
- mkosi will now parse `mkosi.local.conf` before any other config files if
  that exists.
- Added a kernel-install plugin. This is only shipped in source tree and not
  included in the Python module.
- Added a `--json` option to get the output of `mkosi summary` as JSON.
- Added shorthand `-a` for `--autologin`.
- Added a `--debug-workspace` option to not remove the workspace directory
  after a build. This is useful to inspect the workspace after failing
  builds. As a consequence, the default workspace directory prefix has been
  changed from `.mkosi-tmp` to `mkosi-workspace`.
- Scripts with the `.chroot` extension are now executed in the image
  automatically.
- Added `rpm` helper script to have `rpm` automatically operate on the image
  when running scripts.
- Added `mkosi-as-caller` helper script that can be used in scripts to run
  commands as the user invoking mkosi.
- `mkosi-chroot` will now start a shell if no arguments are specified.
- Added `WithRecommends=` to configure whether to install recommended
  packages by default or not where this is supported. It is disabled by
  default.
- Added `ToolsTreeMirror=` setting for configuring the mirror to use for the
  default tools tree.
- `WithDocs=` is now enabled by default.
- Added `BuildSourcesEphemeral=` to make source directories ephemeral when
  running scripts. This means any changes made to source directories while
  running scripts will be undone after the scripts have finished executing.
- Added `QemuDrives=` to have mkosi create extra qemu drives and pass them to
  qemu when using the `qemu` verb.
- Added `BuildSources=` match to match against configured build source
  targets.
- `PackageManagerTrees=` was moved to the `Distribution` section.
- We now automatically configure the qemu firmware, kernel cmdline and initrd
  based on what type of kernel is passed by the user via `-kernel` or
  `QemuKernel=`.
- The mkosi repository itself now ships configuration to build basic bootable
  images that can be used to test mkosi.
- Added support for enabling `updates-testing` repositories for Fedora.
- GPG keys for CentOS, Fedora, Alma and Rocky are now looked up locally first
  before fetching them remotely.
- Signatures are not required for local packages on Arch anymore.
- Packages on opensuse are now always downloaded in advance before
  installation when using zypper.
- The tar output is now reproducible.
- We now make sure `git` can be executed from mkosi scripts without running
  into permission errors.
- We don't create subdirectories beneath the configured cache directory
  anymore.
- Workspace directories are now created outside of any source directories.
  mkosi will either use `XDG_CACHE_HOME`, `$HOME/.cache` or `/var/tmp`
  depending on the situation.
- Added environment variable `MKOSI_DNF` to override which dnf to use for
  building images (`dnf` or `dnf5`).
- The rootfs can now be modified when running build scripts (with all changes
  thrown away after the last build script has been executed).
- mkosi now fails if configuration specified via the CLI does not apply to
  any image (because it is overridden).
- Added a new doc on building rpms from source with mkosi
  (`docs/building-rpms-from-source.md`).
- `/etc/resolv.conf` will now only be mounted for scripts when they are run
  with network access.
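As a sketch of the profile mechanism referenced above, a hypothetical `debug`
profile could look like this; the profile name, its contents, and the
`--profile` invocation are illustrative assumptions:

```sh
# Sketch: define a hypothetical "debug" profile and select it for one build.
mkdir -p mkosi.profiles
cat >mkosi.profiles/debug.conf <<'EOF'
[Content]
Packages=gdb
EOF
mkosi --profile debug build
```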
## v18

- `$SCRIPT` was renamed to `$CHROOT_SCRIPT`. `$SCRIPT` can still be used but
  is considered deprecated.
- Added `RuntimeTrees=` setting to mount directories when booting images via
  `mkosi boot`, `mkosi shell` or `mkosi qemu`. The directories are mounted
  with a uid map that maps the user invoking mkosi to the root user so that
  all files in the directory appear as if owned by the root user in the
  container or virtual machine and any new files created in the directories
  are owned by the user invoking mkosi. To make this work in VMs, we use
  `VirtioFS` via `virtiofsd`. Note that this requires systemd v254 or newer
  to be installed in the image.
- Added support for booting directory images with `mkosi qemu` via
  `VirtioFS`. When `CONFIG_VIRTIOFS` and `CONFIG_VIRTIO_PCI` are builtin
  modules, no initramfs is required to make this work.
- Added `Include=` or `--include` to include extra configuration files or
  directories.
- Added support for specifiers to access the current value of certain
  settings during configuration file parsing.
- `mkosi` will now exit with an error when no configuration was provided.
- Multiple scripts of the same type are now supported.
- Custom distributions are now supported via the new `custom` distribution.
  When using `custom` as the distribution, the rootfs must be provided via
  base trees, skeleton trees or prepare scripts.
- We now use local GPG keys for rpm based distributions if the
  `distribution-gpg-keys` package is installed on the host.
- Added `RuntimeSize=` to grow the image to a specific size before booting it
  when using `mkosi boot` or `mkosi qemu`.
- We now set `MKOSI_UID` and `MKOSI_GID` when running scripts which are set
  to the uid and gid of the user invoking mkosi respectively. These can be
  used to run commands as the user that invoked mkosi (see the sketch at the
  end of this section).
- Added an `Architecture=` match.
- Initrds specified with `Initrds=` are now used for grub menuentries as
  well.
- `ImageId=` and `ImageVersion=` are now written to os-release as `IMAGE_ID`
  and `IMAGE_VERSION` if provided.
- We pass command line arguments passed to the `build` verb to the build
  script again.
- We added support for the "RHEL Universal Base Image" distribution.
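A sketch of using the `$MKOSI_UID`/`$MKOSI_GID` variables from the entry above
inside a mkosi script; the `setpriv` call and the `$SRCDIR` variable are
illustrative assumptions, not something the entry itself prescribes:

```sh
#!/bin/sh
# Sketch for a mkosi script: run a command as the user that invoked mkosi
# instead of as root. setpriv is one of several ways to drop privileges;
# $SRCDIR pointing at the sources is an assumption.
set -e
setpriv --reuid="$MKOSI_UID" --regid="$MKOSI_GID" --clear-groups \
    git -C "$SRCDIR" status
```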
## v17.1

- Fixed bug where `--autologin` was broken when used in combination with a
  tools tree when using a packaged version of mkosi.

## v17

- Added `ToolsTreePackages=` to add extra packages to the default tools tree.
- Added `SystemdVersion=` match to match on the host's systemd version.
- Added `Format=` match to match on the configured output format.
- `Presets=` can now be configured in global configuration files to select
  which presets to build.
- UKIs can now be booted using direct linux boot.
- We don't try to make images UEFI bootable anymore on architectures that do
  not support UEFI.
- Fixed `--help` to show all options again.
- We now warn when settings are configured in the wrong section.

## v16

- `mkosi.version` is now picked up from preset and dropin directories as well
  following the usual config precedence logic.
- Removed the "first assignment wins" logic from configuration parsing.
  Settings parsed later will now override earlier values.
- Removed the `!` operator for lists. Instead, assign the empty string to the
  list to remove all previous values.
- Added support for configuring custom default values for settings by
  prefixing their name in the configuration file with `@`.
- Added `QemuCdrom=` to attach the image to the virtual machine as a CD-ROM
  instead of a block device.
- Added `SectorSize=` to set the sector size of the disk images built by
  systemd-repart.
- Added back grub support (BIOS/UEFI). Note that we don't install grub on
  UEFI yet but we do add the necessary configuration and partitions.
- Added `Bootloader=` option to configure which EFI bootloader to install.
  Added `uki` option to install just the UKI without systemd-boot and `grub`
  to generate grub configuration to chainload into the built UKIs.
- Added `BiosBootloader=` to configure whether grub for BIOS gets installed
  or not.
- Added `QemuFirmware=` to select which qemu firmware to use (OVMF, Seabios
  or direct kernel boot).
- Added `QemuKernel=` to specify the kernel that should be used with direct
  kernel boot.
- `/var/lib/dbus/machine-id` is now removed if it was added by a package
  manager postinstall script.
- The manifest is not generated by default anymore. Use
  `ManifestFormat=json` to make sure the manifest is generated.
- Added `SourceDateEpoch=` to enable more reproducible image builds.
- Added `Seed=` to set the seed passed to systemd-repart.
- Updated the default Fedora release to Fedora 39.
- If `ToolsTree=` is set to `default`, mkosi will now build a default tools
  tree containing all the necessary tools to build images. The distribution
  and release to use can be configured with `ToolsTreeDistribution=` and
  `ToolsTreeRelease=` or are determined automatically based on the image
  being built (see the sketch after this section).
- Added `uki` output format. This is similar to `cpio`, except the cpio is
  packaged up as a UKI with a kernel image and stub picked up from the
  rootfs.
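A sketch of opting into the default tools tree described above; the `[Host]`
section placement is an assumption based on contemporary mkosi configuration,
and the distribution value is an arbitrary example:

```sh
# Sketch: build with mkosi's own tools tree instead of the host's tools.
# The [Host] section placement is an assumption.
cat >>mkosi.local.conf <<'EOF'
[Host]
ToolsTree=default
ToolsTreeDistribution=fedora
EOF
mkosi build
```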
## v15.1

- The man page can be generated from the markdown file via
  `tools/make-man-page.sh`.
- Fixed issue where not all packages and data files were included in the
  generated python package.
- mkosi doesn't try to unshare the network namespace anymore when it doesn't
  have `CAP_NET_ADMIN`.
- Fixed issue when the workspace was located in `/tmp`.
- Don't try to run `timedatectl` or `ssh-add` when they're not installed.

## v15

- Migrated to systemd-repart. Many options are dropped in favor of specifying
  them directly in repart partition definition files:
  - Format=gpt_xxx options are replaced with a single "disk" option.
    Filesystem to use can now be specified with repart's Format= option.
  - Format=plain_squashfs (Can be reproduced by a single repart squashfs root
    partition combined with SplitArtifacts=yes)
  - Verity= (Replaced by repart's Verity= options)
  - Encrypt= (Replaced by repart's Encrypt= option)
  - RootSize=, HomeSize=, VarSize=, TmpSize=, ESPSize=, SwapSize=, SrvSize=
    (Replaced by repart's size options)
  - UsrOnly= (replaced with `CopyFiles=/:/usr` in a usr partition definition)
  - OutputSplitRoot=, OutputSplitVerity= (Replaced by repart's SplitName=
    option)
  - OutputSplitKernel= (UKI is now always written to its own output file)
  - GPTFirstLBA (Removed, no equivalent in repart)
  - ReadOnly= (Replaced by repart's ReadOnly= option per partition)
  - Minimize= (Replaced by repart's Minimize= option per partition)
  - CompressFs= (No equivalent in repart, can be replicated by replacing
    mkfs. in $PATH with a script that adds the necessary command line option)
  - MkSquashfs= (Can be replaced with a script in $PATH that invokes the
    correct binary)

  We also remove the WithoutUnifiedKernelImages= switch as building unified
  kernel images is trivial and fast these days.
- Support for --qemu-boot was dropped.
- Support for --use-host-repositories was dropped, use --repository-directory
  instead.
- `RepositoryDirectory` was removed, use `PackageManagerTrees=` or
  `SkeletonTrees=` instead.
- `--repositories` is now only usable on Debian/RPM based distros and can
  only be used to enable additional repositories. Specifically, it cannot be
  used on Arch Linux anymore to add new repositories.
- The `_epel` distributions were removed. Use `--repositories=epel` instead
  to enable the EPEL repository.
- Removed `-stream` from CentOS release specifiers. Instead of specifying
  `8-stream`, you now just specify `8`.
- Removed default kernel command line arguments `rhgb`, `selinux=0` and
  `audit=0`.
- Dropped --all and --all-directory as this functionality is better
  implemented by using a build system.
- mkosi now builds images without needing root privileges.
- Removed `--no-chown`, `--idmap` and `--nspawn-keep-unit` options as they
  were made obsolete by moving to rootless builds.
- Removed `--source-file-transfer`, `--source-file-transfer-final`,
  `--source-resolve-symlinks` and `--source-resolve-symlinks-final` in favor
  of always mounting the source directory into the build image.
  `--source-file-transfer-final` might be reimplemented in the future using
  virtiofsd.
- Dropped `--include-dir` option. Usage can be replaced by using
  `--incremental` and reading includes from the cached build image tree.
- Removed `--machine-id` in favor of shipping images without a machine ID at
  all.
- Removed `--skip-final-phase` as we only have a single phase now.
- The post install script is only called for the final image now and not for
  the build image anymore. Use the prepare script instead.
- `--ssh-key`, `--ssh-agent`, `--ssh-port` and `--ssh-timeout` options were
  dropped as the SSH support was reimplemented using VSock. `mkosi ssh` can
  only be used with images booted with `mkosi qemu`. Use `machinectl` to
  access images booted with `mkosi boot`.
  Use --extra-tree or --credential with the `.ssh.authorized_keys.root`
  credentials as alternatives for provisioning the public key inside the
  image.
- Only configuration files matching `*.conf` are parsed in dropin directories
  now.
- Removed `--qemu-headless`, we now start qemu in the terminal by default and
  configure the serial console at runtime. Use the new `--qemu-gui` option to
  start qemu in its graphical interface.
- Removed `--netdev`. Can be replaced by manually installing
  systemd-networkd, putting a network file in the image and enabling
  systemd-networkd.
- If `mkosi.extra/` or `mkosi.skeleton/` exist, they are now always used
  instead of only when no explicit extra/skeleton trees are defined.
- mkosi doesn't install any default packages anymore aside from packages
  required by the distro or the base filesystem layout package if there are
  no required packages. In practice, this means systemd and other basic tools
  have to be installed explicitly from now on.
- Removed `--base-packages` as it's not needed anymore since we don't install
  any packages by default anymore aside from the base filesystem layout
  package.
- Removed `--qcow2` option in favor of supporting only raw disk images as the
  disk image output format.
- Removed `--bmap` option as it can be trivially added manually by utilizing
  a finalize script.
- The `never` value for `--with-network` was spun off into its own custom
  option `--cache-only`.
- `--bootable` now defaults to `auto`. When set to `auto`, mkosi will
  generate a bootable image only if all the necessary packages are installed.
  Documentation was added in docs/bootable.md on how a bootable image can be
  generated on mainstream distros.
- The RPM db is no longer rebuilt in bdb format on CentOS Stream 8. To be
  able to install packages on a CentOS Stream 8 image with a RPM db in sqlite
  format, rewrite the db in bdb format using
  `rpm --rebuilddb --define _db_backend bdb`.
- Repositories are now only written to /etc/apt/sources.list if apt is
  installed in the image.
- Removed the dependency on `debootstrap` to build Ubuntu or Debian images.
- Apt now uses the keyring from the host instead of the keyring from the
  image. This means `debian-archive-keyring` or `ubuntu-archive-keyring` are
  now required to be installed to build Debian or Ubuntu images respectively.
- `--base-image` is split into `--base-tree` and `--overlay`.
- Removed `--cache-initrd`, instead, use a prebuilt initrd with `Initrds=` to
  avoid rebuilding the initrd all the time.
- Disk images are now resized to 8G when booted to give some disk space to
  play around with in the booted image.
- Removed `--install-directory=` option. This was originally added for
  caching the installation results, but this doesn't work properly as it
  might result in leftover files in the install directory from a previous
  installation, so we have to empty the directory before reusing it,
  invalidating the caching, so the option was removed.
- Build scripts are now executed on the host. See the `SCRIPTS` section in
  the manual for more information. Existing build scripts will need to be
  updated to make sure they keep working. Specifically, most paths in scripts
  will need to be prefixed with $BUILDROOT to have them operate on the image
  instead of on the host system (see the sketch at the end of this section).
  To ensure the host system cannot be modified when running a script, most
  host directories are mounted read-only when running a script to ensure a
  script cannot modify the host in any way.
  Alternatively to making the script run on the host, the script can also
  still be executed in the image itself by putting the following snippet at
  the top of the script:

  ```sh
  if [ "$container" != "mkosi" ]; then
      exec mkosi-chroot "$SCRIPT" "$@"
  fi
  ```
- Removed `--tar-strip-selinux-context=` option. We now label all files
  properly if selinux is enabled and if users don't want the labels, they can
  simply exclude them when extracting the archive.
- Gentoo is now marked as experimental and unsupported and there's no
  guarantee at all that it will work. Issues related to gentoo will generally
  not receive attention from core maintainers. All gentoo specific hacks
  outside of the gentoo implementation module have been removed.
- A verb `documentation` has been added. Calling mkosi with this verb will
  show the documentation. This is useful when running mkosi during
  development to always have the documentation in the correct version
  available. By default it will try several ways to output the documentation,
  but a specific option can be chosen with the `--doc-format` option. Distro
  packagers are encouraged to add a file `mkosi.1` into the `mkosi/resources`
  directory of the Python package, if it is missing, as well as install it in
  the appropriate search path for man pages. The man page can be generated
  from the markdown file `mkosi/resources/mkosi.md` e.g. via
  `pandoc -t man -s -o mkosi.1 mkosi.md`.
- BuildSources= now takes source:target pairs which specify the source
  directory and where to mount it relative to the top level source directory
  when running scripts. (e.g. BuildSources=../my-project:my-project)
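For the host-side script execution described above, a minimal post-install
script might look like the following sketch; the file it writes is just an
example:

```sh
#!/bin/sh
# mkosi.postinst sketch: scripts now run on the host, so paths into the image
# must be prefixed with $BUILDROOT (see the entry above).
set -e
mkdir -p "$BUILDROOT/etc/issue.d"
echo "built with mkosi" >"$BUILDROOT/etc/issue.d/50-mkosi.issue"
```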
## v14

- Support for Clear Linux was dropped. See
  https://github.com/systemd/mkosi/pull/1037 for more information.
- Support for Photon was dropped. See
  https://github.com/systemd/mkosi/pull/1048 for more information.
- The Arch kernel/bootloader pacman hooks were removed. For anyone that still
  wants to use them, they can be found
  [here](https://github.com/systemd/mkosi/tree/v13/mkosi/resources/arch).
- mkosi now creates `distro~release` subdirectories inside the build, cache
  and output directories for each `distro~release` combination that is built.
  This allows building for multiple distros without throwing away the results
  of a previous distro build every time.
- The preferred names for mkosi configuration files and directories are now
  `mkosi.conf` and `mkosi.conf.d/` respectively. The old names
  (`mkosi.default` and `mkosi.default.d`) have been removed from the docs but
  are still supported for backwards compatibility.
- `plain_squashfs` type images will now also be named with a `.raw` suffix.
- `tar` type images will now respect the `--compress` option.
- Pacman's `SigLevel` option was changed to use the same default value as
  used on Arch which is `SigLevel = Required DatabaseOptional`. If this
  results in keyring errors, you need to update the keyring by running
  `pacman-key --populate archlinux`.
- Support for CentOS 7 was dropped. If you still need to support CentOS 7, we
  recommend using any mkosi version up to 13.
- Support for BIOS/grub was dropped because EFI hardware is widely available
  and legacy BIOS systems do not support the feature set to fully verify a
  boot chain from firmware to userland and it has become bothersome to
  maintain for little use. To generate BIOS images you can use any version of
  mkosi up to mkosi 13 or the new `--bios-size` option. This can be used to
  add a BIOS boot partition of the specified size on which `grub` (or any
  other bootloader) can be installed with the help of mkosi's script support
  (depending on your needs most likely `mkosi.postinst` or `mkosi.finalize`).
  This method can also be used for other EFI bootloaders that mkosi
  intentionally does not support.
- mkosi now unconditionally copies the kernel, initrd and kernel cmdline from
  the image that were previously only copied out for Qemu boot.
- mkosi now runs apt and dpkg on the host. As such, we now require apt and
  dpkg to be installed on the host along with debootstrap in order to be able
  to build debian/ubuntu images.
- Split dm-verity artifacts default names have been changed to match what
  `systemd` and other tools expect: `image.root.raw`, `image.root.verity`,
  `image.root.roothash`, `image.root.roothash.p7s` (same for `usr` variants).
- `mkosi` will again default to the same OS release as the host system when
  the host system uses the same distribution as the image that's being built.
- By default, `mkosi` will now change the owner of newly created directories
  to `SUDO_UID` or `PKEXEC_UID` if defined, unless `--no-chown` is used.
- If `systemd-nspawn` v252 or newer is used, bind-mounted directories with
  `systemd-nspawn` will use the new `rootidmap` option so files and
  directories created from within the container will be owned by the actual
  directory owner on the host.

## v13

- The `--network-veth` option has been renamed to `--netdev`. The old name
  made sense with virtual ethernet devices, but when booting images with qemu
  a TUN/TAP device is used instead.
- The network config file installed by mkosi when the `--netdev` (previously
  `--network-veth`) option is used (formerly
  `/etc/systemd/network/80-mkosi-network-veth.network` in the image) now only
  matches network interfaces using the `virtio_net` driver. Please make sure
  you weren't relying on this file to configure any network interfaces other
  than the tun/tap virtio-net interface created by mkosi when booting the
  image in QEMU with the `--netdev` option.
- `mkosi` will no longer default to the same OS release as the host system
  when the host system uses the same distribution as the image that's being
  built. Instead, when no release is specified, mkosi will now always default
  to the default version embedded in mkosi itself.
- New `--repository-directory` option can be used to configure a directory with extra repository files to be used by the package manager when building an image. Note that this option is currently only supported for `pacman` and `dnf`-based distros. - Option `--skeleton-tree` is now supported on Debian-based distros. - Removed `--hostname` as its trivial to configure using systemd-firstboot. - Removed default locale configuration as its trivial to configure using systemd-firstboot and systemd writes a default locale well. ## v12 - Fix handling of baselayout in Gentoo installations. ## v11 - Support for Rocky Linux, Alma Linux, and Gentoo has been added! - A new `ManifestFormat=` option can be used to generate "manifest" files that describe what packages were installed. With `json`, a JSON file that shows the names and versions of all installed packages will be created. With `changelog`, a longer human-readable file that shows package descriptions and changelogs will be generated. This latter format should be considered experimental and likely to change in later versions. - A new `RemovePackages=` option can be used to uninstall packages after the build and finalize scripts have been done. This is useful for the case where packages are required by the build scripts, or pulled in as dependencies for scriptlets of other packages, but are not necessary in the final image. - A new `BaseImage=` option can be used to build "system extensions" a.k.a. "sysexts" — partial images which are mounted on top of an existing system to provide additional files under `/usr/`. See the [systemd-sysext man page](https://www.freedesktop.org/software/systemd/man/systemd-sysext.html) for more information. - A new `CleanPackageMetadata=` option can be used to force or disable the removal of package manager files. When this option is not used, they are removed when the package manager is not installed in the final image. - A new `UseHostRepositories=` option instructs mkosi to use repository configuration from the host system, instead of the internal list. - A new `SshAgent=` option configures the path to the ssh agent. - A new `SshPort=` option overrides the port used for ssh. - The `Verity=` setting supports a new value `signed`. When set, verity data will be signed and the result inserted as an additional partition in the image. See https://systemd.io/DISCOVERABLE_PARTITIONS for details about signed disk images. This information is used by `systemd-nspawn`, `systemd-dissect`, `systemd-sysext`, `systemd-portabled` and `systemd`'s `RootImage=` setting (among others) to cryptographically validate the image file systems before use. - The `--build-environment=` option was renamed to `--environment=` and extended to cover *all* invoked scripts, not just the `mkosi.build`. The old name is still understood. - With `--with-network=never`, `dnf` is called with `--cacheonly`, so that the package lists are not refreshed. This gives a degree of reproducibility when doing repeated installs with the same package set (and also makes installs significantly faster). - The `--debug=` option gained a new value `disk` to show information about disk sized and partition allocations. - Some sections and settings have been renamed for clarity: [Packages] is now [Content], `Password=`, `PasswordIsHashed=`, and `Autologin=` are now in [Content]. The old names are still supported, but not documented. - When `--prepare-script=`/`--build-script=`/`--finalize-script=` is used with an empty argument, the corresponding script will not be called. 
- Python 3.7 is the minimal supported version.
- Note to packagers: the Python `cryptography` module is needed for signing
  of verity data.

## v10

- Minimum supported Python version is now 3.7.
- Automatic configuration of the network for Arch Linux was removed to bring
  different distros more in line with each other. To add it back, add a
  postinstall script to configure your network manager of choice.
- The `--default` option was changed to not affect the search location of
  `mkosi.default.d/`. mkosi now always searches for `mkosi.default.d/` in the
  working directory.
- `quiet` was dropped from the default kernel command line.
- `--source-file-transfer` and `--source-file-transfer-final` now accept an
  empty value as the argument, which can be used to override a previous
  setting.
- A new command `mkosi serve` can be used to serve build artifacts using a
  small embedded HTTP server. This is useful for `machinectl pull-raw …` and
  `machinectl pull-tar …`.
- A new command `mkosi genkey` can be used to generate secure boot keys for
  use with mkosi's `--secure-boot` options. The number of days the keys
  should remain valid can be specified via `--secure-boot-valid-days=` and
  their CN via `--secure-boot-common-name=` (see the example after this
  list).
- When booting images with `qemu`, firmware that supports Secure Boot will be
  used if available.
- `--source-resolve-symlinks` and `--source-resolve-symlinks-final` options
  were added to control how symlinks in the build sources are handled when
  `--source-file-transfer[-final]=copy-all` is used.
- The `--build-environment=` option was added to set variables for the build
  script.
- The `--usr-only` option was added to build images that comprise only the
  `/usr/` directory, instead of the whole root file system. This is useful
  for stateless systems where `/etc/` and `/var/` are populated by
  `systemd-tmpfiles`/`systemd-sysusers` and related calls at boot, or systems
  that are originally shipped without a root file system, but where
  `systemd-repart` adds one on the first boot.
- Support for "image versions" has been added. The version number can be set
  with `--version-number=`. It is included in the default output filename and
  passed as `$IMAGE_VERSION` to the build script. In addition, `mkosi bump`
  can be used to increase the version number by one, and `--auto-bump` can be
  used to increase it automatically after successful builds.
- Support for "image identifiers" has been added. The identifier can be set
  with `--image-id=` and is passed to the build script as `$IMAGE_ID`.
- The list of packages to install can be configured with `--base-packages=`.
  With `--base-packages=no`, only packages specified with `--packages=` will
  be installed. With `--base-packages=conditional`, various packages will be
  installed "conditionally", i.e. only if some other package is otherwise
  pulled in. For example, `systemd-udev` may be installed only if `systemd`
  is listed in `--packages=`.
- CPIO output format has been added. This is useful for kernel initramfs
  images.
- Output compression can be configured with `--compress-fs=` and
  `--compress-output=`, and support for `zstd` has been added.
- The `--ssh-key=` option was added to control the ssh key used to connect to
  the image.
- The `--remove-files=` option was added to remove files from the generated
  images.
- Inline comments are now allowed in config files (anything from `#` until
  the end of the line will be ignored).
- The development branch was renamed from `master` to `main`.
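For illustration, here is one way the new key generation and image version
features described above might be combined (all values here are examples
only, not recommendations):

```shell
# Generate secure boot keys valid for two years, with a custom common name.
mkosi genkey --secure-boot-valid-days=730 --secure-boot-common-name='Example Image Key'

# Build the image, automatically bumping the version after a successful build.
mkosi --auto-bump
```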
## v9

### Highlighted Changes

- The mkosi GitHub action now defaults to the current release of mkosi
  instead of the tip of the master branch.
- Add an `ssh` verb and accompanying `--ssh` option. The latter sets up SSH
  keys for direct SSH access into a booted image, whereas the former can be
  used to start an SSH connection to the image.
- Allow for distribution-specific `mkosi.*` files in subdirectories of
  `mkosi.default.d/`. These files are only processed if a subdirectory named
  after the target distribution of the image is found in `mkosi.default.d/`.
- The summary of used options for the image is now only printed when building
  the image for the first time or when the `summary` verb is used.
- All of mkosi's output, except for the build script, will now go to stderr.
  There was no clear policy on this before, and this choice makes it easier
  to use images generated and booted via mkosi with language servers using
  stdin and stdout for communication.
- `--source-file-transfer` now defaults to `copy-git-others` to also include
  untracked files.
- [black](https://github.com/psf/black) is now used as a code style and
  conformance with it is checked in CI.
- Add a new `--ephemeral` option to boot into a temporary snapshot of the
  image that will be thrown away on shutdown.
- Add a new option `--network-veth` to set up a virtual Ethernet link between
  the host and the image for use with nspawn or QEMU.
- Add a new `--autologin` option to automatically log into the root account
  upon boot of the image. This is useful when using mkosi for boot tests.
- Add a new `--hostonly` option to generate host-specific initrds. This is
  useful when using mkosi for boot tests.
- Add a new `--install-directory` option and special directory
  `mkosi.installdir/` that will be used as `$DESTDIR` for the build script,
  so that the contents of this directory can be shared between builds.
- Add a new `--include-directory` option and special directory
  `mkosi.includedir/` that will be mounted at `/usr/include` during the
  build. This way header files installed during the build can be made
  available to the host system, which is useful for use with language
  servers.
- Add a new `--source-file-transfer-final` option to complement
  `--source-file-transfer`. It does the same as `--source-file-transfer`
  does for the build image, but for the final one.
- Add a new `--tar-strip-selinux-context` option to remove SELinux xattrs.
  This is useful when an image with a target distribution not using SELinux
  is generated on a host that is using it.
- Document the `--no-chown` option. Using this option, artifacts generated by
  mkosi are not chowned to the user invoking mkosi when it is invoked via
  sudo. It has been with us for a while, but hadn't been documented until
  now.

### Fixed Issues

- [#506](https://github.com/systemd/mkosi/issues/506)
- [#559](https://github.com/systemd/mkosi/issues/559)
- [#561](https://github.com/systemd/mkosi/issues/561)
- [#562](https://github.com/systemd/mkosi/issues/562)
- [#575](https://github.com/systemd/mkosi/issues/575)
- [#580](https://github.com/systemd/mkosi/issues/580)
- [#593](https://github.com/systemd/mkosi/issues/593)

### Authors

- Daan De Meyer
- Joerg Behrmann
- Luca Boccassi
- Peter Hutterer
- ValdikSS

mkosi-20.2/README.md000066400000000000000000000110131455345632200140450ustar00rootroot00000000000000# mkosi — Build Bespoke OS Images

A fancy wrapper around `dnf --installroot`, `apt`, `pacman` and `zypper` that
generates customized disk images with a number of bells and whistles.
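For instance, with a `mkosi.conf` describing your image in the current
directory, a build is a single command (`build` is the default verb):

```shell
mkosi build
```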
For a longer description and available features and options, see the
[man page](mkosi/resources/mkosi.md).

Packaging status

# Installation

You can install mkosi from your distribution using its package manager or
install the development version from git. If you install mkosi using your
distribution's package manager, make sure it installs mkosi v16 or newer (use
`mkosi --version` to check). If your distribution only packages an older
version of mkosi, it is recommended to install mkosi using one of the
alternative installation methods listed below instead.

## Running mkosi from the repository

To run mkosi straight from its git repository, you can invoke the shim
`bin/mkosi`. The `MKOSI_INTERPRETER` environment variable can be set when
using the `bin/mkosi` shim to configure the Python interpreter used to
execute mkosi. The shim can be symlinked to e.g. `/usr/local/bin` to make it
accessible from the `PATH`.

```shell
git clone https://github.com/systemd/mkosi
ln -s $PWD/mkosi/bin/mkosi /usr/local/bin/mkosi
mkosi --version
```

## Python installation methods

mkosi can also be installed straight from the git repository url using `pipx`:

```shell
pipx install git+https://github.com/systemd/mkosi.git
mkosi --version
```

which will transparently install mkosi into a Python virtual environment and
place a mkosi binary in `~/.local/bin`. This is, up to the path of the
virtual environment and the mkosi binary, equivalent to

```shell
python3 -m venv mkosivenv
mkosivenv/bin/pip install git+https://github.com/systemd/mkosi.git
mkosivenv/bin/mkosi --version
```

You can also package mkosi as a
[zipapp](https://docs.python.org/3/library/zipapp.html) that you can deploy
anywhere in your `PATH`. Running this will leave a `mkosi` binary in
`builddir/`

```shell
git clone https://github.com/systemd/mkosi
cd mkosi
tools/generate-zipapp.sh
builddir/mkosi --version
```

Besides the mkosi binary, you can also call mkosi via

```shell
python3 -m mkosi
```

when not installed as a zipapp. Note that the Python module exists solely for
the use of the mkosi binary and is not to be considered a public API.

## kernel-install plugin

mkosi can also be used as a kernel-install plugin to build initrds. To enable
this feature, install `kernel-install/50-mkosi.install` into
`/usr/lib/kernel/install.d` and install `mkosi-initrd/mkosi.conf` into
`/usr/lib/mkosi-initrd`. Extra distribution-specific configuration for the
initrd can be provided using drop-ins in `/usr/lib/mkosi-initrd`. Users can
add their custom configuration in `/etc/mkosi-initrd`. Once installed, the
mkosi plugin can be enabled by writing `initrd_generator=mkosi-initrd` to
`/usr/lib/kernel/install.conf` or to `/etc/kernel/install.conf`.

# Hacking on mkosi

To hack on mkosi itself you will also need
[mypy](https://github.com/python/mypy), for type checking, and
[pytest](https://github.com/pytest-dev/pytest), to run tests. We check tests
and typing in CI (see `.github/workflows`), but you can run the tests locally
as well.
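For example, a local run of the same checks might look like this (assuming
mypy and pytest are installed; the paths mirror the repository layout, so
adjust them if yours differs):

```shell
# Type-check the sources, then run the unit tests.
python3 -m mypy mkosi/
python3 -m pytest -sv tests/
```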
# References * [Primary mkosi git repository on GitHub](https://github.com/systemd/mkosi/) * [mkosi — A Tool for Generating OS Images](http://0pointer.net/blog/mkosi-a-tool-for-generating-os-images.html) introductory blog post by Lennart Poettering (2017) * [The mkosi OS generation tool](https://lwn.net/Articles/726655/) story on LWN (2017) * [systemd-repart: Building Discoverable Disk Images](https://media.ccc.de/v/all-systems-go-2023-191-systemd-repart-building-discoverable-disk-images) and [mkosi: Building Bespoke Operating System Images](https://media.ccc.de/v/all-systems-go-2023-190-mkosi-building-bespoke-operating-system-images) talks at All Systems Go! 2023 * [Building RHEL and RHEL UBI images with mkosi](https://fedoramagazine.org/create-images-directly-from-rhel-and-rhel-ubi-package-using-mkosi/) an article in Fedora Magazine (2023) * [Building USIs with mkosi](https://overhead.neocities.org/blog/build-usi-mkosi/) * [Constellation 💖 mkosi — Minimal TCB, tailor-made for measured boot](https://www.edgeless.systems/blog/constellation-mkosi-minimal-tcb-tailor-made-for-measured-boot/) ## Community Find us on Matrix at [#mkosi:matrix.org](https://matrix.to/#/#mkosi:matrix.org). mkosi-20.2/action.yaml000066400000000000000000000035531455345632200147370ustar00rootroot00000000000000name: setup-mkosi description: Install mkosi runs: using: composite steps: - name: Permit unprivileged access to kvm, vhost-vsock and vhost-net devices shell: bash run: | sudo mkdir -p /etc/tmpfiles.d sudo cp /usr/lib/tmpfiles.d/static-nodes-permissions.conf /etc/tmpfiles.d/ sudo sed -i '/kvm/s/0660/0666/g' /etc/tmpfiles.d/static-nodes-permissions.conf sudo sed -i '/vhost/s/0660/0666/g' /etc/tmpfiles.d/static-nodes-permissions.conf sudo tee /etc/udev/rules.d/99-kvm4all.rules <<- EOF KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm" KERNEL=="vhost-vsock", GROUP="kvm", MODE="0666", OPTIONS+="static_node=vhost-vsock" KERNEL=="vhost-net", GROUP="kvm", MODE="0666", OPTIONS+="static_node=vhost-net" EOF sudo udevadm control --reload-rules sudo modprobe kvm sudo modprobe vhost_vsock sudo modprobe vhost_net [[ -e /dev/kvm ]] && sudo udevadm trigger --name-match=kvm sudo udevadm trigger --name-match=vhost-vsock sudo udevadm trigger --name-match=vhost-net [[ -e /dev/kvm ]] && sudo chmod 666 /dev/kvm sudo chmod 666 /dev/vhost-vsock sudo chmod 666 /dev/vhost-net lsmod [[ -e /dev/kvm ]] && ls -l /dev/kvm ls -l /dev/vhost-* id - name: Dependencies shell: bash run: | # For archlinux-keyring and pacman sudo add-apt-repository ppa:michel-slm/kernel-utils sudo apt-get update sudo apt-get install --assume-yes --no-install-recommends \ archlinux-keyring \ bubblewrap \ debian-archive-keyring \ dnf \ pacman-package-manager \ systemd-container \ zypper sudo pacman-key --init sudo pacman-key --populate archlinux - name: Install shell: bash run: sudo ln -svf ${{ github.action_path }}/bin/mkosi /usr/bin/mkosi mkosi-20.2/bin/000077500000000000000000000000001455345632200133405ustar00rootroot00000000000000mkosi-20.2/bin/mkosi000077500000000000000000000012611455345632200144100ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: LGPL-2.1+ set -e PYTHONPATH="$(dirname "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")")" export PYTHONPATH if [ -z "$MKOSI_INTERPRETER" ]; then # Note the check seems to be inverted here because the if branch is executed when the exit status is 0 # which is equal to "False" in python. 
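    # In other words: "sys.exit(sys.version_info < (3, 9))" exits with status 0
    # (treated as success by the shell) when the interpreter is at least Python
    # 3.9, so the branch below picks plain python3 only when it is new enough.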
if python3 -c "import sys; sys.exit(sys.version_info < (3, 9))"; then MKOSI_INTERPRETER=python3 elif command -v python3.9 >/dev/null; then MKOSI_INTERPRETER=python3.9 else echo "mkosi needs python 3.9 or newer (found $(python3 --version))" exit 1 fi fi exec "$MKOSI_INTERPRETER" -B -m mkosi "$@" mkosi-20.2/docs/000077500000000000000000000000001455345632200135205ustar00rootroot00000000000000mkosi-20.2/docs/bootable.md000066400000000000000000000016431455345632200156350ustar00rootroot00000000000000# Building a bootable image on different distros To build a bootable image, you'll need to install a list of packages that differs depending on the distribution. We give an overview here of what's needed to generate a bootable image for some common distributions: ## Arch ``` [Content] Packages=linux systemd ``` ## Fedora ``` [Content] Packages=kernel systemd systemd-boot udev util-linux ``` ## CentOS ``` [Content] Packages=kernel systemd systemd-boot udev ``` ## Debian ``` [Content] Packages=linux-image-generic systemd systemd-boot systemd-sysv udev dbus ``` ## Ubuntu ``` [Content] Repositories=main,universe Packages=linux-image-generic systemd systemd-sysv udev dbus ``` ## Opensuse ``` [Content] Packages=kernel-default systemd udev ``` mkosi-20.2/docs/building-rpms-from-source.md000066400000000000000000000126401455345632200210600ustar00rootroot00000000000000# Building RPMs from source with mkosi If you want to build an RPM from source and install it within a mkosi image, you can do that with mkosi itself without using `mock`. The steps required are as follows: 1. Install `Requires` dependencies in the image 2. Install `BuildRequires` dependencies in the build overlay 3. Install dynamic `BuildRequires` dependencies in the build overlay 4. Build the RPM with `rpmbuild` 5. Install the built rpms in the image In the following examples, we'll use mkosi itself and its Fedora RPM spec as an example. To keep things snappy, we execute the first 3 steps in a prepare script so that they're cached on subsequent runs of mkosi if the `Incremental=` setting is enabled. First, we need access to the upstream sources and the RPM spec and related files. These can be mounted into the current working directory when running mkosi scripts by using the `BuildSources=` setting. For example, in `mkosi.local.conf`, we could have the following settings: ```conf [Content] BuildSources=../mkosi:mkosi ../fedora/mkosi:mkosi/rpm BuildSourcesEphemeral=yes ``` Which instructs mkosi to mount the local version of the mkosi upstream repository at `../mkosi` to `mkosi` in the current working directory when running mkosi. The Fedora RPM spec is mounted at `mkosi/rpm`. We enable the `BuildSourcesEphemeral=` option as `rpmbuild` will write quite a few files to the source directory as part of building the rpm which we don't want to remain there after the build finishes. We use `rpmspec` and `rpmbuild`, but these do not really support running from outside of the image that the RPM is being built in, so we have to make sure they're available inside the image by adding the following to `mkosi.conf`: ```conf [Content] Packages=rpm-build # If you don't want rpm-build in the final image. 
RemovePackages=rpm-build ``` The prepare script `mkosi.prepare` then looks as follows: ```shell #!/bin/sh set -e if [ "$1" = "build" ]; then DEPS="--buildrequires" else DEPS="--requires" fi mkosi-chroot \ rpmspec \ --query \ "$DEPS" \ --define "_topdir /var/tmp" \ --define "_sourcedir mkosi/rpm" \ mkosi/rpm/mkosi.spec | grep -E -v mkosi | xargs -d '\n' dnf install if [ "$1" = "build" ]; then until mkosi-chroot \ env --chdir=mkosi \ rpmbuild \ -bd \ --build-in-place \ --define "_topdir /var/tmp" \ --define "_sourcedir rpm" \ --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \ rpm/mkosi.spec do EXIT_STATUS=$? if [ $EXIT_STATUS -ne 11 ]; then exit $EXIT_STATUS fi dnf builddep /var/tmp/SRPMS/mkosi-*.buildreqs.nosrc.rpm done fi ``` To install non-dynamic dependencies, we use `rpmspec`. What's important is to set `_sourcedir` to the directory containing the RPM sources for the RPM spec that we want to build. We run `rpmspec` inside the image to make sure all the RPM macros have their expected values and then run `dnf` outside the image to install the required dependencies. We always set `_topdir` to `/var/tmp` to avoid polluting the image with `rpmbuild` artifacts. Subpackages from the same RPM might depend on each other. We need to filter out those dependencies using `grep -E -v `. After installing non-dynamic `Requires` and `BuildRequires` dependencies, we have to install the dynamic `BuildRequires` by running `rpmbuild -bd` until it succeeds or fails with an exit code that's not `11`. After each run of `rpmbuild -bd` that exits with exit code `11`, there will be an SRPM in the `SRPMS` subdirectory of the upstream sources directory of which the `BuildRequires` have to be installed for which we use `dnf builddep`. Now we have an image and build overlay with all the necessary dependencies installed to be able to build the RPM. Next is the build script. We suffix the build script with `.chroot` so that mkosi runs it entirely inside the image. In the build script, we invoke `rpmbuild -bb --build-in-place` to have `rpmbuild` build the RPM in place from the upstream sources. Because `--build-in-place` configures `_builddir` to the current working directory, we change directory to the upstream sources before invoking `rpmbuild`. Again, `_sourcedir` has to point to the RPM spec sources. We also have to override `_rpmdir` to point to the mkosi output directory (stored in `$OUTPUTDIR`). The build script `mkosi.build.chroot` then looks as follows: ```shell #!/bin/sh set -e env --chdir=mkosi \ rpmbuild \ -bb \ --build-in-place \ $([ "$WITH_TESTS" = "0" ] && echo --nocheck) \ --define "_topdir /var/tmp" \ --define "_sourcedir rpm" \ --define "_rpmdir $OUTPUTDIR" \ ${BUILDDIR:+--define} \ ${BUILDDIR:+"_vpath_builddir $BUILDDIR"} \ --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \ rpm/mkosi.spec ``` The `_vpath_builddir` directory will be used to store out-of-tree build artifacts for build systems that support out-of-tree builds (CMake, Meson) so we set it to mkosi's out-of-tree build directory in `$BUILDDIR` if one is provided. This will make subsequent RPM builds much faster as CMake or Meson will be able to do an incremental build. After the build script finishes, the produced rpms will be located in `$OUTPUTDIR`. 
We can now install them from the `mkosi.postinst` post-installation script:

```shell
#!/bin/sh
set -e
rpm --install "$OUTPUTDIR"/*mkosi*.rpm
```
mkosi-20.2/docs/distribution-policy.md000066400000000000000000000033431455345632200200610ustar00rootroot00000000000000# Adding new distributions

Merging support for a new distribution in mkosi depends on a few factors. Not
all of these are required, but depending on how many of them are satisfied,
the chances of us merging support for your distribution will improve:

1. Is the distribution somewhat popular? mkosi's goal is not to support every
   distribution under the sun; the distribution should have a substantial
   number of users.
2. Does the distribution differentiate itself somehow from the distributions
   that are already supported? We're generally not interested in supporting
   distributions that only consist of minimal configuration changes to
   another distribution.
3. Is there a long-term maintainer for the distribution in mkosi? When
   proposing support for a new distribution, we expect you to be the
   maintainer for the distribution and to respond when pinged for support on
   distribution-specific issues.
4. Does the distribution use a custom package manager or one of the already
   supported ones (apt, dnf, pacman, zypper)? Supporting new package managers
   in mkosi is generally a lot of work. We can support new ones if needed for
   a new distribution, but we will insist on the package manager having a
   somewhat sane design, with official support for building in a chroot and
   running unprivileged in a user namespace being the bare minimum features
   we expect from any new package manager.

We will only consider new distributions that satisfy all or most of these
requirements. However, you can still use mkosi with an unsupported
distribution by setting the `Distribution` setting to `custom` and providing
the rootfs yourself, either via a skeleton tree or base tree, or via a
prepare script.
mkosi-20.2/docs/initrd.md000066400000000000000000000014771455345632200153420ustar00rootroot00000000000000# Building a custom initrd and using it in a mkosi image

Building an image with a mkosi-built initrd is a two-step process, because
you will build two images: the initrd and your distribution image.

1. Build an initrd image using the `cpio` output format with the same target
   distribution as you want to use for your distribution image. mkosi
   compresses the `cpio` output format by default.

   ```
   [Output]
   Format=cpio

   [Content]
   Packages=systemd
            udev
            kmod
   ```

2. Invoke `mkosi` passing the initrd image via the `--initrd` option, or add
   the `Initrd=` option to your mkosi config when building your distribution
   image.

   ```bash
   mkosi --initrd=<path-to-initrd> ...
   ```

This will build an image using the provided initrd image. mkosi will add the
kernel modules found in the distribution image to this initrd.
mkosi-20.2/docs/sysext.md000066400000000000000000000046131455345632200154050ustar00rootroot00000000000000# Building system extensions with mkosi

[System extension](https://uapi-group.org/specifications/specs/extension_image/)
images may, dynamically at runtime, extend the base system with an overlay
containing additional files.

To build system extensions with mkosi, we first have to create a base image
on top of which we can build our extension.

To keep things manageable, we'll use mkosi's support for building multiple
images so that we can build our base image and system extension in one go.
Start by creating a temporary directory with a base configuration file
`mkosi.conf` with some shared settings:

```conf
[Output]
OutputDirectory=mkosi.output
CacheDirectory=mkosi.cache
```

From now on we'll assume all steps are executed inside the temporary
directory.

Now let's continue with the base image definition by writing the following
to `mkosi.images/base/mkosi.conf`:

```conf
[Output]
Format=directory

[Content]
CleanPackageMetadata=no
Packages=systemd
         udev
```

We use the `directory` output format here instead of the `disk` output so
that we can build our extension without needing root privileges.

Now that we have our base image, we can define a sysext that builds on top
of it by writing the following to `mkosi.images/btrfs/mkosi.conf`:

```conf
[Config]
Dependencies=base

[Output]
Format=sysext
Overlay=yes

[Content]
BaseTrees=%O/base
Packages=btrfs-progs
```

`BaseTrees=` points to our base image and `Overlay=yes` instructs mkosi to
only package the files added on top of the base tree.

We can't sign the extension image without a key, so let's generate one with
`mkosi genkey` (or provide your own private key and certificate as
`mkosi.key` and `mkosi.crt` respectively). Note that this key will need to be
loaded into your kernel keyring either at build time or via MOK for systemd
to accept the system extension at runtime as trusted.

Finally, you can build the base image and the extensions by running
`mkosi -f`. You'll find `btrfs.raw` in `mkosi.output`, which is the extension
image.

If you want to package up the base image into another format, for example an
initrd, you can do that by adding the following to
`mkosi.images/initrd/mkosi.conf`:

```conf
[Config]
Dependencies=base

[Output]
Format=cpio

[Content]
MakeInitrd=yes
BaseTrees=%O/base
```

If we now run `mkosi -f` again, we'll find `initrd.cpio.zst` in
`mkosi.output` with its accompanying extension still in `btrfs.raw`.
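To try the extension out, one possible approach is to merge it on a running
system whose OS matches the base image (the paths and target system here are
assumptions, not part of the build itself):

```shell
# Make the extension visible to systemd-sysext, then merge and inspect it.
cp mkosi.output/btrfs.raw /run/extensions/
systemd-sysext merge
systemd-sysext status
```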
mkosi-20.2/kernel-install/000077500000000000000000000000001455345632200155145ustar00rootroot00000000000000mkosi-20.2/kernel-install/50-mkosi.install000066400000000000000000000144561455345632200204620ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-License-Identifier: LGPL-2.1+ import argparse import logging import os import shutil import tempfile from pathlib import Path from typing import NamedTuple, Optional from mkosi.archive import make_cpio from mkosi.config import OutputFormat, __version__ from mkosi.log import die, log_setup from mkosi.run import run, uncaught_exception_handler from mkosi.tree import copy_tree from mkosi.types import PathString from mkosi.util import umask class Context(NamedTuple): command: str kernel_version: str entry_dir: Path kernel_image: Path initrds: list[Path] staging_area: Path layout: str image_type: str initrd_generator: Optional[str] uki_generator: Optional[str] verbose: bool def we_are_wanted(context: Context) -> bool: return context.uki_generator == "mkosi" or context.initrd_generator in ("mkosi", "mkosi-initrd") def mandatory_variable(name: str) -> str: try: return os.environ[name] except KeyError: die(f"${name} must be set in the environment") def build_microcode_initrd(output: Path) -> Optional[Path]: amd = Path("/usr/lib/firmware/amd-ucode") intel = Path("/usr/lib/firmware/intel-ucode") if not amd.exists() and not intel.exists(): logging.debug("/usr/lib/firmware/{amd-ucode,intel-ucode} not found, not adding microcode initrd") return None with tempfile.TemporaryDirectory() as tmp: root = Path(tmp) / "initrd-microcode-root" destdir = root / "kernel/x86/microcode" with umask(~0o755): destdir.mkdir(parents=True, exist_ok=True) if amd.exists(): with (destdir / "AuthenticAMD.bin").open("wb") as f: for p in amd.iterdir(): f.write(p.read_bytes()) if intel.exists(): with (destdir / "GenuineIntel.bin").open("wb") as f: for p in intel.iterdir(): f.write(p.read_bytes()) make_cpio(root, output) return output @uncaught_exception_handler() def main() -> None: log_setup() parser = argparse.ArgumentParser( description='kernel-install plugin to build initrds or Unified Kernel Images using mkosi', allow_abbrev=False, usage='50-mkosi.install COMMAND KERNEL_VERSION ENTRY_DIR KERNEL_IMAGE INITRD…', ) parser.add_argument("command", metavar="COMMAND", help="The action to perform. Only 'add' is supported.") parser.add_argument("kernel_version", metavar="KERNEL_VERSION", help="Kernel version string") parser.add_argument("entry_dir", metavar="ENTRY_DIR", type=Path, help="Type#1 entry directory (ignored)") parser.add_argument("kernel_image", metavar="KERNEL_IMAGE", type=Path, help="Kernel image") parser.add_argument("initrds", metavar="INITRD…", type=Path, nargs="*", help="Initrd files") parser.add_argument("--version", action="version", version=f"mkosi {__version__}") context = Context( **vars(parser.parse_args()), staging_area=Path(mandatory_variable("KERNEL_INSTALL_STAGING_AREA")), layout=mandatory_variable("KERNEL_INSTALL_LAYOUT"), image_type=mandatory_variable("KERNEL_INSTALL_IMAGE_TYPE"), initrd_generator=os.getenv("KERNEL_INSTALL_INITRD_GENERATOR"), uki_generator=os.getenv("KERNEL_INSTALL_UKI_GENERATOR"), verbose=int(os.getenv("KERNEL_INSTALL_VERBOSE", 0)) > 0, ) if context.command != "add" or not we_are_wanted(context): return # If kernel-install was passed a UKI, there's no need to build anything ourselves. if context.image_type == "uki": return # If the initrd was provided on the kernel command line, we shouldn't generate our own. 
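    # (When the layout is "uki" we proceed regardless, since in that case mkosi
    # builds the complete UKI, including its initrd, itself below.)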
if context.layout != "uki" and context.initrds: return format = OutputFormat.uki if context.layout == "uki" else OutputFormat.cpio output = "initrd" if format == OutputFormat.cpio else "uki" cmdline: list[PathString] = [ "mkosi", "--directory", "", "--format", str(format), "--output", output, "--workspace-dir=/var/tmp", "--cache-dir=/var", "--output-dir", context.staging_area, "--extra-tree", f"/usr/lib/modules/{context.kernel_version}:/usr/lib/modules/{context.kernel_version}", "--extra-tree=/usr/lib/firmware:/usr/lib/firmware", "--kernel-modules-exclude=.*", "--kernel-modules-include-host=yes", ] if context.verbose: cmdline += ["--debug"] for d in ("/usr/lib/mkosi-initrd", "/etc/mkosi-initrd"): if Path(d).exists(): cmdline += ["--include", d] with tempfile.TemporaryDirectory() as d: # Make sure we don't use any of mkosi's default repositories. for p in ( "yum.repos.d/mkosi.repo", "apt/sources.list", "zypp/repos.d/mkosi.repo", "pacman.conf", ): (Path(d) / "etc" / p).parent.mkdir(parents=True, exist_ok=True) (Path(d) / "etc" / p).touch() # Copy in the host's package manager configuration. for p in ( "dnf", "yum.repos.d/", "apt", "zypp", "pacman.conf", "pacman.d/", ): if not (Path("/etc") / p).exists(): continue (Path(d) / "etc" / p).parent.mkdir(parents=True, exist_ok=True) copy_tree(Path("/etc") / p, Path(d) / "etc" / p, dereference=True) cmdline += ["--package-manager-tree", d] logging.info(f"Building {output}") run(cmdline) (context.staging_area / output).unlink() if format == OutputFormat.cpio: shutil.move(next(context.staging_area.glob("initrd*.cpio*")), context.staging_area / "initrd") build_microcode_initrd(context.staging_area / "microcode") else: (context.staging_area / f"{output}.vmlinuz").unlink() (context.staging_area / f"{output}.initrd").unlink() if __name__ == '__main__': main() mkosi-20.2/mkosi-initrd000077700000000000000000000000001455345632200226072mkosi/resources/mkosi-initrdustar00rootroot00000000000000mkosi-20.2/mkosi.conf000066400000000000000000000020661455345632200145650ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Output] # These images are (among other things) used for running mkosi which means we need some disk space available so # default to directory output where disk space isn't a problem. @Format=directory @CacheDirectory=mkosi.cache @OutputDirectory=mkosi.output [Content] Autologin=yes @ShimBootloader=signed BuildSources=. BuildSourcesEphemeral=yes Packages= attr autoconf automake ca-certificates gcc gdb gettext git less libtool make nano pkgconf strace tmux InitrdPackages= less RemoveFiles= # The grub install plugin doesn't play nice with booting from virtiofs. /usr/lib/kernel/install.d/20-grub.install # The dracut install plugin doesn't honor KERNEL_INSTALL_INITRD_GENERATOR. /usr/lib/kernel/install.d/50-dracut.install # Make sure that SELinux doesn't run in enforcing mode even if it's pulled in as a dependency. 
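# console=ttyS0 directs kernel console output to the first serial port.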
KernelCommandLine=console=ttyS0 enforcing=0 mkosi-20.2/mkosi.conf.d/000077500000000000000000000000001455345632200150605ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/15-bootable.conf000066400000000000000000000002401455345632200177350ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Format=|disk Format=|directory [Match] Architecture=|x86-64 Architecture=|arm64 [Content] @Bootable=yes mkosi-20.2/mkosi.conf.d/15-x86-64.conf000066400000000000000000000000741455345632200170270ustar00rootroot00000000000000[Match] Architecture=x86-64 [Content] @BiosBootloader=grub mkosi-20.2/mkosi.conf.d/20-arch.conf000066400000000000000000000015311455345632200170630ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=arch [Content] ShimBootloader=unsigned Packages= apt archlinux-keyring base bash btrfs-progs bubblewrap ca-certificates coreutils cpio curl debian-archive-keyring dnf dosfstools e2fsprogs edk2-ovmf erofs-utils grub iproute iputils linux mtools openssh openssl pacman perf pesign python-cryptography qemu-base sbsigntools shadow shim socat squashfs-tools strace swtpm systemd systemd-ukify tar ukify util-linux virtiofsd xfsprogs xz zstd mkosi-20.2/mkosi.conf.d/20-centos.conf000066400000000000000000000003311455345632200174360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky [Distribution] @Release=9 Repositories=epel epel-next [Content] Packages=linux-firmware mkosi-20.2/mkosi.conf.d/20-debian/000077500000000000000000000000001455345632200165215ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/20-debian/mkosi.conf000066400000000000000000000002411455345632200205070ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=debian [Distribution] Repositories=non-free-firmware [Content] Packages= linux-perf mkosi-20.2/mkosi.conf.d/20-debian/mkosi.conf.d/000077500000000000000000000000001455345632200210115ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/20-debian/mkosi.conf.d/20-arm64.conf000066400000000000000000000001761455345632200230340ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=arm64 [Content] Packages= linux-image-cloud-arm64 mkosi-20.2/mkosi.conf.d/20-debian/mkosi.conf.d/20-x86-64.conf000066400000000000000000000001771455345632200227600ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= linux-image-cloud-amd64 mkosi-20.2/mkosi.conf.d/20-fedora/000077500000000000000000000000001455345632200165375ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/20-fedora/mkosi.conf000066400000000000000000000004601455345632200205300ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Distribution] @Release=39 [Content] Packages= amd-ucode-firmware archlinux-keyring btrfs-progs dnf5 dnf5-plugins fedora-review pacman systemd-ukify zypper mkosi-20.2/mkosi.conf.d/20-fedora/mkosi.conf.d/000077500000000000000000000000001455345632200210275ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/20-fedora/mkosi.conf.d/20-uefi.conf000066400000000000000000000002101455345632200230360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=|x86-64 Architecture=|arm64 [Content] Packages= sbsigntools mkosi-20.2/mkosi.conf.d/20-opensuse.conf000066400000000000000000000017051455345632200200120ustar00rootroot00000000000000# 
SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Distribution] @Release=tumbleweed [Content] Packages= bash btrfs-progs bubblewrap ca-certificates coreutils cpio curl distribution-gpg-keys dnf dosfstools e2fsprogs erofs-utils grep grub2-i386-pc iproute iputils kernel-kvmsmall mtools openssh-clients openssh-server openssl ovmf pesign perf qemu-headless sbsigntools shadow shim socat squashfs strace swtpm systemd systemd-boot systemd-container systemd-coredump systemd-experimental tar ucode-amd ucode-intel udev util-linux virtiofsd xfsprogs xz zstd zypper mkosi-20.2/mkosi.conf.d/20-rhel-ubi.conf000066400000000000000000000002551455345632200176570ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=rhel-ubi [Distribution] @Release=9 [Content] Bootable=no Packages= systemd systemd-udev mkosi-20.2/mkosi.conf.d/20-ubuntu/000077500000000000000000000000001455345632200166215ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/20-ubuntu/mkosi.conf000066400000000000000000000004361455345632200206150ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=ubuntu [Distribution] @Release=lunar Repositories=universe [Content] Packages= linux-image-generic # TODO: Switch to linux-virtual once it supports reading credentials from SMBIOS. linux-tools-generic mkosi-20.2/mkosi.conf.d/20-ubuntu/mkosi.conf.d/000077500000000000000000000000001455345632200211115ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/20-ubuntu/mkosi.conf.d/20-jammy.conf000066400000000000000000000001661455345632200233170ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Release=jammy [Distribution] PackageManagerTrees=mkosi.pkgmngr mkosi-20.2/mkosi.conf.d/20-ubuntu/mkosi.pkgmngr/000077500000000000000000000000001455345632200214075ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/20-ubuntu/mkosi.pkgmngr/etc/000077500000000000000000000000001455345632200221625ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/20-ubuntu/mkosi.pkgmngr/etc/apt/000077500000000000000000000000001455345632200227465ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/20-ubuntu/mkosi.pkgmngr/etc/apt/sources.list.d/000077500000000000000000000000001455345632200256255ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/20-ubuntu/mkosi.pkgmngr/etc/apt/sources.list.d/kernel-utils.sources000066400000000000000000000035131455345632200316520ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later Enabled: yes Types: deb URIs: https://ppa.launchpadcontent.net/michel-slm/kernel-utils/ubuntu Suites: jammy Components: main Signed-By: -----BEGIN PGP PUBLIC KEY BLOCK----- . 
xsFNBGJyEb0BEADR6CoHmzotr+Z1UmqkNQZ/z+tm4u3/KbjD/UKHOloAJK2L2Mf6 Eq1hbs2MlEYa7VtYcfq+NqluvTtqHckgE6sTFGbxQXMUDK+bcxqXmQUQcRw/Wytl BgRr6fCA+pK82W6Z5eFyYsfbZMqnIqw3rbtx02K43KFGOiP8Pj/FFcPXzf9q3+3k EXELs8y6N7OEYeloEs45RnBwkKETvMX08zbTZfn5owfYZRd4VpIZJ2BnZprzdzfP Z1ZTkkDTAUpZvpXFi5WtDx6rVP92+7OYxOjUKzQ9wFbKdkZVzqhfJIR2SHM2tHVz GwIZl6vYsAqLLZccQSS4nBCXIzUwRO28LwqzacjKabl197fk0C/IKFw2Z4/ZsCHb VcrnttD3G1AvQ+DMvEPTzB9vD4R7uNEkmg4UYokzk6yW9/KQm3lNbQBS+D5jZREv hRyz4ZLW9wzz13H655nXzJCIZvLWVpRLNzQDscxYlkYBONoH+HiGafsgZqw+ITc3 tDPTw0HqRNe+/oendXqOhtKhY2PRhD3vu5NgXLX7GuAHr7Dq9HMUyA5MxKH90e28 vaYUmwU0jfYGgNnSCpRrAOx0SlqKpMlwW9VkpJctGrYMZ/ts6yPdJC2OSWiJRlMa Xmf4IvsLrCGobd27y03TYl5Uq6OOpD0DSP2hGZQxYHOgMhhZxT99IJUlMwARAQAB zShMYXVuY2hwYWQgUFBBIGZvciBNaWNoZWwgQWxleGFuZHJlIFNhbGltwsGOBBMB CgA4FiEE+8ojhpUuuImlrBJjySgIhqCuA8oFAmJyEb0CGwMFCwkIBwIGFQoJCAsC BBYCAwECHgECF4AACgkQySgIhqCuA8orpw//YWpifbMc2F3zNx8oW2UyTsr2IXtu 4/pHVtDroYokMOvCy2IR6FhzXSMM35yQBVfn92T5MiG0pHqXNUIZstt/m3Qo7tnj 1AR9f0mRLTKHONQCUP91CiVHGGKfaYHiyQ9Pxxp+LUxFkoEXUfQPl6N0wfGCd0Rb k9gcFlOo3+duOFsd+1Aw3Gi83SFcl6Bc4P/3i+dfB4g1Nbte0ZzPnFWKKYRV0K6z 5uDbJJYdIS+nwIxVXb7cnMUrrHBr4cUDsIXnAwVN+zeK7Q4CrJOpR6ZDGNb5SGcO TaJPEOpCIcIKkzW9IzYm5NTzxhQHg7jvCPrGBuX3nTt1fEzCn5L2se9iwehtsMat WXwi+yIYlpce2vHRPZEb8ILMoCL50veAAZ2tAlHx9UnAPNtT+1PPzrKPcIVCAB2e fBgUBcCaQ62LWsIQX1B9qL4xhGX7Z4nFk2aXNlrHjnnf5gwFCJ/XiVuFgGetfRrV r2PgfFFOfUanJ4LMu8sfqurrNJXrYMHfA8+qIbTLyltlqsOiEROOa/Qje5KEqmbe vg/hbqRpGNHdYKP1OynqBK8VAgG9/g5qGR8FLXr1DXl5dzlqyiIkRQINd9O6XjnX LWPl1wsOXOCY/jWgMxktt8Mv9qaaZ4CT9cuwsm/aml270A3GKRYHLDFP3CkuMnqd 0vsZgWMIQtgQmXU= =7vwW -----END PGP PUBLIC KEY BLOCK----- mkosi-20.2/mkosi.conf.d/30-centos-fedora/000077500000000000000000000000001455345632200200315ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/30-centos-fedora/mkosi.conf000066400000000000000000000022231455345632200220210ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|fedora [Content] Packages= apt bash bubblewrap ca-certificates centos-packager coreutils cpio curl-minimal debian-keyring distribution-gpg-keys dnf dnf-plugins-core dosfstools e2fsprogs erofs-utils fedora-packager fedora-packager-kerberos iproute iputils kernel-core mock mock-centos-sig-configs mock-core-configs mtools openssh-clients openssh-server openssl perf python3-cryptography qemu-kvm-core rpm-build rpminspect rpminspect-data-centos rpminspect-data-fedora shadow-utils shim socat squashfs-tools strace swtpm systemd systemd-container systemd-networkd systemd-resolved systemd-udev tar util-linux virtiofsd xfsprogs xz zstd mkosi-20.2/mkosi.conf.d/30-centos-fedora/mkosi.conf.d/000077500000000000000000000000001455345632200223215ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/30-centos-fedora/mkosi.conf.d/20-uefi.conf000066400000000000000000000002521455345632200243360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=|x86-64 Architecture=|arm64 [Content] Packages= systemd-boot pesign edk2-ovmf mkosi-20.2/mkosi.conf.d/30-centos-fedora/mkosi.conf.d/20-x86-64.conf000066400000000000000000000002061455345632200242610ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= microcode_ctl grub2-pc 
mkosi-20.2/mkosi.conf.d/30-debian-ubuntu/000077500000000000000000000000001455345632200200425ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/30-debian-ubuntu/mkosi.conf000066400000000000000000000020211455345632200220260ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|ubuntu [Content] Packages= ?exact-name(systemd-ukify) apt archlinux-keyring bash btrfs-progs bubblewrap ca-certificates coreutils cpio curl dbus-broker debian-archive-keyring dnf dosfstools e2fsprogs erofs-utils iproute2 iputils-ping libtss2-dev makepkg mtools openssh-client openssh-server openssl ovmf pacman-package-manager pesign python3-cryptography python3-pefile qemu-system sbsigntool shim-signed socat squashfs-tools strace swtpm systemd systemd-container systemd-coredump systemd-sysv tar tzdata udev uidmap util-linux xfsprogs xz-utils zstd zypper mkosi-20.2/mkosi.conf.d/30-debian-ubuntu/mkosi.conf.d/000077500000000000000000000000001455345632200223325ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/30-debian-ubuntu/mkosi.conf.d/20-systemd-extra.conf000066400000000000000000000002101455345632200262220ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Release=!jammy [Content] Packages= systemd-boot systemd-resolved mkosi-20.2/mkosi.conf.d/30-debian-ubuntu/mkosi.conf.d/20-x86-64.conf000066400000000000000000000002371455345632200242760ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= amd64-microcode grub-pc intel-microcode mkosi-20.2/mkosi.conf.d/30-rpm/000077500000000000000000000000001455345632200160765ustar00rootroot00000000000000mkosi-20.2/mkosi.conf.d/30-rpm/mkosi.build.chroot000077500000000000000000000005501455345632200215410ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later set -ex rpmbuild \ -bb \ --build-in-place \ $([ "$WITH_TESTS" = "0" ] && echo --nocheck) \ --define "_topdir /var/tmp" \ --define "_sourcedir rpm" \ --define "_rpmdir $PACKAGEDIR" \ --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \ rpm/mkosi.spec mkosi-20.2/mkosi.conf.d/30-rpm/mkosi.conf000066400000000000000000000001331455345632200200640ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] BuildSources=rpm Distribution=fedora mkosi-20.2/mkosi.conf.d/30-rpm/mkosi.postinst000077500000000000000000000001301455345632200210220ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later set -e dnf install --best mkosi mkosi-20.2/mkosi.conf.d/30-rpm/mkosi.prepare000077500000000000000000000015051455345632200206040ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later set -e if [ "$1" = "build" ]; then DEPS="--buildrequires" else DEPS="--requires" fi mkosi-chroot \ rpmspec \ --query \ "$DEPS" \ --define "_topdir /var/tmp" \ --define "_sourcedir rpm" \ rpm/mkosi.spec | grep -E -v mkosi | xargs -d '\n' dnf install if [ "$1" = "build" ]; then until mkosi-chroot \ rpmbuild \ -bd \ --build-in-place \ --define "_topdir /var/tmp" \ --define "_sourcedir rpm" \ --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \ rpm/mkosi.spec do EXIT_STATUS=$? 
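        # rpmbuild exits with status 11 when dynamic BuildRequires still need
        # to be installed; any other non-zero status is a real build failure.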
if [ $EXIT_STATUS -ne 11 ]; then exit $EXIT_STATUS fi dnf builddep /var/tmp/SRPMS/mkosi-*.buildreqs.nosrc.rpm done fi mkosi-20.2/mkosi.extra/000077500000000000000000000000001455345632200150345ustar00rootroot00000000000000mkosi-20.2/mkosi.extra/usr/000077500000000000000000000000001455345632200156455ustar00rootroot00000000000000mkosi-20.2/mkosi.extra/usr/lib/000077500000000000000000000000001455345632200164135ustar00rootroot00000000000000mkosi-20.2/mkosi.extra/usr/lib/systemd/000077500000000000000000000000001455345632200201035ustar00rootroot00000000000000mkosi-20.2/mkosi.extra/usr/lib/systemd/mkosi-check-and-shutdown.sh000077500000000000000000000003511455345632200252470ustar00rootroot00000000000000#!/bin/bash # SPDX-License-Identifier: LGPL-2.1-or-later set -eux systemctl --failed --no-legend | tee /failed-services # Exit with non-zero EC if the /failed-services file is not empty (we have -e set) [[ ! -s /failed-services ]] mkosi-20.2/mkosi.extra/usr/lib/systemd/system-preset/000077500000000000000000000000001455345632200227275ustar00rootroot00000000000000mkosi-20.2/mkosi.extra/usr/lib/systemd/system-preset/00-mkosi.preset000066400000000000000000000016111455345632200255110ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # mkosi adds its own ssh units via the --ssh switch so disable the default ones. disable ssh.service disable sshd.service # Make sure dbus-broker is started by default on Debian/Ubuntu. enable dbus-broker.service # Make sure we have networking available. enable systemd-networkd.service enable systemd-networkd-wait-online.service enable systemd-resolved.service # We install dnf in some images but it's only going to be used rarely, # so let's not have dnf create its cache. disable dnf-makecache.* # The rpmdb is already in the right location, don't try to migrate it. disable rpmdb-migrate.service # We have journald to receive audit data so let's make sure we're not running auditd as well disable auditd.service # systemd-timesyncd is not enabled by default in the default systemd preset so enable it here instead. enable systemd-timesyncd.service mkosi-20.2/mkosi.extra/usr/lib/systemd/system-preset/99-mkosi.preset000066400000000000000000000002111455345632200255260ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # Make sure that services are disabled by default (primarily for Debian/Ubuntu). 
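# Preset files are applied in lexical order with the first matching directive
# winning, so the units explicitly enabled in 00-mkosi.preset stay enabled.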
disable * mkosi-20.2/mkosi.extra/usr/lib/systemd/system/000077500000000000000000000000001455345632200214275ustar00rootroot00000000000000mkosi-20.2/mkosi.extra/usr/lib/systemd/system/mkosi-check-and-shutdown.service000066400000000000000000000005251455345632200276210ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Unit] Description=Check if any service failed and then shut down the machine After=multi-user.target network-online.target Requires=multi-user.target SuccessAction=exit FailureAction=exit SuccessActionExitStatus=123 [Service] Type=oneshot ExecStart=/usr/lib/systemd/mkosi-check-and-shutdown.sh mkosi-20.2/mkosi.md000077700000000000000000000000001455345632200210272mkosi/resources/mkosi.mdustar00rootroot00000000000000mkosi-20.2/mkosi.prepare.chroot000077500000000000000000000002521455345632200165710ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later if [ "$1" = "final" ] && command -v pacman-key; then pacman-key --init pacman-key --populate archlinux fi mkosi-20.2/mkosi/000077500000000000000000000000001455345632200137125ustar00rootroot00000000000000mkosi-20.2/mkosi/__init__.py000066400000000000000000004147521455345632200160400ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import contextlib import dataclasses import datetime import hashlib import io import itertools import json import logging import os import resource import shlex import shutil import subprocess import sys import tempfile import textwrap import uuid from collections.abc import Iterator, Mapping, Sequence from pathlib import Path from typing import Optional, TextIO, Union, cast import mkosi.resources from mkosi.archive import extract_tar, make_cpio, make_tar from mkosi.burn import run_burn from mkosi.config import ( Args, BiosBootloader, Bootloader, Compression, Config, ConfigFeature, DocFormat, JsonEncoder, ManifestFormat, OutputFormat, SecureBootSignTool, ShimBootloader, Verb, __version__, format_bytes, format_tree, parse_config, summary, want_selinux_relabel, yes_no, ) from mkosi.context import Context from mkosi.distributions import Distribution from mkosi.installer import ( clean_package_manager_metadata, finalize_package_manager_mounts, package_manager_scripts, ) from mkosi.kmod import gen_required_kernel_modules, process_kernel_modules from mkosi.log import ARG_DEBUG, complete_step, die, log_notice, log_step from mkosi.manifest import Manifest from mkosi.mounts import finalize_ephemeral_source_mounts, mount_overlay from mkosi.pager import page from mkosi.partition import Partition, finalize_root, finalize_roothash from mkosi.qemu import KernelType, copy_ephemeral, run_qemu, run_ssh from mkosi.run import ( CLONE_NEWNS, become_root, find_binary, fork_and_wait, log_process_failure, run, unshare, ) from mkosi.sandbox import chroot_cmd, finalize_passwd_mounts from mkosi.tree import copy_tree, move_tree, rmtree from mkosi.types import PathString from mkosi.util import ( INVOKING_USER, format_rlimit, make_executable, one_zero, read_env_file, read_os_release, resource_path, round_up, scopedenv, umask, ) from mkosi.versioncomp import GenericVersion MKOSI_AS_CALLER = ( "setpriv", f"--reuid={INVOKING_USER.uid}", f"--regid={INVOKING_USER.gid}", "--clear-groups", ) @contextlib.contextmanager def mount_base_trees(context: Context) -> Iterator[None]: if not context.config.base_trees or not context.config.overlay: yield return with complete_step("Mounting base trees…"), contextlib.ExitStack() as stack: bases = [] (context.workspace / 
"bases").mkdir(exist_ok=True) for path in context.config.base_trees: d = context.workspace / f"bases/{path.name}-{uuid.uuid4().hex}" if path.is_dir(): bases += [path] elif path.suffix == ".tar": extract_tar( path, d, tools=context.config.tools(), sandbox=context.sandbox(options=["--bind", d, d]), ) bases += [d] elif path.suffix == ".raw": run(["systemd-dissect", "-M", path, d]) stack.callback(lambda: run(["systemd-dissect", "-U", d])) bases += [d] else: die(f"Unsupported base tree source {path}") stack.enter_context(mount_overlay(bases, context.root, context.root)) yield def remove_files(context: Context) -> None: """Remove files based on user-specified patterns""" if not context.config.remove_files: return with complete_step("Removing files…"): for pattern in context.config.remove_files: rmtree(*context.root.glob(pattern.lstrip("/")), sandbox=context.sandbox(options=["--bind", context.root, context.root])) def install_distribution(context: Context) -> None: if context.config.base_trees: if not context.config.packages: return with complete_step(f"Installing extra packages for {str(context.config.distribution).capitalize()}"): context.config.distribution.install_packages(context, context.config.packages) else: with complete_step(f"Installing {str(context.config.distribution).capitalize()}"): context.config.distribution.install(context) if not context.config.overlay: if not (context.root / "etc/machine-id").exists(): # Uninitialized means we want it to get initialized on first boot. with umask(~0o444): (context.root / "etc/machine-id").write_text("uninitialized\n") # Ensure /efi exists so that the ESP is mounted there, as recommended by # https://0pointer.net/blog/linux-boot-partitions.html. Use the most restrictive access mode we # can without tripping up mkfs tools since this directory is only meant to be overmounted and # should not be read from or written to. with umask(~0o500): (context.root / "efi").mkdir(exist_ok=True) if context.config.packages: context.config.distribution.install_packages(context, context.config.packages) for f in ("var/lib/systemd/random-seed", "var/lib/systemd/credential.secret", "etc/machine-info", "var/lib/dbus/machine-id"): # Using missing_ok=True still causes an OSError if the mount is read-only even if the # file doesn't exist so do an explicit exists() check first. if (context.root / f).exists(): (context.root / f).unlink() def install_build_packages(context: Context) -> None: if not context.config.build_scripts or not context.config.build_packages: return with ( complete_step(f"Installing build packages for {context.config.distribution.pretty_name()}"), mount_build_overlay(context), ): context.config.distribution.install_packages(context, context.config.build_packages) def remove_packages(context: Context) -> None: """Remove packages listed in config.remove_packages""" if not context.config.remove_packages: return with complete_step(f"Removing {len(context.config.remove_packages)} packages…"): try: context.config.distribution.remove_packages(context, context.config.remove_packages) except NotImplementedError: die(f"Removing packages is not supported for {context.config.distribution}") def check_root_populated(context: Context) -> None: """Check that the root was populated by looking for a os-release file.""" osrelease = context.root / "usr/lib/os-release" if not osrelease.exists(): die( f"{osrelease} not found.", hint=( "The root must be populated by the distribution, or from base trees, " "skeleton trees, and prepare scripts." 
) ) def configure_os_release(context: Context) -> None: """Write IMAGE_ID and IMAGE_VERSION to /usr/lib/os-release in the image.""" if not context.config.image_id and not context.config.image_version: return if context.config.overlay or context.config.output_format in (OutputFormat.sysext, OutputFormat.confext): return for candidate in ["usr/lib/os-release", "etc/os-release", "usr/lib/initrd-release", "etc/initrd-release"]: osrelease = context.root / candidate # at this point we know we will either change or add to the file newosrelease = osrelease.with_suffix(".new") if not osrelease.is_file() or osrelease.is_symlink(): continue image_id_written = image_version_written = False with osrelease.open("r") as old, newosrelease.open("w") as new: # fix existing values for line in old.readlines(): if context.config.image_id and line.startswith("IMAGE_ID="): new.write(f'IMAGE_ID="{context.config.image_id}"\n') image_id_written = True elif context.config.image_version and line.startswith("IMAGE_VERSION="): new.write(f'IMAGE_VERSION="{context.config.image_version}"\n') image_version_written = True else: new.write(line) # append if they were missing if context.config.image_id and not image_id_written: new.write(f'IMAGE_ID="{context.config.image_id}"\n') if context.config.image_version and not image_version_written: new.write(f'IMAGE_VERSION="{context.config.image_version}"\n') newosrelease.rename(osrelease) def configure_extension_release(context: Context) -> None: if context.config.output_format not in (OutputFormat.sysext, OutputFormat.confext): return prefix = "SYSEXT" if context.config.output_format == OutputFormat.sysext else "CONFEXT" d = "usr/lib" if context.config.output_format == OutputFormat.sysext else "etc" p = context.root / d / f"extension-release.d/extension-release.{context.config.output}" p.parent.mkdir(parents=True, exist_ok=True) osrelease = read_os_release(context.root) extrelease = read_env_file(p) if p.exists() else {} new = p.with_suffix(".new") with new.open("w") as f: for k, v in extrelease.items(): f.write(f"{k}={v}\n") if "ID" not in extrelease: f.write(f"ID={osrelease.get('ID', '_any')}\n") if "VERSION_ID" not in extrelease and (version := osrelease.get("VERSION_ID")): f.write(f"VERSION_ID={version}\n") if f"{prefix}_ID" not in extrelease and context.config.image_id: f.write(f"{prefix}_ID={context.config.image_id}\n") if f"{prefix}_VERSION_ID" not in extrelease and context.config.image_version: f.write(f"{prefix}_VERSION_ID={context.config.image_version}\n") if f"{prefix}_SCOPE" not in extrelease: f.write(f"{prefix}_SCOPE=initrd system portable\n") if "ARCHITECTURE" not in extrelease: f.write(f"ARCHITECTURE={context.config.architecture}\n") new.rename(p) def configure_autologin_service(context: Context, service: str, extra: str) -> None: dropin = context.root / f"usr/lib/systemd/system/{service}.d/autologin.conf" with umask(~0o755): dropin.parent.mkdir(parents=True, exist_ok=True) with umask(~0o644): dropin.write_text( textwrap.dedent( f"""\ [Service] ExecStart= ExecStart=-agetty -o '-f -p -- \\\\u' --autologin root {extra} $TERM StandardInput=tty StandardOutput=tty """ ) ) def configure_autologin(context: Context) -> None: if not context.config.autologin: return with complete_step("Setting up autologin…"): configure_autologin_service(context, "console-getty.service", "--noclear --keep-baud console 115200,38400,9600") configure_autologin_service(context, "getty@tty1.service", "--noclear -") configure_autologin_service(context, "serial-getty@ttyS0.service", "--keep-baud 
115200,57600,38400,9600 -") if context.config.architecture.default_serial_tty() != "ttyS0": configure_autologin_service(context, f"serial-getty@{context.config.architecture.default_serial_tty()}.service", "--keep-baud 115200,57600,38400,9600 -") @contextlib.contextmanager def mount_cache_overlay(context: Context) -> Iterator[None]: if not context.config.incremental or not context.config.base_trees or context.config.overlay: yield return d = context.workspace / "cache-overlay" with umask(~0o755): d.mkdir(exist_ok=True) with mount_overlay([context.root], d, context.root): yield @contextlib.contextmanager def mount_build_overlay(context: Context, volatile: bool = False) -> Iterator[Path]: d = context.workspace / "build-overlay" if not d.is_symlink(): with umask(~0o755): d.mkdir(exist_ok=True) with contextlib.ExitStack() as stack: lower = [context.root] if volatile: lower += [d] upper = None else: upper = d stack.enter_context(mount_overlay(lower, upper, context.root)) yield context.root @contextlib.contextmanager def finalize_scripts(scripts: Mapping[str, Sequence[PathString]] = {}) -> Iterator[Path]: with tempfile.TemporaryDirectory(prefix="mkosi-scripts") as d: for name, script in scripts.items(): # Make sure we don't end up in a recursive loop when we name a script after the binary it execs # by removing the scripts directory from the PATH when we execute a script. (Path(d) / name).write_text( textwrap.dedent( f"""\ #!/bin/sh DIR="$(cd "$(dirname "$0")" && pwd)" PATH="$(echo "$PATH" | tr ':' '\\n' | grep -v "$DIR" | tr '\\n' ':')" export PATH if [ $# -gt 0 ]; then exec {shlex.join(str(s) for s in script)} "$@" else exec {shlex.join(str(s) for s in script)} sh -i fi """ ) ) make_executable(Path(d) / name) os.utime(Path(d) / name, (0, 0)) yield Path(d) def finalize_host_scripts( context: Context, helpers: Mapping[str, Sequence[PathString]], ) -> contextlib.AbstractContextManager[Path]: scripts: dict[str, Sequence[PathString]] = {} if find_binary("git", root=context.config.tools()): scripts["git"] = ("git", "-c", "safe.directory=*") for binary in ("useradd", "groupadd"): if find_binary(binary, root=context.config.tools()): scripts[binary] = (binary, "--root", context.root) return finalize_scripts(scripts | dict(helpers) | package_manager_scripts(context)) def finalize_chroot_scripts(context: Context) -> contextlib.AbstractContextManager[Path]: git = {"git": ("git", "-c", "safe.directory=*")} if find_binary("git", root=context.root) else {} return finalize_scripts(git) def run_prepare_scripts(context: Context, build: bool) -> None: if not context.config.prepare_scripts: return if build and not context.config.build_scripts: return env = dict( ARCHITECTURE=str(context.config.architecture), BUILDROOT=str(context.root), SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", SCRIPT="/work/prepare", CHROOT_SCRIPT="/work/prepare", MKOSI_UID=str(INVOKING_USER.uid), MKOSI_GID=str(INVOKING_USER.gid), WITH_DOCS=one_zero(context.config.with_docs), WITH_NETWORK=one_zero(context.config.with_network), WITH_TESTS=one_zero(context.config.with_tests), ) with ( mount_build_overlay(context) if build else contextlib.nullcontext(), finalize_chroot_scripts(context) as cd, finalize_ephemeral_source_mounts(context.config) as sources, ): if build: step_msg = "Running prepare script {} in build overlay…" arg = "build" else: step_msg = "Running prepare script {}…" arg = "final" for script in context.config.prepare_scripts: chroot = chroot_cmd( context.root, resolve=True, options=[ "--bind", "/work", 
"/work", "--chdir", "/work/src", "--setenv", "BUILDROOT", "/", ], ) helpers: dict[str, Sequence[PathString]] = { "mkosi-chroot": chroot, "mkosi-as-caller" : MKOSI_AS_CALLER, } with ( finalize_host_scripts(context, helpers) as hd, complete_step(step_msg.format(script)), ): run( ["/work/prepare", arg], env=env | context.config.environment, stdin=sys.stdin, sandbox=context.sandbox( network=True, options=sources + [ "--ro-bind", script, "/work/prepare", "--ro-bind", cd, "/work/scripts", "--bind", context.root, context.root, *finalize_package_manager_mounts(context), "--chdir", "/work/src", ], scripts=hd, ) + (chroot if script.suffix == ".chroot" else []), ) def run_build_scripts(context: Context) -> None: if not context.config.build_scripts: return env = dict( ARCHITECTURE=str(context.config.architecture), BUILDROOT=str(context.root), DESTDIR="/work/dest", CHROOT_DESTDIR="/work/dest", OUTPUTDIR="/work/out", CHROOT_OUTPUTDIR="/work/out", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", SCRIPT="/work/build-script", CHROOT_SCRIPT="/work/build-script", MKOSI_UID=str(INVOKING_USER.uid), MKOSI_GID=str(INVOKING_USER.gid), WITH_DOCS=one_zero(context.config.with_docs), WITH_NETWORK=one_zero(context.config.with_network), WITH_TESTS=one_zero(context.config.with_tests), ) if context.config.build_dir is not None: env |= dict( BUILDDIR="/work/build", CHROOT_BUILDDIR="/work/build", ) with ( mount_build_overlay(context, volatile=True), finalize_chroot_scripts(context) as cd, finalize_ephemeral_source_mounts(context.config) as sources, ): for script in context.config.build_scripts: chroot = chroot_cmd( context.root, resolve=context.config.with_network, options=[ "--bind", "/work", "/work", "--chdir", "/work/src", "--setenv", "BUILDROOT", "/", *(["--setenv", "BUILDDIR", "/work/build"] if context.config.build_dir else []), ], ) helpers = { "mkosi-chroot": chroot, "mkosi-as-caller": MKOSI_AS_CALLER, } cmdline = context.args.cmdline if context.args.verb == Verb.build else [] with ( finalize_host_scripts(context, helpers) as hd, complete_step(f"Running build script {script}…"), ): run( ["/work/build-script", *cmdline], env=env | context.config.environment, stdin=sys.stdin, sandbox=context.sandbox( network=context.config.with_network, options=sources + [ "--ro-bind", script, "/work/build-script", "--ro-bind", cd, "/work/scripts", "--bind", context.root, context.root, "--bind", context.install_dir, "/work/dest", "--bind", context.staging, "/work/out", *( ["--bind", os.fspath(context.config.build_dir), "/work/build"] if context.config.build_dir else [] ), *finalize_package_manager_mounts(context), "--chdir", "/work/src", ], scripts=hd, ) + (chroot if script.suffix == ".chroot" else []), ) if any(context.packages.iterdir()): with complete_step("Rebuilding local package repository"): context.config.distribution.createrepo(context) def run_postinst_scripts(context: Context) -> None: if not context.config.postinst_scripts: return env = dict( ARCHITECTURE=str(context.config.architecture), BUILDROOT=str(context.root), OUTPUTDIR="/work/out", CHROOT_OUTPUTDIR="/work/out", SCRIPT="/work/postinst", CHROOT_SCRIPT="/work/postinst", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", MKOSI_UID=str(INVOKING_USER.uid), MKOSI_GID=str(INVOKING_USER.gid), ) with ( finalize_chroot_scripts(context) as cd, finalize_ephemeral_source_mounts(context.config) as sources, ): for script in context.config.postinst_scripts: chroot = chroot_cmd( context.root, resolve=context.config.with_network, 
options=[ "--bind", "/work", "/work", "--chdir", "/work/src", "--setenv", "BUILDROOT", "/", ], ) helpers = { "mkosi-chroot": chroot, "mkosi-as-caller": MKOSI_AS_CALLER, } with ( finalize_host_scripts(context, helpers) as hd, complete_step(f"Running postinstall script {script}…"), ): run( ["/work/postinst", "final"], env=env | context.config.environment, stdin=sys.stdin, sandbox=context.sandbox( network=context.config.with_network, options=sources + [ "--ro-bind", script, "/work/postinst", "--ro-bind", cd, "/work/scripts", "--bind", context.root, context.root, "--bind", context.staging, "/work/out", *finalize_package_manager_mounts(context), "--chdir", "/work/src", ], scripts=hd, ) + (chroot if script.suffix == ".chroot" else []), ) def run_finalize_scripts(context: Context) -> None: if not context.config.finalize_scripts: return env = dict( ARCHITECTURE=str(context.config.architecture), BUILDROOT=str(context.root), OUTPUTDIR="/work/out", CHROOT_OUTPUTDIR="/work/out", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", SCRIPT="/work/finalize", CHROOT_SCRIPT="/work/finalize", MKOSI_UID=str(INVOKING_USER.uid), MKOSI_GID=str(INVOKING_USER.gid), ) with ( finalize_chroot_scripts(context) as cd, finalize_ephemeral_source_mounts(context.config) as sources, ): for script in context.config.finalize_scripts: chroot = chroot_cmd( context.root, resolve=context.config.with_network, options=[ "--bind", "/work", "/work", "--chdir", "/work/src", "--setenv", "BUILDROOT", "/", ], ) helpers = { "mkosi-chroot": chroot, "mkosi-as-caller": MKOSI_AS_CALLER, } with ( finalize_host_scripts(context, helpers) as hd, complete_step(f"Running finalize script {script}…"), ): run( ["/work/finalize"], env=env | context.config.environment, stdin=sys.stdin, sandbox=context.sandbox( network=context.config.with_network, options=sources + [ "--ro-bind", script, "/work/finalize", "--ro-bind", cd, "/work/scripts", "--bind", context.root, context.root, "--bind", context.staging, "/work/out", *finalize_package_manager_mounts(context), "--chdir", "/work/src", ], scripts=hd, ) + (chroot if script.suffix == ".chroot" else []), ) def certificate_common_name(context: Context, certificate: Path) -> str: output = run( [ "openssl", "x509", "-noout", "-subject", "-nameopt", "multiline", "-in", certificate, ], stdout=subprocess.PIPE, sandbox=context.sandbox(options=["--ro-bind", certificate, certificate]), ).stdout for line in output.splitlines(): if not line.strip().startswith("commonName"): continue _, sep, value = line.partition("=") if not sep: die("Missing '=' delimiter in openssl output") return value.strip() die(f"Certificate {certificate} is missing Common Name") def pesign_prepare(context: Context) -> None: assert context.config.secure_boot_key assert context.config.secure_boot_certificate if (context.workspace / "pesign").exists(): return (context.workspace / "pesign").mkdir() # pesign takes a certificate directory and a certificate common name as input arguments, so we have # to transform our input key and cert into that format. Adapted from # https://www.mankier.com/1/pesign#Examples-Signing_with_the_certificate_and_private_key_in_individual_files with open(context.workspace / "secure-boot.p12", "wb") as f: run( [ "openssl", "pkcs12", "-export", # Arcane incantation to create a pkcs12 certificate without a password. 
"-keypbe", "NONE", "-certpbe", "NONE", "-nomaciter", "-passout", "pass:", "-inkey", context.config.secure_boot_key, "-in", context.config.secure_boot_certificate, ], stdout=f, sandbox=context.sandbox( options=[ "--ro-bind", context.config.secure_boot_key, context.config.secure_boot_key, "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, ], ), ) (context.workspace / "pesign").mkdir(exist_ok=True) run( [ "pk12util", "-K", "", "-W", "", "-i", context.workspace / "secure-boot.p12", "-d", context.workspace / "pesign", ], sandbox=context.sandbox( options=[ "--ro-bind", context.workspace / "secure-boot.p12", context.workspace / "secure-boot.p12", "--bind", context.workspace / "pesign", context.workspace / "pesign", ], ), ) def efi_boot_binary(context: Context) -> Path: arch = context.config.architecture.to_efi() assert arch return Path(f"efi/EFI/BOOT/BOOT{arch.upper()}.EFI") def shim_second_stage_binary(context: Context) -> Path: arch = context.config.architecture.to_efi() assert arch if context.config.distribution == Distribution.opensuse: return Path("efi/EFI/BOOT/grub.EFI") else: return Path(f"efi/EFI/BOOT/grub{arch}.EFI") def sign_efi_binary(context: Context, input: Path, output: Path) -> None: assert context.config.secure_boot_key assert context.config.secure_boot_certificate if ( context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign or context.config.secure_boot_sign_tool == SecureBootSignTool.auto and find_binary("sbsign", root=context.config.tools()) is not None ): with open(output, "wb") as f: run( [ "sbsign", "--key", context.config.secure_boot_key, "--cert", context.config.secure_boot_certificate, "--output", "/dev/stdout", input, ], stdout=f, sandbox=context.sandbox( options=[ "--ro-bind", context.config.secure_boot_key, context.config.secure_boot_key, "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, "--ro-bind", input, input, ] ), ) elif ( context.config.secure_boot_sign_tool == SecureBootSignTool.pesign or context.config.secure_boot_sign_tool == SecureBootSignTool.auto and find_binary("pesign", root=context.config.tools()) is not None ): pesign_prepare(context) with open(output, "wb") as f: run( [ "pesign", "--certdir", context.workspace / "pesign", "--certificate", certificate_common_name(context, context.config.secure_boot_certificate), "--sign", "--force", "--in", input, "--out", "/dev/stdout", ], stdout=f, sandbox=context.sandbox( options=[ "--ro-bind", context.workspace / "pesign", context.workspace / "pesign", "--ro-bind", input, input, ] ), ) else: die("One of sbsign or pesign is required to use SecureBoot=") def install_systemd_boot(context: Context) -> None: if not want_efi(context.config): return if context.config.bootloader != Bootloader.systemd_boot: return if not any(gen_kernel_images(context)) and context.config.bootable == ConfigFeature.auto: return if not find_binary("bootctl", root=context.config.tools()): if context.config.bootable == ConfigFeature.enabled: die("An EFI bootable image with systemd-boot was requested but bootctl was not found") return directory = context.root / "usr/lib/systemd/boot/efi" if not directory.exists() or not any(directory.iterdir()): if context.config.bootable == ConfigFeature.enabled: die("A EFI bootable image with systemd-boot was requested but systemd-boot was not found at " f"{directory.relative_to(context.root)}") return if context.config.secure_boot: with complete_step("Signing systemd-boot binaries…"): for input in 
itertools.chain(directory.glob('*.efi'), directory.glob('*.EFI')): output = directory / f"{input}.signed" sign_efi_binary(context, input, output) with complete_step("Installing systemd-boot…"): run( ["bootctl", "install", "--root", context.root, "--all-architectures", "--no-variables"], env={"SYSTEMD_ESP_PATH": "/efi"}, sandbox=context.sandbox(options=["--bind", context.root, context.root]), ) if context.config.shim_bootloader != ShimBootloader.none: shutil.copy2( context.root / f"efi/EFI/systemd/systemd-boot{context.config.architecture.to_efi()}.efi", context.root / shim_second_stage_binary(context), ) if context.config.secure_boot and context.config.secure_boot_auto_enroll: assert context.config.secure_boot_key assert context.config.secure_boot_certificate with complete_step("Setting up secure boot auto-enrollment…"): keys = context.root / "efi/loader/keys/auto" with umask(~0o700): keys.mkdir(parents=True, exist_ok=True) # sbsiglist expects a DER certificate. with umask(~0o600), open(context.workspace / "mkosi.der", "wb") as f: run( [ "openssl", "x509", "-outform", "DER", "-in", context.config.secure_boot_certificate, ], stdout=f, sandbox=context.sandbox( options=[ "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, ], ), ) with umask(~0o600), open(context.workspace / "mkosi.esl", "wb") as f: run( [ "sbsiglist", "--owner", str(uuid.uuid4()), "--type", "x509", "--output", "/dev/stdout", context.workspace / "mkosi.der", ], stdout=f, sandbox=context.sandbox( options=["--ro-bind", context.workspace / "mkosi.der", context.workspace / "mkosi.der"] ), ) # We reuse the key for all secure boot databases to keep things simple. for db in ["PK", "KEK", "db"]: with umask(~0o600), open(keys / f"{db}.auth", "wb") as f: run( [ "sbvarsign", "--attr", "NON_VOLATILE,BOOTSERVICE_ACCESS,RUNTIME_ACCESS,TIME_BASED_AUTHENTICATED_WRITE_ACCESS", "--key", context.config.secure_boot_key, "--cert", context.config.secure_boot_certificate, "--output", "/dev/stdout", db, context.workspace / "mkosi.esl", ], stdout=f, sandbox=context.sandbox( options=[ "--ro-bind", context.config.secure_boot_key, context.config.secure_boot_key, "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, "--ro-bind", context.workspace / "mkosi.esl", context.workspace / "mkosi.esl", ], ), ) def find_and_install_shim_binary( context: Context, name: str, signed: Sequence[str], unsigned: Sequence[str], output: Path, ) -> None: if context.config.shim_bootloader == ShimBootloader.signed: for pattern in signed: for p in context.root.glob(pattern): if p.is_symlink() and p.readlink().is_absolute(): logging.warning(f"Ignoring signed {name} EFI binary which is an absolute path to {p.readlink()}") continue rel = p.relative_to(context.root) if (context.root / output).is_dir(): output /= rel.name log_step(f"Installing signed {name} EFI binary from /{rel} to /{output}") shutil.copy2(p, context.root / output) return if context.config.bootable == ConfigFeature.enabled: die(f"Couldn't find signed {name} EFI binary installed in the image") else: for pattern in unsigned: for p in context.root.glob(pattern): if p.is_symlink() and p.readlink().is_absolute(): logging.warning(f"Ignoring unsigned {name} EFI binary which is an absolute path to {p.readlink()}") continue rel = p.relative_to(context.root) if (context.root / output).is_dir(): output /= rel.name if context.config.secure_boot: log_step(f"Signing and installing unsigned {name} EFI binary from /{rel} to /{output}") 
sign_efi_binary(context, p, context.root / output) else: log_step(f"Installing unsigned {name} EFI binary /{rel} to /{output}") shutil.copy2(p, context.root / output) return if context.config.bootable == ConfigFeature.enabled: die(f"Couldn't find unsigned {name} EFI binary installed in the image") def install_shim(context: Context) -> None: if not want_efi(context.config): return if context.config.shim_bootloader == ShimBootloader.none: return if not any(gen_kernel_images(context)) and context.config.bootable == ConfigFeature.auto: return dst = efi_boot_binary(context) with umask(~0o700): (context.root / dst).parent.mkdir(parents=True, exist_ok=True) arch = context.config.architecture.to_efi() signed = [ f"usr/lib/shim/shim{arch}.efi.signed.latest", # Ubuntu f"usr/lib/shim/shim{arch}.efi.signed", # Debian f"boot/efi/EFI/*/shim{arch}.efi", # Fedora/CentOS "usr/share/efi/*/shim.efi", # OpenSUSE ] unsigned = [ f"usr/lib/shim/shim{arch}.efi", # Debian/Ubuntu f"usr/share/shim/*/*/shim{arch}.efi", # Fedora/CentOS f"usr/share/shim/shim{arch}.efi", # Arch ] find_and_install_shim_binary(context, "shim", signed, unsigned, dst) signed = [ f"usr/lib/shim/mm{arch}.efi.signed", # Debian f"usr/lib/shim/mm{arch}.efi", # Ubuntu f"boot/efi/EFI/*/mm{arch}.efi", # Fedora/CentOS "usr/share/efi/*/MokManager.efi", # OpenSUSE ] unsigned = [ f"usr/lib/shim/mm{arch}.efi", # Debian/Ubuntu f"usr/share/shim/*/*/mm{arch}.efi", # Fedora/CentOS f"usr/share/shim/mm{arch}.efi", # Arch ] find_and_install_shim_binary(context, "mok", signed, unsigned, dst.parent) def find_grub_bios_directory(context: Context) -> Optional[Path]: for d in ("usr/lib/grub/i386-pc", "usr/share/grub2/i386-pc"): if (p := context.root / d).exists() and any(p.iterdir()): return p return None def find_grub_binary(binary: str, root: Path = Path("/")) -> Optional[Path]: assert "grub" in binary and "grub2" not in binary return find_binary(binary, binary.replace("grub", "grub2"), root=root) def want_grub_efi(context: Context) -> bool: if context.config.bootable == ConfigFeature.disabled: return False if context.config.bootloader != Bootloader.grub: return False if context.config.overlay or context.config.output_format.is_extension_image(): return False if not any((context.root / "efi").rglob("grub*.efi")): if context.config.bootable == ConfigFeature.enabled: die("A bootable EFI image with grub was requested but grub for EFI is not installed in /efi") return False return True def want_grub_bios(context: Context, partitions: Sequence[Partition] = ()) -> bool: if context.config.bootable == ConfigFeature.disabled: return False if context.config.output_format != OutputFormat.disk: return False if context.config.bios_bootloader != BiosBootloader.grub: return False if context.config.overlay: return False have = find_grub_bios_directory(context) is not None if not have and context.config.bootable == ConfigFeature.enabled: die("A BIOS bootable image with grub was requested but grub for BIOS is not installed") bios = any(p.type == Partition.GRUB_BOOT_PARTITION_UUID for p in partitions) if partitions and not bios and context.config.bootable == ConfigFeature.enabled: die("A BIOS bootable image with grub was requested but no BIOS Boot Partition was configured") esp = any(p.type == "esp" for p in partitions) if partitions and not esp and context.config.bootable == ConfigFeature.enabled: die("A BIOS bootable image with grub was requested but no ESP partition was configured") root = any(p.type.startswith("root") or p.type.startswith("usr") for p in partitions) if 
partitions and not root and context.config.bootable == ConfigFeature.enabled:
        die("A BIOS bootable image with grub was requested but no root or usr partition was configured")

    installed = True

    for binary in ("grub-mkimage", "grub-bios-setup"):
        if find_grub_binary(binary, root=context.config.tools()):
            continue

        if context.config.bootable == ConfigFeature.enabled:
            die(f"A BIOS bootable image with grub was requested but {binary} was not found")

        installed = False

    return (have and bios and esp and root and installed) if partitions else have


def prepare_grub_config(context: Context) -> Optional[Path]:
    config = context.root / "efi" / context.config.distribution.grub_prefix() / "grub.cfg"
    with umask(~0o700):
        config.parent.mkdir(exist_ok=True)

    # For some unknown reason, if we don't set the timeout to zero, grub never leaves its menu, so we default
    # to a zero timeout, but only if the config file hasn't been provided by the user.
    if not config.exists():
        with umask(~0o600), config.open("w") as f:
            f.write("set timeout=0\n")

    return config


def prepare_grub_efi(context: Context) -> None:
    if not want_grub_efi(context):
        return

    # Signed EFI grub shipped by distributions reads its configuration from /EFI/<distribution>/grub.cfg in
    # the ESP so let's put a shim there to redirect to the actual configuration file.
    earlyconfig = context.root / "efi/EFI" / context.config.distribution.name / "grub.cfg"
    with umask(~0o700):
        earlyconfig.parent.mkdir(parents=True, exist_ok=True)

    # Read the actual config file from the root of the ESP.
    earlyconfig.write_text(f"configfile /{context.config.distribution.grub_prefix()}/grub.cfg\n")

    config = prepare_grub_config(context)
    assert config

    with config.open("a") as f:
        f.write('if [ "${grub_platform}" == "efi" ]; then\n')

        for uki in (context.root / "boot/EFI/Linux").glob("*.efi"):
            f.write(
                textwrap.dedent(
                    f"""\
                    menuentry "{uki.stem}" {{
                        chainloader /{uki.relative_to(context.root / "boot")}
                    }}
                    """
                )
            )

        f.write("fi\n")


def prepare_grub_bios(context: Context, partitions: Sequence[Partition]) -> None:
    if not want_grub_bios(context, partitions):
        return

    config = prepare_grub_config(context)
    assert config

    root = finalize_root(partitions)
    assert root

    token = find_entry_token(context)

    dst = context.root / "boot" / token
    with umask(~0o700):
        dst.mkdir(exist_ok=True)

    with config.open("a") as f:
        f.write('if [ "${grub_platform}" == "pc" ]; then\n')

        for kver, kimg in gen_kernel_images(context):
            kdst = dst / kver
            with umask(~0o700):
                kdst.mkdir(exist_ok=True)

            microcode = build_microcode_initrd(context)
            kmods = build_kernel_modules_initrd(context, kver)

            with umask(~0o600):
                kimg = Path(shutil.copy2(context.root / kimg, kdst / "vmlinuz"))
                initrds = [Path(shutil.copy2(microcode, kdst / "microcode"))] if microcode else []
                initrds += [
                    Path(shutil.copy2(initrd, dst / initrd.name))
                    for initrd in (context.config.initrds or [build_initrd(context)])
                ]
                initrds += [Path(shutil.copy2(kmods, kdst / "kmods"))]

            image = Path("/") / kimg.relative_to(context.root / "boot")
            cmdline = " ".join(context.config.kernel_command_line)
            initrds = " ".join(
                [os.fspath(Path("/") / initrd.relative_to(context.root / "boot")) for initrd in initrds]
            )

            f.write(
                textwrap.dedent(
                    f"""\
                    menuentry "{token}-{kver}" {{
                        linux {image} {root} {cmdline}
                        initrd {initrds}
                    }}
                    """
                )
            )

        f.write('fi\n')

    # grub-install insists on opening the root partition device to probe its filesystem which requires root
    # so we're forced to reimplement its functionality. Luckily that's pretty simple: run grub-mkimage to
    # generate the required core.img and copy the relevant files to the ESP.
    mkimage = find_grub_binary("grub-mkimage", root=context.config.tools())
    assert mkimage

    directory = find_grub_bios_directory(context)
    assert directory

    dst = context.root / "efi" / context.config.distribution.grub_prefix() / "i386-pc"
    dst.mkdir(parents=True, exist_ok=True)

    with tempfile.NamedTemporaryFile("w", prefix="grub-early-config") as earlyconfig:
        earlyconfig.write(
            textwrap.dedent(
                f"""\
                search --no-floppy --set=root --file /{context.config.distribution.grub_prefix()}/grub.cfg
                set prefix=($root)/{context.config.distribution.grub_prefix()}
                """
            )
        )

        earlyconfig.flush()

        run(
            [
                mkimage,
                "--directory", directory,
                "--config", earlyconfig.name,
                "--prefix", f"/{context.config.distribution.grub_prefix()}",
                "--output", dst / "core.img",
                "--format", "i386-pc",
                *(["--verbose"] if ARG_DEBUG.get() else []),
                # Modules required to find and read from the XBOOTLDR partition which has all the other modules.
                "fat",
                "part_gpt",
                "biosdisk",
                "search",
                "search_fs_file",
            ],
            sandbox=context.sandbox(
                options=[
                    "--bind", context.root, context.root,
                    "--ro-bind", earlyconfig.name, earlyconfig.name,
                ],
            ),
        )

    for p in directory.glob("*.mod"):
        shutil.copy2(p, dst)

    for p in directory.glob("*.lst"):
        shutil.copy2(p, dst)

    shutil.copy2(directory / "modinfo.sh", dst)
    shutil.copy2(directory / "boot.img", dst)

    dst = context.root / "efi" / context.config.distribution.grub_prefix() / "fonts"
    with umask(~0o700):
        dst.mkdir(exist_ok=True)

    for d in ("grub", "grub2"):
        unicode = context.root / "usr/share" / d / "unicode.pf2"
        if unicode.exists():
            shutil.copy2(unicode, dst)


def install_grub_bios(context: Context, partitions: Sequence[Partition]) -> None:
    if not want_grub_bios(context, partitions):
        return

    setup = find_grub_binary("grub-bios-setup", root=context.config.tools())
    assert setup

    with (
        complete_step("Installing grub boot loader…"),
        tempfile.NamedTemporaryFile(mode="w") as mountinfo,
    ):
        # grub-bios-setup insists on being able to open the root device that --directory is located on, which
        # needs root privileges. However, it only uses the root device when it is unable to embed itself in the
        # bios boot partition. To make installation work unprivileged, we trick grub into thinking that the root
        # device is our image by mounting over its /proc/self/mountinfo file (where it gets its information
        # from) with our own file correlating the root directory to our image file.
        mountinfo.write(f"1 0 1:1 / / - fat {context.staging / context.config.output_with_format}\n")
        mountinfo.flush()

        # We don't set up the mountinfo bind mount with bwrap because we need to know the child process pid to
        # be able to do the mount and we don't know the pid beforehand.
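        # Illustration (not part of mkosi): a self-contained sketch of the same trick with a
        # hypothetical helper name, assuming mount(8) is permitted (e.g. inside a private mount
        # namespace). The shell expands $$ to its own PID, and exec then replaces that very shell
        # with the target command, so the bind mount over /proc/<pid>/mountinfo stays visible to it.
        def _demo_fake_mountinfo(fake_mountinfo: str, cmd: list[str]) -> None:
            subprocess.run(
                ["sh", "-c", f'mount --bind {fake_mountinfo} /proc/$$/mountinfo && exec "$0" "$@"', *cmd],
                check=True,
            )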
run( [ "sh", "-c", f"mount --bind {mountinfo.name} /proc/$$/mountinfo && exec $0 \"$@\"", setup, "--directory", context.root / "efi" / context.config.distribution.grub_prefix() / "i386-pc", *(["--verbose"] if ARG_DEBUG.get() else []), context.staging / context.config.output_with_format, ], sandbox=context.sandbox( options=[ "--bind", context.root, context.root, "--bind", context.staging, context.staging, "--bind", mountinfo.name, mountinfo.name, ], ), ) def install_tree( context: Context, src: Path, dst: Path, *, target: Optional[Path] = None, preserve: bool = True, ) -> None: t = dst if target: t = dst / target.relative_to("/") with umask(~0o755): t.parent.mkdir(parents=True, exist_ok=True) def copy() -> None: copy_tree( src, t, preserve=preserve, use_subvolumes=context.config.use_subvolumes, tools=context.config.tools(), sandbox=context.sandbox(options=["--ro-bind", src, src, "--bind", t.parent, t.parent]), ) if src.is_dir() or (src.is_file() and target): copy() elif src.suffix == ".tar": extract_tar( src, t, tools=context.config.tools(), # Make sure tar uses user/group information from the root directory instead of the host. sandbox=context.sandbox(options=["--bind", dst, dst, *finalize_passwd_mounts(dst)]), ) elif src.suffix == ".raw": run( ["systemd-dissect", "--copy-from", src, "/", t], sandbox=context.sandbox( devices=True, network=True, options=["--ro-bind", src, src, "--bind", t.parent, t.parent], ), ) else: # If we get an unknown file without a target, we just copy it into /. copy() def install_base_trees(context: Context) -> None: if not context.config.base_trees or context.config.overlay: return with complete_step("Copying in base trees…"): for path in context.config.base_trees: install_tree(context, path, context.root) def install_skeleton_trees(context: Context) -> None: if not context.config.skeleton_trees: return with complete_step("Copying in skeleton file trees…"): for tree in context.config.skeleton_trees: install_tree(context, tree.source, context.root, target=tree.target, preserve=False) def install_package_manager_trees(context: Context) -> None: # Ensure /etc exists in the package manager tree (context.pkgmngr / "etc").mkdir(exist_ok=True) # Required to be able to access certificates in the sandbox when running from nix. 
if Path("/etc/static").is_symlink(): (context.pkgmngr / "etc/static").symlink_to(Path("/etc/static").readlink()) if not context.config.package_manager_trees: return with complete_step("Copying in package manager file trees…"): for tree in context.config.package_manager_trees: install_tree(context, tree.source, context.pkgmngr, target=tree.target, preserve=False) def install_package_directories(context: Context) -> None: if not context.config.package_directories: return with complete_step("Copying in extra packages…"): for d in context.config.package_directories: install_tree(context, d, context.packages) if any(context.packages.iterdir()): with complete_step("Initializing local package repository…"): context.config.distribution.createrepo(context) def install_extra_trees(context: Context) -> None: if not context.config.extra_trees: return with complete_step("Copying in extra file trees…"): for tree in context.config.extra_trees: install_tree(context, tree.source, context.root, target=tree.target, preserve=False) def install_build_dest(context: Context) -> None: if not any(context.install_dir.iterdir()): return with complete_step("Copying in build tree…"): install_tree(context, context.install_dir, context.root) def gzip_binary(context: Context) -> str: return "pigz" if find_binary("pigz", root=context.config.tools()) else "gzip" def gen_kernel_images(context: Context) -> Iterator[tuple[str, Path]]: if not (context.root / "usr/lib/modules").exists(): return for kver in sorted( (k for k in (context.root / "usr/lib/modules").iterdir() if k.is_dir()), key=lambda k: GenericVersion(k.name), reverse=True ): # Make sure we look for anything that remotely resembles vmlinuz, as # the arch specific install scripts in the kernel source tree sometimes # do weird stuff. But let's make sure we're not returning UKIs as the # UKI on Fedora is named vmlinuz-virt.efi. for kimg in kver.glob("vmlinuz*"): if KernelType.identify(context.config, kimg) != KernelType.uki: yield kver.name, kimg break def build_initrd(context: Context) -> Path: if context.config.distribution == Distribution.custom: die("Building a default initrd is not supported for custom distributions") # Default values are assigned via the parser so we go via the argument parser to construct # the config for the initrd. if context.config.root_password: password, hashed = context.config.root_password rootpwopt = f"hashed:{password}" if hashed else password else: rootpwopt = None cmdline = [ "--directory", "", "--distribution", str(context.config.distribution), "--release", context.config.release, "--architecture", str(context.config.architecture), *(["--mirror", context.config.mirror] if context.config.mirror else []), "--repository-key-check", str(context.config.repository_key_check), "--repositories", ",".join(context.config.repositories), "--package-manager-tree", ",".join(format_tree(t) for t in context.config.package_manager_trees), # Note that when compress_output == Compression.none == 0 we don't pass --compress-output which means the # default compression will get picked. This is exactly what we want so that initrds are always compressed. 
*(["--compress-output", str(context.config.compress_output)] if context.config.compress_output else []), "--with-network", str(context.config.with_network), "--cache-only", str(context.config.cache_only), "--output-dir", str(context.workspace / "initrd"), *(["--workspace-dir", str(context.config.workspace_dir)] if context.config.workspace_dir else []), "--cache-dir", str(context.cache_dir), *(["--local-mirror", str(context.config.local_mirror)] if context.config.local_mirror else []), "--incremental", str(context.config.incremental), "--acl", str(context.config.acl), *(f"--package={package}" for package in context.config.initrd_packages), "--package-directory", str(context.packages), "--output", f"{context.config.output}-initrd", *(["--image-id", context.config.image_id] if context.config.image_id else []), *(["--image-version", context.config.image_version] if context.config.image_version else []), *( ["--source-date-epoch", str(context.config.source_date_epoch)] if context.config.source_date_epoch is not None else [] ), *(["--locale", context.config.locale] if context.config.locale else []), *(["--locale-messages", context.config.locale_messages] if context.config.locale_messages else []), *(["--keymap", context.config.keymap] if context.config.keymap else []), *(["--timezone", context.config.timezone] if context.config.timezone else []), *(["--hostname", context.config.hostname] if context.config.hostname else []), *(["--root-password", rootpwopt] if rootpwopt else []), *([f"--environment={k}='{v}'" for k, v in context.config.environment.items()]), *(["--tools-tree", str(context.config.tools_tree)] if context.config.tools_tree else []), *([f"--extra-search-path={p}" for p in context.config.extra_search_paths]), *(["-f"] * context.args.force), ] with resource_path(mkosi.resources, "mkosi-initrd") as r: cmdline += ["--include", os.fspath(r)] for include in context.config.initrd_include: cmdline += ["--include", os.fspath(include)] args, [config] = parse_config(cmdline + ["build"]) make_executable( *config.prepare_scripts, *config.postinst_scripts, *config.finalize_scripts, *config.build_scripts, ) config = dataclasses.replace(config, image="default-initrd") assert config.output_dir config.output_dir.mkdir(exist_ok=True) if (config.output_dir / config.output).exists(): return config.output_dir / config.output with complete_step("Building default initrd"): build_image(args, config) return config.output_dir / config.output def build_microcode_initrd(context: Context) -> Optional[Path]: microcode = context.workspace / "initrd-microcode.img" if microcode.exists(): return microcode amd = context.root / "usr/lib/firmware/amd-ucode" intel = context.root / "usr/lib/firmware/intel-ucode" if not amd.exists() and not intel.exists(): logging.debug("/usr/lib/firmware/{amd-ucode,intel-ucode} not found, not adding microcode initrd") return None root = context.workspace / "initrd-microcode-root" destdir = root / "kernel/x86/microcode" with umask(~0o755): destdir.mkdir(parents=True, exist_ok=True) if amd.exists(): with (destdir / "AuthenticAMD.bin").open("wb") as f: for p in amd.iterdir(): f.write(p.read_bytes()) if intel.exists(): with (destdir / "GenuineIntel.bin").open("wb") as f: for p in intel.iterdir(): f.write(p.read_bytes()) make_cpio( root, microcode, tools=context.config.tools(), sandbox=context.sandbox(options=["--ro-bind", root, root]), ) return microcode def build_kernel_modules_initrd(context: Context, kver: str) -> Path: kmods = context.workspace / f"initrd-kernel-modules-{kver}.img" if 
kmods.exists(): return kmods make_cpio( context.root, kmods, files=gen_required_kernel_modules( context.root, kver, include=context.config.kernel_modules_initrd_include, exclude=context.config.kernel_modules_initrd_exclude, host=context.config.kernel_modules_initrd_include_host, sandbox=context.sandbox(options=["--ro-bind", context.root, context.root]), ), tools=context.config.tools(), sandbox=context.sandbox(options=["--ro-bind", context.root, context.root]), ) # Debian/Ubuntu do not compress their kernel modules, so we compress the initramfs instead. Note that # this is not ideal since the compressed kernel modules will all be decompressed on boot which # requires significant memory. if context.config.distribution.is_apt_distribution(): maybe_compress(context, Compression.zstd, kmods, kmods) return kmods def join_initrds(initrds: Sequence[Path], output: Path) -> Path: assert initrds if len(initrds) == 1: shutil.copy2(initrds[0], output) return output seq = io.BytesIO() for p in initrds: initrd = p.read_bytes() n = len(initrd) padding = b'\0' * (round_up(n, 4) - n) # pad to 32 bit alignment seq.write(initrd) seq.write(padding) output.write_bytes(seq.getbuffer()) return output def python_binary(config: Config) -> str: # If there's no tools tree, prefer the interpreter from MKOSI_INTERPRETER. If there is a tools # tree, just use the default python3 interpreter. return "python3" if config.tools_tree else os.getenv("MKOSI_INTERPRETER", "python3") def extract_pe_section(context: Context, binary: Path, section: str, output: Path) -> None: # When using a tools tree, we want to use the pefile module from the tools tree instead of requiring that # python-pefile is installed on the host. So we execute python as a subprocess to make sure we load # pefile from the tools tree if one is used. # TODO: Use ignore_padding=True instead of length once we can depend on a newer pefile. pefile = textwrap.dedent( f"""\ import pefile import sys from pathlib import Path pe = pefile.PE("{binary}", fast_load=True) section = {{s.Name.decode().strip("\\0"): s for s in pe.sections}}["{section}"] sys.stdout.buffer.write(section.get_data(length=section.Misc_VirtualSize)) """ ) with open(output, "wb") as f: run( [python_binary(context.config)], input=pefile, stdout=f, sandbox=context.sandbox(options=["--ro-bind", binary, binary]) ) def build_uki( context: Context, stub: Path, kver: str, kimg: Path, initrds: Sequence[Path], cmdline: Sequence[str], output: Path, roothash: Optional[str] = None, ) -> None: cmdline = list(cmdline) if roothash: cmdline += [roothash] cmdline += context.config.kernel_command_line # Older versions of systemd-stub expect the cmdline section to be null terminated. We can't embed # nul terminators in argv so let's communicate the cmdline via a file instead. 
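    # Illustration (not part of mkosi): argv entries are NUL-terminated C strings, so a literal
    # "\0" can never be passed through execve(); a file has no such restriction. A minimal sketch
    # of the workaround used below, with a hypothetical helper name:
    def _demo_write_cmdline(path: Path, args: Sequence[str]) -> None:
        path.write_text(" ".join(args).strip() + "\x00")  # trailing NUL for older systemd-stub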
(context.workspace / "cmdline").write_text(f"{' '.join(cmdline).strip()}\x00") if not (arch := context.config.architecture.to_efi()): die(f"Architecture {context.config.architecture} does not support UEFI") cmd: list[PathString] = [ find_binary("ukify", root=context.config.tools()) or "/usr/lib/systemd/ukify", "--cmdline", f"@{context.workspace / 'cmdline'}", "--os-release", f"@{context.root / 'usr/lib/os-release'}", "--stub", stub, "--output", output, "--efi-arch", arch, "--uname", kver, ] options: list[PathString] = [ "--bind", output.parent, output.parent, "--ro-bind", context.workspace / "cmdline", context.workspace / "cmdline", "--ro-bind", context.root / "usr/lib/os-release", context.root / "usr/lib/os-release", "--ro-bind", stub, stub, ] if context.config.secure_boot: assert context.config.secure_boot_key assert context.config.secure_boot_certificate cmd += ["--sign-kernel"] if context.config.secure_boot_sign_tool != SecureBootSignTool.pesign: cmd += [ "--signtool", "sbsign", "--secureboot-private-key", context.config.secure_boot_key, "--secureboot-certificate", context.config.secure_boot_certificate, ] options += [ "--ro-bind", context.config.secure_boot_key, context.config.secure_boot_key, "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, ] else: pesign_prepare(context) cmd += [ "--signtool", "pesign", "--secureboot-certificate-dir", context.workspace / "pesign", "--secureboot-certificate-name", certificate_common_name(context, context.config.secure_boot_certificate), ] options += ["--ro-bind", context.workspace / "pesign", context.workspace / "pesign"] sign_expected_pcr = ( context.config.sign_expected_pcr == ConfigFeature.enabled or ( context.config.sign_expected_pcr == ConfigFeature.auto and find_binary("systemd-measure", "/usr/lib/systemd/systemd-measure", root=context.config.tools()) ) ) if sign_expected_pcr: cmd += [ "--pcr-private-key", context.config.secure_boot_key, "--pcr-banks", "sha1,sha256", ] options += ["--ro-bind", context.config.secure_boot_key, context.config.secure_boot_key] cmd += ["build", "--linux", kimg] options += ["--ro-bind", kimg, kimg] for initrd in initrds: cmd += ["--initrd", initrd] options += ["--ro-bind", initrd, initrd] with complete_step(f"Generating unified kernel image for kernel version {kver}"): run(cmd, sandbox=context.sandbox(options=options)) def want_efi(config: Config) -> bool: # Do we want to make the image bootable on EFI firmware? # Note that this returns True also in the case where autodetection might later # cause the system to not be made bootable on EFI firmware after the filesystem # has been populated. 
if config.output_format in (OutputFormat.uki, OutputFormat.esp): return True if config.bootable == ConfigFeature.disabled: return False if config.bootloader == Bootloader.none: return False if ( (config.output_format == OutputFormat.cpio or config.output_format.is_extension_image() or config.overlay) and config.bootable == ConfigFeature.auto ): return False if config.architecture.to_efi() is None: if config.bootable == ConfigFeature.enabled: die(f"Cannot make image bootable on UEFI on {config.architecture} architecture") return False return True def find_entry_token(context: Context) -> str: if ( "--version" not in run(["kernel-install", "--help"], stdout=subprocess.PIPE, sandbox=context.sandbox()).stdout or systemd_tool_version(context.config, "kernel-install") < "255.1" ): return context.config.image_id or context.config.distribution.name output = json.loads(run(["kernel-install", "--root", context.root, "--json=pretty", "inspect"], sandbox=context.sandbox(options=["--ro-bind", context.root, context.root]), stdout=subprocess.PIPE).stdout) logging.debug(json.dumps(output, indent=4)) return cast(str, output["EntryToken"]) def install_uki(context: Context, partitions: Sequence[Partition]) -> None: # Iterates through all kernel versions included in the image and generates a combined # kernel+initrd+cmdline+osrelease EFI file from it and places it in the /EFI/Linux directory of the ESP. # sd-boot iterates through them and shows them in the menu. These "unified" single-file images have the # benefit that they can be signed like normal EFI binaries, and can encode everything necessary to boot a # specific root device, including the root hash. if not want_efi(context.config) or context.config.output_format in (OutputFormat.uki, OutputFormat.esp): return arch = context.config.architecture.to_efi() stub = context.root / f"usr/lib/systemd/boot/efi/linux{arch}.efi.stub" if not stub.exists() and context.config.bootable == ConfigFeature.auto: return roothash = finalize_roothash(partitions) for kver, kimg in gen_kernel_images(context): # See https://systemd.io/AUTOMATIC_BOOT_ASSESSMENT/#boot-counting boot_count = "" if (context.root / "etc/kernel/tries").exists(): boot_count = f'+{(context.root / "etc/kernel/tries").read_text().strip()}' if context.config.bootloader == Bootloader.uki: if context.config.shim_bootloader != ShimBootloader.none: boot_binary = context.root / shim_second_stage_binary(context) else: boot_binary = context.root / efi_boot_binary(context) else: token = find_entry_token(context) if roothash: _, _, h = roothash.partition("=") boot_binary = context.root / f"boot/EFI/Linux/{token}-{kver}-{h}{boot_count}.efi" else: boot_binary = context.root / f"boot/EFI/Linux/{token}-{kver}{boot_count}.efi" microcode = build_microcode_initrd(context) initrds = [microcode] if microcode else [] initrds += context.config.initrds or [build_initrd(context)] if context.config.kernel_modules_initrd: initrds += [build_kernel_modules_initrd(context, kver)] # Make sure the parent directory where we'll be writing the UKI exists. 
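        # Illustration (not part of mkosi): the entry naming above follows the pattern
        # "<token>-<kver>[-<roothash>][+<tries>].efi". A compact sketch of the same logic, with a
        # hypothetical helper name and arguments:
        def _demo_uki_entry_name(token: str, kver: str, roothash: Optional[str], boot_count: str) -> str:
            h = f"-{roothash.partition('=')[2]}" if roothash else ""
            return f"{token}-{kver}{h}{boot_count}.efi"  # e.g. "fedora-6.6.8-abc123+3.efi"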
with umask(~0o700): boot_binary.parent.mkdir(parents=True, exist_ok=True) if (context.root / "etc/kernel/cmdline").exists(): cmdline = [(context.root / "etc/kernel/cmdline").read_text().strip()] elif (context.root / "usr/lib/kernel/cmdline").exists(): cmdline = [(context.root / "usr/lib/kernel/cmdline").read_text().strip()] else: cmdline = [] build_uki(context, stub, kver, context.root / kimg, initrds, cmdline, boot_binary, roothash=roothash) if not (context.staging / context.config.output_split_initrd).exists(): # Extract the combined initrds from the UKI so we can use it to direct kernel boot with qemu # if needed. extract_pe_section(context, boot_binary, ".initrd", context.staging / context.config.output_split_initrd) if not (context.staging / context.config.output_split_uki).exists(): shutil.copy(boot_binary, context.staging / context.config.output_split_uki) # ukify will have signed the kernel image as well. Let's make sure we put the signed kernel # image in the output directory instead of the unsigned one by reading it from the UKI. extract_pe_section(context, boot_binary, ".linux", context.staging / context.config.output_split_kernel) print_output_size(boot_binary) if context.config.bootloader == Bootloader.uki: break if ( context.config.bootable == ConfigFeature.enabled and not (context.staging / context.config.output_split_uki).exists() ): die("A bootable image was requested but no kernel was found") def make_uki(context: Context, stub: Path, kver: str, kimg: Path, output: Path) -> None: microcode = build_microcode_initrd(context) make_cpio( context.root, context.workspace / "initrd", tools=context.config.tools(), sandbox=context.sandbox( # Make sure cpio uses user/group information from the root directory instead of the host. options=["--ro-bind", context.root, context.root, *finalize_passwd_mounts(context.root)], ), ) maybe_compress(context, context.config.compress_output, context.workspace / "initrd", context.workspace / "initrd") initrds = [microcode] if microcode else [] initrds += [context.workspace / "initrd"] build_uki(context, stub, kver, kimg, initrds, [], output) extract_pe_section(context, output, ".linux", context.staging / context.config.output_split_kernel) extract_pe_section(context, output, ".initrd", context.staging / context.config.output_split_initrd) def compressor_command(context: Context, compression: Compression) -> list[PathString]: """Returns a command suitable for compressing archives.""" if compression == Compression.gz: return [gzip_binary(context), "--fast", "--stdout", "-"] elif compression == Compression.xz: return ["xz", "--check=crc32", "--fast", "-T0", "--stdout", "-"] elif compression == Compression.zstd: return ["zstd", "-q", "-T0", "--stdout", "-"] else: die(f"Unknown compression {compression}") def maybe_compress(context: Context, compression: Compression, src: Path, dst: Optional[Path] = None) -> None: if not compression or src.is_dir(): if dst: move_tree( src, dst, use_subvolumes=context.config.use_subvolumes, tools=context.config.tools(), sandbox=context.sandbox(options=["--bind", src.parent, src.parent, "--bind", dst.parent, dst.parent]), ) return if not dst: dst = src.parent / f"{src.name}.{compression}" with complete_step(f"Compressing {src} with {compression}"): with src.open("rb") as i: src.unlink() # if src == dst, make sure dst doesn't truncate the src file but creates a new file. 
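            # Illustration (not part of mkosi): a sketch of this in-place rewrite pattern. The
            # already-open read handle keeps the old inode alive after unlink(), while the write
            # open() creates a brand-new file under the same name, so src == dst is safe:
            #
            #     def inplace_rewrite(path: Path) -> None:
            #         with path.open("rb") as i:
            #             path.unlink()
            #             with path.open("wb") as o:
            #                 o.write(i.read())  # a real caller pipes through a compressor instead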
with dst.open("wb") as o: run(compressor_command(context, compression), stdin=i, stdout=o, sandbox=context.sandbox()) def copy_vmlinuz(context: Context) -> None: if (context.staging / context.config.output_split_kernel).exists(): return for _, kimg in gen_kernel_images(context): shutil.copy(context.root / kimg, context.staging / context.config.output_split_kernel) break def copy_nspawn_settings(context: Context) -> None: if context.config.nspawn_settings is None: return None with complete_step("Copying nspawn settings file…"): shutil.copy2(context.config.nspawn_settings, context.staging / context.config.output_nspawn_settings) def copy_initrd(context: Context) -> None: if (context.staging / context.config.output_split_initrd).exists(): return if context.config.bootable == ConfigFeature.disabled: return if context.config.output_format not in (OutputFormat.disk, OutputFormat.directory): return for kver, _ in gen_kernel_images(context): microcode = build_microcode_initrd(context) initrds = [microcode] if microcode else [] initrds += context.config.initrds or [build_initrd(context)] if context.config.kernel_modules_initrd: kver = next(gen_kernel_images(context))[0] initrds += [build_kernel_modules_initrd(context, kver)] join_initrds(initrds, context.staging / context.config.output_split_initrd) break def hash_file(of: TextIO, path: Path) -> None: bs = 16 * 1024**2 h = hashlib.sha256() with path.open("rb") as sf: while (buf := sf.read(bs)): h.update(buf) of.write(h.hexdigest() + " *" + path.name + "\n") def calculate_sha256sum(context: Context) -> None: if not context.config.checksum: return if context.config.output_format == OutputFormat.directory: return with complete_step("Calculating SHA256SUMS…"): with open(context.workspace / context.config.output_checksum, "w") as f: for p in context.staging.iterdir(): hash_file(f, p) (context.workspace / context.config.output_checksum).rename(context.staging / context.config.output_checksum) def calculate_signature(context: Context) -> None: if not context.config.sign or not context.config.checksum: return if context.config.output_format == OutputFormat.directory: return # GPG messes with the user's home directory so we run it as the invoking user. cmdline: list[PathString] = [ "setpriv", f"--reuid={INVOKING_USER.uid}", f"--regid={INVOKING_USER.gid}", "--clear-groups", "gpg", "--detach-sign", ] # Need to specify key before file to sign if context.config.key is not None: cmdline += ["--default-key", context.config.key] cmdline += ["--output", "-", "-"] home = Path(context.config.environment.get("GNUPGHOME", INVOKING_USER.home() / ".gnupg")) if not home.exists(): die(f"GPG home {home} not found") env = dict(GNUPGHOME=os.fspath(home)) if sys.stderr.isatty(): env |= dict(GPGTTY=os.ttyname(sys.stderr.fileno())) options: list[PathString] = ["--perms", "755", "--dir", home, "--bind", home, home] # gpg can communicate with smartcard readers via this socket so bind mount it in if it exists. 
if (p := Path("/run/pcscd/pcscd.comm")).exists(): options += ["--perms", "755", "--dir", p.parent, "--bind", p, p] with ( complete_step("Signing SHA256SUMS…"), open(context.staging / context.config.output_checksum, "rb") as i, open(context.staging / context.config.output_signature, "wb") as o, ): run(cmdline, env=env, stdin=i, stdout=o, sandbox=context.sandbox(options=options)) def dir_size(path: Union[Path, os.DirEntry[str]]) -> int: dir_sum = 0 for entry in os.scandir(path): if entry.is_symlink(): # We can ignore symlinks because they either point into our tree, # in which case we'll include the size of target directory anyway, # or outside, in which case we don't need to. continue elif entry.is_file(): dir_sum += entry.stat().st_blocks * 512 elif entry.is_dir(): dir_sum += dir_size(entry) return dir_sum def save_manifest(context: Context, manifest: Optional[Manifest]) -> None: if not manifest: return if manifest.has_data(): if ManifestFormat.json in context.config.manifest_format: with complete_step(f"Saving manifest {context.config.output_manifest}"): with open(context.staging / context.config.output_manifest, 'w') as f: manifest.write_json(f) if ManifestFormat.changelog in context.config.manifest_format: with complete_step(f"Saving report {context.config.output_changelog}"): with open(context.staging / context.config.output_changelog, 'w') as f: manifest.write_package_report(f) def print_output_size(path: Path) -> None: if path.is_dir(): log_step(f"{path} size is " + format_bytes(dir_size(path)) + ".") else: size = format_bytes(path.stat().st_size) space = format_bytes(path.stat().st_blocks * 512) log_step(f"{path} size is {size}, consumes {space}.") def cache_tree_paths(config: Config) -> tuple[Path, Path, Path]: fragments = [config.distribution, config.release, config.architecture] if config.image: fragments += [config.image] key = '~'.join(str(s) for s in fragments) assert config.cache_dir return ( config.cache_dir / f"{key}.cache", config.cache_dir / f"{key}.build.cache", config.cache_dir / f"{key}.manifest", ) def check_inputs(config: Config) -> None: """ Make sure all the inputs exist that aren't checked during config parsing because they might be created by an earlier build. """ for base in config.base_trees: if not base.exists(): die(f"Base tree {base} not found") if config.tools_tree and not config.tools_tree.exists(): die(f"Tools tree {config.tools_tree} not found") for name, trees in (("Skeleton", config.skeleton_trees), ("Package manager", config.package_manager_trees), ("Extra", config.extra_trees)): for tree in trees: if not tree.source.exists(): die(f"{name} tree {tree.source} not found") if config.bootable != ConfigFeature.disabled: for p in config.initrds: if not p.exists(): die(f"Initrd {p} not found") if not p.is_file(): die(f"Initrd {p} is not a file") for script in config.prepare_scripts + config.build_scripts + config.postinst_scripts + config.finalize_scripts: if not os.access(script, os.X_OK): die(f"{script} is not executable") def check_outputs(config: Config) -> None: for f in ( config.output_with_compression, config.output_checksum if config.checksum else None, config.output_signature if config.sign else None, config.output_nspawn_settings if config.nspawn_settings else None, ): if f and (config.output_dir_or_cwd() / f).exists(): die(f"Output path {f} exists already. 
(Consider invocation with --force.)") def systemd_tool_version(config: Config, tool: PathString) -> GenericVersion: return GenericVersion( run([tool, "--version"], stdout=subprocess.PIPE, sandbox=config.sandbox()).stdout.split()[2].strip("()") ) def check_tool(config: Config, *tools: PathString, reason: str, hint: Optional[str] = None) -> Path: tool = find_binary(*tools, root=config.tools()) if not tool: die(f"Could not find '{tools[0]}' which is required to {reason}.", hint=hint) return tool def check_systemd_tool( config: Config, *tools: PathString, version: str, reason: str, hint: Optional[str] = None, ) -> None: tool = check_tool(config, *tools, reason=reason, hint=hint) v = systemd_tool_version(config, tool) if v < version: die(f"Found '{tool}' with version {v} but version {version} or newer is required to {reason}.", hint=f"Use ToolsTree=default to get a newer version of '{tools[0]}'.") def check_tools(config: Config, verb: Verb) -> None: if verb == Verb.build: if want_efi(config): check_systemd_tool( config, "ukify", "/usr/lib/systemd/ukify", version="254", reason="build bootable images", hint="Use ToolsTree=default to download most required tools including ukify automatically or use " "Bootable=no to create a non-bootable image which doesn't require ukify", ) if config.output_format in (OutputFormat.disk, OutputFormat.esp): check_systemd_tool(config, "systemd-repart", version="254", reason="build disk images") if config.selinux_relabel == ConfigFeature.enabled: check_tool(config, "setfiles", reason="relabel files") if verb == Verb.boot: check_systemd_tool(config, "systemd-nspawn", version="254", reason="boot images") def configure_ssh(context: Context) -> None: if not context.config.ssh: return unitdir = context.root / "usr/lib/systemd/system" with umask(~0o755): unitdir.mkdir(parents=True, exist_ok=True) with umask(~0o644): (unitdir / "ssh.socket").write_text( textwrap.dedent( """\ [Unit] Description=Mkosi SSH Server VSock Socket ConditionVirtualization=!container Wants=sshd-keygen.target [Socket] ListenStream=vsock::22 Accept=yes [Install] WantedBy=sockets.target """ ) ) (unitdir / "ssh@.service").write_text( textwrap.dedent( """\ [Unit] Description=Mkosi SSH Server After=sshd-keygen.target [Service] # We disable PAM because of an openssh-server bug where it sets PAM_RHOST=UNKNOWN when -i is # used causing a very slow reverse DNS lookup by pam. ExecStart=sshd -i -o UsePAM=no StandardInput=socket RuntimeDirectoryPreserve=yes RuntimeDirectory=sshd # ssh always exits with 255 even on normal disconnect, so let's mark that as success so we # don't get noisy logs about SSH service failures. 
SuccessExitStatus=255 """ ) ) preset = context.root / "usr/lib/systemd/system-preset/80-mkosi-ssh.preset" with umask(~0o755): preset.parent.mkdir(parents=True, exist_ok=True) with umask(~0o644): preset.write_text("enable ssh.socket\n") def configure_initrd(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return if ( not (context.root / "init").exists() and not (context.root / "init").is_symlink() and (context.root / "usr/lib/systemd/systemd").exists() ): (context.root / "init").symlink_to("/usr/lib/systemd/systemd") if not context.config.make_initrd: return if not (context.root / "etc/initrd-release").exists() and not (context.root / "etc/initrd-release").is_symlink(): (context.root / "etc/initrd-release").symlink_to("/etc/os-release") def configure_clock(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return with umask(~0o644): (context.root / "usr/lib/clock-epoch").touch() def run_depmod(context: Context, *, force: bool = False) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return outputs = ( "modules.dep", "modules.dep.bin", "modules.symbols", "modules.symbols.bin", ) for kver, _ in gen_kernel_images(context): modulesd = context.root / "usr/lib/modules" / kver if ( not force and not context.config.kernel_modules_exclude and all((modulesd / o).exists() for o in outputs) ): mtime = (modulesd / "modules.dep").stat().st_mtime if all(m.stat().st_mtime <= mtime for m in modulesd.rglob("*.ko*")): continue process_kernel_modules( context.root, kver, include=context.config.kernel_modules_include, exclude=context.config.kernel_modules_exclude, host=context.config.kernel_modules_include_host, sandbox=context.sandbox(options=["--ro-bind", context.root, context.root]), ) with complete_step(f"Running depmod for {kver}"): run(["depmod", "--all", "--basedir", context.root, kver], sandbox=context.sandbox(options=["--bind", context.root, context.root])) def run_sysusers(context: Context) -> None: if not find_binary("systemd-sysusers", root=context.config.tools()): logging.info("systemd-sysusers is not installed, not generating system users") return with complete_step("Generating system users"): run(["systemd-sysusers", "--root", context.root], sandbox=context.sandbox(options=["--bind", context.root, context.root])) def run_tmpfiles(context: Context) -> None: if not find_binary("systemd-tmpfiles", root=context.config.tools()): logging.info("systemd-tmpfiles is not installed, not generating volatile files") return with complete_step("Generating volatile files"): cmdline = [ "systemd-tmpfiles", f"--root={context.root}", "--boot", "--create", "--remove", # Exclude APIVFS and temporary files directories. *(f"--exclude-prefix={d}" for d in ("/tmp", "/var/tmp", "/run", "/proc", "/sys", "/dev")), ] result = run( cmdline, sandbox=context.sandbox( options=[ "--bind", context.root, context.root, # systemd uses acl.h to parse ACLs in tmpfiles snippets which uses the host's passwd so we have to # mount the image's passwd over it to make ACL parsing work. *finalize_passwd_mounts(context.root) ], ), env={"SYSTEMD_TMPFILES_FORCE_SUBVOL": "0"}, check=False, ) # systemd-tmpfiles can exit with DATAERR or CANTCREAT in some cases which are handled as success by the # systemd-tmpfiles service so we handle those as success as well. 
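# Aside (not mkosi code): the magic numbers 65 and 73 checked below are the BSD
# sysexits codes EX_DATAERR and EX_CANTCREAT, which Python exposes on POSIX via
# the os module. A minimal, self-documenting sketch of the same check:
#
#     import os
#
#     def tmpfiles_result_ok(returncode: int) -> bool:
#         # systemd-tmpfiles.service treats data/creation errors as success, so we do too.
#         return returncode in (0, os.EX_DATAERR, os.EX_CANTCREAT)  # (0, 65, 73)
#
#     assert tmpfiles_result_ok(65) and tmpfiles_result_ok(73) and not tmpfiles_result_ok(1)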
if result.returncode not in (0, 65, 73): log_process_failure(cmdline, result.returncode) raise subprocess.CalledProcessError(result.returncode, cmdline) def run_preset(context: Context) -> None: if not find_binary("systemctl", root=context.config.tools()): logging.info("systemctl is not installed, not applying presets") return with complete_step("Applying presets…"): run(["systemctl", "--root", context.root, "preset-all"], sandbox=context.sandbox(options=["--bind", context.root, context.root])) run(["systemctl", "--root", context.root, "--global", "preset-all"], sandbox=context.sandbox(options=["--bind", context.root, context.root])) def run_hwdb(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return if not find_binary("systemd-hwdb", root=context.config.tools()): logging.info("systemd-hwdb is not installed, not generating hwdb") return with complete_step("Generating hardware database"): run(["systemd-hwdb", "--root", context.root, "--usr", "--strict", "update"], sandbox=context.sandbox(options=["--bind", context.root, context.root])) # Remove any existing hwdb in /etc in favor of the one we just put in /usr. (context.root / "etc/udev/hwdb.bin").unlink(missing_ok=True) def run_firstboot(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return password, hashed = context.config.root_password or (None, False) if password and not hashed: password = run(["openssl", "passwd", "-stdin", "-6"], sandbox=context.sandbox(), input=password, stdout=subprocess.PIPE).stdout.strip() settings = ( ("--locale", "firstboot.locale", context.config.locale), ("--locale-messages", "firstboot.locale-messages", context.config.locale_messages), ("--keymap", "firstboot.keymap", context.config.keymap), ("--timezone", "firstboot.timezone", context.config.timezone), ("--hostname", None, context.config.hostname), ("--root-password-hashed", "passwd.hashed-password.root", password), ("--root-shell", "passwd.shell.root", context.config.root_shell), ) options = [] creds = [] for option, cred, value in settings: # Check for None as password might be the empty string if value is None: continue options += [option, value] if cred: creds += [(cred, value)] if not options and not creds: return with complete_step("Applying first boot settings"): run(["systemd-firstboot", "--root", context.root, "--force", *options], sandbox=context.sandbox(options=["--bind", context.root, context.root])) # Initrds generally don't ship with only /usr so there's not much point in putting the credentials in # /usr/lib/credstore. 
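# Aside (not mkosi code): run_firstboot() above hashes a plaintext root password
# with "openssl passwd -stdin -6" (SHA-512 crypt) before handing it to
# systemd-firstboot's --root-password-hashed. A standalone sketch of the same idea:
#
#     import subprocess
#
#     def hash_password(plain: str) -> str:
#         # -stdin reads the password from stdin so it never shows up in the process list.
#         return subprocess.run(
#             ["openssl", "passwd", "-stdin", "-6"],
#             input=plain, capture_output=True, text=True, check=True,
#         ).stdout.strip()
#
#     # hash_password("root") -> "$6$<salt>$<hash>", suitable for /etc/shadow.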
if context.config.output_format != OutputFormat.cpio or not context.config.make_initrd: with umask(~0o755): (context.root / "usr/lib/credstore").mkdir(exist_ok=True) for cred, value in creds: with umask(~0o600 if "password" in cred else ~0o644): (context.root / "usr/lib/credstore" / cred).write_text(value) def run_selinux_relabel(context: Context) -> None: if not (selinux := want_selinux_relabel(context.config, context.root)): return policy, fc, binpolicy = selinux with complete_step(f"Relabeling files using {policy} policy"): run(["setfiles", "-mFr", context.root, "-c", binpolicy, fc, context.root], sandbox=context.sandbox(options=["--bind", context.root, context.root]), check=context.config.selinux_relabel == ConfigFeature.enabled) def need_build_overlay(config: Config) -> bool: return bool(config.build_scripts and (config.build_packages or config.prepare_scripts)) def save_cache(context: Context) -> None: if not context.config.incremental or context.config.overlay: return final, build, manifest = cache_tree_paths(context.config) with complete_step("Installing cache copies"): rmtree(final, sandbox=context.sandbox(options=["--bind", final.parent, final.parent])) # We only use the cache-overlay directory for caching if we have a base tree, otherwise we just # cache the root directory. if (context.workspace / "cache-overlay").exists(): move_tree( context.workspace / "cache-overlay", final, use_subvolumes=context.config.use_subvolumes, tools=context.config.tools(), sandbox=context.sandbox( options=[ "--bind", context.workspace, context.workspace, "--bind", final.parent, final.parent, ], ), ) else: move_tree( context.root, final, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox( options=[ "--bind", context.root.parent, context.root.parent, "--bind", final.parent, final.parent, ], ), ) if need_build_overlay(context.config) and (context.workspace / "build-overlay").exists(): rmtree(build, sandbox=context.sandbox(options=["--bind", build.parent, build.parent])) move_tree( context.workspace / "build-overlay", build, use_subvolumes=context.config.use_subvolumes, tools=context.config.tools(), sandbox=context.sandbox( options=[ "--bind", context.workspace, context.workspace, "--bind", build.parent, build.parent, ], ), ) manifest.write_text( json.dumps( context.config.cache_manifest(), cls=JsonEncoder, indent=4, sort_keys=True, ) ) def reuse_cache(context: Context) -> bool: if not context.config.incremental or context.config.overlay: return False final, build, manifest = cache_tree_paths(context.config) if not final.exists() or (need_build_overlay(context.config) and not build.exists()): return False if manifest.exists(): prev = json.loads(manifest.read_text()) if prev != json.loads(json.dumps(context.config.cache_manifest(), cls=JsonEncoder)): return False else: return False # Either we're running as root and the cache is owned by root or we're running unprivileged inside a user # namespace and we'll think the cache is owned by root. However, if we're running as root and the cache was # generated by an unprivileged build, the cache will not be owned by root and we should not use it. 
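# Aside (not mkosi code): reuse_cache() above compares the saved cache manifest
# against the current config by round-tripping both through JSON, which normalizes
# types (tuples become lists, enums become strings) so semantically equal configs
# compare equal. A minimal sketch of that normalization trick:
#
#     import json
#
#     def same_after_roundtrip(saved_text: str, current: dict) -> bool:
#         # json.loads(json.dumps(...)) coerces tuples to lists etc. before comparing.
#         return json.loads(saved_text) == json.loads(json.dumps(current))
#
#     assert same_after_roundtrip('{"packages": ["a", "b"]}', {"packages": ("a", "b")})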
for p in (final, build): if p.exists() and p.stat().st_uid != 0: return False with complete_step("Copying cached trees"): install_tree(context, final, context.root) if need_build_overlay(context.config): (context.workspace / "build-overlay").symlink_to(build) return True def save_uki_components(context: Context) -> tuple[Optional[Path], Optional[str], Optional[Path]]: if context.config.output_format not in (OutputFormat.uki, OutputFormat.esp): return None, None, None try: kver, kimg = next(gen_kernel_images(context)) except StopIteration: die("A kernel must be installed in the image to build a UKI") kimg = shutil.copy2(context.root / kimg, context.workspace) if not (arch := context.config.architecture.to_efi()): die(f"Architecture {context.config.architecture} does not support UEFI") stub = context.root / f"usr/lib/systemd/boot/efi/linux{arch}.efi.stub" if not stub.exists(): die(f"sd-stub not found at /{stub.relative_to(context.root)} in the image") stub = shutil.copy2(stub, context.workspace) return stub, kver, kimg def make_image( context: Context, msg: str, skip: Sequence[str] = [], split: bool = False, root: Optional[Path] = None, definitions: Sequence[Path] = [], ) -> list[Partition]: cmdline: list[PathString] = [ "systemd-repart", "--empty=allow", "--size=auto", "--dry-run=no", "--json=pretty", "--no-pager", f"--offline={yes_no(context.config.repart_offline)}", "--seed", str(context.config.seed) if context.config.seed else "random", context.staging / context.config.output_with_format, ] options: list[PathString] = ["--bind", context.staging, context.staging] if root: cmdline += ["--root", root] options += ["--bind", root, root] if not context.config.architecture.is_native(): cmdline += ["--architecture", str(context.config.architecture)] if not (context.staging / context.config.output_with_format).exists(): cmdline += ["--empty=create"] if context.config.passphrase: cmdline += ["--key-file", context.config.passphrase] options += ["--ro-bind", context.config.passphrase, context.config.passphrase] if context.config.verity_key: cmdline += ["--private-key", context.config.verity_key] options += ["--ro-bind", context.config.verity_key, context.config.verity_key] if context.config.verity_certificate: cmdline += ["--certificate", context.config.verity_certificate] options += ["--ro-bind", context.config.verity_certificate, context.config.verity_certificate] if skip: cmdline += ["--defer-partitions", ",".join(skip)] if split: cmdline += ["--split=yes"] if context.config.sector_size: cmdline += ["--sector-size", str(context.config.sector_size)] for d in definitions: cmdline += ["--definitions", d] options += ["--ro-bind", d, d] with complete_step(msg): output = json.loads( run( cmdline, stdout=subprocess.PIPE, env=context.config.environment, sandbox=context.sandbox(devices=not context.config.repart_offline, options=options), ).stdout ) logging.debug(json.dumps(output, indent=4)) partitions = [Partition.from_dict(d) for d in output] if split: for p in partitions: if p.split_path: maybe_compress(context, context.config.compress_output, p.split_path) return partitions def make_disk( context: Context, msg: str, skip: Sequence[str] = [], split: bool = False, ) -> list[Partition]: if context.config.output_format != OutputFormat.disk: return [] if context.config.repart_dirs: definitions = context.config.repart_dirs else: defaults = context.workspace / "repart-definitions" if not defaults.exists(): defaults.mkdir() if (arch := context.config.architecture.to_efi()): bootloader = context.root / 
f"efi/EFI/BOOT/BOOT{arch.upper()}.EFI" else: bootloader = None esp = ( context.config.bootable == ConfigFeature.enabled or (context.config.bootable == ConfigFeature.auto and bootloader and bootloader.exists()) ) bios = (context.config.bootable != ConfigFeature.disabled and want_grub_bios(context)) if esp or bios: # Even if we're doing BIOS, let's still use the ESP to store the kernels, initrds and grub # modules. We cant use UKIs so we have to put each kernel and initrd on the ESP twice, so # let's make the ESP twice as big in that case. (defaults / "00-esp.conf").write_text( textwrap.dedent( f"""\ [Partition] Type=esp Format=vfat CopyFiles=/boot:/ CopyFiles=/efi:/ SizeMinBytes={"1G" if bios else "512M"} SizeMaxBytes={"1G" if bios else "512M"} """ ) ) # If grub for BIOS is installed, let's add a BIOS boot partition onto which we can install grub. if bios: (defaults / "05-bios.conf").write_text( textwrap.dedent( f"""\ [Partition] Type={Partition.GRUB_BOOT_PARTITION_UUID} SizeMinBytes=1M SizeMaxBytes=1M """ ) ) (defaults / "10-root.conf").write_text( textwrap.dedent( f"""\ [Partition] Type=root Format={context.config.distribution.filesystem()} CopyFiles=/ Minimize=guess """ ) ) definitions = [defaults] return make_image(context, msg=msg, skip=skip, split=split, root=context.root, definitions=definitions) def make_esp(context: Context, uki: Path) -> list[Partition]: if not (arch := context.config.architecture.to_efi()): die(f"Architecture {context.config.architecture} does not support UEFI") definitions = context.workspace / "esp-definitions" definitions.mkdir(exist_ok=True) # Use a minimum of 36MB or 260MB depending on sector size because otherwise the generated FAT filesystem will have # too few clusters to be considered a FAT32 filesystem by OVMF which will refuse to boot from it. # See https://superuser.com/questions/1702331/what-is-the-minimum-size-of-a-4k-native-partition-when-formatted-with-fat32/1717643#1717643 if context.config.sector_size == 512: m = 36 # TODO: Figure out minimum size for 2K sector size else: m = 260 # Always reserve 10MB for filesystem metadata. size = max(uki.stat().st_size, (m - 10) * 1024**2) + 10 * 1024**2 # TODO: Remove the extra 4096 for the max size once https://github.com/systemd/systemd/pull/29954 is in a stable # release. 
(definitions / "00-esp.conf").write_text( textwrap.dedent( f"""\ [Partition] Type=esp Format=vfat CopyFiles={uki}:/EFI/BOOT/BOOT{arch.upper()}.EFI SizeMinBytes={size} SizeMaxBytes={size + 4096} """ ) ) return make_image(context, msg="Generating ESP image", definitions=[definitions]) def make_extension_image(context: Context, output: Path) -> None: cmdline: list[PathString] = [ "systemd-repart", "--root", context.root, "--dry-run=no", "--no-pager", f"--offline={yes_no(context.config.repart_offline)}", "--seed", str(context.config.seed) if context.config.seed else "random", "--empty=create", "--size=auto", output, ] options: list[PathString] = [ "--bind", output.parent, output.parent, "--ro-bind", context.root, context.root, ] if not context.config.architecture.is_native(): cmdline += ["--architecture", str(context.config.architecture)] if context.config.passphrase: cmdline += ["--key-file", context.config.passphrase] options += ["--ro-bind", context.config.passphrase, context.config.passphrase] if context.config.verity_key: cmdline += ["--private-key", context.config.verity_key] options += ["--ro-bind", context.config.verity_key, context.config.verity_key] if context.config.verity_certificate: cmdline += ["--certificate", context.config.verity_certificate] options += ["--ro-bind", context.config.verity_certificate, context.config.verity_certificate] if context.config.sector_size: cmdline += ["--sector-size", str(context.config.sector_size)] env = { option: value for option, value in context.config.environment.items() if option.startswith("SYSTEMD_REPART_MKFS_OPTIONS_") or option == "SOURCE_DATE_EPOCH" } with ( resource_path(mkosi.resources, f"repart/definitions/{context.config.output_format}.repart.d") as r, complete_step(f"Building {context.config.output_format} extension image") ): options += ["--ro-bind", r, r] run( cmdline + ["--definitions", r], env=env, sandbox=context.sandbox(devices=not context.config.repart_offline, options=options), ) def finalize_staging(context: Context) -> None: # Our output unlinking logic removes everything prefixed with the name of the image, so let's make # sure that everything we put into the output directory is prefixed with the name of the output. for f in context.staging.iterdir(): # Skip the symlink we create without the version that points to the output with the version. if f.name.startswith(context.config.output) and f.is_symlink(): continue name = f.name if not name.startswith(context.config.output): name = f"{context.config.output}-{name}" if name != f.name: f.rename(context.staging / name) for f in context.staging.iterdir(): # Make sure all build outputs that are not directories are owned by the user running mkosi. 
if not f.is_dir(): os.chown(f, INVOKING_USER.uid, INVOKING_USER.gid, follow_symlinks=False) move_tree( f, context.config.output_dir_or_cwd(), use_subvolumes=context.config.use_subvolumes, tools=context.config.tools(), sandbox=context.sandbox( options=[ "--bind", context.staging, context.staging, "--bind", context.config.output_dir_or_cwd(), context.config.output_dir_or_cwd(), ], ), ) def normalize_mtime(root: Path, mtime: Optional[int], directory: Optional[Path] = None) -> None: if mtime is None: return directory = directory or Path("") if not (root / directory).exists(): return with complete_step(f"Normalizing modification times of /{directory}"): os.utime(root / directory, (mtime, mtime), follow_symlinks=False) for p in (root / directory).rglob("*"): os.utime(p, (mtime, mtime), follow_symlinks=False) @contextlib.contextmanager def setup_workspace(args: Args, config: Config) -> Iterator[Path]: with contextlib.ExitStack() as stack: workspace = Path(tempfile.mkdtemp(dir=config.workspace_dir_or_default(), prefix="mkosi-workspace")) sandbox = config.sandbox( options=["--bind", config.workspace_dir_or_default(), config.workspace_dir_or_default()], ) stack.callback(lambda: rmtree(workspace, sandbox=sandbox)) with scopedenv({"TMPDIR" : os.fspath(workspace)}): try: yield Path(workspace) except BaseException: if args.debug_workspace: stack.pop_all() log_notice(f"Workspace: {workspace}") workspace.chmod(0o755) raise def build_image(args: Args, config: Config) -> None: manifest = Manifest(config) if config.manifest_format else None with setup_workspace(args, config) as workspace: context = Context(args, config, workspace) install_package_manager_trees(context) install_package_directories(context) with mount_base_trees(context): install_base_trees(context) cached = reuse_cache(context) context.config.distribution.setup(context) if not cached: with mount_cache_overlay(context): install_skeleton_trees(context) install_distribution(context) run_prepare_scripts(context, build=False) install_build_packages(context) run_prepare_scripts(context, build=True) run_depmod(context, force=True) save_cache(context) reuse_cache(context) check_root_populated(context) run_build_scripts(context) if context.config.output_format == OutputFormat.none: # Touch an empty file to indicate the image was built. (context.staging / context.config.output).touch() finalize_staging(context) return install_build_dest(context) install_extra_trees(context) run_postinst_scripts(context) configure_autologin(context) configure_os_release(context) configure_extension_release(context) configure_initrd(context) configure_ssh(context) configure_clock(context) install_systemd_boot(context) install_shim(context) run_sysusers(context) run_tmpfiles(context) run_preset(context) run_depmod(context) run_firstboot(context) run_hwdb(context) # These might be removed by the next steps, # so let's save them for later if needed. 
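# Aside (not mkosi code): normalize_mtime() above clamps every file's timestamps
# to SOURCE_DATE_EPOCH so that repeated builds produce bit-identical trees. The
# core pattern, demonstrated on a throwaway directory:
#
#     import os, tempfile
#     from pathlib import Path
#
#     def clamp_mtimes(root: Path, epoch: int) -> None:
#         os.utime(root, (epoch, epoch), follow_symlinks=False)
#         for p in root.rglob("*"):
#             os.utime(p, (epoch, epoch), follow_symlinks=False)
#
#     with tempfile.TemporaryDirectory() as d:
#         (Path(d) / "file").touch()
#         clamp_mtimes(Path(d), 0)  # everything now dated 1970-01-01
#         assert (Path(d) / "file").stat().st_mtime == 0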
stub, kver, kimg = save_uki_components(context) remove_packages(context) if manifest: with complete_step("Recording packages in manifest…"): manifest.record_packages(context.root) clean_package_manager_metadata(context) remove_files(context) run_selinux_relabel(context) run_finalize_scripts(context) normalize_mtime(context.root, context.config.source_date_epoch) partitions = make_disk(context, skip=("esp", "xbootldr"), msg="Generating disk image") install_uki(context, partitions) prepare_grub_efi(context) prepare_grub_bios(context, partitions) normalize_mtime(context.root, context.config.source_date_epoch, directory=Path("boot")) normalize_mtime(context.root, context.config.source_date_epoch, directory=Path("efi")) partitions = make_disk(context, msg="Formatting ESP/XBOOTLDR partitions") install_grub_bios(context, partitions) if context.config.split_artifacts: make_disk(context, split=True, msg="Extracting partitions") copy_nspawn_settings(context) copy_vmlinuz(context) copy_initrd(context) if context.config.output_format == OutputFormat.tar: make_tar( context.root, context.staging / context.config.output_with_format, tools=context.config.tools(), # Make sure tar uses user/group information from the root directory instead of the host. sandbox=context.sandbox( options=["--ro-bind", context.root, context.root, *finalize_passwd_mounts(context.root)], ), ) elif context.config.output_format == OutputFormat.cpio: make_cpio( context.root, context.staging / context.config.output_with_format, tools=context.config.tools(), # Make sure cpio uses user/group information from the root directory instead of the host. sandbox=context.sandbox( options=["--ro-bind", context.root, context.root, *finalize_passwd_mounts(context.root)], ), ) elif context.config.output_format == OutputFormat.uki: assert stub and kver and kimg make_uki(context, stub, kver, kimg, context.staging / context.config.output_with_format) elif context.config.output_format == OutputFormat.esp: assert stub and kver and kimg make_uki(context, stub, kver, kimg, context.staging / context.config.output_split_uki) make_esp(context, context.staging / context.config.output_split_uki) elif context.config.output_format.is_extension_image(): make_extension_image(context, context.staging / context.config.output_with_format) elif context.config.output_format == OutputFormat.directory: context.root.rename(context.staging / context.config.output_with_format) if config.output_format not in (OutputFormat.uki, OutputFormat.esp): maybe_compress(context, context.config.compress_output, context.staging / context.config.output_with_format, context.staging / context.config.output_with_compression) calculate_sha256sum(context) calculate_signature(context) save_manifest(context, manifest) output_base = context.staging / context.config.output if not output_base.exists() or output_base.is_symlink(): output_base.unlink(missing_ok=True) output_base.symlink_to(context.config.output_with_compression) finalize_staging(context) print_output_size(config.output_dir_or_cwd() / config.output_with_compression) def setfacl(config: Config, root: Path, uid: int, allow: bool) -> None: run( [ "setfacl", "--physical", "--modify" if allow else "--remove", f"user:{uid}:rwx" if allow else f"user:{uid}", "-", ], # Supply files via stdin so we don't clutter --debug run output too much input="\n".join([str(root), *(os.fspath(p) for p in root.rglob("*") if p.is_dir())]), sandbox=config.sandbox(options=["--bind", root, root]), ) @contextlib.contextmanager def acl_maybe_toggle(config: 
Config, root: Path, uid: int, *, always: bool) -> Iterator[None]: if not config.acl: yield return # getfacl complains about absolute paths so make sure we pass a relative one. if root.exists(): sandbox = config.sandbox(options=["--bind", root, root, "--chdir", root]) has_acl = f"user:{uid}:rwx" in run(["getfacl", "-n", "."], sandbox=sandbox, stdout=subprocess.PIPE).stdout if not has_acl and not always: yield return else: has_acl = False try: if has_acl: with complete_step(f"Removing ACLs from {root}"): setfacl(config, root, uid, allow=False) yield finally: if has_acl or always: with complete_step(f"Adding ACLs to {root}"): setfacl(config, root, uid, allow=True) @contextlib.contextmanager def acl_toggle_build(config: Config, uid: int) -> Iterator[None]: if not config.acl: yield return extras = [t.source for t in config.extra_trees] skeletons = [t.source for t in config.skeleton_trees] with contextlib.ExitStack() as stack: for p in (*config.base_trees, *extras, *skeletons): if p and p.is_dir(): stack.enter_context(acl_maybe_toggle(config, p, uid, always=False)) for p in (config.cache_dir, config.build_dir): if p: stack.enter_context(acl_maybe_toggle(config, p, uid, always=True)) if config.output_format == OutputFormat.directory: stack.enter_context(acl_maybe_toggle(config, config.output_dir_or_cwd() / config.output, uid, always=True)) yield @contextlib.contextmanager def acl_toggle_boot(config: Config, uid: int) -> Iterator[None]: if not config.acl or config.output_format != OutputFormat.directory: yield return with acl_maybe_toggle(config, config.output_dir_or_cwd() / config.output, uid, always=False): yield def run_shell(args: Args, config: Config) -> None: opname = "acquire shell in" if args.verb == Verb.shell else "boot" if config.output_format in (OutputFormat.tar, OutputFormat.cpio): die(f"Sorry, can't {opname} a {config.output_format} archive.") if config.output_format.use_outer_compression() and config.compress_output: die(f"Sorry, can't {opname} a compressed image.") cmdline: list[PathString] = ["systemd-nspawn", "--quiet", "--link-journal=no"] # If we copied in a .nspawn file, make sure it's actually honoured if config.nspawn_settings: cmdline += ["--settings=trusted"] if args.verb == Verb.boot: cmdline += ["--boot"] else: cmdline += [ f"--rlimit=RLIMIT_CORE={format_rlimit(resource.RLIMIT_CORE)}", "--console=autopipe", ] # Underscores are not allowed in machine names so replace them with hyphens. name = config.name().replace("_", "-") cmdline += ["--machine", name] for k, v in config.credentials.items(): cmdline += [f"--set-credential={k}:{v}"] with contextlib.ExitStack() as stack: # Make sure the latest nspawn settings are always used. 
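# Aside (not mkosi code): the ACL toggling above grants the invoking user rwx on
# every directory of a root-owned tree so it stays browsable without sudo, then
# revokes the grant again afterwards. setfacl's "-" argument makes it read the
# file list from stdin, which is why the directory names are piped in via input=
# instead of being passed on argv. A standalone sketch of the same grant/revoke:
#
#     import os, subprocess
#     from pathlib import Path
#
#     def toggle_acl(root: Path, uid: int, allow: bool) -> None:
#         dirs = "\n".join([str(root), *(os.fspath(p) for p in root.rglob("*") if p.is_dir())])
#         spec = f"user:{uid}:rwx" if allow else f"user:{uid}"
#         subprocess.run(
#             ["setfacl", "--physical", "--modify" if allow else "--remove", spec, "-"],
#             input=dirs, text=True, check=True,
#         )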
if config.nspawn_settings: if not (config.output_dir_or_cwd() / f"{name}.nspawn").exists(): stack.callback(lambda: (config.output_dir_or_cwd() / f"{name}.nspawn").unlink(missing_ok=True)) shutil.copy2(config.nspawn_settings, config.output_dir_or_cwd() / f"{name}.nspawn") if config.ephemeral: fname = stack.enter_context(copy_ephemeral(config, config.output_dir_or_cwd() / config.output)) else: fname = config.output_dir_or_cwd() / config.output if config.output_format == OutputFormat.disk and args.verb == Verb.boot: run( [ "systemd-repart", "--image", fname, *([f"--size={config.runtime_size}"] if config.runtime_size else []), "--no-pager", "--dry-run=no", "--offline=no", fname, ], stdin=sys.stdin, env=config.environment, sandbox=config.sandbox(network=True, devices=True, options=["--bind", fname, fname]), ) if config.output_format == OutputFormat.directory: cmdline += ["--directory", fname] owner = os.stat(fname).st_uid if owner != 0: cmdline += [f"--private-users={str(owner)}"] else: cmdline += ["--image", fname] for tree in config.runtime_trees: target = Path("/root/src") / (tree.target or tree.source.name) # We add norbind because very often RuntimeTrees= will be used to mount the source directory into the # container and the output directory from which we're running will very likely be a subdirectory of the # source directory which would mean we'd be mounting the container root directory as a subdirectory in # itself which tends to lead to all kinds of weird issues, which we avoid by not doing a recursive mount # which means the container root directory mounts will be skipped. cmdline += ["--bind", f"{tree.source}:{target}:norbind,rootidmap"] if config.runtime_scratch == ConfigFeature.enabled or ( config.runtime_scratch == ConfigFeature.auto and config.output_format == OutputFormat.disk ): scratch = stack.enter_context(tempfile.TemporaryDirectory(dir="/var/tmp")) os.chmod(scratch, 0o1777) cmdline += ["--bind", f"{scratch}:/var/tmp"] if args.verb == Verb.boot: # Add nspawn options first since systemd-nspawn ignores all options after the first argument. cmdline += args.cmdline # kernel cmdline config of the form systemd.xxx= get interpreted by systemd when running in nspawn as # well. 
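# Aside (not mkosi code): credentials are handed to the container via
# systemd-nspawn's --set-credential=ID:VALUE, one option per credential, exactly
# as the loop over config.credentials above does. For example:
#
#     creds = {"firstboot.locale": "C.UTF-8", "passwd.shell.root": "/bin/bash"}
#     cmdline = ["systemd-nspawn", "--boot", "--image", "image.raw"]
#     for k, v in creds.items():
#         cmdline += [f"--set-credential={k}:{v}"]
#     # -> systemd-nspawn --boot --image image.raw \
#     #      --set-credential=firstboot.locale:C.UTF-8 --set-credential=passwd.shell.root:/bin/bash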
cmdline += config.kernel_command_line cmdline += config.kernel_command_line_extra elif args.cmdline: cmdline += ["--"] cmdline += args.cmdline run( cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ, log=False, sandbox=config.sandbox(devices=True, network=True, relaxed=True), ) def run_systemd_tool(tool: str, args: Args, config: Config) -> None: if config.output_format not in (OutputFormat.disk, OutputFormat.directory): die(f"{config.output_format} images cannot be inspected with {tool}") if ( args.verb in (Verb.journalctl, Verb.coredumpctl) and config.output_format == OutputFormat.disk and os.getuid() != 0 ): die(f"Must be root to run the {args.verb} command") if (tool_path := find_binary(tool, root=config.tools())) is None: die(f"Failed to find {tool}") if config.ephemeral: die(f"Images booted in ephemeral mode cannot be inspected with {tool}") image_arg_name = "root" if config.output_format == OutputFormat.directory else "image" run( [ tool_path, f"--{image_arg_name}={config.output_dir_or_cwd() / config.output}", *args.cmdline ], stdin=sys.stdin, stdout=sys.stdout, env=os.environ, log=False, preexec_fn=become_root, sandbox=config.sandbox(network=True, devices=config.output_format == OutputFormat.disk, relaxed=True), ) def run_journalctl(args: Args, config: Config) -> None: run_systemd_tool("journalctl", args, config) def run_coredumpctl(args: Args, config: Config) -> None: run_systemd_tool("coredumpctl", args, config) def run_serve(args: Args, config: Config) -> None: """Serve the output directory via a tiny HTTP server""" run([python_binary(config), "-m", "http.server", "8081"], stdin=sys.stdin, stdout=sys.stdout, sandbox=config.sandbox(network=True, relaxed=True, options=["--chdir", config.output_dir_or_cwd()])) def generate_key_cert_pair(args: Args) -> None: """Generate a private key and accompanying X509 certificate using openssl""" keylength = 2048 expiration_date = datetime.date.today() + datetime.timedelta(int(args.genkey_valid_days)) cn = expand_specifier(args.genkey_common_name) for f in ("mkosi.key", "mkosi.crt"): if Path(f).exists() and not args.force: die(f"{f} already exists", hint=("To generate new keys, first remove mkosi.key and mkosi.crt")) log_step(f"Generating keys rsa:{keylength} for CN {cn!r}.") logging.info( textwrap.dedent( f""" The keys will expire in {args.genkey_valid_days} days ({expiration_date:%A %d. %B %Y}). Remember to roll them over to new ones before then. """ ) ) run( [ "openssl", "req", "-new", "-x509", "-newkey", f"rsa:{keylength}", "-keyout", "mkosi.key", "-out", "mkosi.crt", "-days", str(args.genkey_valid_days), "-subj", f"/CN={cn}/", "-nodes" ], env=dict(OPENSSL_CONF="/dev/null"), ) def bump_image_version() -> None: """Write current image version plus one to mkosi.version""" version = Path("mkosi.version").read_text().strip() v = version.split(".") try: m = int(v[-1]) except ValueError: new_version = version + ".2" logging.info( "Last component of current version is not a decimal integer, " f"appending '.2', bumping '{version}' → '{new_version}'." 
) else: new_version = ".".join(v[:-1] + [str(m + 1)]) logging.info(f"Increasing last component of version by one, bumping '{version}' → '{new_version}'.") Path("mkosi.version").write_text(f"{new_version}\n") os.chown("mkosi.version", INVOKING_USER.uid, INVOKING_USER.gid) def show_docs(args: Args) -> None: if args.doc_format == DocFormat.auto: formats = [DocFormat.man, DocFormat.pandoc, DocFormat.markdown, DocFormat.system] else: formats = [args.doc_format] while formats: form = formats.pop(0) try: if form == DocFormat.man: with resource_path(mkosi.resources, "mkosi.1") as man: if not man.exists(): raise FileNotFoundError() run(["man", "--local-file", man]) return elif form == DocFormat.pandoc: if not find_binary("pandoc"): logging.error("pandoc is not available") with resource_path(mkosi.resources, "mkosi.md") as mdr: pandoc = run(["pandoc", "-t", "man", "-s", mdr], stdout=subprocess.PIPE) run(["man", "--local-file", "-"], input=pandoc.stdout) return elif form == DocFormat.markdown: with resource_path(mkosi.resources, "mkosi.md") as mdr: page(mdr.read_text(), args.pager) return elif form == DocFormat.system: run(["man", "mkosi"]) return except (FileNotFoundError, subprocess.CalledProcessError) as e: if not formats: if isinstance(e, FileNotFoundError): die("The mkosi package does not contain the man page.") raise e def expand_specifier(s: str) -> str: return s.replace("%u", INVOKING_USER.name()) @contextlib.contextmanager def prepend_to_environ_path(config: Config) -> Iterator[None]: if config.tools_tree or not config.extra_search_paths: yield return with tempfile.TemporaryDirectory(prefix="mkosi.path") as d: for path in config.extra_search_paths: if not path.is_dir(): (Path(d) / path.name).symlink_to(path.absolute()) news = [os.fspath(path) for path in [Path(d), *config.extra_search_paths] if path.is_dir()] olds = os.getenv("PATH", "").split(":") os.environ["PATH"] = ":".join(news + olds) try: yield finally: os.environ["PATH"] = ":".join(olds) @contextlib.contextmanager def finalize_default_tools(args: Args, config: Config) -> Iterator[Config]: if not config.tools_tree_distribution: die(f"{config.distribution} does not have a default tools tree distribution", hint="use ToolsTreeDistribution= to set one explicitly") cmdline = [ "--directory", "", "--distribution", str(config.tools_tree_distribution), *(["--release", config.tools_tree_release] if config.tools_tree_release else []), *(["--mirror", config.tools_tree_mirror] if config.tools_tree_mirror else []), "--repository-key-check", str(config.repository_key_check), "--cache-only", str(config.cache_only), *(["--output-dir", str(config.output_dir)] if config.output_dir else []), *(["--workspace-dir", str(config.workspace_dir)] if config.workspace_dir else []), *(["--cache-dir", str(config.cache_dir)] if config.cache_dir else []), "--incremental", str(config.incremental), "--acl", str(config.acl), *([f"--package={package}" for package in config.tools_tree_packages]), "--output", f"{config.tools_tree_distribution}-tools", *(["--source-date-epoch", str(config.source_date_epoch)] if config.source_date_epoch is not None else []), *([f"--environment={k}='{v}'" for k, v in config.environment.items()]), *([f"--extra-search-path={p}" for p in config.extra_search_paths]), *(["-f"] * args.force), ] with resource_path(mkosi.resources, "mkosi-tools") as r: _, [tools] = parse_config(cmdline + ["--include", os.fspath(r), "build"]) make_executable( *tools.prepare_scripts, *tools.postinst_scripts, *tools.finalize_scripts, *tools.build_scripts, ) tools = 
dataclasses.replace(tools, image=f"{config.tools_tree_distribution}-tools") yield tools def check_workspace_directory(config: Config) -> None: wd = config.workspace_dir_or_default() if wd.is_relative_to(Path.cwd()): die(f"The workspace directory ({wd}) cannot be located in the current working directory ({Path.cwd()})", hint="Use WorkspaceDirectory= to configure a different workspace directory") for tree in config.build_sources: if wd.is_relative_to(tree.source): die(f"The workspace directory ({wd}) cannot be a subdirectory of any source directory ({tree.source})", hint="Use WorkspaceDirectory= to configure a different workspace directory") def needs_clean(args: Args, config: Config) -> bool: return ( args.force > 0 or not (config.output_dir_or_cwd() / config.output_with_compression).exists() or # When the output is a directory, its name is the same as the symlink we create that points to the actual # output when not building a directory. So if the full output path exists, we have to check that it's not # a symlink as well. (config.output_dir_or_cwd() / config.output_with_compression).is_symlink() ) def run_clean(args: Args, config: Config) -> None: if not needs_clean(args, config): return become_root() # We remove any cached images if either the user used --force twice, or he/she called "clean" with it # passed once. Let's also remove the downloaded package cache if the user specified one additional # "--force". if args.verb == Verb.clean: remove_build_cache = args.force > 0 remove_package_cache = args.force > 1 else: remove_build_cache = args.force > 1 remove_package_cache = args.force > 2 if (outputs := list(config.output_dir_or_cwd().glob(f"{config.output}*"))): with complete_step(f"Removing output files of {config.name()} image…"): rmtree(*outputs) if remove_build_cache: if config.cache_dir: with complete_step(f"Removing cache entries of {config.name()} image…"): rmtree(*(p for p in cache_tree_paths(config) if p.exists())) if config.build_dir and config.build_dir.exists() and any(config.build_dir.iterdir()): with complete_step(f"Clearing out build directory of {config.name()} image…"): rmtree(*config.build_dir.iterdir()) if remove_package_cache and config.cache_dir and config.cache_dir.exists() and any(config.cache_dir.iterdir()): with complete_step(f"Clearing out package cache of {config.name()} image…"): rmtree( *( config.cache_dir / p / d for p in ("cache", "lib") for d in ("apt", "dnf", "libdnf5", "pacman", "zypp") ), ) def run_build(args: Args, config: Config) -> None: check_inputs(config) if (uid := os.getuid()) != 0: become_root() unshare(CLONE_NEWNS) if uid == 0: run(["mount", "--make-rslave", "/"]) # For extra safety when running as root, remount a bunch of stuff read-only. for d in ("/usr", "/etc", "/opt", "/boot", "/efi", "/media"): if Path(d).exists(): run(["mount", "--rbind", d, d, "--options", "ro"]) with ( complete_step(f"Building {config.name()} image"), prepend_to_environ_path(config), ): # After tools have been mounted, check if we have what we need check_tools(config, Verb.build) # Create these as the invoking user to make sure they're owned by the user running mkosi. 
for p in ( config.output_dir, config.cache_dir, config.build_dir, config.workspace_dir, ): if p: run(["mkdir", "--parents", p], user=INVOKING_USER.uid, group=INVOKING_USER.gid) with acl_toggle_build(config, INVOKING_USER.uid): build_image(args, config) def run_verb(args: Args, images: Sequence[Config]) -> None: images = list(images) if args.verb.needs_root() and os.getuid() != 0: die(f"Must be root to run the {args.verb} command") if args.verb == Verb.documentation: return show_docs(args) if args.verb == Verb.genkey: return generate_key_cert_pair(args) if all(config == Config.default() for config in images): die("No configuration found", hint="Make sure you're running mkosi from a directory with configuration files") if args.verb == Verb.bump: return bump_image_version() if args.verb == Verb.summary: if args.json: text = json.dumps( {"Images": [config.to_dict() for config in images]}, cls=JsonEncoder, indent=4, sort_keys=True ) else: text = "\n".join(summary(config) for config in images) page(text, args.pager) return for config in images: if not config.minimum_version or config.minimum_version <= __version__: continue die(f"mkosi {config.minimum_version} or newer is required to build this configuration (found {__version__})") for config in images: if not config.repart_offline and os.getuid() != 0: die(f"Must be root to build {config.name()} image configured with RepartOffline=no") for config in images: check_workspace_directory(config) for config in images: if args.verb == Verb.build and not args.force: check_outputs(config) # First, process all directory removals because otherwise if different images share directories a later # image build could end up deleting the output generated by an earlier image build. for config in images: if not args.verb.needs_build() and args.verb != Verb.clean: continue if config.tools_tree and config.tools_tree.name == "default": with finalize_default_tools(args, config) as tools: fork_and_wait(lambda: run_clean(args, tools)) # pyright: ignore fork_and_wait(lambda: run_clean(args, config)) if args.verb == Verb.clean: return build = False for i, config in enumerate(images): if not args.verb.needs_build(): continue with ( finalize_default_tools(args, config) if config.tools_tree and config.tools_tree.name == "default" else contextlib.nullcontext() as tools ): images[i] = config = dataclasses.replace( config, tools_tree=tools.output_dir_or_cwd() / tools.output if tools else config.tools_tree, ) if tools and not (tools.output_dir_or_cwd() / tools.output_with_compression).exists(): fork_and_wait(lambda: run_build(args, tools)) # pyright: ignore if (config.output_dir_or_cwd() / config.output_with_compression).exists(): continue fork_and_wait(lambda: run_build(args, config)) build = True if build and args.auto_bump: bump_image_version() if args.verb == Verb.build: return last = images[-1] with prepend_to_environ_path(last): check_tools(last, args.verb) with ( acl_toggle_boot(last, INVOKING_USER.uid) if args.verb in (Verb.shell, Verb.boot) else contextlib.nullcontext() ): { Verb.shell: run_shell, Verb.boot: run_shell, Verb.qemu: run_qemu, Verb.ssh: run_ssh, Verb.serve: run_serve, Verb.journalctl: run_journalctl, Verb.coredumpctl: run_coredumpctl, Verb.burn: run_burn, }[args.verb](args, last) mkosi-20.2/mkosi/__main__.py000066400000000000000000000020221455345632200160000ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ # PYTHON_ARGCOMPLETE_OK import faulthandler import signal import sys from types import FrameType from typing import Optional from mkosi 
import run_verb from mkosi.config import parse_config from mkosi.log import log_setup from mkosi.run import find_binary, run, uncaught_exception_handler from mkosi.util import INVOKING_USER def onsigterm(signal: int, frame: Optional[FrameType]) -> None: raise KeyboardInterrupt() @uncaught_exception_handler() def main() -> None: signal.signal(signal.SIGTERM, onsigterm) log_setup() # Ensure that the name and home of the user we are running as are resolved as early as possible. INVOKING_USER.init() args, images = parse_config(sys.argv[1:]) if args.debug: faulthandler.enable() try: run_verb(args, images) finally: if sys.stderr.isatty() and find_binary("tput"): run(["tput", "cnorm"], check=False) run(["tput", "smam"], check=False) if __name__ == "__main__": main() mkosi-20.2/mkosi/archive.py000066400000000000000000000073411455345632200157120ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import os from collections.abc import Iterable, Sequence from pathlib import Path from typing import Optional from mkosi.log import log_step from mkosi.run import find_binary, run from mkosi.types import PathString def tar_binary(*, tools: Path = Path("/")) -> str: # Some distros (Mandriva) install BSD tar as "tar", hence prefer # "gtar" if it exists, which should be GNU tar wherever it exists. # We are interested in exposing same behaviour everywhere hence # it's preferable to use the same implementation of tar # everywhere. In particular given the limited/different SELinux # support in BSD tar and the different command line syntax # compared to GNU tar. return "gtar" if find_binary("gtar", root=tools) else "tar" def cpio_binary(*, tools: Path = Path("/")) -> str: return "gcpio" if find_binary("gcpio", root=tools) else "cpio" def tar_exclude_apivfs_tmp() -> list[str]: return [ "--exclude", "./dev/*", "--exclude", "./proc/*", "--exclude", "./sys/*", "--exclude", "./tmp/*", "--exclude", "./run/*", "--exclude", "./var/tmp/*", ] def make_tar(src: Path, dst: Path, *, tools: Path = Path("/"), sandbox: Sequence[PathString] = ()) -> None: log_step(f"Creating tar archive {dst}…") with dst.open("wb") as f: run( [ tar_binary(tools=tools), "--create", "--file", "-", "--directory", src, "--acls", "--selinux", # --xattrs implies --format=pax "--xattrs", # PAX format emits additional headers for atime, ctime and mtime # that would make the archive non-reproducible. 
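# Aside (not mkosi code): the PAX options that follow strip the per-file
# atime/ctime/mtime extended headers, which otherwise embed high-resolution
# wall-clock times and make two tar runs over an identical tree differ. A quick
# way to check that an archiving command is byte-reproducible (assumes GNU tar):
#
#     import hashlib, subprocess
#
#     def tar_digest(directory: str) -> str:
#         out = subprocess.run(
#             ["tar", "--create", "--format=pax",
#              "--pax-option=delete=atime,delete=ctime,delete=mtime",
#              "--directory", directory, "."],
#             capture_output=True, check=True,
#         ).stdout
#         return hashlib.sha256(out).hexdigest()
#
#     # tar_digest(d) == tar_digest(d) should now hold across repeated runs.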
"--pax-option=delete=atime,delete=ctime,delete=mtime", "--sparse", "--force-local", *tar_exclude_apivfs_tmp(), ".", ], stdout=f, sandbox=sandbox, ) def extract_tar( src: Path, dst: Path, *, log: bool = True, tools: Path = Path("/"), sandbox: Sequence[PathString] = (), ) -> None: if log: log_step(f"Extracting tar archive {src}…") with src.open("rb") as f: run( [ tar_binary(tools=tools), "--extract", "--file", "-", "--directory", dst, "--keep-directory-symlink", "--no-overwrite-dir", "--same-permissions", "--same-owner" if (dst / "etc/passwd").exists() else "--numeric-owner", "--same-order", "--acls", "--selinux", "--xattrs", "--force-local", *tar_exclude_apivfs_tmp(), ], stdin=f, sandbox=sandbox, ) def make_cpio( src: Path, dst: Path, *, files: Optional[Iterable[Path]] = None, tools: Path = Path("/"), sandbox: Sequence[PathString] = (), ) -> None: if not files: files = src.rglob("*") files = sorted(files) log_step(f"Creating cpio archive {dst}…") with dst.open("wb") as f: run( [ cpio_binary(tools=tools), "--create", "--reproducible", "--null", "--format=newc", "--quiet", "--directory", src, ], input="\0".join(os.fspath(f.relative_to(src)) for f in files), stdout=f, # Make sure cpio uses user/group information from the root directory instead of the host. sandbox=sandbox, ) mkosi-20.2/mkosi/burn.py000066400000000000000000000020451455345632200152330ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import os import sys from mkosi.config import Args, Config, OutputFormat from mkosi.log import complete_step, die from mkosi.run import run def run_burn(args: Args, config: Config) -> None: if config.output_format not in (OutputFormat.disk, OutputFormat.esp): die(f"{config.output_format} images cannot be burned to disk") fname = config.output_dir_or_cwd() / config.output if len(args.cmdline) != 1: die("Expected device argument.") device = args.cmdline[0] cmd = [ "systemd-repart", "--no-pager", "--pretty=no", "--offline=yes", "--empty=force", "--dry-run=no", f"--copy-from={fname}", device, ] with complete_step("Burning 🔥🔥🔥 to medium…", "Burnt. 
🔥🔥🔥"): run( cmd, stdin=sys.stdin, stdout=sys.stdout, env=os.environ, log=False, sandbox=config.sandbox(devices=True, network=True, relaxed=True), ) mkosi-20.2/mkosi/config.py000066400000000000000000003603301455345632200155360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import argparse import base64 import contextlib import copy import dataclasses import enum import fnmatch import functools import graphlib import inspect import json import logging import math import operator import os.path import platform import re import shlex import shutil import string import subprocess import tempfile import textwrap import uuid from collections.abc import Collection, Iterable, Iterator, Sequence from pathlib import Path from typing import Any, Callable, Optional, TypeVar, Union, cast from mkosi.distributions import Distribution, detect_distribution from mkosi.log import ARG_DEBUG, ARG_DEBUG_SHELL, Style, die from mkosi.pager import page from mkosi.run import find_binary, run from mkosi.sandbox import sandbox_cmd from mkosi.types import PathString, SupportsRead from mkosi.util import INVOKING_USER, StrEnum, chdir, flatten, is_power_of_2 from mkosi.versioncomp import GenericVersion __version__ = "20.2" ConfigParseCallback = Callable[[Optional[str], Optional[Any]], Any] ConfigMatchCallback = Callable[[str, Any], bool] ConfigDefaultCallback = Callable[[argparse.Namespace], Any] class Verb(StrEnum): build = enum.auto() clean = enum.auto() summary = enum.auto() shell = enum.auto() boot = enum.auto() qemu = enum.auto() ssh = enum.auto() serve = enum.auto() bump = enum.auto() help = enum.auto() genkey = enum.auto() documentation = enum.auto() journalctl = enum.auto() coredumpctl = enum.auto() burn = enum.auto() def supports_cmdline(self) -> bool: return self in ( Verb.build, Verb.shell, Verb.boot, Verb.qemu, Verb.ssh, Verb.journalctl, Verb.coredumpctl, Verb.burn, ) def needs_build(self) -> bool: return self in ( Verb.build, Verb.shell, Verb.boot, Verb.qemu, Verb.serve, Verb.journalctl, Verb.coredumpctl, Verb.burn, ) def needs_root(self) -> bool: return self in (Verb.shell, Verb.boot, Verb.burn) def needs_credentials(self) -> bool: return self in (Verb.summary, Verb.qemu, Verb.boot, Verb.shell) class ConfigFeature(StrEnum): auto = enum.auto() enabled = enum.auto() disabled = enum.auto() @dataclasses.dataclass(frozen=True) class ConfigTree: source: Path target: Optional[Path] def with_prefix(self, prefix: Path = Path("/")) -> tuple[Path, Path]: return (self.source, prefix / os.fspath(self.target).lstrip("/") if self.target else prefix) @dataclasses.dataclass(frozen=True) class QemuDrive: id: str size: int directory: Optional[Path] options: Optional[str] # We use negative numbers for specifying special constants # for VSock CIDs since they're not valid CIDs anyway. 
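# Aside (not mkosi code): the "hash" constant below asks mkosi to derive a stable
# guest CID, so repeated boots of the same image get the same vsock address. The
# derivation here is only a sketch of the idea, not mkosi's actual algorithm:
# hash a name into the valid dynamic CID range (CIDs 0-2 are reserved for the
# hypervisor, loopback and host respectively).
#
#     import hashlib
#
#     def cid_from_name(name: str) -> int:
#         digest = int.from_bytes(hashlib.sha256(name.encode()).digest()[:4], "little")
#         return 3 + digest % (0xFFFFFFFF - 3)  # stay out of the reserved 0..2 range
#
#     assert cid_from_name("image") == cid_from_name("image") >= 3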
class QemuVsockCID(enum.IntEnum): auto = -1 hash = -2 @classmethod def format(cls, cid: int) -> str: if cid == QemuVsockCID.auto: return "auto" if cid == QemuVsockCID.hash: return "hash" return str(cid) class SecureBootSignTool(StrEnum): auto = enum.auto() sbsign = enum.auto() pesign = enum.auto() class OutputFormat(StrEnum): confext = enum.auto() cpio = enum.auto() directory = enum.auto() disk = enum.auto() esp = enum.auto() none = enum.auto() portable = enum.auto() sysext = enum.auto() tar = enum.auto() uki = enum.auto() def extension(self) -> str: return { OutputFormat.confext: ".raw", OutputFormat.cpio: ".cpio", OutputFormat.disk: ".raw", OutputFormat.esp: ".raw", OutputFormat.portable: ".raw", OutputFormat.sysext: ".raw", OutputFormat.tar: ".tar", OutputFormat.uki: ".efi", }.get(self, "") def use_outer_compression(self) -> bool: return self in (OutputFormat.tar, OutputFormat.cpio, OutputFormat.disk) or self.is_extension_image() def is_extension_image(self) -> bool: return self in (OutputFormat.sysext, OutputFormat.confext, OutputFormat.portable) class ManifestFormat(StrEnum): json = enum.auto() # the standard manifest in json format changelog = enum.auto() # human-readable text file with package changelogs class Compression(StrEnum): none = enum.auto() zstd = enum.auto() zst = "zstd" xz = enum.auto() bz2 = enum.auto() gz = enum.auto() lz4 = enum.auto() lzma = enum.auto() def __bool__(self) -> bool: return self != Compression.none def extension(self) -> str: return { Compression.zstd: ".zst" }.get(self, f".{self}") class DocFormat(StrEnum): auto = enum.auto() markdown = enum.auto() man = enum.auto() pandoc = enum.auto() system = enum.auto() class Bootloader(StrEnum): none = enum.auto() uki = enum.auto() systemd_boot = enum.auto() grub = enum.auto() class BiosBootloader(StrEnum): none = enum.auto() grub = enum.auto() class ShimBootloader(StrEnum): none = enum.auto() signed = enum.auto() unsigned = enum.auto() class QemuFirmware(StrEnum): auto = enum.auto() linux = enum.auto() uefi = enum.auto() bios = enum.auto() class Architecture(StrEnum): alpha = enum.auto() arc = enum.auto() arm = enum.auto() arm64 = enum.auto() ia64 = enum.auto() loongarch64 = enum.auto() mips_le = enum.auto() mips64_le = enum.auto() parisc = enum.auto() ppc = enum.auto() ppc64 = enum.auto() ppc64_le = enum.auto() riscv32 = enum.auto() riscv64 = enum.auto() s390 = enum.auto() s390x = enum.auto() tilegx = enum.auto() x86 = enum.auto() x86_64 = enum.auto() @staticmethod def from_uname(s: str) -> "Architecture": a = { "aarch64" : Architecture.arm64, "aarch64_be" : Architecture.arm64, "armv8l" : Architecture.arm, "armv8b" : Architecture.arm, "armv7ml" : Architecture.arm, "armv7mb" : Architecture.arm, "armv7l" : Architecture.arm, "armv7b" : Architecture.arm, "armv6l" : Architecture.arm, "armv6b" : Architecture.arm, "armv5tl" : Architecture.arm, "armv5tel" : Architecture.arm, "armv5tejl" : Architecture.arm, "armv5tejb" : Architecture.arm, "armv5teb" : Architecture.arm, "armv5tb" : Architecture.arm, "armv4tl" : Architecture.arm, "armv4tb" : Architecture.arm, "armv4l" : Architecture.arm, "armv4b" : Architecture.arm, "alpha" : Architecture.alpha, "arc" : Architecture.arc, "arceb" : Architecture.arc, "x86_64" : Architecture.x86_64, "i686" : Architecture.x86, "i586" : Architecture.x86, "i486" : Architecture.x86, "i386" : Architecture.x86, "ia64" : Architecture.ia64, "parisc64" : Architecture.parisc, "parisc" : Architecture.parisc, "loongarch64" : Architecture.loongarch64, "mips64" : Architecture.mips64_le, "mips" : 
Architecture.mips_le,
            "ppc64le" : Architecture.ppc64_le,
            "ppc64" : Architecture.ppc64,
            "ppc" : Architecture.ppc,
            "riscv64" : Architecture.riscv64,
            "riscv32" : Architecture.riscv32,
            "riscv" : Architecture.riscv64,
            "s390x" : Architecture.s390x,
            "s390" : Architecture.s390,
            "tilegx" : Architecture.tilegx,
        }.get(s)

        if not a:
            die(f"Architecture {s} is not supported")

        return a

    def to_efi(self) -> Optional[str]:
        return {
            Architecture.x86_64 : "x64",
            Architecture.x86 : "ia32",
            Architecture.arm64 : "aa64",
            Architecture.arm : "arm",
            Architecture.riscv64 : "riscv64",
            Architecture.loongarch64 : "loongarch64",
        }.get(self)

    def to_qemu(self) -> str:
        a = {
            Architecture.alpha : "alpha",
            Architecture.arm : "arm",
            Architecture.arm64 : "aarch64",
            Architecture.loongarch64 : "loongarch64",
            Architecture.mips64_le : "mips",
            Architecture.mips_le : "mips",
            Architecture.parisc : "hppa",
            Architecture.ppc : "ppc",
            Architecture.ppc64 : "ppc64",
            Architecture.ppc64_le : "ppc64",
            Architecture.riscv32 : "riscv32",
            Architecture.riscv64 : "riscv64",
            Architecture.s390x : "s390x",
            Architecture.x86 : "i386",
            Architecture.x86_64 : "x86_64",
        }.get(self)

        if not a:
            die(f"Architecture {self} not supported by QEMU")

        return a

    def default_serial_tty(self) -> str:
        return {
            Architecture.arm : "ttyAMA0",
            Architecture.arm64 : "ttyAMA0",
            Architecture.s390 : "ttysclp0",
            Architecture.s390x : "ttysclp0",
            Architecture.ppc : "hvc0",
            Architecture.ppc64 : "hvc0",
            Architecture.ppc64_le : "hvc0",
        }.get(self, "ttyS0")

    def supports_smbios(self, firmware: QemuFirmware) -> bool:
        if self in (Architecture.x86, Architecture.x86_64):
            return True

        return self in (Architecture.arm, Architecture.arm64) and firmware == QemuFirmware.uefi

    def supports_fw_cfg(self) -> bool:
        return self in (Architecture.x86, Architecture.x86_64, Architecture.arm, Architecture.arm64)

    def supports_smm(self) -> bool:
        return self in (Architecture.x86, Architecture.x86_64)

    def default_qemu_machine(self) -> str:
        m = {
            Architecture.x86 : "q35",
            Architecture.x86_64 : "q35",
            Architecture.arm : "virt",
            Architecture.arm64 : "virt",
            Architecture.s390 : "s390-ccw-virtio",
            Architecture.s390x : "s390-ccw-virtio",
            Architecture.ppc : "pseries",
            Architecture.ppc64 : "pseries",
            Architecture.ppc64_le : "pseries",
        }

        if self not in m:
            die(f"No qemu machine defined for architecture {self}")

        return m[self]

    def default_qemu_nic_model(self) -> str:
        return {
            Architecture.s390 : "virtio",
            Architecture.s390x : "virtio",
        }.get(self, "virtio-net-pci")

    def is_native(self) -> bool:
        return self == self.native()

    @classmethod
    def native(cls) -> "Architecture":
        return cls.from_uname(platform.machine())


def parse_boolean(s: str) -> bool:
    "Parse 1/true/yes/y/t/on as true and 0/false/no/n/f/off/None as false"
    s_l = s.lower()
    if s_l in {"1", "true", "yes", "y", "t", "on", "always"}:
        return True

    if s_l in {"0", "false", "no", "n", "f", "off", "never"}:
        return False

    die(f"Invalid boolean literal: {s!r}")


def parse_path(value: str,
               *,
               required: bool = True,
               resolve: bool = True,
               expanduser: bool = True,
               expandvars: bool = True,
               secret: bool = False,
               absolute: bool = False) -> Path:
    if expandvars:
        value = os.path.expandvars(value)

    path = Path(value)

    if expanduser:
        if path.is_relative_to("~") and not INVOKING_USER.is_running_user():
            path = INVOKING_USER.home() / path.relative_to("~")
        path = path.expanduser()

    if required and not path.exists():
        die(f"{value} does not exist")

    if absolute and not path.is_absolute():
        die(f"{value} must be an absolute path")

    if resolve:
        path = path.resolve()

    if secret and path.exists():
        mode =
path.stat().st_mode & 0o777 if mode & 0o007: die(textwrap.dedent(f"""\ Permissions of '{path}' of '{mode:04o}' are too open. When creating secret files use an access mode that restricts access to the owner only. """)) return path def make_tree_parser(absolute: bool = True) -> Callable[[str], ConfigTree]: def parse_tree(value: str) -> ConfigTree: src, sep, tgt = value.partition(':') return ConfigTree( source=parse_path(src, required=False), target=parse_path( tgt, required=False, resolve=False, expanduser=False, absolute=absolute, ) if sep else None, ) return parse_tree def config_match_build_sources(match: str, value: list[ConfigTree]) -> bool: return Path(match.lstrip("/")) in [tree.target for tree in value if tree.target] def config_parse_string(value: Optional[str], old: Optional[str]) -> Optional[str]: return value or None def config_make_string_matcher(allow_globs: bool = False) -> ConfigMatchCallback: def config_match_string(match: str, value: str) -> bool: if allow_globs: return fnmatch.fnmatchcase(value, match) else: return match == value return config_match_string def config_parse_boolean(value: Optional[str], old: Optional[bool]) -> Optional[bool]: if value is None: return False if not value: return None return parse_boolean(value) def parse_feature(value: str) -> ConfigFeature: if value == ConfigFeature.auto.name: return ConfigFeature.auto return ConfigFeature.enabled if parse_boolean(value) else ConfigFeature.disabled def config_parse_feature(value: Optional[str], old: Optional[ConfigFeature]) -> Optional[ConfigFeature]: if value is None: return ConfigFeature.auto if not value: return None return parse_feature(value) def config_match_feature(match: str, value: ConfigFeature) -> bool: return value == parse_feature(match) def config_parse_compression(value: Optional[str], old: Optional[Compression]) -> Optional[Compression]: if not value: return None try: return Compression[value] except KeyError: return Compression.zstd if parse_boolean(value) else Compression.none def config_parse_seed(value: Optional[str], old: Optional[str]) -> Optional[uuid.UUID]: if not value or value == "random": return None try: return uuid.UUID(value) except ValueError: die(f"{value} is not a valid UUID") def config_parse_source_date_epoch(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None try: timestamp = int(value) except ValueError: raise ValueError(f"{value} is not a valid timestamp") if timestamp < 0: raise ValueError(f"{value} is negative") return timestamp def config_default_compression(namespace: argparse.Namespace) -> Compression: if namespace.output_format in (OutputFormat.tar, OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp): if namespace.distribution.is_centos_variant() and int(namespace.release) <= 8: return Compression.xz else: return Compression.zstd else: return Compression.none def config_default_output(namespace: argparse.Namespace) -> str: output = namespace.image_id or namespace.image or "image" if namespace.image_version: output += f"_{namespace.image_version}" return output def config_default_distribution(namespace: argparse.Namespace) -> Distribution: detected = detect_distribution()[0] if not detected: logging.info( "Distribution of your host can't be detected or isn't a supported target. " "Defaulting to Distribution=custom." ) return Distribution.custom return detected def config_default_release(namespace: argparse.Namespace) -> str: # If the configured distribution matches the host distribution, use the same release as the host. 
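# Aside (not mkosi code): config_match_version() above peels an optional
# comparison sigil off the match expression and defaults to equality. Usage,
# assuming systemd reports version 255:
#
#     config_match_version(">=254", "255")  # -> True
#     config_match_version("<254",  "255")  # -> False
#     config_match_version("255",   "255")  # -> True  (bare value means "==")
#
# Note the sigil table is ordered so that the two-character operators ("==",
# "!=", "<=", ">=") are tried before the one-character ones; otherwise ">=254"
# would be misparsed as ">" applied to "=254".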
hd, hr = detect_distribution() if namespace.distribution == hd and hr is not None: return hr return cast(str, namespace.distribution.default_release()) def config_default_source_date_epoch(namespace: argparse.Namespace) -> Optional[int]: for env in namespace.environment: if env.startswith("SOURCE_DATE_EPOCH="): return config_parse_source_date_epoch(env.removeprefix("SOURCE_DATE_EPOCH="), None) return config_parse_source_date_epoch(os.environ.get("SOURCE_DATE_EPOCH"), None) def config_default_kernel_command_line(namespace: argparse.Namespace) -> list[str]: return [f"console={namespace.architecture.default_serial_tty()}"] def make_enum_parser(type: type[enum.Enum]) -> Callable[[str], enum.Enum]: def parse_enum(value: str) -> enum.Enum: try: return type(value) except ValueError: die(f"'{value}' is not a valid {type.__name__}") return parse_enum def config_make_enum_parser(type: type[enum.Enum]) -> ConfigParseCallback: def config_parse_enum(value: Optional[str], old: Optional[enum.Enum]) -> Optional[enum.Enum]: return make_enum_parser(type)(value) if value else None return config_parse_enum def config_make_enum_matcher(type: type[enum.Enum]) -> ConfigMatchCallback: def config_match_enum(match: str, value: enum.Enum) -> bool: return make_enum_parser(type)(match) == value return config_match_enum def config_make_list_parser(delimiter: str, *, parse: Callable[[str], Any] = str, unescape: bool = False, reset: bool = True) -> ConfigParseCallback: def config_parse_list(value: Optional[str], old: Optional[list[Any]]) -> Optional[list[Any]]: new = old.copy() if old else [] if value is None: return [] if unescape: lex = shlex.shlex(value, posix=True) lex.whitespace_split = True lex.whitespace = f"\n{delimiter}" lex.commenters = "" values = list(lex) else: values = value.replace(delimiter, "\n").split("\n") # Empty strings reset the list. if reset and len(values) == 1 and values[0] == "": return [] return new + [parse(v) for v in values if v] return config_parse_list def config_match_version(match: str, value: str) -> bool: version = GenericVersion(value) for sigil, opfunc in { "==": operator.eq, "!=": operator.ne, "<=": operator.le, ">=": operator.ge, ">": operator.gt, "<": operator.lt, }.items(): if match.startswith(sigil): op = opfunc comp_version = GenericVersion(match[len(sigil):]) break else: # default to equality if no operation is specified op = operator.eq comp_version = GenericVersion(match) # all constraints must be fulfilled if not op(version, comp_version): return False return True def config_make_dict_parser(delimiter: str, *, parse: Callable[[str], tuple[str, Any]], unescape: bool = False, reset: bool = True) -> ConfigParseCallback: def config_parse_dict(value: Optional[str], old: Optional[dict[str, Any]]) -> Optional[dict[str, Any]]: new = old.copy() if old else {} if value is None: return {} if unescape: lex = shlex.shlex(value, posix=True) lex.whitespace_split = True lex.whitespace = f"\n{delimiter}" lex.commenters = "" values = list(lex) else: values = value.replace(delimiter, "\n").split("\n") # Empty strings reset the dict. 
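# Illustrative example of the reset behaviour (hypothetical configuration):
#
#   Environment=FOO=bar
#   Environment=
#   Environment=BAZ=qux
#
# The empty assignment discards FOO, so the final dict is {"BAZ": "qux"},
# mirroring the empty-string reset of config_make_list_parser() above.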
if reset and len(values) == 1 and values[0] == "": return {} return new | dict(parse(v) for v in values if v) return config_parse_dict def parse_environment(value: str) -> tuple[str, str]: key, sep, value = value.partition("=") key, value = key.strip(), value.strip() value = value if sep else os.getenv(key, "") return (key, value) def parse_credential(value: str) -> tuple[str, str]: key, _, value = value.partition("=") key, value = key.strip(), value.strip() return (key, value) def make_path_parser(*, required: bool = True, resolve: bool = True, expanduser: bool = True, expandvars: bool = True, secret: bool = False) -> Callable[[str], Path]: return functools.partial( parse_path, required=required, resolve=resolve, expanduser=expanduser, expandvars=expandvars, secret=secret, ) def config_make_path_parser(*, required: bool = True, resolve: bool = True, expanduser: bool = True, expandvars: bool = True, secret: bool = False) -> ConfigParseCallback: def config_parse_path(value: Optional[str], old: Optional[Path]) -> Optional[Path]: if not value: return None return parse_path( value, required=required, resolve=resolve, expanduser=expanduser, expandvars=expandvars, secret=secret, ) return config_parse_path def is_valid_filename(s: str) -> bool: s = s.strip() return not (s == "." or s == ".." or "/" in s) def config_parse_output(value: Optional[str], old: Optional[str]) -> Optional[str]: if not value: return None if not is_valid_filename(value): die(f"{value!r} is not a valid filename.", hint="Output= or --output= requires a filename with no path components. " "Use OutputDirectory= or --output-dir= to configure the output directory.") return value def match_path_exists(value: str) -> bool: if not value: return False return Path(value).exists() def config_parse_root_password(value: Optional[str], old: Optional[tuple[str, bool]]) -> Optional[tuple[str, bool]]: if not value: return None value = value.strip() hashed = value.startswith("hashed:") value = value.removeprefix("hashed:") return (value, hashed) def match_systemd_version(value: str) -> bool: if not value: return False version = run(["systemctl", "--version"], stdout=subprocess.PIPE).stdout.strip().split()[1] return config_match_version(value, version) def match_host_architecture(value: str) -> bool: return Architecture(value) == Architecture.native() def parse_bytes(value: str) -> int: if value.endswith("G"): factor = 1024**3 elif value.endswith("M"): factor = 1024**2 elif value.endswith("K"): factor = 1024 else: factor = 1 if factor > 1: value = value[:-1] result = math.ceil(float(value) * factor) if result <= 0: die("Size out of range") rem = result % 4096 if rem != 0: result += 4096 - rem return result def config_parse_bytes(value: Optional[str], old: Optional[int] = None) -> Optional[int]: if not value: return None return parse_bytes(value) def config_parse_profile(value: Optional[str], old: Optional[str] = None) -> Optional[str]: if not value: return None if not is_valid_filename(value): die(f"{value!r} is not a valid profile", hint="Profile= or --profile= requires a name with no path components.") return value def parse_drive(value: str) -> QemuDrive: parts = value.split(":", maxsplit=3) if not parts or not parts[0]: die(f"No ID specified for drive '{value}'") if len(parts) < 2: die(f"Missing size in drive '{value}'") if len(parts) > 4: die(f"Too many components in drive '{value}'") id = parts[0] if not is_valid_filename(id): die(f"Unsupported path character in drive id '{id}'") size = parse_bytes(parts[1]) directory =
parse_path(parts[2]) if len(parts) > 2 and parts[2] else None options = parts[3] if len(parts) > 3 and parts[3] else None return QemuDrive(id=id, size=size, directory=directory, options=options) def config_parse_sector_size(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None try: size = int(value) except ValueError: die(f"'{value}' is not a valid number") if size < 512 or size > 4096: die(f"Sector size not between 512 and 4096: {size}") if not is_power_of_2(size): die(f"Sector size not power of 2: {size}") return size def config_parse_vsock_cid(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None if value == "auto": return QemuVsockCID.auto if value == "hash": return QemuVsockCID.hash try: cid = int(value) except ValueError: die(f"VSock connection ID '{value}' is not a valid number or one of 'auto' or 'hash'") if cid not in range(3, 0xFFFFFFFF): die(f"{cid} is not in the valid VSock connection ID range [3, 0xFFFFFFFF)") return cid def config_parse_minimum_version(value: Optional[str], old: Optional[GenericVersion]) -> Optional[GenericVersion]: if not value: return old new = GenericVersion(value) if not old: return new return max(old, new) @dataclasses.dataclass(frozen=True) class ConfigSetting: dest: str section: str parse: ConfigParseCallback = config_parse_string match: Optional[ConfigMatchCallback] = None name: str = "" default: Any = None default_factory: Optional[ConfigDefaultCallback] = None default_factory_depends: tuple[str, ...] = tuple() paths: tuple[str, ...] = () path_read_text: bool = False path_secret: bool = False path_default: bool = True specifier: str = "" # settings for argparse short: Optional[str] = None long: str = "" choices: Optional[Any] = None metavar: Optional[str] = None nargs: Optional[str] = None const: Optional[Any] = None help: Optional[str] = None # backward compatibility compat_names: tuple[str, ...] = () def __post_init__(self) -> None: if not self.name: object.__setattr__(self, 'name', ''.join(x.capitalize() for x in self.dest.split('_') if x)) if not self.long: object.__setattr__(self, "long", f"--{self.dest.replace('_', '-')}") @dataclasses.dataclass(frozen=True) class Match: name: str match: Callable[[str], bool] class CustomHelpFormatter(argparse.HelpFormatter): def _format_action_invocation(self, action: argparse.Action) -> str: if not action.option_strings or action.nargs == 0: return super()._format_action_invocation(action) default = self._get_default_metavar_for_optional(action) args_string = self._format_args(action, default) return ", ".join(action.option_strings) + " " + args_string def _split_lines(self, text: str, width: int) -> list[str]: """Wraps text to width, each line separately. If the first line of text ends in a colon, we assume that this is a list of option descriptions, and subindent them. Otherwise, the text is wrapped without indentation. 
""" lines = text.splitlines() subindent = ' ' if lines[0].endswith(':') else '' return flatten(textwrap.wrap(line, width, break_long_words=False, break_on_hyphens=False, subsequent_indent=subindent) for line in lines) def parse_chdir(path: str) -> Optional[Path]: if not path: # The current directory should be ignored return None # Immediately change the current directory so that it's taken into # account when parsing the following options that take a relative path try: os.chdir(path) except (FileNotFoundError, NotADirectoryError): die(f"{path} is not a directory!") except OSError as e: die(f"Cannot change the directory to {path}: {e}") # Keep track of the current directory return Path.cwd() class IgnoreAction(argparse.Action): """Argparse action for deprecated options that can be ignored.""" def __init__( self, option_strings: Sequence[str], dest: str, nargs: Union[int, str, None] = None, default: Any = argparse.SUPPRESS, help: Optional[str] = argparse.SUPPRESS, ) -> None: super().__init__(option_strings, dest, nargs=nargs, default=default, help=help) def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], option_string: Optional[str] = None ) -> None: logging.warning(f"{option_string} is no longer supported") class PagerHelpAction(argparse._HelpAction): def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None] = None, option_string: Optional[str] = None ) -> None: page(parser.format_help(), namespace.pager) parser.exit() @dataclasses.dataclass(frozen=True) class Args: verb: Verb cmdline: list[str] force: int directory: Optional[Path] debug: bool debug_shell: bool debug_workspace: bool pager: bool genkey_valid_days: str genkey_common_name: str auto_bump: bool doc_format: DocFormat json: bool @classmethod def default(cls) -> "Args": """Alternative constructor to generate an all-default MkosiArgs. This prevents MkosiArgs being generated with defaults values implicitly. 
""" with tempfile.TemporaryDirectory() as tempdir: with chdir(tempdir): args, _ = parse_config([]) return args @classmethod def from_namespace(cls, ns: argparse.Namespace) -> "Args": return cls(**{ k: v for k, v in vars(ns).items() if k in inspect.signature(cls).parameters }) def to_dict(self) -> dict[str, Any]: def key_transformer(k: str) -> str: return "".join(p.capitalize() for p in k.split("_")) return {key_transformer(k): v for k, v in dataclasses.asdict(self).items()} def to_json(self, *, indent: Optional[int] = 4, sort_keys: bool = True) -> str: """Dump MkosiArgs as JSON string.""" return json.dumps(self.to_dict(), cls=JsonEncoder, indent=indent, sort_keys=sort_keys) @classmethod def _load_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> dict[str, Any]: """Load JSON and transform it into a dictionary suitable compatible with instantiating a MkosiArgs object.""" if isinstance(s, str): j = json.loads(s) elif isinstance(s, dict): j = s elif hasattr(s, "read"): j = json.load(s) else: raise ValueError(f"{cls.__name__} can only be constructed from JSON from strings, dictionaries and files.") value_transformer = json_type_transformer(cls) def key_transformer(k: str) -> str: return "_".join(part.lower() for part in FALLBACK_NAME_TO_DEST_SPLITTER.split(k)) return {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()} @classmethod def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Args": """Instantiate a MkosiArgs object from a full JSON dump.""" j = cls._load_json(s) return cls(**j) @classmethod def from_partial_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Args": """Return a new MkosiArgs with defaults overwritten by the attributes from passed in JSON.""" j = cls._load_json(s) return dataclasses.replace(cls.default(), **j) @dataclasses.dataclass(frozen=True) class Config: """Type-hinted storage for command line arguments. Only user configuration is stored here while dynamic state exists in Mkosicontext. If a field of the same name exists in both classes always access the value from context. """ profile: Optional[str] include: list[Path] initrd_include: list[Path] images: tuple[str, ...] dependencies: tuple[str, ...] 
minimum_version: Optional[GenericVersion] distribution: Distribution release: str architecture: Architecture mirror: Optional[str] local_mirror: Optional[str] repository_key_check: bool repositories: list[str] cache_only: bool package_manager_trees: list[ConfigTree] output_format: OutputFormat manifest_format: list[ManifestFormat] output: str compress_output: Compression output_dir: Optional[Path] workspace_dir: Optional[Path] cache_dir: Optional[Path] build_dir: Optional[Path] image_id: Optional[str] image_version: Optional[str] split_artifacts: bool repart_dirs: list[Path] sector_size: Optional[int] repart_offline: bool overlay: bool use_subvolumes: ConfigFeature seed: Optional[uuid.UUID] packages: list[str] build_packages: list[str] package_directories: list[Path] with_recommends: bool with_docs: bool base_trees: list[Path] skeleton_trees: list[ConfigTree] extra_trees: list[ConfigTree] remove_packages: list[str] remove_files: list[str] clean_package_metadata: ConfigFeature source_date_epoch: Optional[int] prepare_scripts: list[Path] build_scripts: list[Path] postinst_scripts: list[Path] finalize_scripts: list[Path] build_sources: list[ConfigTree] build_sources_ephemeral: bool environment: dict[str, str] environment_files: list[Path] with_tests: bool with_network: bool bootable: ConfigFeature bootloader: Bootloader bios_bootloader: BiosBootloader shim_bootloader: ShimBootloader initrds: list[Path] initrd_packages: list[str] kernel_command_line: list[str] kernel_modules_include: list[str] kernel_modules_exclude: list[str] kernel_modules_include_host: bool kernel_modules_initrd: bool kernel_modules_initrd_include: list[str] kernel_modules_initrd_exclude: list[str] kernel_modules_initrd_include_host: bool locale: Optional[str] locale_messages: Optional[str] keymap: Optional[str] timezone: Optional[str] hostname: Optional[str] root_password: Optional[tuple[str, bool]] root_shell: Optional[str] autologin: bool make_initrd: bool ssh: bool selinux_relabel: ConfigFeature secure_boot: bool secure_boot_auto_enroll: bool secure_boot_key: Optional[Path] secure_boot_certificate: Optional[Path] secure_boot_sign_tool: SecureBootSignTool verity_key: Optional[Path] verity_certificate: Optional[Path] sign_expected_pcr: ConfigFeature passphrase: Optional[Path] checksum: bool sign: bool key: Optional[str] incremental: bool nspawn_settings: Optional[Path] extra_search_paths: list[Path] ephemeral: bool credentials: dict[str, str] kernel_command_line_extra: list[str] acl: bool tools_tree: Optional[Path] tools_tree_distribution: Optional[Distribution] tools_tree_release: Optional[str] tools_tree_mirror: Optional[str] tools_tree_packages: list[str] runtime_trees: list[ConfigTree] runtime_size: Optional[int] runtime_scratch: ConfigFeature ssh_key: Optional[Path] ssh_certificate: Optional[Path] # QEMU-specific options qemu_gui: bool qemu_smp: str qemu_mem: str qemu_kvm: ConfigFeature qemu_vsock: ConfigFeature qemu_vsock_cid: int qemu_swtpm: ConfigFeature qemu_cdrom: bool qemu_firmware: QemuFirmware qemu_firmware_variables: Optional[Path] qemu_kernel: Optional[Path] qemu_drives: list[QemuDrive] qemu_args: list[str] image: Optional[str] def name(self) -> str: return self.image_id or self.image or "default" def output_dir_or_cwd(self) -> Path: return self.output_dir or Path.cwd() def workspace_dir_or_default(self) -> Path: if self.workspace_dir: return self.workspace_dir if (cache := os.getenv("XDG_CACHE_HOME")) and Path(cache).exists(): return Path(cache) # If we're running from /home and there's a cache or output 
directory in /home, we want to use a workspace # directory in /home as well, as /home might be on a separate partition or subvolume, which means that to take # advantage of reflinks and such, the workspace directory has to be on the same partition/subvolume. if ( Path.cwd().is_relative_to(INVOKING_USER.home()) and (INVOKING_USER.home() / ".cache").exists() and ( self.cache_dir and self.cache_dir.is_relative_to(INVOKING_USER.home()) or self.output_dir and self.output_dir.is_relative_to(INVOKING_USER.home()) ) ): return INVOKING_USER.home() / ".cache" return Path("/var/tmp") def tools(self) -> Path: return self.tools_tree or Path("/") @classmethod def default(cls) -> "Config": """Alternative constructor to generate an all-default Config object. This prevents Config being generated with default values implicitly. """ with chdir("/"): _, [config] = parse_config([]) return config @classmethod def from_namespace(cls, ns: argparse.Namespace) -> "Config": return cls(**{ k: v for k, v in vars(ns).items() if k in inspect.signature(cls).parameters }) @property def output_with_format(self) -> str: return self.output + self.output_format.extension() @property def output_with_compression(self) -> str: output = self.output_with_format if self.compress_output and self.output_format.use_outer_compression(): output += self.compress_output.extension() return output @property def output_split_uki(self) -> str: return f"{self.output}.efi" @property def output_split_kernel(self) -> str: return f"{self.output}.vmlinuz" @property def output_split_initrd(self) -> str: return f"{self.output}.initrd" @property def output_nspawn_settings(self) -> str: return f"{self.output}.nspawn" @property def output_checksum(self) -> str: return f"{self.output}.SHA256SUMS" @property def output_signature(self) -> str: return f"{self.output}.SHA256SUMS.gpg" @property def output_manifest(self) -> str: return f"{self.output}.manifest" @property def output_changelog(self) -> str: return f"{self.output}.changelog" def cache_manifest(self) -> dict[str, Any]: return { "distribution": self.distribution, "release": self.release, "mirror": self.mirror, "architecture": self.architecture, "packages": self.packages, "build_packages": self.build_packages, "repositories": self.repositories, "overlay": self.overlay, "prepare_scripts": [ base64.b64encode(script.read_bytes()).decode() for script in self.prepare_scripts ], # We don't use the full path here since tests will often use temporary directories for the output directory # which would trigger a rebuild every time.
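# In other words, only the basename of ToolsTree= is recorded: hypothetical
# paths /tmp/tmpabc123/tools and /var/cache/tools both contribute just
# "tools", so relocating the tree does not invalidate cached images.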
"tools_tree": self.tools_tree.name if self.tools_tree else None, "tools_tree_distribution": self.tools_tree_distribution, "tools_tree_release": self.tools_tree_release, "tools_tree_mirror": self.tools_tree_mirror, "tools_tree_packages": self.tools_tree_packages, } def to_dict(self) -> dict[str, Any]: def key_transformer(k: str) -> str: if (s := SETTINGS_LOOKUP_BY_DEST.get(k)) is not None: return s.name return "".join(p.capitalize() for p in k.split("_")) return {key_transformer(k): v for k, v in dataclasses.asdict(self).items()} def to_json(self, *, indent: Optional[int] = 4, sort_keys: bool = True) -> str: """Dump MkosiConfig as JSON string.""" return json.dumps(self.to_dict(), cls=JsonEncoder, indent=indent, sort_keys=sort_keys) @classmethod def _load_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> dict[str, Any]: """Load JSON and transform it into a dictionary suitable compatible with instantiating a MkosiConfig object.""" if isinstance(s, str): j = json.loads(s) elif isinstance(s, dict): j = s elif hasattr(s, "read"): j = json.load(s) else: raise ValueError(f"{cls.__name__} can only be constructed from JSON from strings, dictionaries and files.") value_transformer = json_type_transformer(cls) def key_transformer(k: str) -> str: if (s := SETTINGS_LOOKUP_BY_NAME.get(k)) is not None: return s.dest return "_".join(part.lower() for part in FALLBACK_NAME_TO_DEST_SPLITTER.split(k)) return {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()} @classmethod def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Config": """Instantiate a MkosiConfig object from a full JSON dump.""" j = cls._load_json(s) return cls(**j) @classmethod def from_partial_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Config": """Return a new MkosiConfig with defaults overwritten by the attributes from passed in JSON.""" j = cls._load_json(s) return dataclasses.replace(cls.default(), **j) def sandbox( self, *, network: bool = False, devices: bool = False, relaxed: bool = False, scripts: Optional[Path] = None, options: Sequence[PathString] = (), ) -> list[PathString]: mounts: list[PathString] = ( flatten(("--ro-bind", d, d) for d in self.extra_search_paths) if not relaxed and not self.tools_tree else [] ) return sandbox_cmd( network=network, devices=devices, relaxed=relaxed, scripts=scripts, tools=self.tools(), options=[*options, *mounts], ) def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple[str, str, str]]: """ We have our own parser instead of using configparser as the latter does not support specifying the same setting multiple times in the same configuration file. """ section: Optional[str] = None setting: Optional[str] = None value: Optional[str] = None for line in textwrap.dedent(path.read_text()).splitlines(): # Systemd unit files allow both '#' and ';' to indicate comments so we do the same. for c in ("#", ";"): comment = line.find(c) if comment >= 0: line = line[:comment] if not line.strip(): continue # If we have a section, setting and value, any line that's indented is considered part of the # setting's value. if section and setting and value is not None and line[0].isspace(): value = f"{value}\n{line.strip()}" continue # So the line is not indented, that means we either found a new section or a new setting. Either way, # let's yield the previous setting and its value before parsing the new section/setting. 
if section and setting and value is not None: yield section, setting, value setting = value = None line = line.strip() if line[0] == '[': if line[-1] != ']': die(f"{line} is not a valid section") # Yield 3 empty strings to indicate we've finished the current section. if section: yield "", "", "" section = line[1:-1].strip() if not section: die("Section name cannot be empty or whitespace") continue if not section: die(f"Setting {line} is located outside of section") if only_sections and section not in only_sections: continue setting, delimiter, value = line.partition("=") if not delimiter: die(f"Setting {setting} must be followed by '='") if not setting: die(f"Missing setting name before '=' in {line}") setting = setting.strip() value = value.strip() # Make sure we yield any final setting and its value. if section and setting and value is not None: yield section, setting, value SETTINGS = ( ConfigSetting( dest="include", section="Config", parse=config_make_list_parser(delimiter=",", reset=False, parse=make_path_parser()), help="Include configuration from the specified file or directory", ), ConfigSetting( dest="initrd_include", section="Config", parse=config_make_list_parser(delimiter=",", reset=False, parse=make_path_parser()), help="Include configuration from the specified file or directory when building the initrd", ), ConfigSetting( dest="profile", section="Config", help="Build the specified profile", parse=config_parse_profile, match=config_make_string_matcher(), ), ConfigSetting( dest="images", compat_names=("Presets",), long="--image", section="Config", parse=config_make_list_parser(delimiter=","), help="Specify which images to build", ), ConfigSetting( dest="dependencies", long="--dependency", section="Config", parse=config_make_list_parser(delimiter=","), help="Specify other images that this image depends on", ), ConfigSetting( dest="minimum_version", section="Config", parse=config_parse_minimum_version, help="Specify the minimum required mkosi version", ), ConfigSetting( dest="distribution", short="-d", section="Distribution", specifier="d", parse=config_make_enum_parser(Distribution), match=config_make_enum_matcher(Distribution), default_factory=config_default_distribution, choices=Distribution.values(), help="Distribution to install", ), ConfigSetting( dest="release", short="-r", section="Distribution", specifier="r", parse=config_parse_string, match=config_make_string_matcher(), default_factory=config_default_release, default_factory_depends=("distribution",), help="Distribution release to install", ), ConfigSetting( dest="architecture", section="Distribution", specifier="a", parse=config_make_enum_parser(Architecture), match=config_make_enum_matcher(Architecture), default=Architecture.native(), choices=Architecture.values(), help="Override the architecture of installation", ), ConfigSetting( dest="mirror", short="-m", section="Distribution", help="Distribution mirror to use", ), ConfigSetting( dest="local_mirror", section="Distribution", help="Use a single local, flat and plain mirror to build the image", ), ConfigSetting( dest="repository_key_check", metavar="BOOL", nargs="?", section="Distribution", default=True, parse=config_parse_boolean, help="Controls signature and key checks on repositories", ), ConfigSetting( dest="repositories", metavar="REPOS", section="Distribution", parse=config_make_list_parser(delimiter=","), help="Repositories to use", ), ConfigSetting( dest="cache_only", metavar="BOOL", section="Distribution", parse=config_parse_boolean, help="Only use the 
package cache when installing packages", ), ConfigSetting( dest="package_manager_trees", long="--package-manager-tree", metavar="PATH", section="Distribution", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser()), default_factory=lambda ns: ns.skeleton_trees, default_factory_depends=("skeleton_trees",), help="Use a package manager tree to configure the package manager", ), ConfigSetting( dest="output_format", short="-t", long="--format", metavar="FORMAT", name="Format", section="Output", specifier="t", parse=config_make_enum_parser(OutputFormat), match=config_make_enum_matcher(OutputFormat), default=OutputFormat.disk, choices=OutputFormat.values(), help="Output Format", ), ConfigSetting( dest="manifest_format", metavar="FORMAT", section="Output", parse=config_make_list_parser(delimiter=",", parse=make_enum_parser(ManifestFormat)), help="Manifest Format", ), ConfigSetting( dest="output", short="-o", metavar="NAME", section="Output", specifier="o", parse=config_parse_output, default_factory=config_default_output, default_factory_depends=("image_id", "image_version"), help="Output name", ), ConfigSetting( dest="compress_output", metavar="ALG", nargs="?", section="Output", parse=config_parse_compression, default_factory=config_default_compression, default_factory_depends=("distribution", "release", "output_format"), help="Enable whole-output compression (with images or archives)", ), ConfigSetting( dest="output_dir", short="-O", metavar="DIR", name="OutputDirectory", section="Output", specifier="O", parse=config_make_path_parser(required=False), paths=("mkosi.output",), help="Output directory", ), ConfigSetting( dest="workspace_dir", metavar="DIR", name="WorkspaceDirectory", section="Output", parse=config_make_path_parser(required=False), help="Workspace directory", ), ConfigSetting( dest="cache_dir", metavar="PATH", name="CacheDirectory", section="Output", parse=config_make_path_parser(required=False), paths=("mkosi.cache",), help="Package cache path", ), ConfigSetting( dest="build_dir", metavar="PATH", name="BuildDirectory", section="Output", parse=config_make_path_parser(required=False), paths=("mkosi.builddir",), help="Path to use as persistent build directory", ), ConfigSetting( dest="image_version", match=config_match_version, section="Output", specifier="v", help="Set version for image", paths=("mkosi.version",), path_read_text=True, ), ConfigSetting( dest="image_id", match=config_make_string_matcher(allow_globs=True), section="Output", specifier="i", help="Set ID for image", ), ConfigSetting( dest="split_artifacts", metavar="BOOL", nargs="?", section="Output", parse=config_parse_boolean, help="Generate split partitions", ), ConfigSetting( dest="repart_dirs", long="--repart-dir", metavar="PATH", name="RepartDirectories", section="Output", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.repart",), help="Directory containing systemd-repart partition definitions", ), ConfigSetting( dest="sector_size", section="Output", parse=config_parse_sector_size, help="Set the disk image sector size", ), ConfigSetting( dest="repart_offline", section="Output", parse=config_parse_boolean, help="Build disk images without using loopback devices", default=True, ), ConfigSetting( dest="overlay", metavar="BOOL", nargs="?", section="Output", parse=config_parse_boolean, help="Only output the additions on top of the given base trees", ), ConfigSetting( dest="use_subvolumes", metavar="FEATURE", nargs="?", section="Output", parse=config_parse_feature, 
help="Use btrfs subvolumes for faster directory operations where possible", ), ConfigSetting( dest="seed", metavar="UUID", section="Output", parse=config_parse_seed, help="Set the seed for systemd-repart", ), ConfigSetting( dest="packages", short="-p", long="--package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Add an additional package to the OS image", ), ConfigSetting( dest="build_packages", long="--build-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Additional packages needed for build scripts", ), ConfigSetting( dest="package_directories", long="--package-directory", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), help="Specify a directory containing extra packages", ), ConfigSetting( dest="with_recommends", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Install recommended packages", ), ConfigSetting( dest="with_docs", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, default=True, help="Install documentation", ), ConfigSetting( dest="base_trees", long='--base-tree', metavar='PATH', section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser(required=False)), help='Use the given tree as base tree (e.g. lower sysext layer)', ), ConfigSetting( dest="skeleton_trees", long="--skeleton-tree", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser()), paths=("mkosi.skeleton", "mkosi.skeleton.tar"), path_default=False, help="Use a skeleton tree to bootstrap the image before installing anything", ), ConfigSetting( dest="extra_trees", long="--extra-tree", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser()), paths=("mkosi.extra", "mkosi.extra.tar"), path_default=False, help="Copy an extra tree on top of image", ), ConfigSetting( dest="remove_packages", long="--remove-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Remove package from the image OS image after installation", ), ConfigSetting( dest="remove_files", metavar="GLOB", section="Content", parse=config_make_list_parser(delimiter=","), help="Remove files from built image", ), ConfigSetting( dest="clean_package_metadata", metavar="FEATURE", section="Content", parse=config_parse_feature, help="Remove package manager database and other files", ), ConfigSetting( dest="source_date_epoch", metavar="TIMESTAMP", section="Content", parse=config_parse_source_date_epoch, default_factory=config_default_source_date_epoch, default_factory_depends=("environment",), help="Set the $SOURCE_DATE_EPOCH timestamp", ), ConfigSetting( dest="prepare_scripts", long="--prepare-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.prepare", "mkosi.prepare.chroot"), path_default=False, help="Prepare script to run inside the image before it is cached", compat_names=("PrepareScript",), ), ConfigSetting( dest="build_scripts", long="--build-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.build", "mkosi.build.chroot"), path_default=False, help="Build script to run inside image", compat_names=("BuildScript",), ), ConfigSetting( dest="postinst_scripts", long="--postinst-script", metavar="PATH", name="PostInstallationScripts", 
section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.postinst", "mkosi.postinst.chroot"), path_default=False, help="Postinstall script to run inside image", compat_names=("PostInstallationScript",), ), ConfigSetting( dest="finalize_scripts", long="--finalize-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.finalize", "mkosi.finalize.chroot"), path_default=False, help="Postinstall script to run outside image", compat_names=("FinalizeScript",), ), ConfigSetting( dest="build_sources", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(absolute=False)), match=config_match_build_sources, default_factory=lambda ns: [ConfigTree(ns.directory, None)] if ns.directory else [], help="Path for sources to build", ), ConfigSetting( dest="build_sources_ephemeral", metavar="BOOL", section="Content", parse=config_parse_boolean, help="Make build sources ephemeral when running scripts", ), ConfigSetting( dest="environment", short="-E", metavar="NAME[=VALUE]", section="Content", parse=config_make_dict_parser(delimiter=" ", parse=parse_environment, unescape=True), help="Set an environment variable when running scripts", ), ConfigSetting( dest="environment_files", long="--env-file", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.env",), path_default=False, help="Enviroment files to set when running scripts", ), ConfigSetting( dest="with_tests", short="-T", long="--without-tests", nargs="?", const="no", section="Content", parse=config_parse_boolean, default=True, help="Do not run tests as part of build scripts, if supported", ), ConfigSetting( dest="with_network", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Run build and postinst scripts with network access (instead of private network)", ), ConfigSetting( dest="bootable", metavar="FEATURE", nargs="?", section="Content", parse=config_parse_feature, match=config_match_feature, help="Generate ESP partition with systemd-boot and UKIs for installed kernels", ), ConfigSetting( dest="bootloader", metavar="BOOTLOADER", section="Content", parse=config_make_enum_parser(Bootloader), choices=Bootloader.values(), default=Bootloader.systemd_boot, help="Specify which UEFI bootloader to use", ), ConfigSetting( dest="bios_bootloader", metavar="BOOTLOADER", section="Content", parse=config_make_enum_parser(BiosBootloader), choices=BiosBootloader.values(), default=BiosBootloader.none, help="Specify which BIOS bootloader to use", ), ConfigSetting( dest="shim_bootloader", metavar="BOOTLOADER", section="Content", parse=config_make_enum_parser(ShimBootloader), choices=ShimBootloader.values(), default=ShimBootloader.none, help="Specify whether to use shim", ), ConfigSetting( dest="initrds", long="--initrd", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser(required=False)), help="Add a user-provided initrd to image", ), ConfigSetting( dest="initrd_packages", long="--initrd-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Add additional packages to the default initrd", ), ConfigSetting( dest="kernel_command_line", metavar="OPTIONS", section="Content", parse=config_make_list_parser(delimiter=" "), default_factory_depends=("architecture",), default_factory=config_default_kernel_command_line, 
help="Set the kernel command line (only bootable images)", ), ConfigSetting( dest="kernel_modules_include", metavar="REGEX", section="Content", parse=config_make_list_parser(delimiter=","), help="Include the specified kernel modules in the image", ), ConfigSetting( dest="kernel_modules_include_host", metavar="BOOL", section="Content", parse=config_parse_boolean, help="Include the currently loaded modules on the host in the image", ), ConfigSetting( dest="kernel_modules_exclude", metavar="REGEX", section="Content", parse=config_make_list_parser(delimiter=","), help="Exclude the specified kernel modules from the image", ), ConfigSetting( dest="kernel_modules_initrd", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, default=True, help="When building a bootable image, add an extra initrd containing the kernel modules", ), ConfigSetting( dest="kernel_modules_initrd_include", metavar="REGEX", section="Content", parse=config_make_list_parser(delimiter=","), help="When building a kernel modules initrd, include the specified kernel modules", ), ConfigSetting( dest="kernel_modules_initrd_include_host", metavar="BOOL", section="Content", parse=config_parse_boolean, help="When building a kernel modules initrd, include the currently loaded modules on the host in the image", ), ConfigSetting( dest="kernel_modules_initrd_exclude", metavar="REGEX", section="Content", parse=config_make_list_parser(delimiter=","), help="When building a kernel modules initrd, exclude the specified kernel modules", ), ConfigSetting( dest="locale", section="Content", parse=config_parse_string, help="Set the system locale", ), ConfigSetting( dest="locale_messages", metavar="LOCALE", section="Content", parse=config_parse_string, help="Set the messages locale", ), ConfigSetting( dest="keymap", metavar="KEYMAP", section="Content", parse=config_parse_string, help="Set the system keymap", ), ConfigSetting( dest="timezone", metavar="TIMEZONE", section="Content", parse=config_parse_string, help="Set the system timezone", ), ConfigSetting( dest="hostname", metavar="HOSTNAME", section="Content", parse=config_parse_string, help="Set the system hostname", ), ConfigSetting( dest="root_password", metavar="PASSWORD", section="Content", parse=config_parse_root_password, paths=("mkosi.rootpw",), path_read_text=True, path_secret=True, help="Set the password for root", ), ConfigSetting( dest="root_shell", metavar="SHELL", section="Content", parse=config_parse_string, help="Set the shell for root", ), ConfigSetting( dest="autologin", short="-a", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Enable root autologin", ), ConfigSetting( dest="make_initrd", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Make sure the image can be used as an initramfs", ), ConfigSetting( dest="ssh", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Set up SSH access from the host to the final image via 'mkosi ssh'", ), ConfigSetting( dest="selinux_relabel", name="SELinuxRelabel", metavar="FEATURE", section="Content", parse=config_parse_feature, help="Specify whether to relabel all files with setfiles", ), ConfigSetting( dest="secure_boot", metavar="BOOL", nargs="?", section="Validation", parse=config_parse_boolean, help="Sign the resulting kernel/initrd image for UEFI SecureBoot", ), ConfigSetting( dest="secure_boot_auto_enroll", metavar="BOOL", section="Validation", parse=config_parse_boolean, default=True, help="Automatically enroll the secureboot 
signing key on first boot", ), ConfigSetting( dest="secure_boot_key", metavar="PATH", section="Validation", parse=config_make_path_parser(secret=True), paths=("mkosi.key",), help="UEFI SecureBoot private key in PEM format", ), ConfigSetting( dest="secure_boot_certificate", metavar="PATH", section="Validation", parse=config_make_path_parser(), paths=("mkosi.crt",), help="UEFI SecureBoot certificate in X509 format", ), ConfigSetting( dest="secure_boot_sign_tool", metavar="TOOL", section="Validation", parse=config_make_enum_parser(SecureBootSignTool), default=SecureBootSignTool.auto, choices=SecureBootSignTool.values(), help="Tool to use for signing PE binaries for secure boot", ), ConfigSetting( dest="verity_key", metavar="PATH", section="Validation", parse=config_make_path_parser(secret=True), paths=("mkosi.key",), help="Private key for signing verity signature in PEM format", ), ConfigSetting( dest="verity_certificate", metavar="PATH", section="Validation", parse=config_make_path_parser(), paths=("mkosi.crt",), help="Certificate for signing verity signature in X509 format", ), ConfigSetting( dest="sign_expected_pcr", metavar="FEATURE", section="Validation", parse=config_parse_feature, help="Measure the components of the unified kernel image (UKI) and embed the PCR signature into the UKI", ), ConfigSetting( dest="passphrase", metavar="PATH", section="Validation", parse=config_make_path_parser(required=False, secret=True), paths=("mkosi.passphrase",), help="Path to a file containing the passphrase to use when LUKS encryption is selected", ), ConfigSetting( dest="checksum", metavar="BOOL", nargs="?", section="Validation", parse=config_parse_boolean, help="Write SHA256SUMS file", ), ConfigSetting( dest="sign", metavar="BOOL", nargs="?", section="Validation", parse=config_parse_boolean, help="Write and sign SHA256SUMS file", ), ConfigSetting( dest="key", section="Validation", help="GPG key to use for signing", ), ConfigSetting( dest="incremental", short="-i", metavar="BOOL", nargs="?", section="Host", parse=config_parse_boolean, help="Make use of and generate intermediary cache images", ), ConfigSetting( dest="nspawn_settings", name="NSpawnSettings", long="--settings", metavar="PATH", section="Host", parse=config_make_path_parser(), paths=("mkosi.nspawn",), help="Add in .nspawn settings file", ), ConfigSetting( dest="extra_search_paths", long="--extra-search-path", metavar="PATH", section="Host", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), help="List of comma-separated paths to look for programs before looking in PATH", ), ConfigSetting( dest="ephemeral", metavar="BOOL", section="Host", parse=config_parse_boolean, help=('If specified, the container/VM is run with a temporary snapshot of the output ' 'image that is removed immediately when the container/VM terminates'), nargs="?", ), ConfigSetting( dest="credentials", long="--credential", metavar="NAME=VALUE", section="Host", parse=config_make_dict_parser(delimiter=" ", parse=parse_credential, unescape=True), help="Pass a systemd credential to systemd-nspawn or qemu", ), ConfigSetting( dest="kernel_command_line_extra", metavar="OPTIONS", section="Host", parse=config_make_list_parser(delimiter=" "), help="Append extra entries to the kernel command line when booting the image", ), ConfigSetting( dest="acl", metavar="BOOL", nargs="?", section="Host", parse=config_parse_boolean, help="Set ACLs on generated directories to permit the user running mkosi to remove them", ), ConfigSetting( dest="tools_tree", metavar="PATH", 
section="Host", parse=config_make_path_parser(required=False), paths=("mkosi.tools",), help="Look up programs to execute inside the given tree", ), ConfigSetting( dest="tools_tree_distribution", metavar="DISTRIBUTION", section="Host", parse=config_make_enum_parser(Distribution), default_factory_depends=("distribution",), default_factory=lambda ns: ns.distribution.default_tools_tree_distribution(), help="Set the distribution to use for the default tools tree", ), ConfigSetting( dest="tools_tree_release", metavar="RELEASE", section="Host", parse=config_parse_string, default_factory_depends=("tools_tree_distribution",), default_factory=lambda ns: d.default_release() if (d := ns.tools_tree_distribution) else None, help="Set the release to use for the default tools tree", ), ConfigSetting( dest="tools_tree_mirror", metavar="MIRROR", section="Host", default_factory_depends=("distribution", "tools_tree_distribution"), default_factory=lambda ns: ns.mirror if ns.mirror and ns.distribution == ns.tools_tree_distribution else None, help="Set the mirror to use for the default tools tree", ), ConfigSetting( dest="tools_tree_packages", long="--tools-tree-package", metavar="PACKAGE", section="Host", parse=config_make_list_parser(delimiter=","), help="Add additional packages to the default tools tree", ), ConfigSetting( dest="runtime_trees", long="--runtime-tree", metavar="SOURCE:[TARGET]", section="Host", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(absolute=False)), help="Additional mounts to add when booting the image", ), ConfigSetting( dest="runtime_size", metavar="SIZE", section="Host", parse=config_parse_bytes, help="Grow disk images to the specified size before booting them", ), ConfigSetting( dest="runtime_scratch", metavar="FEATURE", section="Host", parse=config_parse_feature, help="Mount extra scratch space to /var/tmp", ), ConfigSetting( dest="ssh_key", metavar="PATH", section="Host", parse=config_make_path_parser(secret=True), paths=("mkosi.key",), help="Private key for use with mkosi ssh in PEM format", ), ConfigSetting( dest="ssh_certificate", metavar="PATH", section="Host", parse=config_make_path_parser(), paths=("mkosi.crt",), help="Certificate for use with mkosi ssh in X509 format", ), ConfigSetting( dest="qemu_gui", metavar="BOOL", nargs="?", section="Host", parse=config_parse_boolean, help="Start QEMU in graphical mode", ), ConfigSetting( dest="qemu_smp", metavar="SMP", section="Host", default="1", help="Configure guest's SMP settings", ), ConfigSetting( dest="qemu_mem", metavar="MEM", section="Host", default="2G", help="Configure guest's RAM size", ), ConfigSetting( dest="qemu_kvm", metavar="FEATURE", nargs="?", section="Host", parse=config_parse_feature, help="Configure whether to use KVM or not", ), ConfigSetting( dest="qemu_vsock", metavar="FEATURE", nargs="?", section="Host", parse=config_parse_feature, help="Configure whether to use qemu with a vsock or not", ), ConfigSetting( dest="qemu_vsock_cid", name="QemuVsockConnectionId", long="--qemu-vsock-cid", metavar="NUMBER|auto|hash", section="Host", parse=config_parse_vsock_cid, default=QemuVsockCID.hash, help="Specify the VSock connection ID to use", ), ConfigSetting( dest="qemu_swtpm", metavar="FEATURE", nargs="?", section="Host", parse=config_parse_feature, help="Configure whether to use qemu with swtpm or not", ), ConfigSetting( dest="qemu_cdrom", metavar="BOOLEAN", nargs="?", section="Host", parse=config_parse_boolean, help="Attach the image as a CD-ROM to the virtual machine", ), ConfigSetting( 
dest="qemu_firmware", metavar="FIRMWARE", section="Host", parse=config_make_enum_parser(QemuFirmware), default=QemuFirmware.auto, help="Set qemu firmware to use", choices=QemuFirmware.values(), ), ConfigSetting( dest="qemu_firmware_variables", metavar="PATH", section="Host", parse=config_make_path_parser(), help="Set the path to the qemu firmware variables file to use", ), ConfigSetting( dest="qemu_kernel", metavar="PATH", section="Host", parse=config_make_path_parser(), help="Specify the kernel to use for qemu direct kernel boot", ), ConfigSetting( dest="qemu_drives", long="--qemu-drive", metavar="DRIVE", section="Host", parse=config_make_list_parser(delimiter=" ", parse=parse_drive), help="Specify a qemu drive that mkosi should create and pass to qemu", ), ConfigSetting( dest="qemu_args", metavar="ARGS", section="Host", parse=config_make_list_parser(delimiter=" "), # Suppress the command line option because it's already possible to pass qemu args as normal # arguments. help=argparse.SUPPRESS, ), ) SETTINGS_LOOKUP_BY_NAME = {name: s for s in SETTINGS for name in [s.name, *s.compat_names]} SETTINGS_LOOKUP_BY_DEST = {s.dest: s for s in SETTINGS} SETTINGS_LOOKUP_BY_SPECIFIER = {s.specifier: s for s in SETTINGS if s.specifier} MATCHES = ( Match( name="PathExists", match=match_path_exists, ), Match( name="SystemdVersion", match=match_systemd_version, ), Match( name="HostArchitecture", match=match_host_architecture, ), ) MATCH_LOOKUP = {m.name: m for m in MATCHES} # This regular expression can be used to split "AutoBump" -> ["Auto", "Bump"] # and "NSpawnSettings" -> ["NSpawn", "Settings"] # The first part (?<=[a-z]) is a positive look behind for a lower case letter # and (?=[A-Z]) is a lookahead assertion matching an upper case letter but not # consuming it FALLBACK_NAME_TO_DEST_SPLITTER = re.compile("(?<=[a-z])(?=[A-Z])") def create_argument_parser(action: type[argparse.Action]) -> argparse.ArgumentParser: parser = argparse.ArgumentParser( prog="mkosi", description="Build Bespoke OS Images", # the synopsis below is supposed to be indented by two spaces usage="\n " + textwrap.dedent("""\ mkosi [options...] {b}summary{e} mkosi [options...] {b}build{e} [command line...] mkosi [options...] {b}shell{e} [command line...] mkosi [options...] {b}boot{e} [nspawn settings...] mkosi [options...] {b}qemu{e} [qemu parameters...] mkosi [options...] {b}ssh{e} [command line...] mkosi [options...] {b}journalctl{e} [command line...] mkosi [options...] {b}coredumpctl{e} [command line...] mkosi [options...] {b}clean{e} mkosi [options...] {b}serve{e} mkosi [options...] {b}bump{e} mkosi [options...] {b}genkey{e} mkosi [options...] {b}documentation{e} mkosi [options...] 
{b}help{e} mkosi -h | --help mkosi --version """).format(b=Style.bold, e=Style.reset), add_help=False, allow_abbrev=False, argument_default=argparse.SUPPRESS, formatter_class=CustomHelpFormatter, ) parser.add_argument( "--version", action="version", version="%(prog)s " + __version__, help=argparse.SUPPRESS, ) parser.add_argument( "-f", "--force", action="count", dest="force", default=0, help="Remove existing image file before operation", ) parser.add_argument( "-C", "--directory", type=parse_chdir, default=Path.cwd(), help="Change to specified directory before doing anything", metavar="PATH", ) parser.add_argument( "--debug", help="Turn on debugging output", action="store_true", default=False, ) parser.add_argument( "--debug-shell", help="Spawn an interactive shell in the image if a chroot command fails", action="store_true", default=False, ) parser.add_argument( "--debug-workspace", help="When an error occurs, the workspace directory will not be deleted", action="store_true", default=False, ) parser.add_argument( "--no-pager", action="store_false", dest="pager", default=True, help="Disable paging for long output", ) parser.add_argument( "--genkey-valid-days", metavar="DAYS", help="Number of days keys should be valid when generating keys", default="730", ) parser.add_argument( "--genkey-common-name", metavar="CN", help="Template for the CN when generating keys", default="mkosi of %u", ) parser.add_argument( "-B", "--auto-bump", help="Automatically bump image version after building", action="store_true", default=False, ) parser.add_argument( "--doc-format", help="The format to show documentation in", default=DocFormat.auto, type=DocFormat, ) parser.add_argument( "--json", help="Show summary as JSON", action="store_true", default=False, ) # These can be removed once mkosi v15 is available in LTS distros and compatibility with <= v14 # is no longer needed in build infrastructure (e.g.: OBS).
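# IgnoreAction (defined above) swallows these flags with only a warning: a
# hypothetical invocation like "mkosi --cache=yes summary" still parses,
# logging "--cache is no longer supported" instead of failing.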
parser.add_argument( "--nspawn-keep-unit", nargs=0, action=IgnoreAction, ) parser.add_argument( "--default", action=IgnoreAction, ) parser.add_argument( "--cache", action=IgnoreAction, ) parser.add_argument( "verb", type=Verb, choices=list(Verb), default=Verb.build, help=argparse.SUPPRESS, ) parser.add_argument( "cmdline", nargs=argparse.REMAINDER, help=argparse.SUPPRESS, ) parser.add_argument( "-h", "--help", action=PagerHelpAction, help=argparse.SUPPRESS, ) last_section = None for s in SETTINGS: if s.section != last_section: group = parser.add_argument_group(f"{s.section} configuration options") last_section = s.section opts = [s.short, s.long] if s.short else [s.long] group.add_argument( # type: ignore *opts, dest=s.dest, choices=s.choices, metavar=s.metavar, nargs=s.nargs, # type: ignore const=s.const, help=s.help, action=action, ) try: import argcomplete argcomplete.autocomplete(parser) except ImportError: pass return parser def resolve_deps(images: Sequence[Config], include: Sequence[str]) -> list[Config]: graph = {config.image: config.dependencies for config in images} if include: if any((missing := i) not in graph for i in include): die(f"No image found with name {missing}") deps = set() queue = [*include] while queue: if (image := queue.pop(0)) not in deps: deps.add(image) queue.extend(graph[image]) images = [config for config in images if config.image in deps] graph = {config.image: config.dependencies for config in images} try: order = list(graphlib.TopologicalSorter(graph).static_order()) except graphlib.CycleError as e: die(f"Image dependency cycle detected: {' => '.join(e.args[1])}") return sorted(images, key=lambda i: order.index(i.image)) def parse_config(argv: Sequence[str] = ()) -> tuple[Args, tuple[Config, ...]]: # Compare inodes instead of paths so we can't get tricked by bind mounts and such. parsed_includes: set[tuple[int, int]] = set() immutable_settings: set[str] = set() def expand_specifiers(text: str, namespace: argparse.Namespace, defaults: argparse.Namespace) -> str: percent = False result: list[str] = [] for c in text: if percent: percent = False if c == "%": result += "%" else: s = SETTINGS_LOOKUP_BY_SPECIFIER.get(c) if not s: logging.warning(f"Unknown specifier '%{c}' found in {text}, ignoring") continue if (v := finalize_default(s, namespace, defaults)) is None: logging.warning( f"Setting {s.name} specified by specifier '%{c}' in {text} is not yet set, ignoring" ) continue result += str(v) elif c == "%": percent = True else: result += c if percent: result += "%" return "".join(result) @contextlib.contextmanager def parse_new_includes( namespace: argparse.Namespace, defaults: argparse.Namespace, ) -> Iterator[None]: current_num_of_includes = len(getattr(namespace, "include", [])) try: yield finally: # Parse any includes that were added after yielding. 
for p in getattr(namespace, "include", [])[current_num_of_includes:]: st = p.stat() if (st.st_dev, st.st_ino) in parsed_includes: continue with chdir(p if p.is_dir() else Path.cwd()): parse_config(p if p.is_file() else Path("."), namespace, defaults) parsed_includes.add((st.st_dev, st.st_ino)) class ConfigAction(argparse.Action): def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], option_string: Optional[str] = None ) -> None: assert option_string is not None if values is None and self.nargs == "?": values = self.const or "yes" try: s = SETTINGS_LOOKUP_BY_DEST[self.dest] except KeyError: die(f"Unknown setting {option_string}") with parse_new_includes(namespace, defaults): if values is None or isinstance(values, str): setattr(namespace, s.dest, s.parse(values, getattr(namespace, self.dest, None))) else: for v in values: assert isinstance(v, str) setattr(namespace, s.dest, s.parse(v, getattr(namespace, self.dest, None))) def finalize_default( setting: ConfigSetting, namespace: argparse.Namespace, defaults: argparse.Namespace ) -> Optional[Any]: if (v := getattr(namespace, setting.dest, None)) is not None: return v for d in setting.default_factory_depends: finalize_default(SETTINGS_LOOKUP_BY_DEST[d], namespace, defaults) # If the setting was assigned the empty string, we don't use any configured default value. if not hasattr(namespace, setting.dest) and setting.dest in defaults: default = getattr(defaults, setting.dest) elif setting.default_factory: default = setting.default_factory(namespace) elif setting.default is None: default = setting.parse(None, None) else: default = setting.default with parse_new_includes(namespace, defaults): setattr(namespace, setting.dest, default) return default def match_config(path: Path, namespace: argparse.Namespace, defaults: argparse.Namespace) -> bool: triggered: Optional[bool] = None # If the config file does not exist, we assume it matches so that we look at the other files in the # directory as well (mkosi.conf.d/ and extra files). if not path.exists(): return True for section, k, v in parse_ini(path, only_sections=["Match"]): if not section: if triggered is False: return False triggered = None continue trigger = v.startswith("|") v = v.removeprefix("|") negate = v.startswith("!") v = v.removeprefix("!") if not v: die("Match value cannot be empty") if (s := SETTINGS_LOOKUP_BY_NAME.get(k)): if not s.match: die(f"{k} cannot be used in [Match]") if k != s.name: logging.warning(f"Setting {k} is deprecated, please use {s.name} instead.") # If we encounter a setting in [Match] that has not been explicitly configured yet, # we assign the default value first so that we can [Match] on default values for # settings. 
if finalize_default(s, namespace, defaults) is None: result = False else: result = s.match(v, getattr(namespace, s.dest)) elif (m := MATCH_LOOKUP.get(k)): result = m.match(v) else: die(f"{k} cannot be used in [Match]") if negate: result = not result if not trigger and not result: return False if trigger: triggered = bool(triggered) or result return triggered is not False def parse_config( path: Path, namespace: argparse.Namespace, defaults: argparse.Namespace, profiles: bool = False, ) -> bool: s: Optional[ConfigSetting] # Make mypy happy extras = path.is_dir() if path.is_dir(): path = path / "mkosi.conf" if not match_config(path, namespace, defaults): return False if extras: if (path.parent / "mkosi.local.conf").exists(): parse_config(path.parent / "mkosi.local.conf", namespace, defaults) for s in SETTINGS: ns = defaults if s.path_default else namespace for f in s.paths: p = parse_path( f, secret=s.path_secret, required=False, resolve=False, expanduser=False, expandvars=False, ) if p.exists(): setattr( ns, s.dest, s.parse(p.read_text().rstrip("\n") if s.path_read_text else f, getattr(ns, s.dest, None)), ) if path.exists(): logging.debug(f"Including configuration file {Path.cwd() / path}") for section, k, v in parse_ini(path, only_sections={s.section for s in SETTINGS} | {"Preset"}): if not section: continue name = k.removeprefix("@") ns = namespace if k == name else defaults if not (s := SETTINGS_LOOKUP_BY_NAME.get(name)): die(f"Unknown setting {k}") if name in immutable_settings: die(f"Setting {name} cannot be modified anymore at this point") if section != s.section: logging.warning(f"Setting {k} should be configured in [{s.section}], not [{section}].") if name != s.name: canonical = s.name if k == name else f"@{s.name}" logging.warning(f"Setting {k} is deprecated, please use {canonical} instead.") v = expand_specifiers(v, namespace, defaults) with parse_new_includes(namespace, defaults): setattr(ns, s.dest, s.parse(v, getattr(ns, s.dest, None))) if profiles: finalize_default(SETTINGS_LOOKUP_BY_DEST["profile"], namespace, defaults) profile = getattr(namespace, "profile") immutable_settings.add("Profile") if profile: for p in (profile, f"{profile}.conf"): p = Path("mkosi.profiles") / p if p.exists(): break else: die(f"Profile '{profile}' not found in mkosi.profiles/") setattr(namespace, "profile", profile) with chdir(p if p.is_dir() else Path.cwd()): parse_config(p if p.is_file() else Path("."), namespace, defaults) if extras and (path.parent / "mkosi.conf.d").exists(): for p in sorted((path.parent / "mkosi.conf.d").iterdir()): if p.is_dir() or p.suffix == ".conf": with chdir(p if p.is_dir() else Path.cwd()): parse_config(p if p.is_file() else Path("."), namespace, defaults) return True def finalize_defaults(namespace: argparse.Namespace, defaults: argparse.Namespace) -> None: for s in SETTINGS: finalize_default(s, namespace, defaults) images = [] namespace = argparse.Namespace() defaults = argparse.Namespace() argv = list(argv) # Make sure the verb command gets explicitly passed. Insert a -- before the positional verb argument # otherwise it might be considered as an argument of a parameter with nargs='?'. For example mkosi -i # summary would be treated as -i=summary. 
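# NOTE (editor's illustrative aside, not mkosi code): a minimal sketch of the
# argparse behaviour that makes the inserted "--" necessary, using a
# hypothetical parser with an nargs="?" option:
#
#     import argparse
#     p = argparse.ArgumentParser()
#     p.add_argument("-i", nargs="?", const="yes")
#     p.add_argument("verb", nargs="?", default="build")
#     p.parse_args(["-i", "summary"])        # Namespace(i='summary', verb='build')
#     p.parse_args(["-i", "--", "summary"])  # Namespace(i='yes', verb='summary')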
for verb in Verb: try: v_i = argv.index(verb.name) except ValueError: continue if v_i > 0 and argv[v_i - 1] != "--": argv.insert(v_i, "--") break else: argv += ["--", "build"] namespace = argparse.Namespace() argparser = create_argument_parser(ConfigAction) argparser.parse_args(argv, namespace) cli_ns = copy.deepcopy(namespace) args = load_args(namespace) if ARG_DEBUG.get(): logging.getLogger().setLevel(logging.DEBUG) if args.verb == Verb.help: PagerHelpAction.__call__(None, argparser, namespace) # type: ignore include = () if args.directory is not None: parse_config(Path("."), namespace, defaults, profiles=True) finalize_default(SETTINGS_LOOKUP_BY_DEST["images"], namespace, defaults) include = getattr(namespace, "images") immutable_settings.add("Images") d: Optional[Path] for d in (Path("mkosi.images"), Path("mkosi.presets")): if Path(d).exists(): break else: d = None if d: for p in d.iterdir(): if not p.is_dir() and not p.suffix == ".conf": continue name = p.name.removesuffix(".conf") if not name: die(f"{p} is not a valid image name") ns_copy = copy.deepcopy(namespace) defaults_copy = copy.deepcopy(defaults) setattr(ns_copy, "image", name) with chdir(p if p.is_dir() else Path.cwd()): if not parse_config(p if p.is_file() else Path("."), ns_copy, defaults_copy): continue finalize_defaults(ns_copy, defaults_copy) images += [ns_copy] if not images: setattr(namespace, "image", None) finalize_defaults(namespace, defaults) images = [namespace] for s in vars(cli_ns): if s not in SETTINGS_LOOKUP_BY_DEST: continue if getattr(cli_ns, s) is None: continue if isinstance(getattr(cli_ns, s), (list, tuple)): continue if any(getattr(config, s) == getattr(cli_ns, s) for config in images): continue setting = SETTINGS_LOOKUP_BY_DEST[s].long a = getattr(cli_ns, s) die( f"{setting}={a} was specified on the command line but is not allowed to be configured by any images.", hint="Prefix the setting with '@' in the image configuration file to allow overriding it from the command line.", # noqa: E501 ) if not images: die("No images defined in mkosi.images/") images = [load_config(args, ns) for ns in images] images = resolve_deps(images, include) return args, tuple(images) def load_credentials(args: argparse.Namespace) -> dict[str, str]: if not args.verb.needs_credentials(): return {} creds = { "agetty.autologin": "root", "login.noauth": "yes", } d = Path("mkosi.credentials") if args.directory is not None and d.is_dir(): for e in d.iterdir(): if os.access(e, os.X_OK): creds[e.name] = run([e], stdout=subprocess.PIPE, env=os.environ).stdout else: creds[e.name] = e.read_text() creds |= args.credentials if "firstboot.timezone" not in creds and find_binary("timedatectl"): tz = run( ["timedatectl", "show", "-p", "Timezone", "--value"], stdout=subprocess.PIPE, check=False, ).stdout.strip() if tz: creds["firstboot.timezone"] = tz if "firstboot.locale" not in creds: creds["firstboot.locale"] = "C.UTF-8" if "ssh.authorized_keys.root" not in creds: if args.ssh_certificate: pubkey = run(["openssl", "x509", "-in", args.ssh_certificate, "-pubkey", "-noout"], stdout=subprocess.PIPE, env=dict(OPENSSL_CONF="/dev/null")).stdout.strip() sshpubkey = run(["ssh-keygen", "-f", "/dev/stdin", "-i", "-m", "PKCS8"], input=pubkey, stdout=subprocess.PIPE).stdout.strip() creds["ssh.authorized_keys.root"] = sshpubkey elif args.ssh: die("Ssh= is enabled but no SSH certificate was found", hint="Run 'mkosi genkey' to automatically create one") return creds def load_kernel_command_line_extra(args: argparse.Namespace) -> list[str]: tty = 
args.architecture.default_serial_tty() columns, lines = shutil.get_terminal_size() cmdline = [ # Make sure we set up networking in the VM/container. "systemd.wants=network.target", # Make sure we don't load vmw_vmci which messes with virtio vsock. "module_blacklist=vmw_vmci", f"systemd.tty.term.{tty}={os.getenv('TERM', 'vt220')}", f"systemd.tty.columns.{tty}={columns}", f"systemd.tty.rows.{tty}={lines}", ] if not any(s.startswith("ip=") for s in args.kernel_command_line_extra): cmdline += ["ip=enc0:any", "ip=enp0s1:any", "ip=enp0s2:any", "ip=host0:any", "ip=none"] if not any(s.startswith("loglevel=") for s in args.kernel_command_line_extra): cmdline += ["loglevel=4"] if not any(s.startswith("SYSTEMD_SULOGIN_FORCE=") for s in args.kernel_command_line_extra): cmdline += ["SYSTEMD_SULOGIN_FORCE=1"] if args.qemu_cdrom: # CD-ROMs are read-only so tell systemd to boot in volatile mode. cmdline += ["systemd.volatile=yes"] if not args.qemu_gui: columns, lines = shutil.get_terminal_size() cmdline += [ f"systemd.tty.term.console={os.getenv('TERM', 'vt220')}", f"systemd.tty.columns.console={columns}", f"systemd.tty.rows.console={lines}", f"console={tty}", ] for s in args.kernel_command_line_extra: key, sep, value = s.partition("=") if " " in value: value = f'"{value}"' cmdline += [key if not sep else f"{key}={value}"] return cmdline def load_environment(args: argparse.Namespace) -> dict[str, str]: env = { "SYSTEMD_TMPFILES_FORCE_SUBVOL": "0", "KERNEL_INSTALL_BYPASS": "1", "SYSTEMD_HWDB_UPDATE_BYPASS": "1", } if args.image_id is not None: env["IMAGE_ID"] = args.image_id if args.image_version is not None: env["IMAGE_VERSION"] = args.image_version if args.source_date_epoch is not None: env["SOURCE_DATE_EPOCH"] = str(args.source_date_epoch) if proxy := os.getenv("http_proxy"): env["http_proxy"] = proxy if proxy := os.getenv("https_proxy"): env["https_proxy"] = proxy if dnf := os.getenv("MKOSI_DNF"): env["MKOSI_DNF"] = dnf env |= dict(parse_environment(line) for f in args.environment_files for line in f.read_text().strip().splitlines()) env |= args.environment return env def load_args(args: argparse.Namespace) -> Args: if args.cmdline and not args.verb.supports_cmdline(): die(f"Arguments after verb are not supported for {args.verb}.") if args.debug: ARG_DEBUG.set(args.debug) if args.debug_shell: ARG_DEBUG_SHELL.set(args.debug_shell) return Args.from_namespace(args) def load_config(args: Args, config: argparse.Namespace) -> Config: if config.build_dir: config.build_dir = config.build_dir / f"{config.distribution}~{config.release}~{config.architecture}" if config.sign: config.checksum = True config.credentials = load_credentials(config) config.kernel_command_line_extra = load_kernel_command_line_extra(config) config.environment = load_environment(config) if config.secure_boot and args.verb != Verb.genkey: if config.secure_boot_key is None and config.secure_boot_certificate is None: die("UEFI SecureBoot enabled, but couldn't find the certificate and private key.", hint="Consider generating them with 'mkosi genkey'.") if config.secure_boot_key is None: die("UEFI SecureBoot enabled, certificate was found, but not the private key.", hint="Consider placing it in mkosi.key") if config.secure_boot_certificate is None: die("UEFI SecureBoot enabled, private key was found, but not the certificate.", hint="Consider placing it in mkosi.crt") if config.repositories and not ( config.distribution.is_dnf_distribution() or config.distribution.is_apt_distribution() or config.distribution == Distribution.arch ): die("Sorry, 
the --repositories option is only supported on pacman, dnf and apt based distributions") if config.overlay and not config.base_trees: die("--overlay can only be used with --base-tree") if config.incremental and not config.cache_dir: die("A cache directory must be configured in order to use --incremental") # For unprivileged builds we need the userxattr OverlayFS mount option, which is only available # in Linux v5.11 and later. if ( (config.build_scripts or config.base_trees) and GenericVersion(platform.release()) < GenericVersion("5.11") and os.geteuid() != 0 ): die("This unprivileged build configuration requires at least Linux v5.11") return Config.from_namespace(config) def yes_no(b: bool) -> str: return "yes" if b else "no" def none_to_na(s: Optional[object]) -> str: return "n/a" if s is None else str(s) def none_to_random(s: Optional[object]) -> str: return "random" if s is None else str(s) def none_to_none(s: Optional[object]) -> str: return "none" if s is None else str(s) def none_to_default(s: Optional[object]) -> str: return "default" if s is None else str(s) def line_join_list(array: Iterable[PathString]) -> str: if not array: return "none" items = (str(none_to_none(cast(Path, item))) for item in array) return "\n ".join(items) def format_tree(tree: ConfigTree) -> str: return f"{tree.source}:{tree.target}" if tree.target else f"{tree.source}" def line_join_tree_list(array: Sequence[ConfigTree]) -> str: if not array: return "none" items = [format_tree(tree) for tree in array] return "\n ".join(items) def format_bytes(num_bytes: int) -> str: if num_bytes >= 1024**3: return f"{num_bytes/1024**3 :0.1f}G" if num_bytes >= 1024**2: return f"{num_bytes/1024**2 :0.1f}M" if num_bytes >= 1024: return f"{num_bytes/1024 :0.1f}K" return f"{num_bytes}B" def format_bytes_or_none(num_bytes: Optional[int]) -> str: return format_bytes(num_bytes) if num_bytes is not None else "none" def summary(config: Config) -> str: def bold(s: Any) -> str: return f"{Style.bold}{s}{Style.reset}" maniformats = (" ".join(i.name for i in config.manifest_format)) or "(none)" env = [f"{k}={v}" for k, v in config.environment.items()] summary = f"""\ {bold(f"IMAGE: {config.image or 'default'}")} {bold("CONFIG")}: Profile: {none_to_none(config.profile)} Include: {line_join_list(config.include)} Initrd Include: {line_join_list(config.initrd_include)} Images: {line_join_list(config.images)} Dependencies: {line_join_list(config.dependencies)} Minimum Version: {none_to_none(config.minimum_version)} {bold("DISTRIBUTION")}: Distribution: {bold(config.distribution)} Release: {bold(none_to_na(config.release))} Architecture: {config.architecture} Mirror: {none_to_default(config.mirror)} Local Mirror (build): {none_to_none(config.local_mirror)} Repo Signature/Key check: {yes_no(config.repository_key_check)} Repositories: {line_join_list(config.repositories)} Use Only Package Cache: {yes_no(config.cache_only)} Package Manager Trees: {line_join_tree_list(config.package_manager_trees)} {bold("OUTPUT")}: Output Format: {config.output_format} Manifest Formats: {maniformats} Output: {bold(config.output_with_compression)} Compression: {config.compress_output} Output Directory: {config.output_dir_or_cwd()} Workspace Directory: {config.workspace_dir_or_default()} Cache Directory: {none_to_none(config.cache_dir)} Build Directory: {none_to_none(config.build_dir)} Image ID: {config.image_id} Image Version: {config.image_version} Split Artifacts: {yes_no(config.split_artifacts)} Repart Directories: {line_join_list(config.repart_dirs)} Sector 
Size: {none_to_default(config.sector_size)} Repart Offline: {yes_no(config.repart_offline)} Overlay: {yes_no(config.overlay)} Use Subvolumes: {config.use_subvolumes} Seed: {none_to_random(config.seed)} {bold("CONTENT")}: Packages: {line_join_list(config.packages)} Build Packages: {line_join_list(config.build_packages)} With Documentation: {yes_no(config.with_docs)} Base Trees: {line_join_list(config.base_trees)} Skeleton Trees: {line_join_tree_list(config.skeleton_trees)} Extra Trees: {line_join_tree_list(config.extra_trees)} Remove Packages: {line_join_list(config.remove_packages)} Remove Files: {line_join_list(config.remove_files)} Clean Package Manager Metadata: {config.clean_package_metadata} Source Date Epoch: {none_to_none(config.source_date_epoch)} Prepare Scripts: {line_join_list(config.prepare_scripts)} Build Scripts: {line_join_list(config.build_scripts)} Postinstall Scripts: {line_join_list(config.postinst_scripts)} Finalize Scripts: {line_join_list(config.finalize_scripts)} Build Sources: {line_join_tree_list(config.build_sources)} Build Sources Ephemeral: {yes_no(config.build_sources_ephemeral)} Script Environment: {line_join_list(env)} Environment Files: {line_join_list(config.environment_files)} Run Tests in Build Scripts: {yes_no(config.with_tests)} Scripts With Network: {yes_no(config.with_network)} Bootable: {config.bootable} Bootloader: {config.bootloader} BIOS Bootloader: {config.bios_bootloader} Shim Bootloader: {config.shim_bootloader} Initrds: {line_join_list(config.initrds)} Initrd Packages: {line_join_list(config.initrd_packages)} Kernel Command Line: {line_join_list(config.kernel_command_line)} Kernel Modules Include: {line_join_list(config.kernel_modules_include)} Kernel Modules Exclude: {line_join_list(config.kernel_modules_exclude)} Kernel Modules Include Host: {yes_no(config.kernel_modules_initrd_include_host)} Kernel Modules Initrd: {yes_no(config.kernel_modules_initrd)} Kernel Modules Initrd Include: {line_join_list(config.kernel_modules_initrd_include)} Kernel Modules Initrd Exclude: {line_join_list(config.kernel_modules_initrd_exclude)} Kernel Modules Initrd Include Host: {yes_no(config.kernel_modules_initrd_include_host)} Locale: {none_to_default(config.locale)} Locale Messages: {none_to_default(config.locale_messages)} Keymap: {none_to_default(config.keymap)} Timezone: {none_to_default(config.timezone)} Hostname: {none_to_default(config.hostname)} Root Password: {("(set)" if config.root_password else "(default)")} Root Shell: {none_to_default(config.root_shell)} Autologin: {yes_no(config.autologin)} Make Initrd: {yes_no(config.make_initrd)} SSH: {yes_no(config.ssh)} SELinux Relabel: {config.selinux_relabel} """ if config.output_format.is_extension_image() or config.output_format in ( OutputFormat.disk, OutputFormat.uki, OutputFormat.esp, ): summary += f"""\ {bold("VALIDATION")}: UEFI SecureBoot: {yes_no(config.secure_boot)} UEFI SecureBoot AutoEnroll: {yes_no(config.secure_boot_auto_enroll)} SecureBoot Signing Key: {none_to_none(config.secure_boot_key)} SecureBoot Certificate: {none_to_none(config.secure_boot_certificate)} SecureBoot Sign Tool: {config.secure_boot_sign_tool} Verity Signing Key: {none_to_none(config.verity_key)} Verity Certificate: {none_to_none(config.verity_certificate)} Sign Expected PCRs: {config.sign_expected_pcr} Passphrase: {none_to_none(config.passphrase)} Checksum: {yes_no(config.checksum)} Sign: {yes_no(config.sign)} GPG Key: ({"default" if config.key is None else config.key}) """ summary += f"""\ {bold("HOST CONFIGURATION")}: 
Incremental: {yes_no(config.incremental)} NSpawn Settings: {none_to_none(config.nspawn_settings)} Extra Search Paths: {line_join_list(config.extra_search_paths)} Ephemeral: {config.ephemeral} Credentials: {line_join_list(config.credentials.keys())} Extra Kernel Command Line: {line_join_list(config.kernel_command_line_extra)} Use ACLs: {yes_no(config.acl)} Tools Tree: {config.tools_tree} Tools Tree Distribution: {none_to_none(config.tools_tree_distribution)} Tools Tree Release: {none_to_none(config.tools_tree_release)} Tools Tree Mirror: {none_to_default(config.tools_tree_mirror)} Tools Tree Packages: {line_join_list(config.tools_tree_packages)} Runtime Trees: {line_join_tree_list(config.runtime_trees)} Runtime Size: {format_bytes_or_none(config.runtime_size)} Runtime Scratch: {config.runtime_scratch} SSH Signing Key: {none_to_none(config.ssh_key)} SSH Certificate: {none_to_none(config.ssh_certificate)} QEMU GUI: {yes_no(config.qemu_gui)} QEMU CPU Cores: {config.qemu_smp} QEMU Memory: {config.qemu_mem} QEMU Use KVM: {config.qemu_kvm} QEMU Use VSock: {config.qemu_vsock} QEMU VSock Connection ID: {QemuVsockCID.format(config.qemu_vsock_cid)} QEMU Use Swtpm: {config.qemu_swtpm} QEMU Use CD-ROM: {yes_no(config.qemu_cdrom)} QEMU Firmware: {config.qemu_firmware} QEMU Firmware Variables: {none_to_none(config.qemu_firmware_variables)} QEMU Extra Arguments: {line_join_list(config.qemu_args)} """ return summary class JsonEncoder(json.JSONEncoder): def default(self, o: Any) -> Any: if isinstance(o, StrEnum): return str(o) elif isinstance(o, GenericVersion): return str(o) elif isinstance(o, os.PathLike): return os.fspath(o) elif isinstance(o, uuid.UUID): return str(o) elif isinstance(o, (Args, Config)): return o.to_dict() return json.JSONEncoder.default(self, o) E = TypeVar("E", bound=StrEnum) def json_type_transformer(refcls: Union[type[Args], type[Config]]) -> Callable[[str, Any], Any]: fields_by_name = {field.name: field for field in dataclasses.fields(refcls)} def path_transformer(path: str, fieldtype: type[Path]) -> Path: return Path(path) def optional_path_transformer(path: Optional[str], fieldtype: type[Optional[Path]]) -> Optional[Path]: return Path(path) if path is not None else None def path_list_transformer(pathlist: list[str], fieldtype: type[list[Path]]) -> list[Path]: return [Path(p) for p in pathlist] def optional_uuid_transformer(optuuid: Optional[str], fieldtype: type[Optional[uuid.UUID]]) -> Optional[uuid.UUID]: return uuid.UUID(optuuid) if optuuid is not None else None def root_password_transformer( rootpw: Optional[list[Union[str, bool]]], fieldtype: type[Optional[tuple[str, bool]]] ) -> Optional[tuple[str, bool]]: if rootpw is None: return None return (cast(str, rootpw[0]), cast(bool, rootpw[1])) def config_tree_transformer(trees: list[dict[str, Any]], fieldtype: type[ConfigTree]) -> list[ConfigTree]: # TODO: exchange for TypeGuard and list comprehension once on 3.10 ret = [] for d in trees: assert "source" in d assert "target" in d ret.append( ConfigTree( source=Path(d["source"]), target=Path(d["target"]) if d["target"] is not None else None, ) ) return ret def enum_transformer(enumval: str, fieldtype: type[E]) -> E: return fieldtype(enumval) def optional_enum_transformer(enumval: Optional[str], fieldtype: type[Optional[E]]) -> Optional[E]: return fieldtype(enumval) if enumval is not None else None # type: ignore def enum_list_transformer(enumlist: list[str], fieldtype: type[list[E]]) -> list[E]: enumtype = fieldtype.__args__[0] # type: ignore return [enumtype[e] for e in enumlist] 
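# NOTE (editor's illustrative aside, not mkosi code): enum_list_transformer()
# above recovers the element type of a parameterized generic such as
# list[ManifestFormat] through __args__. With a stand-in enum:
#
#     import enum
#     class Color(enum.Enum):
#         red = "red"
#     assert list[Color].__args__ == (Color,)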
def str_tuple_transformer(strtup: list[str], fieldtype: type[tuple[str, ...]]) -> tuple[str, ...]: return tuple(strtup) def config_drive_transformer(drives: list[dict[str, Any]], fieldtype: type[QemuDrive]) -> list[QemuDrive]: # TODO: exchange for TypeGuard and list comprehension once on 3.10 ret = [] for d in drives: assert "id" in d assert "size" in d assert "directory" in d assert "options" in d ret.append( QemuDrive( id=d["id"], size=int(d["size"]), directory=Path(d["directory"]) if d["directory"] else None, options=d["options"], ) ) return ret def generic_version_transformer( version: Optional[str], fieldtype: type[Optional[GenericVersion]], ) -> Optional[GenericVersion]: return GenericVersion(version) if version is not None else None transformers = { Path: path_transformer, Optional[Path]: optional_path_transformer, list[Path]: path_list_transformer, Optional[uuid.UUID]: optional_uuid_transformer, Optional[tuple[str, bool]]: root_password_transformer, list[ConfigTree]: config_tree_transformer, tuple[str, ...]: str_tuple_transformer, Architecture: enum_transformer, BiosBootloader: enum_transformer, ShimBootloader: enum_transformer, Bootloader: enum_transformer, Compression: enum_transformer, ConfigFeature: enum_transformer, Distribution: enum_transformer, OutputFormat: enum_transformer, QemuFirmware: enum_transformer, SecureBootSignTool: enum_transformer, Optional[Distribution]: optional_enum_transformer, list[ManifestFormat]: enum_list_transformer, Verb: enum_transformer, DocFormat: enum_transformer, list[QemuDrive]: config_drive_transformer, GenericVersion: generic_version_transformer, } def json_transformer(key: str, val: Any) -> Any: fieldtype: Optional[dataclasses.Field[Any]] = fields_by_name.get(key) # It is unlikely that the type of a field will be None only, so let's not bother with a different sentinel # value if fieldtype is None: raise ValueError(f"{refcls} has no field {key}") transformer = cast(Optional[Callable[[str, type], Any]], transformers.get(fieldtype.type)) if transformer is not None: try: return transformer(val, fieldtype.type) except (ValueError, IndexError, AssertionError) as e: raise ValueError(f"Unable to parse {val!r} for attribute {key!r} for {refcls.__name__}") from e return val return json_transformer def want_selinux_relabel(config: Config, root: Path, fatal: bool = True) -> Optional[tuple[str, Path, Path]]: if config.selinux_relabel == ConfigFeature.disabled: return None selinux = root / "etc/selinux/config" if not selinux.exists(): if fatal and config.selinux_relabel == ConfigFeature.enabled: die("SELinux relabel is requested but could not find selinux config at /etc/selinux/config") return None policy = run(["sh", "-c", f". 
{selinux} && echo $SELINUXTYPE"], sandbox=config.sandbox(options=["--ro-bind", selinux, selinux]), stdout=subprocess.PIPE).stdout.strip() if not policy: if fatal and config.selinux_relabel == ConfigFeature.enabled: die("SELinux relabel is requested but no selinux policy is configured in /etc/selinux/config") return None if not find_binary("setfiles", root=config.tools()): if fatal and config.selinux_relabel == ConfigFeature.enabled: die("SELinux relabel is requested but setfiles is not installed") return None fc = root / "etc/selinux" / policy / "contexts/files/file_contexts" if not fc.exists(): if fatal and config.selinux_relabel == ConfigFeature.enabled: die(f"SELinux relabel is requested but SELinux file contexts not found in {fc}") return None binpolicydir = root / "etc/selinux" / policy / "policy" # The policy file is named policy.XX where XX is the policy version that indicates what features are # available. We check for string.digits instead of using isdecimal() as the latter checks for more than just # digits. policies = [p for p in binpolicydir.glob("*") if p.suffix and all(c in string.digits for c in p.suffix[1:])] if not policies: if fatal and config.selinux_relabel == ConfigFeature.enabled: die(f"SELinux relabel is requested but SELinux binary policy not found in {binpolicydir}") return None binpolicy = sorted(policies, key=lambda p: GenericVersion(p.name), reverse=True)[0] return policy, fc, binpolicy mkosi-20.2/mkosi/context.py000066400000000000000000000057771455345632200157700ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import os from collections.abc import Sequence from pathlib import Path from typing import Optional from mkosi.config import Args, Config from mkosi.tree import make_tree from mkosi.types import PathString from mkosi.util import flatten, umask class Context: """State related properties.""" def __init__(self, args: Args, config: Config, workspace: Path) -> None: self.args = args self.config = config self.workspace = workspace with umask(~0o755): # Using a btrfs subvolume as the upperdir in an overlayfs results in EXDEV so make sure we create # the root directory as a regular directory if the Overlay= option is enabled. if config.overlay: self.root.mkdir() else: make_tree( self.root, use_subvolumes=self.config.use_subvolumes, tools=config.tools(), sandbox=config.sandbox(options=["--bind", self.workspace, self.workspace]), ) self.staging.mkdir() self.pkgmngr.mkdir() self.packages.mkdir() self.install_dir.mkdir(exist_ok=True) self.cache_dir.mkdir(parents=True, exist_ok=True) @property def root(self) -> Path: return self.workspace / "root" @property def staging(self) -> Path: return self.workspace / "staging" @property def pkgmngr(self) -> Path: return self.workspace / "pkgmngr" @property def packages(self) -> Path: return self.workspace / "packages" @property def cache_dir(self) -> Path: return self.config.cache_dir or (self.workspace / "cache") @property def install_dir(self) -> Path: return self.workspace / "dest" def sandbox( self, *, network: bool = False, devices: bool = False, scripts: Optional[Path] = None, options: Sequence[PathString] = (), ) -> list[PathString]: return self.config.sandbox( network=network, devices=devices, scripts=scripts, options=[ # These mounts are writable so bubblewrap can create extra directories or symlinks inside of it as # needed. This isn't a problem as the package manager directory is created by mkosi and thrown away # when the build finishes. 
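# NOTE (editor's illustrative aside, hypothetical paths): the flatten() call
# below expands every entry of the package manager tree's /etc into a bwrap
# bind pair, e.g. for a workspace containing pkgmngr/etc/dnf and
# pkgmngr/etc/yum.repos.d:
#
#     ["--bind", "<workspace>/pkgmngr/etc/dnf", "/etc/dnf",
#      "--bind", "<workspace>/pkgmngr/etc/yum.repos.d", "/etc/yum.repos.d"]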
*flatten( ["--bind", os.fspath(self.pkgmngr / "etc" / p.name), f"/etc/{p.name}"] for p in (self.pkgmngr / "etc").iterdir() ), *options, *(["--ro-bind", os.fspath(p), os.fspath(p)] if (p := self.pkgmngr / "usr").exists() else []), ], ) + ( [ "sh", "-c", f"mount -t overlay -o lowerdir={self.pkgmngr / 'usr'}:/usr overlayfs /usr && exec $0 \"$@\"", ] if (self.pkgmngr / "usr").exists() else [] ) mkosi-20.2/mkosi/distributions/000077500000000000000000000000001455345632200166145ustar00rootroot00000000000000mkosi-20.2/mkosi/distributions/__init__.py000066400000000000000000000134751455345632200207370ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import enum import importlib import re import urllib.parse from collections.abc import Sequence from typing import TYPE_CHECKING, Optional, cast from mkosi.util import StrEnum, read_os_release if TYPE_CHECKING: from mkosi.config import Architecture from mkosi.context import Context class PackageType(StrEnum): none = enum.auto() rpm = enum.auto() deb = enum.auto() pkg = enum.auto() ebuild = enum.auto() class DistributionInstaller: @classmethod def pretty_name(cls) -> str: raise NotImplementedError @classmethod def setup(cls, context: "Context") -> None: raise NotImplementedError @classmethod def install(cls, context: "Context") -> None: raise NotImplementedError @classmethod def install_packages(cls, context: "Context", packages: Sequence[str]) -> None: raise NotImplementedError @classmethod def remove_packages(cls, context: "Context", packages: Sequence[str]) -> None: raise NotImplementedError @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def architecture(cls, arch: "Architecture") -> str: raise NotImplementedError @classmethod def package_type(cls) -> PackageType: return PackageType.none @classmethod def default_release(cls) -> str: return "" @classmethod def default_tools_tree_distribution(cls) -> Optional["Distribution"]: return None @classmethod def grub_prefix(cls) -> str: return "grub" @classmethod def createrepo(cls, context: "Context") -> None: raise NotImplementedError class Distribution(StrEnum): # Please consult docs/distribution-policy.md and contact one # of the mkosi maintainers before implementing a new distribution. 
fedora = enum.auto() debian = enum.auto() ubuntu = enum.auto() arch = enum.auto() opensuse = enum.auto() mageia = enum.auto() centos = enum.auto() rhel = enum.auto() rhel_ubi = enum.auto() openmandriva = enum.auto() rocky = enum.auto() alma = enum.auto() gentoo = enum.auto() custom = enum.auto() def is_centos_variant(self) -> bool: return self in ( Distribution.centos, Distribution.alma, Distribution.rocky, Distribution.rhel, Distribution.rhel_ubi, ) def is_dnf_distribution(self) -> bool: return self in ( Distribution.fedora, Distribution.mageia, Distribution.centos, Distribution.rhel, Distribution.rhel_ubi, Distribution.openmandriva, Distribution.rocky, Distribution.alma, ) def is_apt_distribution(self) -> bool: return self in (Distribution.debian, Distribution.ubuntu) def pretty_name(self) -> str: return self.installer().pretty_name() def setup(self, context: "Context") -> None: return self.installer().setup(context) def install(self, context: "Context") -> None: return self.installer().install(context) def install_packages(self, context: "Context", packages: Sequence[str]) -> None: return self.installer().install_packages(context, packages) def remove_packages(self, context: "Context", packages: Sequence[str]) -> None: return self.installer().remove_packages(context, packages) def filesystem(self) -> str: return self.installer().filesystem() def architecture(self, arch: "Architecture") -> str: return self.installer().architecture(arch) def package_type(self) -> PackageType: return self.installer().package_type() def default_release(self) -> str: return self.installer().default_release() def default_tools_tree_distribution(self) -> Optional["Distribution"]: return self.installer().default_tools_tree_distribution() def grub_prefix(self) -> str: return self.installer().grub_prefix() def createrepo(self, context: "Context") -> None: return self.installer().createrepo(context) def installer(self) -> type[DistributionInstaller]: modname = str(self).replace('-', '_') mod = importlib.import_module(f"mkosi.distributions.{modname}") installer = getattr(mod, "Installer") assert issubclass(installer, DistributionInstaller) return cast(type[DistributionInstaller], installer) def detect_distribution() -> tuple[Optional[Distribution], Optional[str]]: try: os_release = read_os_release() except FileNotFoundError: return None, None dist_id = os_release.get("ID", "linux") dist_id_like = os_release.get("ID_LIKE", "").split() version = os_release.get("VERSION", None) version_id = os_release.get("VERSION_ID", None) version_codename = os_release.get("VERSION_CODENAME", None) extracted_codename = None if version: # extract Debian release codename m = re.search(r"\((.*?)\)", version) if m: extracted_codename = m.group(1) d: Optional[Distribution] = None for the_id in [dist_id, *dist_id_like]: d = Distribution.__members__.get(the_id, None) if d is not None: break if d in {Distribution.debian, Distribution.ubuntu} and (version_codename or extracted_codename): version_id = version_codename or extracted_codename return d, version_id def join_mirror(mirror: str, link: str) -> str: # urljoin() behaves weirdly if the base does not end with a / or the path starts with a / so fix them up as needed. 
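# NOTE (editor's illustrative aside, stdlib behaviour): the two urljoin()
# quirks that the normalization below compensates for:
#
#     from urllib.parse import urljoin
#     urljoin("https://mirror.example/fedora", "linux")    # 'https://mirror.example/linux'
#     urljoin("https://mirror.example/fedora/", "/linux")  # 'https://mirror.example/linux'
#     urljoin("https://mirror.example/fedora/", "linux")   # 'https://mirror.example/fedora/linux'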
if not mirror.endswith("/"): mirror = f"{mirror}/" link = link.removeprefix("/") return urllib.parse.urljoin(mirror, link) mkosi-20.2/mkosi/distributions/alma.py000066400000000000000000000022321455345632200200770ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ from mkosi.context import Context from mkosi.distributions import centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey class Installer(centos.Installer): @classmethod def pretty_name(cls) -> str: return "AlmaLinux" @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-AlmaLinux-{context.config.release}", f"https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux-{context.config.release}", ), ) @classmethod def repository_variants(cls, context: Context, repo: str) -> list[RpmRepository]: if context.config.mirror: url = f"baseurl={join_mirror(context.config.mirror, f'almalinux/$releasever/{repo}/$basearch/os')}" else: url = f"mirrorlist=https://mirrors.almalinux.org/mirrorlist/$releasever/{repo.lower()}" return [RpmRepository(repo, url, cls.gpgurls(context))] @classmethod def sig_repositories(cls, context: Context) -> list[RpmRepository]: return [] mkosi-20.2/mkosi/distributions/arch.py000066400000000000000000000056411455345632200201100ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ from collections.abc import Sequence from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import Distribution, DistributionInstaller, PackageType from mkosi.installer.pacman import ( PacmanRepository, createrepo_pacman, invoke_pacman, setup_pacman, ) from mkosi.log import die class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "Arch Linux" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def package_type(cls) -> PackageType: return PackageType.pkg @classmethod def default_release(cls) -> str: return "rolling" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.arch @classmethod def createrepo(cls, context: "Context") -> None: return createrepo_pacman(context) @classmethod def setup(cls, context: Context) -> None: if context.config.local_mirror: repos = [PacmanRepository("core", context.config.local_mirror)] else: repos = [] if context.config.architecture == Architecture.arm64: url = f"{context.config.mirror or 'http://mirror.archlinuxarm.org'}/$arch/$repo" else: url = f"{context.config.mirror or 'https://geo.mirror.pkgbuild.com'}/$repo/os/$arch" # Testing repositories have to go before regular ones to take precedence. 
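# NOTE (editor's illustrative aside, not mkosi code): pacman resolves a
# package from the first repository in configuration order that provides it,
# so with Repositories=core-testing the loop below is expected to yield a
# configuration along these lines (a sketch; the exact rendering is up to
# setup_pacman()):
#
#     [core-testing]
#     Server = https://geo.mirror.pkgbuild.com/$repo/os/$arch
#
#     [core]
#     Server = https://geo.mirror.pkgbuild.com/$repo/os/$arch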
for id in ( "core-testing", "core-testing-debug", "extra-testing", "extra-testing-debug", "core-debug", "extra-debug", ): if id in context.config.repositories: repos += [PacmanRepository(id, url)] for id in ("core", "extra"): repos += [PacmanRepository(id, url)] setup_pacman(context, repos) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: invoke_pacman( context, "--sync", ["--refresh", "--needed", "--assume-installed", "initramfs"], packages, apivfs=apivfs, ) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: invoke_pacman(context, "--remove", ["--nosave", "--recursive"], packages) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64 : "x86_64", Architecture.arm64 : "aarch64", }.get(arch) if not a: die(f"Architecture {a} is not supported by Arch Linux") return a mkosi-20.2/mkosi/distributions/centos.py000066400000000000000000000430631455345632200204670ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import os import shutil from collections.abc import Iterable, Sequence from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import ( Distribution, DistributionInstaller, PackageType, join_mirror, ) from mkosi.installer.dnf import createrepo_dnf, invoke_dnf, setup_dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import complete_step, die from mkosi.tree import rmtree from mkosi.versioncomp import GenericVersion def move_rpm_db(context: Context) -> None: """Link /var/lib/rpm to /usr/lib/sysimage/rpm for compat with old rpm""" olddb = context.root / "var/lib/rpm" newdb = context.root / "usr/lib/sysimage/rpm" if newdb.exists() and not newdb.is_symlink(): with complete_step("Moving rpm database /usr/lib/sysimage/rpm → /var/lib/rpm"): rmtree(olddb, sandbox=context.sandbox(options=["--bind", olddb.parent, olddb.parent])) shutil.move(newdb, olddb) newdb.symlink_to(os.path.relpath(olddb, start=newdb.parent)) class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "CentOS" @classmethod def filesystem(cls) -> str: return "xfs" @classmethod def package_type(cls) -> PackageType: return PackageType.rpm @classmethod def default_release(cls) -> str: return "9" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.fedora @classmethod def grub_prefix(cls) -> str: return "grub2" @classmethod def createrepo(cls, context: Context) -> None: return createrepo_dnf(context) @classmethod def setup(cls, context: Context) -> None: if GenericVersion(context.config.release) <= 7: die(f"{cls.pretty_name()} 7 or earlier variants are not supported") setup_dnf(context, cls.repositories(context)) (context.pkgmngr / "etc/dnf/vars/stream").write_text(f"{context.config.release}-stream\n") @classmethod def install(cls, context: Context) -> None: # Make sure glibc-minimal-langpack is installed instead of glibc-all-langpacks. cls.install_packages(context, ["filesystem", "glibc-minimal-langpack"], apivfs=False) # On Fedora, the default rpmdb has moved to /usr/lib/sysimage/rpm so if that's the case we # need to move it back to /var/lib/rpm on CentOS. 
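# NOTE (editor's illustrative aside): relative to the image root, the end
# state produced by move_rpm_db() above is
#
#     var/lib/rpm               <- real database directory
#     usr/lib/sysimage/rpm      -> symlink to ../../../var/lib/rpm
#
# so the database stays valid whichever of the two locations rpm looks at.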
move_rpm_db(context) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: invoke_dnf(context, "install", packages, apivfs=apivfs) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: invoke_dnf(context, "remove", packages) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64 : "x86_64", Architecture.ppc64_le : "ppc64le", Architecture.s390x : "s390x", Architecture.arm64 : "aarch64", }.get(arch) if not a: die(f"Architecture {a} is not supported by {cls.pretty_name()}") return a @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: keys = ("RPM-GPG-KEY-CentOS-Official", "RPM-GPG-KEY-CentOS-SIG-Extras") return tuple(find_rpm_gpgkey(context, key, f"https://www.centos.org/keys/{key}") for key in keys) @classmethod def repository_variants(cls, context: Context, repo: str) -> Iterable[RpmRepository]: if context.config.local_mirror: yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", cls.gpgurls(context)) elif mirror := context.config.mirror: if GenericVersion(context.config.release) <= 8: yield RpmRepository( repo.lower(), f"baseurl={join_mirror(mirror, f'centos/$stream/{repo}/$basearch/os')}", cls.gpgurls(context), ) yield RpmRepository( f"{repo.lower()}-debuginfo", f"baseurl={join_mirror(mirror, 'centos-debuginfo/$stream/$basearch')}", cls.gpgurls(context), enabled=False, ) yield RpmRepository( f"{repo.lower()}-source", f"baseurl={join_mirror(mirror, f'centos/$stream/{repo}/Source')}", cls.gpgurls(context), enabled=False, ) else: if repo == "extras": yield RpmRepository( repo.lower(), f"baseurl={join_mirror(mirror, f'centos-stream/SIGs/$stream/{repo}/$basearch/extras-common')}", cls.gpgurls(context), ) yield RpmRepository( f"{repo.lower()}-source", f"baseurl={join_mirror(mirror, f'centos-stream/SIGs/$stream/{repo}/source/extras-common')}", cls.gpgurls(context), enabled=False, ) else: yield RpmRepository( repo.lower(), f"baseurl={join_mirror(mirror, f'centos-stream/$stream/{repo}/$basearch/os')}", cls.gpgurls(context), ) yield RpmRepository( f"{repo.lower()}-debuginfo", f"baseurl={join_mirror(mirror, f'centos-stream/$stream/{repo}/$basearch/debug/tree')}", cls.gpgurls(context), enabled=False, ) yield RpmRepository( f"{repo.lower()}-source", f"baseurl={join_mirror(mirror, f'centos-stream/$stream/{repo}/source/tree')}", cls.gpgurls(context), enabled=False, ) else: if GenericVersion(context.config.release) <= 8: yield RpmRepository( repo.lower(), f"mirrorlist=http://mirrorlist.centos.org/?release=$stream&arch=$basearch&repo={repo}", cls.gpgurls(context), ) # These can't be retrieved from the mirrorlist. 
yield RpmRepository( f"{repo.lower()}-debuginfo", "baseurl=http://debuginfo.centos.org/$stream/$basearch", cls.gpgurls(context), enabled=False, ) yield RpmRepository( f"{repo.lower()}-source", f"baseurl=https://vault.centos.org/centos/$stream/{repo}/Source", cls.gpgurls(context), enabled=False, ) else: url = "metalink=https://mirrors.centos.org/metalink" if repo == "extras": yield RpmRepository( repo.lower(), f"{url}?arch=$basearch&repo=centos-extras-sig-extras-common-$stream", cls.gpgurls(context), ) yield RpmRepository( f"{repo.lower()}-source", f"{url}?arch=source&repo=centos-extras-sig-extras-common-source-$stream", cls.gpgurls(context), enabled=False, ) else: yield RpmRepository( repo.lower(), f"{url}?arch=$basearch&repo=centos-{repo.lower()}-$stream", cls.gpgurls(context), ) yield RpmRepository( f"{repo.lower()}-debuginfo", f"{url}?arch=$basearch&repo=centos-{repo.lower()}-debug-$stream", cls.gpgurls(context), enabled=False, ) yield RpmRepository( f"{repo.lower()}-source", f"{url}?arch=source&repo=centos-{repo.lower()}-source-$stream", cls.gpgurls(context), enabled=False, ) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.local_mirror: yield from cls.repository_variants(context, "AppStream") else: yield from cls.repository_variants(context, "BaseOS") yield from cls.repository_variants(context, "AppStream") yield from cls.repository_variants(context, "extras") if GenericVersion(context.config.release) >= 9: yield from cls.repository_variants(context, "CRB") else: yield from cls.repository_variants(context, "PowerTools") yield from cls.epel_repositories(context) yield from cls.sig_repositories(context) @classmethod def epel_repositories(cls, context: Context) -> Iterable[RpmRepository]: gpgurls = ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-EPEL-{context.config.release}", f"https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{context.config.release}", ), ) if context.config.local_mirror: return if mirror := context.config.mirror: for repo, dir in ( ("epel", "epel"), ("epel-next", "epel/next"), ("epel-testing", "epel/testing"), ("epel-next-testing", "epel/testing/next") ): yield RpmRepository( repo, f"baseurl={join_mirror(mirror, f'fedora/{dir}/$releasever/Everything/$basearch')}", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-debuginfo", f"baseurl={join_mirror(mirror, f'fedora/{dir}/$releasever/Everything/$basearch/debug')}", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-source", f"baseurl={join_mirror(mirror, f'fedora/{dir}/$releasever/Everything/source/tree')}", gpgurls, enabled=False, ) else: url = "metalink=https://mirrors.fedoraproject.org/metalink?arch=$basearch" for repo in ("epel", "epel-next"): yield RpmRepository(repo, f"{url}&repo={repo}-$releasever", gpgurls, enabled=False) yield RpmRepository( f"{repo}-debuginfo", f"{url}&repo={repo}-debug-$releasever", gpgurls, enabled=False ) yield RpmRepository( f"{repo}-source", f"{url}&repo={repo}-source-$releasever", gpgurls, enabled=False ) yield RpmRepository( "epel-testing", f"{url}&repo=testing-epel$releasever", gpgurls, enabled=False ) yield RpmRepository( "epel-testing-debuginfo", f"{url}&repo=testing-debug-epel$releasever", gpgurls, enabled=False ) yield RpmRepository( "epel-testing-source", f"{url}&repo=testing-source-epel$releasever", gpgurls, enabled=False ) yield RpmRepository( "epel-next-testing", f"{url}&repo=epel-testing-next-$releasever", gpgurls, enabled=False ) yield RpmRepository( "epel-next-testing-debuginfo", 
f"{url}&repo=epel-testing-next-debug-$releasever", gpgurls, enabled=False, ) yield RpmRepository( "epel-next-testing-source", f"{url}&repo=epel-testing-next-source-$releasever", gpgurls, enabled=False, ) @classmethod def sig_repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.local_mirror: return sigs = ( ( "hyperscale", (f"packages-{c}" for c in ("main", "experimental", "facebook", "hotfixes", "spin", "intel")), ("RPM-GPG-KEY-CentOS-SIG-HyperScale",), ), ) for sig, components, keys in sigs: gpgurls = tuple(find_rpm_gpgkey(context, key, f"https://www.centos.org/keys/{key}") for key in keys) for c in components: if mirror := context.config.mirror: if GenericVersion(context.config.release) <= 8: yield RpmRepository( f"{sig}-{c}", f"baseurl={join_mirror(mirror, f'centos/$stream/{sig}/$basearch/{c}')}", gpgurls, enabled=False, ) yield RpmRepository( f"{sig}-{c}-debuginfo", f"baseurl={join_mirror(mirror, f'centos-debuginfo/$stream/{sig}/$basearch')}", gpgurls, enabled=False, ) yield RpmRepository( f"{sig}-{c}-source", f"baseurl={join_mirror(mirror, f'centos/$stream/{sig}/Source')}", gpgurls, enabled=False, ) else: yield RpmRepository( f"{sig}-{c}", f"baseurl={join_mirror(mirror, f'centos-stream/SIGs/$stream/{sig}/$basearch/{c}')}", gpgurls, enabled=False, ) yield RpmRepository( f"{sig}-{c}-debuginfo", f"baseurl={join_mirror(mirror, f'centos-stream/SIGs/$stream/{sig}/$basearch/{c}/debug')}", gpgurls, enabled=False, ) yield RpmRepository( f"{sig}-{c}-source", f"baseurl={join_mirror(mirror, f'centos-stream/SIGs/$stream/{sig}/source/{c}')}", gpgurls, enabled=False, ) else: if GenericVersion(context.config.release) <= 8: yield RpmRepository( f"{sig}-{c}", f"mirrorlist=http://mirrorlist.centos.org/?release=$stream&arch=$basearch&repo={sig}-{c}", gpgurls, enabled=False, ) # These can't be retrieved from the mirrorlist. 
yield RpmRepository( f"{sig}-{c}-debuginfo", f"baseurl=http://debuginfo.centos.org/centos/$stream/{sig}/$basearch", gpgurls, enabled=False, ) yield RpmRepository( f"{sig}-{c}-source", f"baseurl=https://vault.centos.org/$stream/{sig}/Source/{c}", gpgurls, enabled=False, ) else: url = "metalink=https://mirrors.centos.org/metalink" yield RpmRepository( f"{sig}-{c}", f"{url}?arch=$basearch&repo=centos-{sig}-sig-{c}-$stream", gpgurls, enabled=False, ) yield RpmRepository( f"{sig}-{c}-debuginfo", f"{url}?arch=$basearch&repo=centos-{sig}-sig-{c}-debug-$stream", gpgurls, enabled=False, ) yield RpmRepository( f"{sig}-{c}-source", f"{url}?arch=source&repo=centos-{sig}-sig-{c}-source-$stream", gpgurls, enabled=False, ) yield RpmRepository( f"{sig}-{c}-testing", f"baseurl=https://buildlogs.centos.org/centos/$stream/{sig}/$basearch/{c}", gpgurls, enabled=False, ) if GenericVersion(context.config.release) >= 9: yield RpmRepository( f"{sig}-{c}-testing-debuginfo", f"baseurl=https://buildlogs.centos.org/centos/$stream/{sig}/$basearch/{c}", gpgurls, enabled=False, ) mkosi-20.2/mkosi/distributions/custom.py000066400000000000000000000016341455345632200205040ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ from collections.abc import Sequence from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import DistributionInstaller from mkosi.log import die class Installer(DistributionInstaller): @classmethod def architecture(cls, arch: Architecture) -> str: return str(arch) @classmethod def setup(cls, context: Context) -> None: pass @classmethod def install(cls, context: Context) -> None: pass @classmethod def install_packages(cls, context: Context, packages: Sequence[str]) -> None: if packages: die("Installing packages is not supported for custom distributions'") @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: if packages: die("Removing packages is not supported for custom distributions") mkosi-20.2/mkosi/distributions/debian.py000066400000000000000000000223741455345632200204200ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import shutil import tempfile from collections.abc import Sequence from pathlib import Path from mkosi.archive import extract_tar from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import Distribution, DistributionInstaller, PackageType from mkosi.installer.apt import createrepo_apt, invoke_apt, setup_apt from mkosi.log import die from mkosi.run import run from mkosi.sandbox import finalize_passwd_mounts from mkosi.util import umask class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "Debian" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def package_type(cls) -> PackageType: return PackageType.deb @classmethod def default_release(cls) -> str: return "testing" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.debian @staticmethod def repositories(context: Context, local: bool = True) -> list[str]: archives = ("deb", "deb-src") components = ' '.join(("main", *context.config.repositories)) if context.config.local_mirror and local: return [f"deb [trusted=yes] {context.config.local_mirror} {context.config.release} {components}"] mirror = context.config.mirror or "http://deb.debian.org/debian" signedby = "[signed-by=/usr/share/keyrings/debian-archive-keyring.gpg]" repos = [ f"{archive} {signedby} {mirror} {context.config.release} 
{components}" for archive in archives ] # Debug repos are typically not mirrored. url = "http://deb.debian.org/debian-debug" repos += [f"deb {signedby} {url} {context.config.release}-debug {components}"] if context.config.release in ("unstable", "sid"): return repos repos += [ f"{archive} {signedby} {mirror} {context.config.release}-updates {components}" for archive in archives ] # Security updates repos are never mirrored. url = "http://security.debian.org/debian-security " repos += [ f"{archive} {signedby} {url} {context.config.release}-security {components}" for archive in archives ] return repos @classmethod def setup(cls, context: Context) -> None: setup_apt(context, cls.repositories(context)) @classmethod def createrepo(cls, context: "Context") -> None: return createrepo_apt(context) @classmethod def install(cls, context: Context) -> None: # Instead of using debootstrap, we replicate its core functionality here. Because dpkg does not have # an option to delay running pre-install maintainer scripts when it installs a package, it's # impossible to use apt directly to bootstrap a Debian chroot since dpkg will try to run a maintainer # script which depends on some basic tool to be available in the chroot from a deb which hasn't been # unpacked yet, causing the script to fail. To avoid these issues, we have to extract all the # essential debs first, and only then run the maintainer scripts for them. # First, we set up merged usr. # This list is taken from https://salsa.debian.org/installer-team/debootstrap/-/blob/master/functions#L1369. subdirs = ["bin", "sbin", "lib"] + { "amd64" : ["lib32", "lib64", "libx32"], "i386" : ["lib64", "libx32"], "mips" : ["lib32", "lib64"], "mipsel" : ["lib32", "lib64"], "mips64el" : ["lib32", "lib64", "libo32"], "loongarch64" : ["lib32", "lib64"], "powerpc" : ["lib64"], "ppc64" : ["lib32", "lib64"], "ppc64el" : ["lib64"], "s390x" : ["lib32"], "sparc" : ["lib64"], "sparc64" : ["lib32", "lib64"], "x32" : ["lib32", "lib64", "libx32"], }.get(context.config.distribution.architecture(context.config.architecture), []) with umask(~0o755): for d in subdirs: (context.root / d).symlink_to(f"usr/{d}") (context.root / f"usr/{d}").mkdir(parents=True, exist_ok=True) invoke_apt(context, "apt-get", "update", apivfs=False) # Next, we invoke apt-get install to download all the essential packages. With DPkg::Pre-Install-Pkgs, # we specify a shell command that will receive the list of packages that will be installed on stdin. # By configuring Debug::pkgDpkgPm=1, apt-get install will not actually execute any dpkg commands, so # all it does is download the essential debs and tell us their full in the apt cache without actually # installing them. with tempfile.NamedTemporaryFile(mode="r") as f: invoke_apt( context, "apt-get", "install", [ "-oDebug::pkgDPkgPm=1", f"-oDPkg::Pre-Install-Pkgs::=cat >{f.name}", "?essential", "?exact-name(usr-is-merged)", ], apivfs=False, mounts=("--bind", f.name, f.name), ) essential = f.read().strip().splitlines() # Now, extract the debs to the chroot by first extracting the sources tar file out of the deb and # then extracting the tar file into the chroot. for deb in essential: with ( # The deb paths will be in the form of "/var/cache/apt/" so we transform them to the corresponding # path in mkosi's package cache directory. 
open(context.cache_dir / Path(deb).relative_to("/var"), "rb") as i, tempfile.NamedTemporaryFile() as o ): run(["dpkg-deb", "--fsys-tarfile", "/dev/stdin"], stdin=i, stdout=o, sandbox=context.sandbox()) extract_tar( Path(o.name), context.root, log=False, tools=context.config.tools(), # Make sure tar uses user/group information from the root directory instead of the host. sandbox=context.sandbox( options=["--bind", context.root, context.root, *finalize_passwd_mounts(context.root)], ), ) # Finally, run apt to properly install packages in the chroot without having to worry that maintainer # scripts won't find basic tools that they depend on. cls.install_packages(context, [Path(deb).name.partition("_")[0].removesuffix(".deb") for deb in essential]) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: # Debian policy is to start daemons by default. The policy-rc.d script can be used choose which ones to # start. Let's install one that denies all daemon startups. # See https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt for more information. # Note: despite writing in /usr/sbin, this file is not shipped by the OS and instead should be managed by # the admin. policyrcd = context.root / "usr/sbin/policy-rc.d" with umask(~0o644): policyrcd.write_text("#!/bin/sh\nexit 101\n") invoke_apt(context, "apt-get", "update", apivfs=False) invoke_apt(context, "apt-get", "install", packages, apivfs=apivfs) install_apt_sources(context, cls.repositories(context, local=False)) policyrcd.unlink() for d in context.root.glob("boot/vmlinuz-*"): kver = d.name.removeprefix("vmlinuz-") vmlinuz = context.root / "usr/lib/modules" / kver / "vmlinuz" if not vmlinuz.exists(): shutil.copy2(d, vmlinuz) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: invoke_apt(context, "apt-get", "purge", packages) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.arm64 : "arm64", Architecture.arm : "armhf", Architecture.alpha : "alpha", Architecture.x86_64 : "amd64", Architecture.x86 : "i386", Architecture.ia64 : "ia64", Architecture.loongarch64 : "loongarch64", Architecture.mips64_le : "mips64el", Architecture.mips_le : "mipsel", Architecture.parisc : "hppa", Architecture.ppc64_le : "ppc64el", Architecture.ppc64 : "ppc64", Architecture.riscv64 : "riscv64", Architecture.s390x : "s390x", Architecture.s390 : "s390", }.get(arch) if not a: die(f"Architecture {arch} is not supported by Debian") return a def install_apt_sources(context: Context, repos: Sequence[str]) -> None: if not (context.root / "usr/bin/apt").exists(): return sources = context.root / "etc/apt/sources.list" if not sources.exists(): with sources.open("w") as f: for repo in repos: f.write(f"{repo}\n") mkosi-20.2/mkosi/distributions/fedora.py000066400000000000000000000146731455345632200204410ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ from collections.abc import Sequence from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import ( Distribution, DistributionInstaller, PackageType, join_mirror, ) from mkosi.installer.dnf import createrepo_dnf, invoke_dnf, setup_dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "Fedora Linux" @classmethod def filesystem(cls) -> str: return "btrfs" @classmethod def package_type(cls) -> PackageType: return 
PackageType.rpm @classmethod def default_release(cls) -> str: return "39" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.fedora @classmethod def grub_prefix(cls) -> str: return "grub2" @classmethod def createrepo(cls, context: Context) -> None: return createrepo_dnf(context) @classmethod def setup(cls, context: Context) -> None: gpgurls = ( find_rpm_gpgkey( context, key=f"RPM-GPG-KEY-fedora-{context.config.release}-primary", url="https://fedoraproject.org/fedora.gpg", ), ) repos = [] if context.config.local_mirror: repos += [RpmRepository("fedora", f"baseurl={context.config.local_mirror}", gpgurls)] elif context.config.release == "eln": mirror = context.config.mirror or "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose" for repo in ("Appstream", "BaseOS", "Extras", "CRB"): url = f"baseurl={join_mirror(mirror, repo)}" repos += [ RpmRepository(repo.lower(), f"{url}/$basearch/os", gpgurls), RpmRepository(repo.lower(), f"{url}/$basearch/debug/tree", gpgurls, enabled=False), RpmRepository(repo.lower(), f"{url}/source/tree", gpgurls, enabled=False), ] elif (m := context.config.mirror): directory = "development" if context.config.release == "rawhide" else "releases" url = f"baseurl={join_mirror(m, f'fedora/linux/{directory}/$releasever/Everything')}" repos += [ RpmRepository("fedora", f"{url}/$basearch/os", gpgurls), RpmRepository("fedora-debuginfo", f"{url}/$basearch/debug/tree", gpgurls, enabled=False), RpmRepository("fedora-source", f"{url}/source/tree", gpgurls, enabled=False), ] if context.config.release != "rawhide": url = f"baseurl={join_mirror(m, 'fedora/linux/updates/$releasever/Everything')}" repos += [ RpmRepository("updates", f"{url}/$basearch", gpgurls), RpmRepository("updates-debuginfo", f"{url}/$basearch/debug", gpgurls, enabled=False), RpmRepository("updates-source", f"{url}/source/tree", gpgurls, enabled=False), ] url = f"baseurl={join_mirror(m, 'fedora/linux/updates/testing/$releasever/Everything')}" repos += [ RpmRepository("updates-testing", f"{url}/$basearch", gpgurls, enabled=False), RpmRepository("updates-testing-debuginfo", f"{url}/$basearch/debug", gpgurls, enabled=False), RpmRepository("updates-testing-source", f"{url}/source/tree", gpgurls, enabled=False) ] else: url = "metalink=https://mirrors.fedoraproject.org/metalink?arch=$basearch" repos += [ RpmRepository("fedora", f"{url}&repo=fedora-$releasever", gpgurls), RpmRepository("fedora-debuginfo", f"{url}&repo=fedora-debug-$releasever", gpgurls, enabled=False), RpmRepository("fedora-source", f"{url}&repo=fedora-source-$releasever", gpgurls, enabled=False), ] if context.config.release != "rawhide": repos += [ RpmRepository("updates", f"{url}&repo=updates-released-f$releasever", gpgurls), RpmRepository( "updates-debuginfo", f"{url}&repo=updates-released-debug-f$releasever", gpgurls, enabled=False, ), RpmRepository( "updates-source", f"{url}&repo=updates-released-source-f$releasever", gpgurls, enabled=False ), RpmRepository( "updates-testing", f"{url}&repo=updates-testing-f$releasever", gpgurls, enabled=False ), RpmRepository( "updates-testing-debuginfo", f"{url}&repo=updates-testing-debug-f$releasever", gpgurls, enabled=False, ), RpmRepository( "updates-testing-source", f"{url}&repo=updates-testing-source-f$releasever", gpgurls, enabled=False, ), ] setup_dnf(context, repos) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod def install_packages(cls, context: Context, 
packages: Sequence[str], apivfs: bool = True) -> None: invoke_dnf(context, "install", packages, apivfs=apivfs) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: invoke_dnf(context, "remove", packages) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.arm64 : "aarch64", Architecture.mips64_le : "mips64el", Architecture.mips_le : "mipsel", Architecture.ppc64_le : "ppc64le", Architecture.riscv64 : "riscv64", Architecture.s390x : "s390x", Architecture.x86_64 : "x86_64", }.get(arch) if not a: die(f"Architecture {arch} is not supported by Fedora") return a mkosi-20.2/mkosi/distributions/gentoo.py000066400000000000000000000162501455345632200204650ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import os import re import urllib.parse import urllib.request from collections.abc import Sequence from pathlib import Path from mkosi.archive import extract_tar from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import ( Distribution, DistributionInstaller, PackageType, join_mirror, ) from mkosi.log import ARG_DEBUG, complete_step, die from mkosi.run import run from mkosi.sandbox import apivfs_cmd, chroot_cmd from mkosi.tree import copy_tree, rmtree from mkosi.types import PathString from mkosi.util import sort_packages def invoke_emerge(context: Context, packages: Sequence[str] = (), apivfs: bool = True) -> None: run( apivfs_cmd(context.root) + [ # We can't mount the stage 3 /usr using `options`, because bwrap, which apivfs_cmd() requires, # isn't available in the stage 3 tarball, so we have to mount /usr from the tarball later # using another bwrap exec. "bwrap", "--dev-bind", "/", "/", "--bind", context.cache_dir / "stage3/usr", "/usr", "emerge", "--buildpkg=y", "--usepkg=y", "--getbinpkg=y", "--binpkg-respect-use=y", "--jobs", "--load-average", "--root-deps=rdeps", "--with-bdeps=n", "--verbose-conflicts", "--noreplace", *(["--verbose", "--quiet=n", "--quiet-fail=n"] if ARG_DEBUG.get() else ["--quiet-build", "--quiet"]), f"--root={context.root}", *sort_packages(packages), ], sandbox=context.sandbox( network=True, options=[ # TODO: Get rid of as many of these as possible.
"--bind", context.cache_dir / "stage3/etc", "/etc", "--bind", context.cache_dir / "stage3/var", "/var", "--ro-bind", "/etc/resolv.conf", "/etc/resolv.conf", "--bind", context.cache_dir / "repos", "/var/db/repos", ], ), env=dict( PKGDIR=str(context.cache_dir / "binpkgs"), DISTDIR=str(context.cache_dir / "distfiles"), ) | ({"USE": "build"} if not apivfs else {}) | context.config.environment, ) class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "Gentoo" @classmethod def filesystem(cls) -> str: return "btrfs" @classmethod def package_type(cls) -> PackageType: return PackageType.ebuild @classmethod def default_release(cls) -> str: return "17.1" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.gentoo @classmethod def setup(cls, context: Context) -> None: pass @classmethod def install(cls, context: Context) -> None: arch = context.config.distribution.architecture(context.config.architecture) mirror = context.config.mirror or "https://distfiles.gentoo.org" # http://distfiles.gentoo.org/releases/amd64/autobuilds/latest-stage3.txt stage3tsf_path_url = join_mirror( mirror.partition(" ")[0], f"releases/{arch}/autobuilds/latest-stage3.txt", ) with urllib.request.urlopen(stage3tsf_path_url) as r: # e.g.: 20230108T161708Z/stage3-amd64-llvm-systemd-mergedusr-20230108T161708Z.tar.xz regexp = rf"^[0-9]+T[0-9]+Z/stage3-{arch}-llvm-systemd-mergedusr-[0-9]+T[0-9]+Z\.tar\.xz" all_lines = r.readlines() for line in all_lines: if (m := re.match(regexp, line.decode("utf-8"))): stage3_latest = Path(m.group(0)) break else: die("profile names changed upstream?") stage3_url = join_mirror(mirror, f"releases/{arch}/autobuilds/{stage3_latest}") stage3_tar = context.cache_dir / "stage3.tar" stage3 = context.cache_dir / "stage3" with complete_step("Fetching latest stage3 snapshot"): old = stage3_tar.stat().st_mtime if stage3_tar.exists() else 0 cmd: list[PathString] = ["curl", "-L", "--progress-bar", "-o", stage3_tar, stage3_url] if stage3_tar.exists(): cmd += ["--time-cond", stage3_tar] run(cmd, sandbox=context.sandbox()) if stage3_tar.stat().st_mtime > old: rmtree(stage3) stage3.mkdir(exist_ok=True) if not any(stage3.iterdir()): with complete_step(f"Extracting {stage3_tar.name} to {stage3}"): extract_tar( stage3_tar, stage3, tools=context.config.tools(), sandbox=context.sandbox(options=["--bind", context.root, context.root]), ) for d in ("binpkgs", "distfiles", "repos/gentoo"): (context.cache_dir / d).mkdir(parents=True, exist_ok=True) copy_tree(context.pkgmngr, stage3, preserve=False, use_subvolumes=context.config.use_subvolumes) features = " ".join([ # Disable sandboxing in emerge because we already do it in mkosi. "-sandbox", "-pid-sandbox", "-ipc-sandbox", "-network-sandbox", "-userfetch", "-userpriv", "-usersandbox", "-usersync", "-ebuild-locks", "parallel-install", *(["noman", "nodoc", "noinfo"] if not context.config.with_docs else []), ]) # Setting FEATURES via the environment variable does not seem to apply to ebuilds in portage, so we # append to /etc/portage/make.conf instead.
with (stage3 / "etc/portage/make.conf").open("a") as f: f.write(f"\nFEATURES=\"${{FEATURES}} {features}\"\n") chroot = chroot_cmd( stage3, options=["--bind", context.cache_dir / "repos", "/var/db/repos"], ) run(chroot + ["emerge-webrsync"], sandbox=context.sandbox(network=True)) invoke_emerge(context, packages=["sys-apps/baselayout"], apivfs=False) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: invoke_emerge(context, packages=packages, apivfs=apivfs) for d in context.root.glob("usr/src/linux-*"): kver = d.name.removeprefix("linux-") kimg = d / { Architecture.x86_64: "arch/x86/boot/bzImage", Architecture.arm64: "arch/arm64/boot/Image.gz", Architecture.arm: "arch/arm/boot/zImage", }[context.config.architecture] vmlinuz = context.root / "usr/lib/modules" / kver / "vmlinuz" if not vmlinuz.exists() and not vmlinuz.is_symlink(): vmlinuz.symlink_to(os.path.relpath(kimg, start=vmlinuz.parent)) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64 : "amd64", Architecture.arm64 : "arm64", Architecture.arm : "arm", }.get(arch) if not a: die(f"Architecture {arch} is not supported by Gentoo") return a mkosi-20.2/mkosi/distributions/mageia.py000066400000000000000000000062201455345632200204110ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import shutil from collections.abc import Sequence from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import ( Distribution, DistributionInstaller, PackageType, join_mirror, ) from mkosi.installer.dnf import createrepo_dnf, invoke_dnf, setup_dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "Mageia" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def package_type(cls) -> PackageType: return PackageType.rpm @classmethod def default_release(cls) -> str: return "cauldron" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.mageia @classmethod def createrepo(cls, context: "Context") -> None: return createrepo_dnf(context) @classmethod def setup(cls, context: Context) -> None: gpgurls = ( find_rpm_gpgkey( context, "RPM-GPG-KEY-Mageia", "https://mirrors.kernel.org/mageia/distrib/$releasever/$basearch/media/core/release/media_info/pubkey", ), ) repos = [] if context.config.local_mirror: repos += [RpmRepository("core-release", f"baseurl={context.config.local_mirror}", gpgurls)] elif context.config.mirror: url = f"baseurl={join_mirror(context.config.mirror, 'distrib/$releasever/$basearch/media/core/')}" repos += [ RpmRepository("core-release", f"{url}/release", gpgurls), RpmRepository("core-updates", f"{url}/updates/", gpgurls) ] else: url = "mirrorlist=https://www.mageia.org/mirrorlist/?release=$releasever&arch=$basearch&section=core" repos += [ RpmRepository("core-release", f"{url}&repo=release", gpgurls), RpmRepository("core-updates", f"{url}&repo=updates", gpgurls) ] setup_dnf(context, repos) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: invoke_dnf(context, "install", packages, apivfs=apivfs) for d in context.root.glob("boot/vmlinuz-*"): kver = d.name.removeprefix("vmlinuz-") vmlinuz = context.root / "usr/lib/modules" / kver / "vmlinuz" if not
vmlinuz.exists(): shutil.copy2(d, vmlinuz) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: invoke_dnf(context, "remove", packages) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64 : "x86_64", Architecture.arm64 : "aarch64", }.get(arch) if not a: die(f"Architecture {arch} is not supported by Mageia") return a mkosi-20.2/mkosi/distributions/openmandriva.py000066400000000000000000000063011455345632200216510ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import shutil from collections.abc import Sequence from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import ( Distribution, DistributionInstaller, PackageType, join_mirror, ) from mkosi.installer.dnf import createrepo_dnf, invoke_dnf, setup_dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "OpenMandriva" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def package_type(cls) -> PackageType: return PackageType.rpm @classmethod def default_release(cls) -> str: return "cooker" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.openmandriva @classmethod def createrepo(cls, context: "Context") -> None: return createrepo_dnf(context) @classmethod def setup(cls, context: Context) -> None: mirror = context.config.mirror or "http://mirror.openmandriva.org" gpgurls = ( find_rpm_gpgkey( context, "RPM-GPG-KEY-OpenMandriva", "https://raw.githubusercontent.com/OpenMandrivaAssociation/openmandriva-repos/master/RPM-GPG-KEY-OpenMandriva", ), ) repos = [] if context.config.local_mirror: repos += [RpmRepository("main-release", f"baseurl={context.config.local_mirror}", gpgurls)] else: url = f"baseurl={join_mirror(mirror, '$releasever/repository/$basearch/main')}" repos += [ RpmRepository("main-release", f"{url}/release", gpgurls), RpmRepository("main-updates", f"{url}/updates", gpgurls), ] setup_dnf(context, repos) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: invoke_dnf(context, "install", packages, apivfs=apivfs) for d in context.root.glob("boot/vmlinuz-*"): kver = d.name.removeprefix("vmlinuz-") vmlinuz = context.root / "usr/lib/modules" / kver / "vmlinuz" # Openmandriva symlinks /usr/lib/modules/<kver>/vmlinuz to /boot/vmlinuz-<kver>, so get rid of the symlink # and put the actual vmlinuz in /usr/lib/modules/<kver>.
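# For illustration (hypothetical kernel version, not taken from a real image), the shipped layout is:
#
#   /usr/lib/modules/6.6.2-desktop-1omv2390/vmlinuz -> /boot/vmlinuz-6.6.2-desktop-1omv2390
#
# and the code below replaces that symlink with a regular copy of the kernel image.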
if vmlinuz.is_symlink(): vmlinuz.unlink() if not vmlinuz.exists(): shutil.copy2(d, vmlinuz) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: invoke_dnf(context, "remove", packages) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64 : "x86_64", Architecture.arm64 : "aarch64", Architecture.riscv64 : "riscv64", }.get(arch) if not a: die(f"Architecture {arch} is not supported by OpenMandriva") return a mkosi-20.2/mkosi/distributions/opensuse.py000066400000000000000000000134251455345632200210340ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import tempfile import xml.etree.ElementTree as ElementTree from collections.abc import Sequence from pathlib import Path from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import Distribution, DistributionInstaller, PackageType from mkosi.installer.dnf import createrepo_dnf, invoke_dnf, setup_dnf from mkosi.installer.rpm import RpmRepository from mkosi.installer.zypper import createrepo_zypper, invoke_zypper, setup_zypper from mkosi.log import die from mkosi.run import find_binary, run from mkosi.sandbox import finalize_crypto_mounts class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "openSUSE" @classmethod def filesystem(cls) -> str: return "btrfs" @classmethod def package_type(cls) -> PackageType: return PackageType.rpm @classmethod def default_release(cls) -> str: return "tumbleweed" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.opensuse @classmethod def grub_prefix(cls) -> str: return "grub2" @classmethod def createrepo(cls, context: "Context") -> None: if find_binary("zypper", root=context.config.tools()): createrepo_zypper(context) else: createrepo_dnf(context) @classmethod def setup(cls, context: Context) -> None: release = context.config.release if release == "leap": release = "stable" mirror = context.config.mirror or "https://download.opensuse.org" # If the release looks like a timestamp, it's Tumbleweed. 13.x is legacy # (14.x won't ever appear). For anything else, let's default to Leap. if context.config.local_mirror: release_url = f"{context.config.local_mirror}" updates_url = None if release.isdigit() or release == "tumbleweed": release_url = f"{mirror}/tumbleweed/repo/oss/" updates_url = f"{mirror}/update/tumbleweed/" elif release in ("current", "stable"): release_url = f"{mirror}/distribution/openSUSE-{release}/repo/oss/" updates_url = f"{mirror}/update/openSUSE-{release}/" else: release_url = f"{mirror}/distribution/leap/{release}/repo/oss/" updates_url = f"{mirror}/update/leap/{release}/oss/" zypper = find_binary("zypper", root=context.config.tools()) # If we need to use a local mirror, create a temporary repository definition # that doesn't get into the image, as it is valid only at image build time.
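# As a sketch, for Tumbleweed without a local mirror the stanza that setup_dnf()/setup_zypper()
# writes out for the repository assembled below looks roughly like this (URL depends on the mirror,
# gpgkey lines omitted):
#
#   [repo-oss]
#   name=repo-oss
#   baseurl=https://download.opensuse.org/tumbleweed/repo/oss/
#   gpgcheck=1
#   enabled=1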
if context.config.local_mirror: repos = [RpmRepository(id="local-mirror", url=f"baseurl={context.config.local_mirror}", gpgurls=())] else: repos = [ RpmRepository( id="repo-oss", url=f"baseurl={release_url}", gpgurls=fetch_gpgurls(context, release_url) if not zypper else (), ), ] if updates_url is not None: repos += [ RpmRepository( id="repo-update", url=f"baseurl={updates_url}", gpgurls=fetch_gpgurls(context, updates_url) if not zypper else (), ) ] if zypper: setup_zypper(context, repos) else: setup_dnf(context, repos) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem", "distribution-release"], apivfs=False) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: if find_binary("zypper", root=context.config.tools()): options = [ "--download", "in-advance", "--recommends" if context.config.with_recommends else "--no-recommends", ] invoke_zypper(context, "install", packages, options, apivfs=apivfs) else: invoke_dnf(context, "install", packages, apivfs=apivfs) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: if find_binary("zypper", root=context.config.tools()): invoke_zypper(context, "remove", packages, ["--clean-deps"]) else: invoke_dnf(context, "remove", packages) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64 : "x86_64", }.get(arch) if not a: die(f"Architecture {arch} is not supported by OpenSUSE") return a def fetch_gpgurls(context: Context, repourl: str) -> tuple[str, ...]: gpgurls = [f"{repourl}/repodata/repomd.xml.key"] with tempfile.TemporaryDirectory() as d: run( [ "curl", "--location", "--output-dir", d, "--remote-name", "--no-progress-meter", "--fail", f"{repourl}/repodata/repomd.xml", ], sandbox=context.sandbox( network=True, options=["--bind", d, d, *finalize_crypto_mounts(context.config.tools())], ), ) xml = (Path(d) / "repomd.xml").read_text() root = ElementTree.fromstring(xml) tags = root.find("{http://linux.duke.edu/metadata/repo}tags") if not tags: die("repomd.xml missing <tags> element") for child in tags.iter("{http://linux.duke.edu/metadata/repo}content"): if child.text and child.text.startswith("gpg-pubkey"): gpgkey = child.text.partition("?")[0] gpgurls += [f"{repourl}{gpgkey}"] return tuple(gpgurls) mkosi-20.2/mkosi/distributions/rhel.py000066400000000000000000000075671455345632200201330ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ from collections.abc import Iterable from pathlib import Path from typing import Any, Optional from mkosi.context import Context from mkosi.distributions import centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die class Installer(centos.Installer): @classmethod def pretty_name(cls) -> str: return "RHEL" @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: major = int(float(context.config.release)) return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-redhat{major}-release", "https://access.redhat.com/security/data/fd431d51.txt", ), ) @staticmethod def sslcacert(context: Context) -> Optional[Path]: if context.config.mirror: return None p = Path("etc/rhsm/ca/redhat-uep.pem") if (context.pkgmngr / p).exists(): p = context.pkgmngr / p elif (Path("/") / p).exists(): p = Path("/") / p else: die("redhat-uep.pem certificate not found in host system or package manager tree") return p @staticmethod def sslclientkey(context: Context) -> Optional[Path]: if context.config.mirror: return None
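# For illustration (hypothetical serial number): subscription-manager stores entitlement keys
# under paths like etc/pki/entitlement/1234567890123456789-key.pem, which is what the glob below
# matches, preferring the package manager tree over the host.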
pattern = "etc/pki/entitlement/*-key.pem" p = next((p for p in sorted(context.pkgmngr.glob(pattern))), None) if not p: p = next((p for p in Path("/").glob(pattern)), None) if not p: die("Entitlement key not found in host system or package manager tree") return p @staticmethod def sslclientcert(context: Context) -> Optional[Path]: if context.config.mirror: return None pattern = "etc/pki/entitlement/*.pem" p = next((p for p in sorted(context.pkgmngr.glob(pattern)) if "key" not in p.name), None) if not p: p = next((p for p in sorted(Path("/").glob(pattern)) if "key" not in p.name), None) if not p: die("Entitlement certificate not found in host system or package manager tree") return p @classmethod def repository_variants(cls, context: Context, repo: str) -> Iterable[RpmRepository]: if context.config.local_mirror: yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", cls.gpgurls(context)) else: mirror = context.config.mirror or "https://cdn.redhat.com/content/dist/" common: dict[str, Any] = dict( gpgurls=cls.gpgurls(context), sslcacert=cls.sslcacert(context), sslclientcert=cls.sslclientcert(context), sslclientkey=cls.sslclientkey(context), ) v = context.config.release major = int(float(v)) yield RpmRepository( f"rhel-{v}-{repo}-rpms", f"baseurl={join_mirror(mirror, f'rhel{major}/{v}/$basearch/{repo}/os')}", enabled=True, **common, ) yield RpmRepository( f"rhel-{v}-{repo}-debug-rpms", f"baseurl={join_mirror(mirror, f'rhel{major}/{v}/$basearch/{repo}/debug')}", enabled=False, **common, ) yield RpmRepository( f"rhel-{v}-{repo}-source", f"baseurl={join_mirror(mirror, f'rhel{major}/{v}/$basearch/{repo}/source')}", enabled=False, **common, ) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: yield from cls.repository_variants(context, "baseos") yield from cls.repository_variants(context, "appstream") yield from cls.repository_variants(context, "codeready-builder") yield from cls.epel_repositories(context) mkosi-20.2/mkosi/distributions/rhel_ubi.py000066400000000000000000000041301455345632200207550ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ from collections.abc import Iterable from mkosi.context import Context from mkosi.distributions import centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey class Installer(centos.Installer): @classmethod def pretty_name(cls) -> str: return "RHEL UBI" @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: major = int(float(context.config.release)) return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-redhat{major}-release", "https://access.redhat.com/security/data/fd431d51.txt", ), ) @classmethod def repository_variants(cls, context: Context, repo: str) -> Iterable[RpmRepository]: if context.config.local_mirror: yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", cls.gpgurls(context)) else: mirror = context.config.mirror or "https://cdn-ubi.redhat.com/content/public/ubi/dist/" v = context.config.release yield RpmRepository( f"ubi-{v}-{repo}-rpms", f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/os')}", cls.gpgurls(context), ) yield RpmRepository( f"ubi-{v}-{repo}-debug-rpms", f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/debug')}", cls.gpgurls(context), enabled=False, ) yield RpmRepository( f"ubi-{v}-{repo}-source", f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/source')}", cls.gpgurls(context), enabled=False, ) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: yield from 
cls.repository_variants(context, "baseos") yield from cls.repository_variants(context, "appstream") yield from cls.repository_variants(context, "codeready-builder") yield from cls.epel_repositories(context) mkosi-20.2/mkosi/distributions/rocky.py000066400000000000000000000022421455345632200203150ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ from mkosi.context import Context from mkosi.distributions import centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey class Installer(centos.Installer): @classmethod def pretty_name(cls) -> str: return "Rocky Linux" @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-Rocky-{context.config.release}", f"https://download.rockylinux.org/pub/rocky/RPM-GPG-KEY-Rocky-{context.config.release}", ), ) @classmethod def repository_variants(cls, context: Context, repo: str) -> list[RpmRepository]: if context.config.mirror: url = f"baseurl={join_mirror(context.config.mirror, f'rocky/$releasever/{repo}/$basearch/os')}" else: url = f"mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo={repo}-$releasever" return [RpmRepository(repo, url, cls.gpgurls(context))] @classmethod def sig_repositories(cls, context: Context) -> list[RpmRepository]: return [] mkosi-20.2/mkosi/distributions/ubuntu.py000066400000000000000000000041061455345632200205110ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import debian class Installer(debian.Installer): @classmethod def pretty_name(cls) -> str: return "Ubuntu" @classmethod def default_release(cls) -> str: return "lunar" @staticmethod def repositories(context: Context, local: bool = True) -> list[str]: if context.config.local_mirror and local: return [f"deb [trusted=yes] {context.config.local_mirror} {context.config.release} main"] archives = ("deb", "deb-src") if context.config.architecture in (Architecture.x86, Architecture.x86_64): mirror = context.config.mirror or "http://archive.ubuntu.com/ubuntu" else: mirror = context.config.mirror or "http://ports.ubuntu.com" signedby = "[signed-by=/usr/share/keyrings/ubuntu-archive-keyring.gpg]" # From kinetic onwards, the usr-is-merged package is available in universe and is required by # mkosi to set up a proper usr-merged system so we add the universe repository unconditionally. components = ["main"] + (["universe"] if context.config.release not in ("focal", "jammy") else []) components = ' '.join((*components, *context.config.repositories)) repos = [ f"{archive} {signedby} {mirror} {context.config.release} {components}" for archive in archives ] repos += [ f"{archive} {signedby} {mirror} {context.config.release}-updates {components}" for archive in archives ] # Security updates repos are never mirrored. But !x86 are on the ports server. 
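# As a sketch, on x86-64 with the default "lunar" release and no extra components configured,
# the two entries appended below come out as:
#
#   deb [signed-by=/usr/share/keyrings/ubuntu-archive-keyring.gpg] http://security.ubuntu.com/ubuntu/ lunar-security main universe
#   deb-src [signed-by=/usr/share/keyrings/ubuntu-archive-keyring.gpg] http://security.ubuntu.com/ubuntu/ lunar-security main universe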
if context.config.architecture in [Architecture.x86, Architecture.x86_64]: mirror = "http://security.ubuntu.com/ubuntu/" else: mirror = "http://ports.ubuntu.com/" repos += [ f"{archive} {signedby} {mirror} {context.config.release}-security {components}" for archive in archives ] return repos mkosi-20.2/mkosi/installer/000077500000000000000000000000001455345632200157075ustar00rootroot00000000000000mkosi-20.2/mkosi/installer/__init__.py000066400000000000000000000057741455345632200200350ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import os from pathlib import Path from mkosi.config import ConfigFeature from mkosi.context import Context from mkosi.sandbox import apivfs_cmd, finalize_crypto_mounts from mkosi.tree import rmtree from mkosi.types import PathString from mkosi.util import flatten def clean_package_manager_metadata(context: Context) -> None: """ Remove package manager metadata Try them all regardless of the distro: metadata is only removed if the package manager is not present in the image. """ if context.config.clean_package_metadata == ConfigFeature.disabled: return always = context.config.clean_package_metadata == ConfigFeature.enabled for tool, paths in (("rpm", ["var/lib/rpm", "usr/lib/sysimage/rpm"]), ("dnf5", ["usr/lib/sysimage/libdnf5"]), ("dpkg", ["var/lib/dpkg"]), ("pacman", ["var/lib/pacman"])): for bin in ("bin", "sbin"): if not always and os.access(context.root / "usr" / bin / tool, mode=os.F_OK, follow_symlinks=False): break else: rmtree(*(context.root / p for p in paths), sandbox=context.sandbox(options=["--bind", context.root, context.root])) def package_manager_scripts(context: Context) -> dict[str, list[PathString]]: from mkosi.installer.apt import apt_cmd from mkosi.installer.dnf import dnf_cmd from mkosi.installer.pacman import pacman_cmd from mkosi.installer.rpm import rpm_cmd from mkosi.installer.zypper import zypper_cmd return { "pacman": apivfs_cmd(context.root) + pacman_cmd(context), "zypper": apivfs_cmd(context.root) + zypper_cmd(context), "dnf" : apivfs_cmd(context.root) + dnf_cmd(context), "rpm" : apivfs_cmd(context.root) + rpm_cmd(context), } | { command: apivfs_cmd(context.root) + apt_cmd(context, command) for command in ( "apt", "apt-cache", "apt-cdrom", "apt-config", "apt-extracttemplates", "apt-get", "apt-key", "apt-mark", "apt-sortpkgs", ) } def finalize_package_manager_mounts(context: Context) -> list[PathString]: from mkosi.installer.dnf import dnf_subdir mounts: list[PathString] = [ *(["--ro-bind", m, m] if (m := context.config.local_mirror) else []), *(["--ro-bind", os.fspath(p), os.fspath(p)] if (p := context.workspace / "apt.conf").exists() else []), *finalize_crypto_mounts(tools=context.config.tools()), "--bind", context.packages, "/work/packages", ] mounts += flatten( ["--bind", context.cache_dir / d, Path("/var") / d] for d in ( "lib/apt", "cache/apt", f"cache/{dnf_subdir(context)}", f"lib/{dnf_subdir(context)}", "cache/pacman/pkg", "cache/zypp", ) if (context.cache_dir / d).exists() ) return mounts mkosi-20.2/mkosi/installer/apt.py000066400000000000000000000132201455345632200170430ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import textwrap from collections.abc import Sequence from mkosi.context import Context from mkosi.installer import finalize_package_manager_mounts from mkosi.mounts import finalize_ephemeral_source_mounts from mkosi.run import find_binary, run from mkosi.sandbox import apivfs_cmd from mkosi.types import PathString from mkosi.util import sort_packages, umask def 
setup_apt(context: Context, repos: Sequence[str]) -> None: (context.pkgmngr / "etc/apt").mkdir(exist_ok=True, parents=True) (context.pkgmngr / "etc/apt/apt.conf.d").mkdir(exist_ok=True, parents=True) (context.pkgmngr / "etc/apt/preferences.d").mkdir(exist_ok=True, parents=True) (context.pkgmngr / "etc/apt/sources.list.d").mkdir(exist_ok=True, parents=True) # TODO: Drop once apt 2.5.4 is widely available. with umask(~0o755): (context.root / "var/lib/dpkg").mkdir(parents=True, exist_ok=True) (context.root / "var/lib/dpkg/status").touch() (context.cache_dir / "lib/apt").mkdir(exist_ok=True, parents=True) (context.cache_dir / "cache/apt").mkdir(exist_ok=True, parents=True) # We have a special apt.conf outside of pkgmngr dir that only configures "Dir::Etc" that we pass to APT_CONFIG to # tell apt it should read config files from /etc/apt in case this is overridden by distributions. This is required # because apt parses CLI configuration options after parsing its configuration files and as such we can't use CLI # options to tell apt where to look for configuration files. config = context.workspace / "apt.conf" if not config.exists(): config.write_text( textwrap.dedent( """\ Dir::Etc "etc/apt"; """ ) ) sources = context.pkgmngr / "etc/apt/sources.list" if not sources.exists(): with sources.open("w") as f: for repo in repos: f.write(f"{repo}\n") def apt_cmd(context: Context, command: str) -> list[PathString]: debarch = context.config.distribution.architecture(context.config.architecture) cmdline: list[PathString] = [ "env", f"APT_CONFIG={context.workspace / 'apt.conf'}", "DEBIAN_FRONTEND=noninteractive", "DEBCONF_INTERACTIVE_SEEN=true", "INITRD=No", command, "-o", f"APT::Architecture={debarch}", "-o", f"APT::Architectures={debarch}", "-o", f"APT::Install-Recommends={str(context.config.with_recommends).lower()}", "-o", "APT::Immediate-Configure=off", "-o", "APT::Get::Assume-Yes=true", "-o", "APT::Get::AutomaticRemove=true", "-o", "APT::Get::Allow-Change-Held-Packages=true", "-o", "APT::Get::Allow-Remove-Essential=true", "-o", "APT::Sandbox::User=root", "-o", "Dir::Cache=/var/cache/apt", "-o", "Dir::State=/var/lib/apt", "-o", f"Dir::State::Status={context.root / 'var/lib/dpkg/status'}", "-o", f"Dir::Log={context.workspace}", "-o", f"Dir::Bin::DPkg={find_binary('dpkg', root=context.config.tools())}", "-o", "Debug::NoLocking=true", "-o", f"DPkg::Options::=--root={context.root}", "-o", "DPkg::Options::=--force-unsafe-io", "-o", "DPkg::Options::=--force-architecture", "-o", "DPkg::Options::=--force-depends", "-o", "DPkg::Options::=--no-debsig", "-o", "DPkg::Use-Pty=false", "-o", "DPkg::Install::Recursive::Minimum=1000", "-o", "pkgCacheGen::ForceEssential=,", ] if not context.config.repository_key_check: cmdline += [ "-o", "Acquire::AllowInsecureRepositories=true", "-o", "Acquire::AllowDowngradeToInsecureRepositories=true", "-o", "APT::Get::AllowUnauthenticated=true", ] if not context.config.with_docs: cmdline += [ "-o", "DPkg::Options::=--path-exclude=/usr/share/doc/*", "-o", "DPkg::Options::=--path-include=/usr/share/doc/*/copyright", "-o", "DPkg::Options::=--path-exclude=/usr/share/man/*", "-o", "DPkg::Options::=--path-exclude=/usr/share/groff/*", "-o", "DPkg::Options::=--path-exclude=/usr/share/info/*", ] return cmdline def invoke_apt( context: Context, command: str, operation: str, packages: Sequence[str] = (), *, apivfs: bool = True, mounts: Sequence[PathString] = (), ) -> None: with finalize_ephemeral_source_mounts(context.config) as sources: run( apt_cmd(context, command) + [operation, 
*sort_packages(packages)], sandbox=( context.sandbox( network=True, options=[ "--bind", context.root, context.root, *finalize_package_manager_mounts(context), *sources, *mounts, "--chdir", "/work/src", ], ) + (apivfs_cmd(context.root) if apivfs else []) ), env=context.config.environment, ) def createrepo_apt(context: Context) -> None: with (context.packages / "Packages").open("wb") as f: run(["dpkg-scanpackages", context.packages], stdout=f, sandbox=context.sandbox(options=["--ro-bind", context.packages, context.packages])) (context.pkgmngr / "etc/apt/sources.list.d").mkdir(parents=True, exist_ok=True) (context.pkgmngr / "etc/apt/sources.list.d/mkosi-packages.sources").write_text( f"""\ Enabled: yes Types: deb URIs: file:///work/packages Suites: {context.config.release} Components: main Trusted: yes """ ) mkosi-20.2/mkosi/installer/dnf.py000066400000000000000000000145461455345632200170420ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import textwrap from collections.abc import Iterable from pathlib import Path from mkosi.context import Context from mkosi.installer import finalize_package_manager_mounts from mkosi.installer.rpm import RpmRepository, fixup_rpmdb_location, setup_rpm from mkosi.mounts import finalize_ephemeral_source_mounts from mkosi.run import find_binary, run from mkosi.sandbox import apivfs_cmd from mkosi.types import PathString from mkosi.util import sort_packages def dnf_executable(context: Context) -> str: # Allow the user to override autodetection with an environment variable dnf = context.config.environment.get("MKOSI_DNF") root = context.config.tools() return Path(dnf or find_binary("dnf5", root=root) or find_binary("dnf", root=root) or "yum").name def dnf_subdir(context: Context) -> str: dnf = dnf_executable(context) return "libdnf5" if dnf.endswith("dnf5") else "dnf" def setup_dnf(context: Context, repositories: Iterable[RpmRepository], filelists: bool = True) -> None: (context.pkgmngr / "etc/dnf/vars").mkdir(exist_ok=True, parents=True) (context.pkgmngr / "etc/yum.repos.d").mkdir(exist_ok=True, parents=True) (context.cache_dir / "cache" / dnf_subdir(context)).mkdir(exist_ok=True, parents=True) (context.cache_dir / "lib" / dnf_subdir(context)).mkdir(exist_ok=True, parents=True) config = context.pkgmngr / "etc/dnf/dnf.conf" if not config.exists(): config.parent.mkdir(exist_ok=True, parents=True) with config.open("w") as f: # Make sure we download filelists so all dependencies can be resolved. # See https://bugzilla.redhat.com/show_bug.cgi?id=2180842 if dnf_executable(context).endswith("dnf5") and filelists: f.write("[main]\noptional_metadata_types=filelists\n") repofile = context.pkgmngr / "etc/yum.repos.d/mkosi.repo" if not repofile.exists(): repofile.parent.mkdir(exist_ok=True, parents=True) with repofile.open("w") as f: for repo in repositories: f.write( textwrap.dedent( f"""\ [{repo.id}] name={repo.id} {repo.url} gpgcheck=1 enabled={int(repo.enabled)} """ ) ) if repo.sslcacert: f.write(f"sslcacert={repo.sslcacert}\n") if repo.sslclientcert: f.write(f"sslclientcert={repo.sslclientcert}\n") if repo.sslclientkey: f.write(f"sslclientkey={repo.sslclientkey}\n") for i, url in enumerate(repo.gpgurls): f.write("gpgkey=" if i == 0 else len("gpgkey=") * " ") f.write(f"{url}\n") f.write("\n") setup_rpm(context) def dnf_cmd(context: Context) -> list[PathString]: dnf = dnf_executable(context) cmdline: list[PathString] = [ "env", "HOME=/", # Make sure rpm doesn't pick up ~/.rpmmacros and ~/.rpmrc. 
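# As a rough sketch, for a Fedora 39 build the command assembled here begins like
# (hypothetical root path; the exact flags depend on the configuration handled below):
#
#   env HOME=/ dnf --assumeyes --best --releasever=39 --installroot=/path/to/root --setopt=keepcache=1 ...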
dnf, "--assumeyes", "--best", f"--releasever={context.config.release}", f"--installroot={context.root}", "--setopt=keepcache=1", f"--setopt=cachedir=/var/cache/{dnf_subdir(context)}", f"--setopt=persistdir=/var/lib/{dnf_subdir(context)}", f"--setopt=install_weak_deps={int(context.config.with_recommends)}", "--setopt=check_config_file_age=0", "--disable-plugin=*" if dnf.endswith("dnf5") else "--disableplugin=*", "--enable-plugin=builddep" if dnf.endswith("dnf5") else "--enableplugin=builddep", ] if not context.config.repository_key_check: cmdline += ["--nogpgcheck"] if context.config.repositories: opt = "--enable-repo" if dnf.endswith("dnf5") else "--enablerepo" cmdline += [f"{opt}={repo}" for repo in context.config.repositories] # TODO: this breaks with a local, offline repository created with 'createrepo' if context.config.cache_only and not context.config.local_mirror: cmdline += ["--cacheonly"] if not context.config.architecture.is_native(): cmdline += [f"--forcearch={context.config.distribution.architecture(context.config.architecture)}"] if not context.config.with_docs: cmdline += ["--no-docs" if dnf.endswith("dnf5") else "--nodocs"] if dnf.endswith("dnf5"): cmdline += ["--use-host-config"] else: cmdline += [ "--config=/etc/dnf/dnf.conf", "--setopt=reposdir=/etc/yum.repos.d", "--setopt=varsdir=/etc/dnf/vars", ] return cmdline def invoke_dnf(context: Context, command: str, packages: Iterable[str], apivfs: bool = True) -> None: with finalize_ephemeral_source_mounts(context.config) as sources: run( dnf_cmd(context) + [command, *sort_packages(packages)], sandbox=( context.sandbox( network=True, options=[ "--bind", context.root, context.root, *finalize_package_manager_mounts(context), *sources, "--chdir", "/work/src", ], ) + (apivfs_cmd(context.root) if apivfs else []) ), env=context.config.environment, ) fixup_rpmdb_location(context) # The log directory is always interpreted relative to the install root so there's nothing we can do but # to remove the log files from the install root afterwards. 
for p in (context.root / "var/log").iterdir(): if any(p.name.startswith(prefix) for prefix in ("dnf", "hawkey", "yum")): p.unlink() def createrepo_dnf(context: Context) -> None: run(["createrepo_c", context.packages], sandbox=context.sandbox(options=["--bind", context.packages, context.packages])) (context.pkgmngr / "etc/yum.repos.d").mkdir(parents=True, exist_ok=True) (context.pkgmngr / "etc/yum.repos.d/mkosi-packages.repo").write_text( textwrap.dedent( """\ [mkosi-packages] name=mkosi-packages gpgcheck=0 enabled=1 baseurl=file:///work/packages metadata_expire=0 priority=50 """ ) ) mkosi-20.2/mkosi/installer/pacman.py000066400000000000000000000070251455345632200175240ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import textwrap from collections.abc import Iterable, Sequence from typing import NamedTuple from mkosi.context import Context from mkosi.installer import finalize_package_manager_mounts from mkosi.mounts import finalize_ephemeral_source_mounts from mkosi.run import run from mkosi.sandbox import apivfs_cmd from mkosi.types import PathString from mkosi.util import sort_packages, umask class PacmanRepository(NamedTuple): id: str url: str def setup_pacman(context: Context, repositories: Iterable[PacmanRepository]) -> None: if context.config.repository_key_check: sig_level = "Required DatabaseOptional" else: # If we are using a single local mirror built on the fly there # will be no signatures sig_level = "Never" # Create base layout for pacman and pacman-key with umask(~0o755): (context.root / "var/lib/pacman").mkdir(exist_ok=True, parents=True) (context.cache_dir / "cache/pacman/pkg").mkdir(parents=True, exist_ok=True) config = context.pkgmngr / "etc/pacman.conf" if config.exists(): return config.parent.mkdir(exist_ok=True, parents=True) with config.open("w") as f: f.write( textwrap.dedent( f"""\ [options] SigLevel = {sig_level} LocalFileSigLevel = Optional ParallelDownloads = 5 """ ) ) for repo in repositories: f.write( textwrap.dedent( f"""\ [{repo.id}] Server = {repo.url} """ ) ) if any((context.pkgmngr / "etc/pacman.d/").glob("*.conf")): f.write( textwrap.dedent( """\ Include = /etc/pacman.d/*.conf """ ) ) def pacman_cmd(context: Context) -> list[PathString]: return [ "pacman", "--root", context.root, "--logfile=/dev/null", "--cachedir=/var/cache/pacman/pkg", "--hookdir", context.root / "etc/pacman.d/hooks", "--arch", context.config.distribution.architecture(context.config.architecture), "--color", "auto", "--noconfirm", ] def invoke_pacman( context: Context, operation: str, options: Sequence[str] = (), packages: Sequence[str] = (), apivfs: bool = True, ) -> None: with finalize_ephemeral_source_mounts(context.config) as sources: run( pacman_cmd(context) + [operation, *options, *sort_packages(packages)], sandbox=( context.sandbox( network=True, options=[ "--bind", context.root, context.root, *finalize_package_manager_mounts(context), *sources, "--chdir", "/work/src", ], ) + (apivfs_cmd(context.root) if apivfs else []) ), env=context.config.environment, ) def createrepo_pacman(context: Context) -> None: run(["repo-add", context.packages / "mkosi-packages.db.tar", *context.packages.glob("*.pkg.tar*")]) with (context.pkgmngr / "etc/pacman.conf").open("a") as f: f.write( textwrap.dedent( """\ [mkosi-packages] Server = file:///work/packages """ ) ) mkosi-20.2/mkosi/installer/rpm.py000066400000000000000000000051261455345632200170630ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import os import shutil import subprocess from pathlib import Path 
from typing import NamedTuple, Optional from mkosi.context import Context from mkosi.run import run from mkosi.tree import rmtree from mkosi.types import PathString class RpmRepository(NamedTuple): id: str url: str gpgurls: tuple[str, ...] enabled: bool = True sslcacert: Optional[Path] = None sslclientkey: Optional[Path] = None sslclientcert: Optional[Path] = None def find_rpm_gpgkey(context: Context, key: str, url: str) -> str: gpgpath = next((context.config.tools() / "usr/share/distribution-gpg-keys").rglob(key), None) if gpgpath: return f"file://{Path('/') / gpgpath.relative_to(context.config.tools())}" gpgpath = next(Path(context.pkgmngr / "etc/pki/rpm-gpg").rglob(key), None) if gpgpath: return f"file://{Path('/') / gpgpath.relative_to(context.pkgmngr)}" return url def setup_rpm(context: Context) -> None: confdir = context.pkgmngr / "etc/rpm" confdir.mkdir(parents=True, exist_ok=True) if not (confdir / "macros.lang").exists() and context.config.locale: (confdir / "macros.lang").write_text(f"%_install_langs {context.config.locale}") plugindir = Path(run(["rpm", "--eval", "%{__plugindir}"], sandbox=context.sandbox(), stdout=subprocess.PIPE).stdout.strip()) if (plugindir := context.config.tools() / plugindir.relative_to("/")).exists(): with (confdir / "macros.disable-plugins").open("w") as f: for plugin in plugindir.iterdir(): f.write(f"%__transaction_{plugin.stem} %{{nil}}\n") def fixup_rpmdb_location(context: Context) -> None: # On Debian, rpm/dnf ship with a patch to store the rpmdb under ~/ so it needs to be copied back in the # right location, otherwise the rpmdb will be broken. See: https://bugs.debian.org/1004863. We also # replace it with a symlink so that any further rpm operations immediately use the correct location. rpmdb_home = context.root / "root/.rpmdb" if not rpmdb_home.exists() or rpmdb_home.is_symlink(): return # Take into account the new location in F36 rpmdb = context.root / "usr/lib/sysimage/rpm" if not rpmdb.exists(): rpmdb = context.root / "var/lib/rpm" rmtree(rpmdb, sandbox=context.sandbox(options=["--bind", rpmdb.parent, rpmdb.parent])) shutil.move(rpmdb_home, rpmdb) rpmdb_home.symlink_to(os.path.relpath(rpmdb, start=rpmdb_home.parent)) def rpm_cmd(context: Context) -> list[PathString]: return ["env", "HOME=/", "rpm", "--root", context.root] mkosi-20.2/mkosi/installer/zypper.py000066400000000000000000000075171455345632200176240ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import textwrap from collections.abc import Sequence from mkosi.config import yes_no from mkosi.context import Context from mkosi.installer import finalize_package_manager_mounts from mkosi.installer.rpm import RpmRepository, fixup_rpmdb_location, setup_rpm from mkosi.mounts import finalize_ephemeral_source_mounts from mkosi.run import run from mkosi.sandbox import apivfs_cmd from mkosi.types import PathString from mkosi.util import sort_packages def setup_zypper(context: Context, repos: Sequence[RpmRepository]) -> None: config = context.pkgmngr / "etc/zypp/zypp.conf" config.parent.mkdir(exist_ok=True, parents=True) (context.cache_dir / "cache/zypp").mkdir(exist_ok=True, parents=True) # rpm.install.excludedocs can only be configured in zypp.conf so we append # to any user provided config file. Let's also bump the refresh delay to # the same default as dnf which is 48 hours. 
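# For a build without documentation the snippet appended below comes out as:
#
#   [main]
#   rpm.install.excludedocs = yes
#   repo.refresh.delay = 2880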
with config.open("a") as f: f.write( textwrap.dedent( f""" [main] rpm.install.excludedocs = {yes_no(not context.config.with_docs)} repo.refresh.delay = {48 * 60} """ ) ) repofile = context.pkgmngr / "etc/zypp/repos.d/mkosi.repo" if not repofile.exists(): repofile.parent.mkdir(exist_ok=True, parents=True) with repofile.open("w") as f: for repo in repos: f.write( textwrap.dedent( f"""\ [{repo.id}] name={repo.id} {repo.url} gpgcheck=1 enabled={int(repo.enabled)} autorefresh=1 keeppackages=1 """ ) ) for i, url in enumerate(repo.gpgurls): f.write("gpgkey=" if i == 0 else len("gpgkey=") * " ") f.write(f"{url}\n") setup_rpm(context) def zypper_cmd(context: Context) -> list[PathString]: return [ "env", "ZYPP_CONF=/etc/zypp/zypp.conf", "HOME=/", "zypper", f"--installroot={context.root}", "--cache-dir=/var/cache/zypp", "--gpg-auto-import-keys" if context.config.repository_key_check else "--no-gpg-checks", "--non-interactive", ] def invoke_zypper( context: Context, verb: str, packages: Sequence[str], options: Sequence[str] = (), apivfs: bool = True, ) -> None: with finalize_ephemeral_source_mounts(context.config) as sources: run( zypper_cmd(context) + [verb, *options, *sort_packages(packages)], sandbox=( context.sandbox( network=True, options=[ "--bind", context.root, context.root, *finalize_package_manager_mounts(context), *sources, "--chdir", "/work/src", ], ) + (apivfs_cmd(context.root) if apivfs else []) ), env=context.config.environment, ) fixup_rpmdb_location(context) def createrepo_zypper(context: Context) -> None: run(["createrepo_c", context.packages], sandbox=context.sandbox(options=["--bind", context.packages, context.packages])) (context.pkgmngr / "etc/zypp/repos.d").mkdir(parents=True, exist_ok=True) (context.pkgmngr / "etc/zypp/repos.d/mkosi-packages.repo").write_text( textwrap.dedent( """\ [mkosi-packages] name=mkosi-packages gpgcheck=0 enabled=1 baseurl=file:///work/packages autorefresh=0 priority=50 """ ) ) mkosi-20.2/mkosi/kmod.py000066400000000000000000000150761455345632200152270ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import logging import os import re import subprocess from collections.abc import Iterator, Sequence from pathlib import Path from mkosi.log import complete_step, log_step from mkosi.run import run from mkosi.types import PathString def loaded_modules() -> list[str]: return [f"{line.split()[0]}\\.ko" for line in Path("/proc/modules").read_text().splitlines()] def filter_kernel_modules( root: Path, kver: str, *, include: Sequence[str], exclude: Sequence[str], host: bool, ) -> list[Path]: modulesd = root / "usr/lib/modules" / kver modules = {m for m in modulesd.rglob("*.ko*")} if host: include = [*include, *loaded_modules()] keep = set() if include: regex = re.compile("|".join(include)) for m in modules: rel = os.fspath(m.relative_to(modulesd / "kernel")) if regex.search(rel): logging.debug(f"Including module {rel}") keep.add(rel) if exclude: remove = set() regex = re.compile("|".join(exclude)) for m in modules: rel = os.fspath(m.relative_to(modulesd / "kernel")) if rel not in keep and regex.search(rel): logging.debug(f"Excluding module {rel}") remove.add(m) modules -= remove return sorted(modules) def module_path_to_name(path: Path) -> str: return path.name.partition(".")[0] def resolve_module_dependencies( root: Path, kver: str, modules: Sequence[str], *, sandbox: Sequence[PathString] = (), ) -> tuple[set[Path], set[Path]]: """ Returns a tuple of lists containing the paths to the module and firmware dependencies of the given list of module 
names (including the given module paths themselves). The paths are returned relative to the root directory. """ modulesd = Path("usr/lib/modules") / kver builtin = set(module_path_to_name(Path(m)) for m in (root / modulesd / "modules.builtin").read_text().splitlines()) allmodules = set((root / modulesd / "kernel").glob("**/*.ko*")) nametofile = {module_path_to_name(m): m for m in allmodules} log_step("Running modinfo to fetch kernel module dependencies") # We could run modinfo once for each module but that's slow. Luckily we can pass multiple modules to # modinfo and it'll process them all in a single go. We get the modinfo for all modules to build two maps # that map the path of the module to its module dependencies and its firmware dependencies respectively. # Because there's more kernel modules than the max number of accepted CLI arguments for bwrap, we split the modules # list up into chunks. info = "" for i in range(0, len(nametofile.keys()), 8500): chunk = list(nametofile.keys())[i:i+8500] info += run(["modinfo", "--basedir", root, "--set-version", kver, "--null", *chunk], stdout=subprocess.PIPE, sandbox=sandbox).stdout.strip() log_step("Calculating required kernel modules and firmware") moddep = {} firmwaredep = {} depends = [] firmware = [] for line in info.split("\0"): key, sep, value = line.partition(":") if not sep: key, sep, value = line.partition("=") if key in ("depends", "softdep"): depends += [d for d in value.strip().split(",") if d] elif key == "firmware": fw = [f for f in (root / "usr/lib/firmware").glob(f"{value.strip()}*")] if not fw: logging.debug(f"Not including missing firmware /usr/lib/firmware/{value} in the initrd") firmware += fw elif key == "name": # The file names use dashes, but the module names use underscores. We track the names # in terms of the file names, since the depends use dashes and therefore filenames as # well. 
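# For example, the file snd-hda-intel.ko provides the module named snd_hda_intel; everything is
# normalized to the dashed (file name) form so the lookups below stay consistent.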
name = value.strip().replace("_", "-") moddep[name] = depends firmwaredep[name] = firmware depends = [] firmware = [] todo = [*builtin, *modules] mods = set() firmware = set() while todo: m = todo.pop() if m in mods: continue depends = moddep.get(m, []) for d in depends: if d not in nametofile and d not in builtin: logging.warning(f"{d} is a dependency of {m} but is not installed, ignoring ") mods.add(m) todo += depends firmware.update(firmwaredep.get(m, [])) return set(nametofile[m] for m in mods if m in nametofile), set(firmware) def gen_required_kernel_modules( root: Path, kver: str, *, include: Sequence[str], exclude: Sequence[str], host: bool, sandbox: Sequence[PathString] = (), ) -> Iterator[Path]: modulesd = root / "usr/lib/modules" / kver modules = filter_kernel_modules(root, kver, include=include, exclude=exclude, host=host) names = [module_path_to_name(m) for m in modules] mods, firmware = resolve_module_dependencies(root, kver, names, sandbox=sandbox) def files() -> Iterator[Path]: yield modulesd.parent yield modulesd yield modulesd / "kernel" for d in (modulesd, root / "usr/lib/firmware"): for p in (root / d).rglob("*"): if p.is_dir(): yield p for p in sorted(mods) + sorted(firmware): yield p for p in (root / modulesd).iterdir(): if not p.name.startswith("modules"): continue yield p if (root / modulesd / "vdso").exists(): yield modulesd / "vdso" for p in (root / modulesd / "vdso").iterdir(): yield p return files() def process_kernel_modules( root: Path, kver: str, *, include: Sequence[str], exclude: Sequence[str], host: bool, sandbox: Sequence[PathString] = (), ) -> None: if not include and not exclude: return with complete_step("Applying kernel module filters"): required = set( gen_required_kernel_modules(root, kver, include=include, exclude=exclude, host=host, sandbox=sandbox) ) for m in (root / "usr/lib/modules" / kver).rglob("*.ko*"): if m in required: continue logging.debug(f"Removing module {m}") (root / m).unlink() for fw in (m for m in (root / "usr/lib/firmware").rglob("*") if not m.is_dir()): if fw in required: continue logging.debug(f"Removing firmware {fw}") (root / fw).unlink() mkosi-20.2/mkosi/log.py000066400000000000000000000054071455345632200150530ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import contextlib import contextvars import logging import os import sys from collections.abc import Iterator from typing import Any, NoReturn, Optional # This global should be initialized after parsing arguments ARG_DEBUG = contextvars.ContextVar("debug", default=False) ARG_DEBUG_SHELL = contextvars.ContextVar("debug-shell", default=False) LEVEL = 0 class Style: bold = "\033[0;1;39m" if sys.stderr.isatty() else "" gray = "\033[0;38;5;245m" if sys.stderr.isatty() else "" red = "\033[31;1m" if sys.stderr.isatty() else "" yellow = "\033[33;1m" if sys.stderr.isatty() else "" reset = "\033[0m" if sys.stderr.isatty() else "" def die(message: str, *, hint: Optional[str] = None) -> NoReturn: logging.error(f"{message}") if hint: logging.info(f"({hint})") sys.exit(1) def log_step(text: str) -> None: prefix = " " * LEVEL if sys.exc_info()[0]: # We are falling through exception handling blocks. # De-emphasize this step here, so the user can tell more # easily which step generated the exception. The exception # or error will only be printed after we finish cleanup. 
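# e.g. a step that would normally be logged as "‣ Copying cached trees" is rendered as
# "‣ (Copying cached trees)" while the exception propagates (hypothetical step name).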
logging.info(f"{prefix}({text})") else: logging.info(f"{prefix}{Style.bold}{text}{Style.reset}") def log_notice(text: str) -> None: logging.info(f"{Style.bold}{text}{Style.reset}") @contextlib.contextmanager def complete_step(text: str, text2: Optional[str] = None) -> Iterator[list[Any]]: global LEVEL log_step(text) LEVEL += 1 try: args: list[Any] = [] yield args finally: LEVEL -= 1 assert LEVEL >= 0 if text2 is not None: log_step(text2.format(*args)) class Formatter(logging.Formatter): def __init__(self, fmt: Optional[str] = None, *args: Any, **kwargs: Any) -> None: fmt = fmt or "%(message)s" self.formatters = { logging.DEBUG: logging.Formatter(f"‣ {Style.gray}{fmt}{Style.reset}"), logging.INFO: logging.Formatter(f"‣ {fmt}"), logging.WARNING: logging.Formatter(f"‣ {Style.yellow}{fmt}{Style.reset}"), logging.ERROR: logging.Formatter(f"‣ {Style.red}{fmt}{Style.reset}"), logging.CRITICAL: logging.Formatter(f"‣ {Style.red}{Style.bold}{fmt}{Style.reset}"), } super().__init__(fmt, *args, **kwargs) def format(self, record: logging.LogRecord) -> str: return self.formatters[record.levelno].format(record) def log_setup() -> None: handler = logging.StreamHandler(stream=sys.stderr) handler.setFormatter(Formatter()) logging.getLogger().addHandler(handler) logging.getLogger().setLevel(logging.getLevelName(os.getenv("SYSTEMD_LOG_LEVEL", "info").upper())) mkosi-20.2/mkosi/manifest.py000066400000000000000000000273031455345632200160770ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import dataclasses import datetime import json import logging import subprocess import textwrap from pathlib import Path from typing import IO, Any, Optional from mkosi.config import Config, ManifestFormat from mkosi.distributions import Distribution, PackageType from mkosi.run import run @dataclasses.dataclass class PackageManifest: """A description of a package The fields used here must match https://systemd.io/COREDUMP_PACKAGE_METADATA/#well-known-keys. 
""" type: str name: str version: str architecture: str size: int def as_dict(self) -> dict[str, str]: return { "type": self.type, "name": self.name, "version": self.version, "architecture": self.architecture, } @dataclasses.dataclass class SourcePackageManifest: name: str changelog: Optional[str] packages: list[PackageManifest] = dataclasses.field(default_factory=list) def add(self, package: PackageManifest) -> None: self.packages.append(package) def report(self) -> str: size = sum(p.size for p in self.packages) t = textwrap.dedent( f"""\ SourcePackage: {self.name} Packages: {" ".join(p.name for p in self.packages)} Size: {size} """ ) if self.changelog: t += f"""\nChangelog:\n{self.changelog}\n""" return t def parse_pkg_desc(f: Path) -> tuple[str, str, str, str]: name = version = base = arch = "" with f.open() as desc: for line in desc: line = line.strip() if line == "%NAME%": name = next(desc).strip() elif line == "%VERSION%": version = next(desc).strip() elif line == "%BASE%": base = next(desc).strip() elif line == "%ARCH%": arch = next(desc).strip() break return name, version, base, arch @dataclasses.dataclass class Manifest: config: Config packages: list[PackageManifest] = dataclasses.field(default_factory=list) source_packages: dict[str, SourcePackageManifest] = dataclasses.field(default_factory=dict) _init_timestamp: datetime.datetime = dataclasses.field(init=False, default_factory=datetime.datetime.now) def need_source_info(self) -> bool: return ManifestFormat.changelog in self.config.manifest_format def record_packages(self, root: Path) -> None: if self.config.distribution.package_type() == PackageType.rpm: self.record_rpm_packages(root) if self.config.distribution.package_type() == PackageType.deb: self.record_deb_packages(root) if self.config.distribution.package_type() == PackageType.pkg: self.record_pkg_packages(root) def record_rpm_packages(self, root: Path) -> None: # On Debian, rpm/dnf ship with a patch to store the rpmdb under ~/ so rpm # has to be told to use the location the rpmdb was moved to. # Otherwise the rpmdb will appear empty. See: https://bugs.debian.org/1004863 dbpath = "/usr/lib/sysimage/rpm" if not (root / dbpath).exists(): dbpath = "/var/lib/rpm" c = run( [ "rpm", f"--root={root}", f"--dbpath={dbpath}", "-qa", "--qf", r"%{NEVRA}\t%{SOURCERPM}\t%{NAME}\t%{ARCH}\t%{LONGSIZE}\t%{INSTALLTIME}\n", ], stdout=subprocess.PIPE, sandbox=self.config.sandbox(), ) packages = sorted(c.stdout.splitlines()) for package in packages: nevra, srpm, name, arch, size, installtime = package.split("\t") assert nevra.startswith(f"{name}-") evra = nevra.removeprefix(f"{name}-") # Some packages have architecture '(none)', and it's not part of NEVRA, e.g.: # gpg-pubkey-45719a39-5f2c0192 gpg-pubkey (none) 0 1635985199 if arch != "(none)": assert nevra.endswith(f".{arch}") evr = evra.removesuffix(f".{arch}") else: evr = evra arch = "" size = int(size) installtime = datetime.datetime.fromtimestamp(int(installtime)) # If we are creating a layer based on a BaseImage=, e.g. a sysext, filter by # packages that were installed in this execution of mkosi. We assume that the # upper layer is put together in one go, which currently is always true. 
if self.config.base_trees and installtime < self._init_timestamp: continue manifest = PackageManifest("rpm", name, evr, arch, size) self.packages.append(manifest) if not self.need_source_info(): continue source = self.source_packages.get(srpm) if source is None: c = run( [ "rpm", f"--root={root}", f"--dbpath={dbpath}", "-q", "--changelog", nevra, ], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, sandbox=self.config.sandbox(), ) changelog = c.stdout.strip() source = SourcePackageManifest(srpm, changelog) self.source_packages[srpm] = source source.add(manifest) def record_deb_packages(self, root: Path) -> None: c = run( [ "dpkg-query", f"--admindir={root}/var/lib/dpkg", "--show", "--showformat", r'${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n', ], stdout=subprocess.PIPE, sandbox=self.config.sandbox(), ) packages = sorted(c.stdout.splitlines()) for package in packages: name, source, version, arch, size, installtime = package.split("\t") # dpkg records the size in KBs, the field is optional # db-fsys:Last-Modified is not available in very old dpkg, so just skip creating # the manifest for sysext when building on very old distributions by setting the # timestamp to epoch. This only affects Ubuntu Bionic which is nearing EOL. size = int(size) * 1024 if size else 0 installtime = datetime.datetime.fromtimestamp(int(installtime) if installtime else 0) # If we are creating a layer based on a BaseImage=, e.g. a sysext, filter by # packages that were installed in this execution of mkosi. We assume that the # upper layer is put together in one go, which currently is always true. if self.config.base_trees and installtime < self._init_timestamp: continue manifest = PackageManifest("deb", name, version, arch, size) self.packages.append(manifest) if not self.need_source_info(): continue source_package = self.source_packages.get(source) if source_package is None: # Yes, --quiet is specified twice, to avoid output about download stats. # Note that the argument of the 'changelog' verb is the binary package name, # not the source package name. cmd = [ "apt-get", "--quiet", "--quiet", "-o", f"Dir={root}", "-o", f"DPkg::Chroot-Directory={root}", "changelog", name, ] # If we are building with docs then it's easy, as the changelogs are saved # in the image, just fetch them. Otherwise they will be downloaded from the network. if self.config.with_docs: # By default apt drops privileges and runs as the 'apt' user, but that means it # loses access to the build directory, which is 700. cmd += ["--option", "Acquire::Changelogs::AlwaysOnline=false", "--option", "Debug::NoDropPrivs=true"] else: # Override the URL to avoid HTTPS, so that we don't need to install # ca-certificates to make it work. if self.config.distribution == Distribution.ubuntu: cmd += ["--option", "Acquire::Changelogs::URI::Override::Origin::Ubuntu=http://changelogs.ubuntu.com/changelogs/pool/@CHANGEPATH@/changelog"] else: cmd += ["--option", "Acquire::Changelogs::URI::Override::Origin::Debian=http://metadata.ftp-master.debian.org/changelogs/@CHANGEPATH@_changelog"] # We have to run from the root, because if we use the RootDir option to make # apt from the host look at the repositories in the image, it will also pick # the 'methods' executables from there, but the ABI might not be compatible. 
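# The assembled invocation is roughly (illustrative): # apt-get --quiet --quiet -o Dir=<root> -o DPkg::Chroot-Directory=<root> changelog <name> # plus whichever Acquire::Changelogs options were appended above.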
result = run(cmd, stdout=subprocess.PIPE, sandbox=self.config.sandbox()) source_package = SourcePackageManifest(source, result.stdout.strip()) self.source_packages[source] = source_package source_package.add(manifest) def record_pkg_packages(self, root: Path) -> None: packages = sorted((root / "var/lib/pacman/local").glob("*/desc")) for desc in packages: name, version, source, arch = parse_pkg_desc(desc) package = PackageManifest("pkg", name, version, arch, 0) self.packages.append(package) source_package = self.source_packages.get(source) if source_package is None: source_package = SourcePackageManifest(source, None) self.source_packages[source] = source_package source_package.add(package) def has_data(self) -> bool: # We might add more data in the future return len(self.packages) > 0 def as_dict(self) -> dict[str, Any]: config = { "name": self.config.image_id or "image", "distribution": str(self.config.distribution), "architecture": str(self.config.architecture), } if self.config.image_version is not None: config["version"] = self.config.image_version if self.config.release is not None: config["release"] = self.config.release return { # Bump this when incompatible changes are made to the manifest format. "manifest_version": 1, # Describe the image itself. "config": config, # Describe the image content in terms of packages. "packages": [package.as_dict() for package in self.packages], } def write_json(self, out: IO[str]) -> None: json.dump(self.as_dict(), out, indent=2) def write_package_report(self, out: IO[str]) -> None: """Create a human-readable report about packages This is modelled after "Fedora compose reports" that are sent to fedora-devel. The format describes added and removed packages, and includes the changelogs. A diff between two such reports shows what changed *in* the packages quite nicely. """ logging.info(f"Packages: {len(self.packages)}") logging.info(f"Size: {sum(p.size for p in self.packages)}") for package in self.source_packages.values(): logging.info(f"\n{80*'-'}\n") out.write(package.report()) mkosi-20.2/mkosi/mounts.py000066400000000000000000000104761455345632200156210ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import contextlib import os import platform import stat import tempfile from collections.abc import Iterator, Sequence from pathlib import Path from typing import Optional from mkosi.config import Config from mkosi.run import run from mkosi.types import PathString from mkosi.util import umask from mkosi.versioncomp import GenericVersion def stat_is_whiteout(st: os.stat_result) -> bool: return stat.S_ISCHR(st.st_mode) and st.st_rdev == 0 def delete_whiteout_files(path: Path) -> None: """Delete any char(0,0) device nodes underneath @path Overlayfs uses such files to mark "whiteouts" (files present in the lower layers, but removed in the upper one). """ for entry in path.rglob("*"): # TODO: Use Path.stat() once we depend on Python 3.10+. 
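# For reference: overlayfs represents such a whiteout as a character device with device number 0:0 # (what "mknod <name> c 0 0" would create), which is exactly what stat_is_whiteout() checks for.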
if stat_is_whiteout(os.stat(entry, follow_symlinks=False)): entry.unlink() @contextlib.contextmanager def mount( what: PathString, where: Path, operation: Optional[str] = None, options: Sequence[str] = (), type: Optional[str] = None, read_only: bool = False, lazy: bool = False, umount: bool = True, ) -> Iterator[Path]: if not where.exists(): with umask(~0o755): where.mkdir(parents=True) if read_only: options = ["ro", *options] cmd: list[PathString] = ["mount", "--no-mtab"] if operation: cmd += [operation] cmd += [what, where] if type: cmd += ["--types", type] if options: cmd += ["--options", ",".join(options)] try: run(cmd) yield where finally: if umount: run(["umount", "--no-mtab", *(["--lazy"] if lazy else []), where]) @contextlib.contextmanager def mount_overlay( lowerdirs: Sequence[Path], upperdir: Optional[Path] = None, where: Optional[Path] = None, lazy: bool = False, ) -> Iterator[Path]: with contextlib.ExitStack() as stack: if upperdir is None: upperdir = Path(stack.enter_context(tempfile.TemporaryDirectory(prefix="volatile-overlay"))) st = lowerdirs[-1].stat() os.chmod(upperdir, st.st_mode) os.chown(upperdir, st.st_uid, st.st_gid) workdir = Path( stack.enter_context(tempfile.TemporaryDirectory(dir=upperdir.parent, prefix=f"{upperdir.name}-workdir")) ) if where is None: where = Path( stack.enter_context( tempfile.TemporaryDirectory(dir=upperdir.parent, prefix=f"{upperdir.name}-mountpoint") ) ) options = [ f"lowerdir={':'.join(os.fspath(p) for p in reversed(lowerdirs))}", f"upperdir={upperdir}", f"workdir={workdir}", # Disable the inodes index and metacopy (only copy metadata upwards if possible) # options. If these are enabled (e.g., if the kernel enables them by default), # the mount will fail if the upper directory has been earlier used with a different # lower directory, such as with a build overlay that was generated on top of a # different temporary root. # See https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html#sharing-and-copying-layers # and https://github.com/systemd/mkosi/issues/1841. "index=off", "metacopy=off" ] # userxattr is only supported on overlayfs since kernel 5.11 if GenericVersion(platform.release()) >= GenericVersion("5.11"): options.append("userxattr") try: with mount("overlay", where, options=options, type="overlay", lazy=lazy): yield where finally: delete_whiteout_files(upperdir) @contextlib.contextmanager def finalize_ephemeral_source_mounts(config: Config) -> Iterator[list[PathString]]: with contextlib.ExitStack() as stack: mounts = ( (stack.enter_context(mount_overlay([source])) if config.build_sources_ephemeral else source, target) for source, target in {t.with_prefix(Path("/work/src")) for t in config.build_sources} ) options: list[PathString] = ["--dir", "/work/src"] for src, target in sorted(mounts, key=lambda s: s[1]): options += ["--bind", src, target] yield options mkosi-20.2/mkosi/pager.py000066400000000000000000000010101455345632200153520ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import os import pydoc from typing import Optional def page(text: str, enabled: Optional[bool]) -> None: if enabled: # Initialize less options from $MKOSI_LESS or provide a suitable fallback. 
# F: don't page if one screen # X: do not clear screen # M: verbose prompt # K: quit on ^C # R: allow rich formatting os.environ["LESS"] = os.getenv("MKOSI_LESS", "FXMKR") pydoc.pager(text) else: print(text) mkosi-20.2/mkosi/partition.py000066400000000000000000000042741455345632200163040ustar00rootroot00000000000000import dataclasses import json import subprocess from collections.abc import Mapping, Sequence from pathlib import Path from typing import Any, Optional from mkosi.log import die from mkosi.run import run from mkosi.types import PathString @dataclasses.dataclass(frozen=True) class Partition: type: str uuid: str partno: Optional[int] split_path: Optional[Path] roothash: Optional[str] @classmethod def from_dict(cls, dict: Mapping[str, Any]) -> "Partition": return cls( type=dict["type"], uuid=dict["uuid"], partno=int(partno) if (partno := dict.get("partno")) else None, split_path=Path(p) if ((p := dict.get("split_path")) and p != "-") else None, roothash=dict.get("roothash"), ) GRUB_BOOT_PARTITION_UUID = "21686148-6449-6e6f-744e-656564454649" def find_partitions(image: Path, *, sandbox: Sequence[PathString]) -> list[Partition]: output = json.loads(run(["systemd-repart", "--json=short", image], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, sandbox=sandbox).stdout) return [Partition.from_dict(d) for d in output] def finalize_roothash(partitions: Sequence[Partition]) -> Optional[str]: roothash = usrhash = None for p in partitions: if (h := p.roothash) is None: continue if not (p.type.startswith("usr") or p.type.startswith("root")): die(f"Found roothash property on unexpected partition type {p.type}") # When there's multiple verity enabled root or usr partitions, the first one wins. if p.type.startswith("usr"): usrhash = usrhash or h else: roothash = roothash or h return f"roothash={roothash}" if roothash else f"usrhash={usrhash}" if usrhash else None def finalize_root(partitions: Sequence[Partition]) -> Optional[str]: root = finalize_roothash(partitions) if not root: root = next((f"root=PARTUUID={p.uuid}" for p in partitions if p.type.startswith("root")), None) if not root: root = next((f"mount.usr=PARTUUID={p.uuid}" for p in partitions if p.type.startswith("usr")), None) return root mkosi-20.2/mkosi/qemu.py000066400000000000000000001016071455345632200152400ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import asyncio import base64 import contextlib import enum import errno import fcntl import hashlib import logging import os import random import shutil import socket import struct import subprocess import sys import tempfile import uuid from collections.abc import Iterator from pathlib import Path from typing import Optional from mkosi.config import ( Architecture, Args, Config, ConfigFeature, OutputFormat, QemuFirmware, QemuVsockCID, format_bytes, want_selinux_relabel, ) from mkosi.log import die from mkosi.partition import finalize_root, find_partitions from mkosi.run import AsyncioThread, become_root, find_binary, fork_and_wait, run, spawn from mkosi.tree import copy_tree, rmtree from mkosi.types import PathString from mkosi.util import INVOKING_USER, StrEnum from mkosi.versioncomp import GenericVersion QEMU_KVM_DEVICE_VERSION = GenericVersion("9.0") VHOST_VSOCK_SET_GUEST_CID = 0x4008af60 class QemuDeviceNode(StrEnum): kvm = enum.auto() vhost_vsock = enum.auto() def device(self) -> Path: return Path("/dev") / str(self) def description(self) -> str: return { QemuDeviceNode.kvm: "KVM acceleration", QemuDeviceNode.vhost_vsock: "a VSock device", }[self] def 
feature(self, config: Config) -> ConfigFeature: return { QemuDeviceNode.kvm: config.qemu_kvm, QemuDeviceNode.vhost_vsock: config.qemu_vsock, }[self] def open(self) -> int: return os.open(self.device(), os.O_RDWR|os.O_CLOEXEC|os.O_NONBLOCK) def available(self, log: bool = False) -> bool: try: os.close(self.open()) except OSError as e: if e.errno not in (errno.ENOENT, errno.EPERM, errno.EACCES): raise e if log and e.errno == errno.ENOENT: logging.warning(f"{self.device()} not found. Not adding {self.description()} to the virtual machine.") if log and e.errno in (errno.EPERM, errno.EACCES): logging.warning( f"Permission denied to access {self.device()}. " f"Not adding {self.description()} to the virtual machine. " "(Maybe a kernel module could not be loaded?)" ) return False return True def hash_output(config: Config) -> "hashlib._Hash": p = os.fspath(config.output_dir_or_cwd() / config.output_with_compression) return hashlib.sha256(p.encode()) def hash_to_vsock_cid(hash: "hashlib._Hash") -> int: cid = int.from_bytes(hash.digest()[:4], byteorder='little') # Make sure we don't return any of the well-known CIDs. return max(3, min(cid, 0xFFFFFFFF - 1)) def vsock_cid_in_use(vfd: int, cid: int) -> bool: try: fcntl.ioctl(vfd, VHOST_VSOCK_SET_GUEST_CID, struct.pack("=Q", cid)) except OSError as e: if e.errno != errno.EADDRINUSE: raise return True return False def find_unused_vsock_cid(config: Config, vfd: int) -> int: hash = hash_output(config) for i in range(64): cid = hash_to_vsock_cid(hash) if not vsock_cid_in_use(vfd, cid): return cid hash.update(i.to_bytes(length=4, byteorder='little')) for i in range(64): cid = random.randint(0, 0xFFFFFFFF - 1) if not vsock_cid_in_use(vfd, cid): return cid die("Failed to find an unused VSock connection ID") class KernelType(StrEnum): pe = enum.auto() uki = enum.auto() unknown = enum.auto() @classmethod def identify(cls, config: Config, path: Path) -> "KernelType": type = run(["bootctl", "kernel-identify", path], stdout=subprocess.PIPE, sandbox=config.sandbox(options=["--ro-bind", path, path])).stdout.strip() try: return cls(type) except ValueError: logging.warning(f"Unknown kernel type '{type}', assuming 'unknown'") return KernelType.unknown def find_qemu_binary(config: Config) -> str: binaries = [f"qemu-system-{config.architecture.to_qemu()}"] binaries += ["qemu", "qemu-kvm"] if config.architecture.is_native() else [] for binary in binaries: if find_binary(binary, root=config.tools()) is not None: return binary die("Couldn't find QEMU/KVM binary") def find_ovmf_firmware(config: Config) -> tuple[Path, bool]: FIRMWARE_LOCATIONS = { Architecture.x86_64: [ "usr/share/ovmf/x64/OVMF_CODE.secboot.fd", "usr/share/qemu/ovmf-x86_64.smm.bin", "usr/share/edk2/x64/OVMF_CODE.secboot.4m.fd", "usr/share/edk2/x64/OVMF_CODE.secboot.fd", ], Architecture.x86: [ "usr/share/edk2/ovmf-ia32/OVMF_CODE.secboot.fd", "usr/share/OVMF/OVMF32_CODE_4M.secboot.fd", "usr/share/edk2/ia32/OVMF_CODE.secboot.4m.fd", "usr/share/edk2/ia32/OVMF_CODE.secboot.fd", ], }.get(config.architecture, []) for firmware in FIRMWARE_LOCATIONS: if (config.tools() / firmware).exists(): return Path("/") / firmware, True FIRMWARE_LOCATIONS = { Architecture.x86_64: [ "usr/share/ovmf/ovmf_code_x64.bin", "usr/share/ovmf/x64/OVMF_CODE.fd", "usr/share/qemu/ovmf-x86_64.bin", "usr/share/edk2/x64/OVMF_CODE.4m.fd", "usr/share/edk2/x64/OVMF_CODE.fd", ], Architecture.x86: [ "usr/share/ovmf/ovmf_code_ia32.bin", "usr/share/edk2/ovmf-ia32/OVMF_CODE.fd", "usr/share/edk2/ia32/OVMF_CODE.4m.fd", "usr/share/edk2/ia32/OVMF_CODE.fd", ], 
Architecture.arm64: ["usr/share/AAVMF/AAVMF_CODE.fd"], Architecture.arm: ["usr/share/AAVMF/AAVMF32_CODE.fd"], }.get(config.architecture, []) for firmware in FIRMWARE_LOCATIONS: if (config.tools() / firmware).exists(): logging.warning("Couldn't find OVMF firmware blob with secure boot support, " "falling back to OVMF firmware blobs without secure boot support.") return Path("/") / firmware, False # If we can't find an architecture specific path, fall back to some generic paths that might also work. FIRMWARE_LOCATIONS = [ "usr/share/edk2/ovmf/OVMF_CODE.secboot.fd", "usr/share/edk2-ovmf/OVMF_CODE.secboot.fd", "usr/share/qemu/OVMF_CODE.secboot.fd", "usr/share/ovmf/OVMF.secboot.fd", "usr/share/OVMF/OVMF_CODE_4M.secboot.fd", "usr/share/OVMF/OVMF_CODE.secboot.fd", ] for firmware in FIRMWARE_LOCATIONS: if (config.tools() / firmware).exists(): return Path("/") / firmware, True FIRMWARE_LOCATIONS = [ "usr/share/edk2/ovmf/OVMF_CODE.fd", "usr/share/edk2-ovmf/OVMF_CODE.fd", "usr/share/qemu/OVMF_CODE.fd", "usr/share/ovmf/OVMF.fd", "usr/share/OVMF/OVMF_CODE_4M.fd", "usr/share/OVMF/OVMF_CODE.fd", ] for firmware in FIRMWARE_LOCATIONS: if (config.tools() / firmware).exists(): logging.warning("Couldn't find OVMF firmware blob with secure boot support, " "falling back to OVMF firmware blobs without secure boot support.") return Path("/") / firmware, False die("Couldn't find OVMF UEFI firmware blob.") def find_ovmf_vars(config: Config) -> Path: OVMF_VARS_LOCATIONS = [] if config.architecture == Architecture.x86_64: OVMF_VARS_LOCATIONS += [ "usr/share/ovmf/x64/OVMF_VARS.fd", "usr/share/qemu/ovmf-x86_64-vars.bin", "usr/share/edk2/x64/OVMF_VARS.4m.fd", "usr/share/edk2/x64/OVMF_VARS.fd", ] elif config.architecture == Architecture.x86: OVMF_VARS_LOCATIONS += [ "usr/share/edk2/ovmf-ia32/OVMF_VARS.fd", "usr/share/OVMF/OVMF32_VARS_4M.fd", "usr/share/edk2/ia32/OVMF_VARS.4m.fd", "usr/share/edk2/ia32/OVMF_VARS.fd", ] elif config.architecture == Architecture.arm: OVMF_VARS_LOCATIONS += ["usr/share/AAVMF/AAVMF32_VARS.fd"] elif config.architecture == Architecture.arm64: OVMF_VARS_LOCATIONS += ["usr/share/AAVMF/AAVMF_VARS.fd"] OVMF_VARS_LOCATIONS += [ "usr/share/edk2/ovmf/OVMF_VARS.fd", "usr/share/edk2-ovmf/OVMF_VARS.fd", "usr/share/qemu/OVMF_VARS.fd", "usr/share/ovmf/OVMF_VARS.fd", "usr/share/OVMF/OVMF_VARS_4M.fd", "usr/share/OVMF/OVMF_VARS.fd", ] for location in OVMF_VARS_LOCATIONS: if (config.tools() / location).exists(): return config.tools() / location die("Couldn't find OVMF UEFI variables file.") @contextlib.contextmanager def start_swtpm(config: Config) -> Iterator[Path]: with tempfile.TemporaryDirectory(prefix="mkosi-swtpm") as state: cmdline = ["swtpm", "socket", "--tpm2", "--tpmstate", f"dir={state}"] # We create the socket ourselves and pass the fd to swtpm to avoid race conditions where we start qemu before # swtpm has had the chance to create the socket (or where we try to chown it first).
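# The pattern below: bind and listen on the control socket ourselves, then hand the listening fd to # swtpm via --ctrl type=unixio,fd=<n>, so qemu can connect as soon as swtpm is running.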
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock: path = Path(state) / Path("sock") sock.bind(os.fspath(path)) sock.listen() cmdline += ["--ctrl", f"type=unixio,fd={sock.fileno()}"] with spawn( cmdline, pass_fds=(sock.fileno(),), sandbox=config.sandbox(options=["--bind", state, state]), ) as proc: try: yield path finally: proc.terminate() proc.wait() def find_virtiofsd(*, tools: Path = Path("/")) -> Optional[Path]: if p := find_binary("virtiofsd", root=tools): return p if (p := tools / "usr/libexec/virtiofsd").exists(): return Path("/") / p.relative_to(tools) if (p := tools / "usr/lib/virtiofsd").exists(): return Path("/") / p.relative_to(tools) return None @contextlib.contextmanager def start_virtiofsd(config: Config, directory: Path, *, uidmap: bool) -> Iterator[Path]: virtiofsd = find_virtiofsd(tools=config.tools()) if virtiofsd is None: die("virtiofsd must be installed to boot directory images or use RuntimeTrees= with mkosi qemu") cmdline: list[PathString] = [ virtiofsd, "--shared-dir", directory, "--xattr", # qemu's client doesn't seem to support announcing submounts so disable the feature to avoid the warning. "--no-announce-submounts", "--sandbox=chroot", ] if not uidmap and want_selinux_relabel(config, directory, fatal=False): cmdline += ["--security-label"] # We create the socket ourselves and pass the fd to virtiofsd to avoid race conditions where we start qemu # before virtiofsd has had the chance to create the socket (or where we try to chown it first). with ( tempfile.TemporaryDirectory(prefix="mkosi-virtiofsd") as context, socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock, ): # Make sure virtiofsd can access the socket in this directory. os.chown(context, INVOKING_USER.uid, INVOKING_USER.gid) # Make sure we can use the socket name as a unique identifier for the fs as well but make sure it's not too # long as virtiofs tag names are limited to 36 bytes. path = Path(context) / f"sock-{uuid.uuid4().hex}"[:35] sock.bind(os.fspath(path)) sock.listen() # Make sure virtiofsd can connect to the socket. os.chown(path, INVOKING_USER.uid, INVOKING_USER.gid) cmdline += ["--fd", str(sock.fileno())] with spawn( cmdline, pass_fds=(sock.fileno(),), # When not invoked as root, bubblewrap will automatically map the current uid/gid to the requested uid/gid # in the user namespace it spawns, so by specifying --uid 0 --gid 0 we'll get a userns with the current # uid/gid mapped to root in the userns. --cap-add=all is required to make virtiofsd work. Since it drops # capabilities itself, we don't bother figuring out the exact set of capabilities it needs. user=INVOKING_USER.uid if uidmap else None, group=INVOKING_USER.gid if uidmap else None, preexec_fn=become_root if not uidmap else None, sandbox=config.sandbox( options=[ "--uid", "0", "--gid", "0", "--cap-add", "all", "--bind", directory, directory, ], ), ) as proc: try: yield path finally: proc.terminate() proc.wait() @contextlib.contextmanager def vsock_notify_handler() -> Iterator[tuple[str, dict[str, str]]]: """ This yields a vsock address and a dict that will be filled in with the notifications from the VM. The dict should only be accessed after the context manager has been finalized. 
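Illustrative usage (a sketch, not copied verbatim from the caller): with vsock_notify_handler() as (addr, messages): hand addr to the VM (e.g. as the vmm.notify_socket credential), then read messages.get("EXIT_STATUS") once the VM has exited.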
""" with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as vsock: vsock.bind((socket.VMADDR_CID_ANY, socket.VMADDR_PORT_ANY)) vsock.listen() vsock.setblocking(False) num_messages = 0 num_bytes = 0 messages = {} async def notify() -> None: nonlocal num_messages nonlocal num_bytes loop = asyncio.get_running_loop() while True: s, _ = await loop.sock_accept(vsock) num_messages += 1 with s: data = [] try: while (buf := await loop.sock_recv(s, 4096)): data.append(buf) except ConnectionResetError: logging.debug("vsock notify listener connection reset by peer") for msg in b"".join(data).decode().split("\n"): if not msg: continue num_bytes += len(msg) k, _, v = msg.partition("=") messages[k] = v with AsyncioThread(notify()): yield f"vsock-stream:{socket.VMADDR_CID_HOST}:{vsock.getsockname()[1]}", messages logging.debug(f"Received {num_messages} notify messages totalling {format_bytes(num_bytes)} bytes") for k, v in messages.items(): logging.debug(f"- {k}={v}") @contextlib.contextmanager def copy_ephemeral(config: Config, src: Path) -> Iterator[Path]: src = src.resolve() # tempfile doesn't provide an API to get a random filename in an arbitrary directory so we do this # instead. tmp = src.parent / f"{src.name}-{uuid.uuid4().hex}" try: def copy() -> None: if config.output_format == OutputFormat.directory: become_root() copy_tree( src, tmp, preserve=config.output_format == OutputFormat.directory, use_subvolumes=config.use_subvolumes, tools=config.tools(), sandbox=config.sandbox(options=["--ro-bind", src, src, "--bind", tmp.parent, tmp.parent]), ) fork_and_wait(copy) yield tmp finally: def rm() -> None: if config.output_format == OutputFormat.directory: become_root() rmtree(tmp, sandbox=config.sandbox(options=["--ro-bind", src, src, "--bind", tmp.parent, tmp.parent])) fork_and_wait(rm) def qemu_version(config: Config) -> GenericVersion: return GenericVersion(run([find_qemu_binary(config), "--version"], stdout=subprocess.PIPE, sandbox=config.sandbox()).stdout.split()[3]) def want_scratch(config: Config) -> bool: return config.runtime_scratch == ConfigFeature.enabled or ( config.runtime_scratch == ConfigFeature.auto and find_binary(f"mkfs.{config.distribution.filesystem()}", root=config.tools()) is not None ) def run_qemu(args: Args, config: Config) -> None: if config.output_format not in ( OutputFormat.disk, OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp, OutputFormat.directory, ): die(f"{config.output_format} images cannot be booted in qemu") if ( config.output_format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp) and config.qemu_firmware not in (QemuFirmware.auto, QemuFirmware.linux, QemuFirmware.uefi) ): die(f"{config.output_format} images cannot be booted with the '{config.qemu_firmware}' firmware") if (config.runtime_trees and config.qemu_firmware == QemuFirmware.bios): die("RuntimeTrees= cannot be used when booting in BIOS firmware") if config.qemu_kvm == ConfigFeature.enabled and not config.architecture.is_native(): die(f"KVM acceleration requested but {config.architecture} does not match the native host architecture") # After we unshare the user namespace to sandbox qemu, we might not have access to /dev/kvm or related device nodes # anymore as access to these might be gated behind the kvm group and we won't be part of the kvm group anymore # after unsharing the user namespace. To get around this, open all those device nodes early can pass them as file # descriptors to qemu later. Note that we can't pass the kvm file descriptor to qemu until version 9.0. 
qemu_device_fds = { d: d.open() for d in QemuDeviceNode if d.feature(config) != ConfigFeature.disabled and d.available(log=True) } have_kvm = ((qemu_version(config) < QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm.available()) or (qemu_version(config) >= QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm in qemu_device_fds)) if (config.qemu_kvm == ConfigFeature.enabled and not have_kvm): die("KVM acceleration requested but cannot access /dev/kvm") if config.qemu_vsock == ConfigFeature.enabled and QemuDeviceNode.vhost_vsock not in qemu_device_fds: die("VSock requested but cannot access /dev/vhost-vsock") if config.qemu_kernel: kernel = config.qemu_kernel elif "-kernel" in args.cmdline: kernel = Path(args.cmdline[args.cmdline.index("-kernel") + 1]) else: kernel = None if config.output_format in (OutputFormat.uki, OutputFormat.esp) and kernel: logging.warning( f"Booting UKI output, kernel {kernel} configured with QemuKernel= or passed with -kernel will not be used" ) kernel = None if kernel and not kernel.exists(): die(f"Kernel not found at {kernel}") if config.qemu_firmware == QemuFirmware.auto: if kernel: firmware = ( QemuFirmware.uefi if KernelType.identify(config, kernel) != KernelType.unknown else QemuFirmware.linux ) elif ( config.output_format in (OutputFormat.cpio, OutputFormat.directory) or config.architecture.to_efi() is None ): firmware = QemuFirmware.linux else: firmware = QemuFirmware.uefi else: firmware = config.qemu_firmware if ( not kernel and ( firmware == QemuFirmware.linux or config.output_format in (OutputFormat.cpio, OutputFormat.directory, OutputFormat.uki) ) ): if firmware == QemuFirmware.uefi: name = config.output if config.output_format == OutputFormat.uki else config.output_split_uki kernel = config.output_dir_or_cwd() / name else: kernel = config.output_dir_or_cwd() / config.output_split_kernel if not kernel.exists(): die( f"Kernel or UKI not found at {kernel}, please install a kernel in the image " "or provide a -kernel argument to mkosi qemu" ) ovmf, ovmf_supports_sb = find_ovmf_firmware(config) if firmware == QemuFirmware.uefi else (None, False) # A shared memory backend might increase ram usage so only add one if actually necessary for virtiofsd. 
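# When needed this expands to roughly: -object memory-backend-memfd,id=mem,size=<QemuMem>,share=on # (vhost-user devices such as virtiofsd require the guest memory to be a shared mapping).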
shm = [] if config.runtime_trees or config.output_format == OutputFormat.directory: shm = ["-object", f"memory-backend-memfd,id=mem,size={config.qemu_mem},share=on"] machine = f"type={config.architecture.default_qemu_machine()}" if firmware == QemuFirmware.uefi and config.architecture.supports_smm(): machine += f",smm={'on' if ovmf_supports_sb else 'off'}" if shm: machine += ",memory-backend=mem" cmdline: list[PathString] = [ find_qemu_binary(config), "-machine", machine, "-smp", config.qemu_smp, "-m", config.qemu_mem, "-object", "rng-random,filename=/dev/urandom,id=rng0", "-device", "virtio-rng-pci,rng=rng0,id=rng-device0", *shm, ] cmdline += ["-nic", f"user,model={config.architecture.default_qemu_nic_model()}"] if config.qemu_kvm != ConfigFeature.disabled and have_kvm and config.architecture.is_native(): accel = "kvm" if qemu_version(config) >= QEMU_KVM_DEVICE_VERSION: cmdline += ["--add-fd", f"fd={qemu_device_fds[QemuDeviceNode.kvm]},set=1,opaque=/dev/kvm"] accel += ",device=/dev/fdset/1" else: accel = "tcg" cmdline += ["-accel", accel] if QemuDeviceNode.vhost_vsock in qemu_device_fds: if config.qemu_vsock_cid == QemuVsockCID.auto: cid = find_unused_vsock_cid(config, qemu_device_fds[QemuDeviceNode.vhost_vsock]) elif config.qemu_vsock_cid == QemuVsockCID.hash: cid = hash_to_vsock_cid(hash_output(config)) else: cid = config.qemu_vsock_cid if vsock_cid_in_use(qemu_device_fds[QemuDeviceNode.vhost_vsock], cid): die(f"VSock connection ID {cid} is already in use by another virtual machine", hint="Use QemuVsockConnectionId=auto to have mkosi automatically find a free vsock connection ID") cmdline += [ "-device", f"vhost-vsock-pci,guest-cid={cid},vhostfd={qemu_device_fds[QemuDeviceNode.vhost_vsock]}" ] cmdline += ["-cpu", "max"] if config.qemu_gui: cmdline += ["-vga", "virtio"] else: # -nodefaults removes the default CDROM device which avoids an error message during boot # -serial mon:stdio adds back the serial device removed by -nodefaults. cmdline += [ "-nographic", "-nodefaults", "-chardev", "stdio,mux=on,id=console,signal=off", "-serial", "chardev:console", "-mon", "console", ] # QEMU has built-in logic to look for the BIOS firmware so we don't need to do anything special for that. if firmware == QemuFirmware.uefi: cmdline += ["-drive", f"if=pflash,format=raw,readonly=on,file={ovmf}"] notifications: dict[str, str] = {} with contextlib.ExitStack() as stack: if firmware == QemuFirmware.uefi: ovmf_vars = stack.enter_context(tempfile.NamedTemporaryFile(prefix="mkosi-ovmf-vars")) shutil.copy2(config.qemu_firmware_variables or find_ovmf_vars(config), Path(ovmf_vars.name)) cmdline += ["-drive", f"file={ovmf_vars.name},if=pflash,format=raw"] if ovmf_supports_sb: cmdline += [ "-global", "ICH9-LPC.disable_s3=1", "-global", "driver=cfi.pflash01,property=secure,value=on", ] if config.qemu_cdrom and config.output_format in (OutputFormat.disk, OutputFormat.esp): # CD-ROM devices have sector size 2048 so we transform disk images into ones with sector size 2048. 
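# Concretely: systemd-repart copies the image into a throwaway file with --sector-size=2048 next to # the original output; the copy is removed again via the stack.callback() below.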
src = (config.output_dir_or_cwd() / config.output_with_compression).resolve() fname = src.parent / f"{src.name}-{uuid.uuid4().hex}" run( [ "systemd-repart", "--definitions", "", "--no-pager", "--pretty=no", "--offline=yes", "--empty=create", "--size=auto", "--sector-size=2048", "--copy-from", src, fname, ], sandbox=config.sandbox(options=["--bind", fname.parent, fname.parent, "--ro-bind", src, src]), ) stack.callback(lambda: fname.unlink()) elif config.ephemeral and config.output_format not in (OutputFormat.cpio, OutputFormat.uki): fname = stack.enter_context( copy_ephemeral(config, config.output_dir_or_cwd() / config.output_with_compression) ) else: fname = config.output_dir_or_cwd() / config.output_with_compression if config.output_format == OutputFormat.disk and config.runtime_size: run( [ "systemd-repart", "--definitions", "", "--no-pager", f"--size={config.runtime_size}", "--pretty=no", "--offline=yes", fname, ], sandbox=config.sandbox(options=["--bind", fname, fname]), ) if ( kernel and ( KernelType.identify(config, kernel) != KernelType.uki or not config.architecture.supports_smbios(firmware) ) ): kcl = config.kernel_command_line + config.kernel_command_line_extra else: kcl = config.kernel_command_line_extra for k, v in config.credentials.items(): payload = base64.b64encode(v.encode()).decode() if config.architecture.supports_smbios(firmware): cmdline += ["-smbios", f"type=11,value=io.systemd.credential.binary:{k}={payload}"] elif config.architecture.supports_fw_cfg(): f = stack.enter_context(tempfile.NamedTemporaryFile(prefix="mkosi-fw-cfg", mode="w")) f.write(v) f.flush() cmdline += ["-fw_cfg", f"name=opt/io.systemd.credentials/{k},file={f.name}"] elif kernel: kcl += [f"systemd.set_credential_binary={k}:{payload}"] if kernel: cmdline += ["-kernel", kernel] if any(s.startswith("root=") for s in kcl): pass elif config.output_format == OutputFormat.disk: # We can't rely on gpt-auto-generator when direct kernel booting so synthesize a root= # kernel argument instead. 
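# finalize_root() yields e.g. "root=PARTUUID=..." (or a "roothash="/"usrhash=" argument when verity # partitions are present); see mkosi/partition.py.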
root = finalize_root( find_partitions(fname, sandbox=config.sandbox(options=["--ro-bind", fname, fname])) ) if not root: die("Cannot perform a direct kernel boot without a root or usr partition") kcl += [root] elif config.output_format == OutputFormat.directory: sock = stack.enter_context(start_virtiofsd(config, fname, uidmap=False)) cmdline += [ "-chardev", f"socket,id={sock.name},path={sock}", "-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag=root", ] kcl += ["root=root", "rootfstype=virtiofs", "rw"] for tree in config.runtime_trees: sock = stack.enter_context(start_virtiofsd(config, tree.source, uidmap=True)) tag = tree.target.name if tree.target else tree.source.name cmdline += [ "-chardev", f"socket,id={sock.name},path={sock}", "-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag={tag}", ] target = Path("/root/src") / (tree.target or tree.source.name) kcl += [f"systemd.mount-extra={tag}:{target}:virtiofs"] if want_scratch(config) or config.output_format in (OutputFormat.disk, OutputFormat.esp): cmdline += ["-device", "virtio-scsi-pci,id=scsi"] if want_scratch(config): scratch = stack.enter_context(tempfile.NamedTemporaryFile(dir="/var/tmp", prefix="mkosi-scratch")) scratch.truncate(1024**4) fs = config.distribution.filesystem() extra = config.environment.get(f"SYSTEMD_REPART_MKFS_OPTIONS_{fs.upper()}", "") run( [f"mkfs.{fs}", "-L", "scratch", *extra.split(), scratch.name], stdout=subprocess.DEVNULL, sandbox=config.sandbox(options=["--bind", scratch.name, scratch.name]), ) cmdline += [ "-drive", f"if=none,id=scratch,file={scratch.name},format=raw", "-device", "scsi-hd,drive=scratch", ] kcl += [f"systemd.mount-extra=LABEL=scratch:/var/tmp:{config.distribution.filesystem()}"] if ( kernel and ( KernelType.identify(config, kernel) != KernelType.uki or not config.architecture.supports_smbios(firmware) ) ): cmdline += ["-append", " ".join(kcl)] elif config.architecture.supports_smbios(firmware): cmdline += [ "-smbios", f"type=11,value=io.systemd.stub.kernel-cmdline-extra={' '.join(kcl)}" ] if config.output_format == OutputFormat.cpio: cmdline += ["-initrd", fname] elif ( kernel and KernelType.identify(config, kernel) != KernelType.uki and "-initrd" not in args.cmdline and (config.output_dir_or_cwd() / config.output_split_initrd).exists() ): cmdline += ["-initrd", config.output_dir_or_cwd() / config.output_split_initrd] if config.output_format in (OutputFormat.disk, OutputFormat.esp): cmdline += ["-drive", f"if=none,id=mkosi,file={fname},format=raw", "-device", f"scsi-{'cd' if config.qemu_cdrom else 'hd'},drive=mkosi,bootindex=1"] if ( firmware == QemuFirmware.uefi and config.qemu_swtpm != ConfigFeature.disabled and find_binary("swtpm", root=config.tools()) is not None ): sock = stack.enter_context(start_swtpm(config)) cmdline += ["-chardev", f"socket,id=chrtpm,path={sock}", "-tpmdev", "emulator,id=tpm0,chardev=chrtpm"] if config.architecture == Architecture.x86_64: cmdline += ["-device", "tpm-tis,tpmdev=tpm0"] elif config.architecture == Architecture.arm64: cmdline += ["-device", "tpm-tis-device,tpmdev=tpm0"] if QemuDeviceNode.vhost_vsock in qemu_device_fds and config.architecture.supports_smbios(firmware): addr, notifications = stack.enter_context(vsock_notify_handler()) cmdline += ["-smbios", f"type=11,value=io.systemd.credential:vmm.notify_socket={addr}"] for drive in config.qemu_drives: file = stack.enter_context( tempfile.NamedTemporaryFile(dir=drive.directory or "/var/tmp", prefix=f"mkosi-drive-{drive.id}") ) file.truncate(drive.size) arg = 
f"if=none,id={drive.id},file={file.name},format=raw" if drive.options: arg += f",{drive.options}" cmdline += ["-drive", arg] cmdline += config.qemu_args cmdline += args.cmdline with spawn( cmdline, stdin=sys.stdin, stdout=sys.stdout, pass_fds=qemu_device_fds.values(), env=os.environ, log=False, foreground=True, sandbox=config.sandbox(network=True, devices=True, relaxed=True), ) as qemu: # We have to close these before we wait for qemu otherwise we'll deadlock as qemu will never exit. for fd in qemu_device_fds.values(): os.close(fd) qemu.wait() if status := int(notifications.get("EXIT_STATUS", 0)): raise subprocess.CalledProcessError(status, cmdline) def run_ssh(args: Args, config: Config) -> None: if config.qemu_vsock_cid == QemuVsockCID.auto: die("Can't use ssh verb with QemuVSockCID=auto") if not config.ssh_key: die("SshKey= must be configured to use 'mkosi ssh'", hint="Use 'mkosi genkey' to generate a new SSH key and certificate") if config.qemu_vsock_cid == QemuVsockCID.hash: cid = hash_to_vsock_cid(hash_output(config)) else: cid = config.qemu_vsock_cid cmd: list[PathString] = [ "ssh", "-i", config.ssh_key, "-F", "none", # Silence known hosts file errors/warnings. "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", "-o", "LogLevel=ERROR", "-o", f"ProxyCommand=socat - VSOCK-CONNECT:{cid}:%p", "root@mkosi", ] cmd += args.cmdline run( cmd, stdin=sys.stdin, stdout=sys.stdout, env=os.environ, log=False, sandbox=config.sandbox(network=True, devices=True, relaxed=True), ) mkosi-20.2/mkosi/resources/000077500000000000000000000000001455345632200157245ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/__init__.py000066400000000000000000000000001455345632200200230ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-initrd/000077500000000000000000000000001455345632200203355ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.conf000066400000000000000000000027311455345632200223310ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Output] @Output=initrd @Format=cpio ManifestFormat= [Content] Bootable=no MakeInitrd=yes @CleanPackageMetadata=yes Packages= systemd # sine qua non udev bash # for emergency logins less # this makes 'systemctl' much nicer to use ;) p11-kit # dl-opened by systemd lvm2 RemoveFiles= # we don't need this after the binary catalogs have been built /usr/lib/systemd/catalog /etc/udev/hwdb.d /usr/lib/udev/hwdb.d # this is not needed by anything updated in the last 20 years /etc/services # Including kernel images in the initrd is generally not useful. # This also stops mkosi from extracting the kernel image out of the image as a separate output. /usr/lib/modules/*/vmlinuz* /usr/lib/modules/*/System.map # Configure locale explicitly so that all other locale data is stripped on distros whose package manager supports it. @Locale=C.UTF-8 WithDocs=no # Make sure various core modules are always included in the initrd. 
KernelModulesInclude= btrfs dm-crypt dm-integrity dm-verity erofs ext4 loop overlay squashfs vfat xfs mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.conf.d/000077500000000000000000000000001455345632200226255ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-arch.conf000066400000000000000000000021551455345632200246320ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=arch [Content] Packages= gzip # For compressed keymap unpacking by loadkeys e2fsprogs xfsprogs # Various libraries that are dlopen'ed by systemd libfido2 tpm2-tss util-linux RemoveFiles= # Arch Linux doesn't split their gcc-libs package so we manually remove # unneeded stuff here to make sure it doesn't end up in the initrd. /usr/lib/libgfortran.so* /usr/lib/libgo.so* /usr/lib/libgomp.so* /usr/lib/libgphobos.so* /usr/lib/libobjc.so* /usr/lib/libasan.so* /usr/lib/libtsan.so* /usr/lib/liblsan.so* /usr/lib/libubsan.so* /usr/lib/libstdc++.so* /usr/lib/libgdruntime.so* # Remove all files that are only required for development. /usr/lib/*.a /usr/include/* /usr/share/i18n/* /usr/share/hwdata/* /usr/share/iana-etc/* /usr/share/doc/* /usr/share/man/* /usr/share/locale/* /usr/share/info/* /usr/share/gtk-doc/* mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-centos-fedora.conf000066400000000000000000000011071455345632200264420ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|fedora Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|rhel [Content] Packages= # Various libraries that are dlopen'ed by systemd libfido2 tpm2-tss # File system checkers for supported root file systems /usr/sbin/fsck.ext4 /usr/sbin/fsck.xfs # fsck.btrfs is a dummy, checking is done in the kernel. RemovePackages= # Various packages pull in shadow-utils to create users, we can remove it afterwards shadow-utils mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-centos.conf000066400000000000000000000002551455345632200252070ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|rhel [Content] Packages= util-linux mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-debian-ubuntu.conf000066400000000000000000000013461455345632200264600ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|ubuntu [Content] Packages= kmod # Not pulled in as a dependency on Debian/Ubuntu dmsetup # Not pulled in as a dependency on Debian/Ubuntu # xfsprogs pulls in python on Debian (???) and XFS generally # isn't used on Debian so we don't install xfsprogs. e2fsprogs util-linux # Various libraries that are dlopen'ed by systemd libfido2-1 ^libtss2-esys-[0-9\.]+-0$ libtss2-rc0 libtss2-mu0 libtss2-tcti-device0 RemovePackages= # TODO: Remove dpkg if dash ever loses its dependency on it. 
# dpkg RemoveFiles= /usr/share/locale/* mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-fedora.conf000066400000000000000000000001671455345632200251560ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Content] Packages= util-linux-core mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-opensuse.conf000066400000000000000000000006761455345632200255640ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Content] Packages= # Various libraries that are dlopen'ed by systemd libfido2-1 tpm2-0-tss # File system checkers for supported root file systems e2fsprogs xfsprogs # fsck.btrfs is a dummy, checking is done in the kernel. util-linux RemoveFiles= /usr/share/locale/* /usr/etc/services mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.conf.d/20-stub.conf000066400000000000000000000001651455345632200246720ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Format=uki Distribution=!arch [Content] Packages=systemd-boot mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.extra/000077500000000000000000000000001455345632200226015ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.extra/usr/000077500000000000000000000000001455345632200234125ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/000077500000000000000000000000001455345632200241605ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/000077500000000000000000000000001455345632200256505ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system-preset/000077500000000000000000000000001455345632200304745ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system-preset/99-mkosi.preset000066400000000000000000000002111455345632200332730ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # Make sure that services are disabled by default (primarily for Debian/Ubuntu). disable * mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system/000077500000000000000000000000001455345632200271745ustar00rootroot00000000000000systemd-cryptsetup@.service.d/000077500000000000000000000000001455345632200350065ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/systemcredential.conf000066400000000000000000000004501455345632200377660ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system/systemd-cryptsetup@.service.d[Service] ImportCredential=cryptsetup.* # Compat with older systemd versions that don't support ImportCredential=. 
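# ImportCredential=cryptsetup.* above already covers all of these on newer systemd; the explicit # LoadCredential= lines below are the enumerated fallback for older versions.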
LoadCredential=cryptsetup.passphrase LoadCredential=cryptsetup.fido2-pin LoadCredential=cryptsetup.tpm2-pin LoadCredential=cryptsetup.luks2-pin LoadCredential=cryptsetup.pkcs11-pin mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/000077500000000000000000000000001455345632200251235ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/000077500000000000000000000000001455345632200264775ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/10-mkosi-initrd-dm.rules000066400000000000000000000004161455345632200330010ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0-only # Copied from https://github.com/dracutdevs/dracut/blob/059/modules.d/90dm/11-dm.rules SUBSYSTEM!="block", GOTO="dm_end" KERNEL!="dm-[0-9]*", GOTO="dm_end" ACTION!="add|change", GOTO="dm_end" OPTIONS+="db_persist" LABEL="dm_end" mkosi-20.2/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/10-mkosi-initrd-md.rules000066400000000000000000000017541455345632200330070ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0-only # Copied from https://github.com/dracutdevs/dracut/blob/059/modules.d/90mdraid/59-persistent-storage-md.rules SUBSYSTEM!="block", GOTO="md_end" ACTION!="add|change", GOTO="md_end" # Also don't process disks that are slated to be a multipath device ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="md_end" KERNEL!="md[0-9]*|md_d[0-9]*|md/*", KERNEL!="md*", GOTO="md_end" # partitions have no md/{array_state,metadata_version} ENV{DEVTYPE}=="partition", GOTO="md_ignore_state" # container devices have a metadata version of e.g. 'external:ddf' and # never leave state 'inactive' ATTR{md/metadata_version}=="external:[A-Za-z]*", ATTR{md/array_state}=="inactive", GOTO="md_ignore_state" TEST!="md/array_state", GOTO="md_end" ATTR{md/array_state}=="|clear|inactive", GOTO="md_end" LABEL="md_ignore_state" IMPORT{program}="/sbin/mdadm --detail --export $tempnode" IMPORT{builtin}="blkid" OPTIONS+="link_priority=100" OPTIONS+="watch" OPTIONS+="db_persist" LABEL="md_end" mkosi-20.2/mkosi/resources/mkosi-tools/000077500000000000000000000000001455345632200202045ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf000066400000000000000000000007431455345632200222010ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Output] Format=directory ManifestFormat= [Content] Bootable=no Packages= bash bubblewrap ca-certificates coreutils cpio diffutils dnf dosfstools e2fsprogs kmod less mtools nano openssl socat strace swtpm systemd tar util-linux virtiofsd xfsprogs zstd mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf.d/000077500000000000000000000000001455345632200224745ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf.d/10-arch.conf000066400000000000000000000007361455345632200245040ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=arch [Content] Packages= apt archlinux-keyring base btrfs-progs curl debian-archive-keyring dpkg edk2-ovmf erofs-utils grub openssh pacman pesign python-cryptography qemu-base sbsigntools shadow squashfs-tools systemd-ukify ubuntu-keyring xz mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos-fedora/000077500000000000000000000000001455345632200254435ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos-fedora/mkosi.conf000066400000000000000000000010651455345632200274360ustar00rootroot00000000000000# 
SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|rhel Distribution=|fedora [Content] Packages= apt createrepo_c curl-minimal debian-keyring distribution-gpg-keys dnf-plugins-core dpkg-dev grub2-tools openssh-clients policycoreutils python3-cryptography qemu-kvm-core shadow-utils squashfs-tools systemd-container systemd-udev ubu-keyring xz mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos-fedora/mkosi.conf.d/000077500000000000000000000000001455345632200277335ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos-fedora/mkosi.conf.d/10-uefi.conf000066400000000000000000000002351455345632200317500ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] HostArchitecture=|x86-64 HostArchitecture=|arm64 [Content] Packages= edk2-ovmf pesign mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos.conf000066400000000000000000000003021455345632200250470ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|rhel [Distribution] Repositories= epel epel-next mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-ubuntu.conf000066400000000000000000000013241455345632200263230ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|ubuntu [Content] Packages= ?exact-name(systemd-ukify) apt archlinux-keyring btrfs-progs createrepo-c curl debian-archive-keyring dpkg-dev erofs-utils grub2 libtss2-dev makepkg openssh-client ovmf pacman-package-manager pesign policycoreutils python3-cryptography python3-pefile qemu-system sbsigntool squashfs-tools systemd-boot systemd-container systemd-coredump ubuntu-keyring uidmap xz-utils zypper mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf.d/10-fedora/000077500000000000000000000000001455345632200241525ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf.d/10-fedora/mkosi.conf000066400000000000000000000005241455345632200261440ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Content] Packages= archlinux-keyring btrfs-progs dnf5 dnf5-plugins erofs-utils pacman qemu-system-aarch64-core qemu-system-ppc-core qemu-system-s390x-core systemd-ukify zypper mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf.d/10-fedora/mkosi.conf.d/000077500000000000000000000000001455345632200264425ustar00rootroot00000000000000mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf.d/10-fedora/mkosi.conf.d/10-uefi.conf000066400000000000000000000002201455345632200304510ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] HostArchitecture=|x86-64 HostArchitecture=|arm64 [Content] Packages= sbsigntools mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.conf.d/10-opensuse.conf000066400000000000000000000010621455345632200254210ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Content] Packages= btrfs-progs ca-certificates-mozilla createrepo_c curl distribution-gpg-keys dnf-plugins-core erofs-utils grep grub2 openssh-clients ovmf pesign policycoreutils qemu-headless sbsigntools shadow squashfs systemd-boot systemd-container systemd-coredump systemd-experimental xz zypper mkosi-20.2/mkosi/resources/mkosi-tools/mkosi.prepare.chroot000077500000000000000000000002521455345632200242050ustar00rootroot00000000000000#!/bin/sh # 
SPDX-License-Identifier: LGPL-2.1-or-later if [ "$1" = "final" ] && command -v pacman-key; then pacman-key --init pacman-key --populate archlinux fi mkosi-20.2/mkosi/resources/mkosi.md000066400000000000000000003110711455345632200173730ustar00rootroot00000000000000% mkosi(1) % % # NAME mkosi — Build Bespoke OS Images # SYNOPSIS `mkosi [options…] summary` `mkosi [options…] build [command line…]` `mkosi [options…] shell [command line…]` `mkosi [options…] boot [nspawn settings…]` `mkosi [options…] qemu [qemu parameters…]` `mkosi [options…] ssh [command line…]` `mkosi [options…] journalctl [command line…]` `mkosi [options…] coredumpctl [command line…]` `mkosi [options…] clean` `mkosi [options…] serve` `mkosi [options…] burn <device>` `mkosi [options…] bump` `mkosi [options…] genkey` `mkosi [options…] documentation` `mkosi [options…] help` # DESCRIPTION `mkosi` is a tool for easily building customized OS images. It's a fancy wrapper around `dnf --installroot`, `apt`, `pacman` and `zypper` that may generate disk images with a number of bells and whistles. ## Command Line Verbs The following command line verbs are known: `summary` : Outputs a human-readable summary of all options used for building an image. This will parse the command line and `mkosi.conf` file as it would do on `build`, but only output what it is configured for and not actually build anything. `build` : This builds the image based on the settings passed in on the command line or read from configuration files. This command is the default if no verb is explicitly specified. If any command line arguments are specified, these are passed directly to the build script if one is defined. `shell` : This builds the image if it is not built yet, and then invokes `systemd-nspawn` to acquire an interactive shell prompt in it. An optional command line may be specified after the `shell` verb, to be invoked in place of the shell in the container. Use `-f` in order to rebuild the image unconditionally before acquiring the shell, see below. This command must be executed as `root`. `boot` : Similar to `shell`, but boots the image using `systemd-nspawn`. An optional command line may be specified after the `boot` verb, which can contain extra nspawn options as well as arguments which are passed as the *kernel command line* to the init system in the image. `qemu` : Similar to `boot`, but uses `qemu` to boot up the image, i.e. instead of container virtualization, virtual machine virtualization is used. This verb is only supported for disk images that contain a boot loader and cpio images in which a kernel was installed. For cpio images a kernel can also be provided by passing the `-kernel` qemu argument to the `qemu` verb. Any arguments specified after the `qemu` verb are appended to the `qemu` invocation. `ssh` : When the image is built with the `Ssh=yes` option, this command connects to a booted virtual machine (`qemu`) via SSH. Make sure to run `mkosi ssh` with the same config as `mkosi build` so that it has the necessary information available to connect to the running virtual machine via SSH. Specifically, the SSH private key from the `SshKey=` setting is used to connect to the virtual machine. Use `mkosi genkey` to automatically generate a key and certificate that will be picked up by mkosi. Any arguments passed after the `ssh` verb are passed as arguments to the `ssh` invocation. To connect to a container, use `machinectl login` or `machinectl shell`. `journalctl` : Uses `journalctl` to inspect the journal inside the image.
Any arguments specified after the `journalctl` verb are appended to the `journalctl` invocation. `coredumpctl` : Uses `coredumpctl` to look for coredumps inside the image. Any arguments specified after the `coredumpctl` verb are appended to the `coredumpctl` invocation. `clean` : Remove build artifacts generated on a previous build. If combined with `-f`, also removes incremental build cache images. If `-f` is specified twice, also removes any package cache. `serve` : This builds the image if it is not built yet, and then serves the output directory (i.e. usually `mkosi.output/`, see below) via a small embedded HTTP server, listening on port 8081. Combine with `-f` in order to rebuild the image unconditionally before serving it. This command is useful for testing network based acquisition of OS images, for example via `machinectl pull-raw …` and `machinectl pull-tar …`. `burn ` : This builds the image if it is not built yet, and then writes it to the specified block device. The partition contents are written as-is, but the GPT partition table is corrected to match sector and disk size of the specified medium. `bump` : Bumps the image version from `mkosi.version` and writes the resulting version string to `mkosi.version`. This is useful for implementing a simple versioning scheme: each time this verb is called the version is bumped in preparation for the subsequent build. Note that `--auto-bump`/`-B` may be used to automatically bump the version after each successful build. `genkey` : Generate a pair of SecureBoot keys for usage with the `SecureBootKey=`/`--secure-boot-key=` and `SecureBootCertificate=`/`--secure-boot-certificate=` options. `documentation` : Show mkosi's documentation. By default this verb will try several ways to output the documentation, but a specific option can be chosen with the `--doc-format` option. Distro packagers are encouraged to add a file `mkosi.1` into the `mkosi/resources` directory of the Python package, if it is missing, as well as to install it in the appropriate search path for man pages. The man page can be generated from the markdown file `mkosi/resources/mkosi.md` e.g via `pandoc -t man -s -o mkosi.1 mkosi.md`. `help` : This verb is equivalent to the `--help` switch documented below: it shows a brief usage explanation. ## Commandline-only Options Those settings cannot be configured in the configuration files. `--force`, `-f` : Replace the output file if it already exists, when building an image. By default when building an image and an output artifact already exists `mkosi` will refuse operation. Specify this option once to delete all build artifacts from a previous run before re-building the image. If incremental builds are enabled, specifying this option twice will ensure the intermediary cache files are removed, too, before the re-build is initiated. If a package cache is used (also see the **Files** section below), specifying this option thrice will ensure the package cache is removed too, before the re-build is initiated. For the `clean` operation this option has a slightly different effect: by default the verb will only remove build artifacts from a previous run, when specified once the incremental cache files are deleted too, and when specified twice the package cache is also removed. `--directory=`, `-C` : Takes a path to a directory. `mkosi` switches to this directory before doing anything. 
Note that the various configuration files are searched for in this directory, hence using this option is an effective way to build a project located in a specific directory.

`--debug=`
: Enable additional debugging output.

`--debug-shell=`
: When a command executed in the image fails, mkosi will start an interactive shell in the image allowing further debugging.

`--debug-workspace=`
: When an error occurs, the workspace directory will not be deleted.

`--version`
: Show package version.

`--help`, `-h`
: Show brief usage information.

`--genkey-common-name=`
: Common name to be used when generating keys via mkosi's `genkey` command. Defaults to `mkosi of %u`, where `%u` expands to the username of the user invoking mkosi.

`--genkey-valid-days=`
: Number of days that the keys should remain valid when generating keys via mkosi's `genkey` command. Defaults to two years (730 days).

`--auto-bump=`, `-B`
: If specified, after each successful build the version is bumped in a fashion equivalent to the `bump` verb, in preparation for the next build. This is useful for simple, linear version management: each build in a series will have a version number one higher than the previous one.

`--doc-format`
: The format to show the documentation in. Supports the values `markdown`, `man`, `pandoc`, `system` and `auto`. In the case of `markdown` the documentation is shown in the original Markdown format. `man` shows the documentation in man page format, if it is available. `pandoc` will generate the man page format on the fly, if `pandoc` is available. `system` will show the system-wide man page for mkosi, which may or may not correspond to the version you are using, depending on how you installed mkosi. `auto`, which is the default, will try all methods in the order `man`, `pandoc`, `markdown`, `system`.

`--json`
: Show the summary output as JSON-SEQ.

## Supported output formats

The following output formats are supported:

* Raw *GPT* disk image, created using systemd-repart (*disk*)
* Plain directory, containing the OS tree (*directory*)
* Tar archive (*tar*)
* CPIO archive (*cpio*)

The output format may also be set to *none* to have mkosi produce no image at all. This can be useful if you only want to use the image to produce another output in the build scripts (e.g. build an rpm).

When a *GPT* disk image is created, repart partition definition files may be placed in `mkosi.repart/` to configure the generated disk image.

It is highly recommended to run `mkosi` on a file system that supports reflinks such as XFS and btrfs and to keep all related directories on the same file system. This allows mkosi to create images very quickly by using reflinks to perform copying via copy-on-write operations.

## Configuration Settings

The following settings can be set through configuration files (the syntax with `SomeSetting=value`) and on the command line (the syntax with `--some-setting=value`). For some command line parameters, a single-letter shortcut is also allowed. In the configuration files, the setting must be in the appropriate section, so the settings are grouped by section below.

Configuration is parsed in the following order:

* The command line arguments are parsed
* `mkosi.local.conf` is parsed if it exists. This file should be in the gitignore (or equivalent) and is intended for local configuration.
* Any default paths (depending on the option) are configured if the corresponding path exists.
* `mkosi.conf` is parsed if it exists in the directory configured with `--directory=` or the current working directory if `--directory=` is not used. * `mkosi.conf.d/` is parsed in the same directory if it exists. Each directory and each file with the `.conf` extension in `mkosi.conf.d/` is parsed. Any directory in `mkosi.conf.d` is parsed as if it were a regular top level directory. Note that if the same setting is configured twice, the later assignment overrides the earlier assignment unless the setting is a list based setting. Also note that before v16, we used to do the opposite, where the earlier assignment would be used instead of later assignments. Settings that take a list of values are merged by appending the new values to the previously configured values. Assigning the empty string to such a setting removes all previously assigned values, and overrides any configured default values as well. If a setting's name in the configuration file is prefixed with `@`, it configures the default value used for that setting if no explicit default value is set. This can be used to set custom default values in configuration files that can still be overridden by specifying the setting explicitly via the CLI. To conditionally include configuration files, the `[Match]` section can be used. Matches can use a pipe symbol (`|`) after the equals sign (`…=|…`), which causes the match to become a triggering match. The config file will be included if the logical AND of all non-triggering matches and the logical OR of all triggering matches is satisfied. To negate the result of a match, prefix the argument with an exclamation mark. If an argument is prefixed with the pipe symbol and an exclamation mark, the pipe symbol must be passed first, and the exclamation second. Note that `[Match]` settings match against the current values of specific settings, and do not take into account changes made to the setting in configuration files that have not been parsed yet. Also note that matching against a setting and then changing its value afterwards in a different config file may lead to unexpected results. The `[Match]` section of a `mkosi.conf` file in a directory applies to the entire directory. If the conditions are not satisfied, the entire directory is skipped. The `[Match]` sections of files in `mkosi.conf.d/` and `mkosi.local.conf` only apply to the file itself. If there are multiple `[Match]` sections in the same configuration file, each of them has to be satisfied in order for the configuration file to be included. Specifically, triggering matches only apply to the current `[Match]` section and are reset between multiple `[Match]` sections. As an example, the following will only match if the output format is one of `disk` or `directory` and the architecture is one of `x86-64` or `arm64`: ```conf [Match] Format=|disk Format=|directory [Match] Architecture=|x86-64 Architecture=|arm64 ``` Command line options that take no argument are shown without `=` in their long version. In the config files, they should be specified with a boolean argument: either `1`, `yes`, or `true` to enable, or `0`, `no`, `false` to disable. ### [Match] Section. `Profile=` : Matches against the configured profile. `Distribution=` : Matches against the configured distribution. `Release=` : Matches against the configured distribution release. If this condition is used and no distribution has been explicitly configured yet, the host distribution and release are used. `Architecture=` : Matches against the configured architecture. 
If this condition is used and no architecture has been explicitly configured yet, the host architecture is used. `PathExists=` : This condition is satisfied if the given path exists. Relative paths are interpreted relative to the parent directory of the config file that the condition is read from. `ImageId=` : Matches against the configured image ID, supporting globs. If this condition is used and no image ID has been explicitly configured yet, this condition fails. `ImageVersion=` : Matches against the configured image version. Image versions can be prepended by the operators `==`, `!=`, `>=`, `<=`, `<`, `>` for rich version comparisons according to the UAPI group version format specification. If no operator is prepended, the equality operator is assumed by default. If this condition is used and no image version has been explicitly configured yet, this condition fails. `Bootable=` : Matches against the configured value for the `Bootable=` feature. Takes a boolean value or `auto`. `Format=` : Matches against the configured value for the `Format=` option. Takes an output format (see the `Format=` option). `SystemdVersion=` : Matches against the systemd version on the host (as reported by `systemctl --version`). Values can be prepended by the operators `==`, `!=`, `>=`, `<=`, `<`, `>` for rich version comparisons according to the UAPI group version format specification. If no operator is prepended, the equality operator is assumed by default. `BuildSources=` : Takes a build source target path (see `BuildSources=`). This match is satisfied if any of the configured build sources uses this target path. For example, if we have a `mkosi.conf` file containing: ```conf [Content] BuildSources=../abc/qed:kernel ``` and a drop-in containing: ```conf [Match] BuildSources=kernel ``` The drop-in will be included. : Any absolute paths passed to this setting are interpreted relative to the current working directory. `HostArchitecture=` : Matches against the host's native architecture. See the `Architecture=` setting for a list of possible values. | Matcher | Globs | Rich Comparisons | Default | |---------------------|-------|------------------|-------------------------| | `Profile=` | no | no | match fails | | `Distribution=` | no | no | match host distribution | | `Release=` | no | no | match host release | | `Architecture=` | no | no | match host architecture | | `PathExists=` | no | no | n/a | | `ImageId=` | yes | no | match fails | | `ImageVersion=` | no | yes | match fails | | `Bootable=` | no | no | match auto feature | | `Format=` | no | no | match default format | | `SystemdVersion=` | no | yes | n/a | | `BuildSources=` | no | no | match fails | | `HostArchitecture=` | no | no | n/a | ### [Config] Section `Profile=`, `--profile=` : Select the given profile. A profile is a configuration file or directory in the `mkosi.profiles/` directory. When selected, this configuration file or directory is included after parsing the `mkosi.conf` file, but before any `mkosi.conf.d/*.conf` drop in configuration. `Include=`, `--include=` : Include extra configuration from the given file or directory. The extra configuration is included immediately after parsing the setting, except when a default is set using `@Include=`, in which case the configuration is included after parsing all the other configuration files. : Note that each path containing extra configuration is only parsed once, even if included more than once with `Include=`. 
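: For example, extra configuration that is shared between several images could be pulled in from a common location with `Include=`. A minimal sketch, assuming a hypothetical `mkosi-shared/` directory next to the configuration file:

```conf
[Config]
# Parse the settings in this directory as if they were part of this file.
Include=mkosi-shared
```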
`InitrdInclude=`, `--initrd-include=`
: Same as `Include=`, but the extra configuration files or directories are included when building the default initrd.

`Images=`, `--image=`
: If specified, only build the given image. Can be specified multiple times to build multiple images. All the given images and their dependencies are built. If not specified, all images are built. See the **Building multiple images** section for more information.

: Note that this setting only takes effect when specified in the global configuration files. It has no effect if specified as an image specific setting.

`Dependencies=`, `--dependency=`
: The images that this image depends on, specified as a comma-separated list. All images configured in this option will be built before this image and will be pulled in as dependencies of this image when `Images=` is used.

`MinimumVersion=`, `--minimum-version=`
: The minimum mkosi version required to build this configuration. If specified multiple times, the highest specified version is used.

### [Distribution] Section

`Distribution=`, `--distribution=`, `-d`
: The distribution to install in the image. Takes one of the following arguments: `fedora`, `debian`, `ubuntu`, `arch`, `opensuse`, `mageia`, `centos`, `rhel`, `rhel-ubi`, `openmandriva`, `rocky`, `alma`, `custom`. If not specified, defaults to the distribution of the host or `custom` if the distribution of the host is not a supported distribution.

`Release=`, `--release=`, `-r`
: The release of the distribution to install in the image. The precise syntax of the argument this takes depends on the distribution used, and is either a numeric string (in case of Fedora Linux, CentOS, …, e.g. `29`), or a distribution version name (in case of Debian, Ubuntu, …, e.g. `artful`). Defaults to a recent version of the chosen distribution, or the version of the distribution running on the host if it matches the configured distribution.

`Architecture=`, `--architecture=`
: The architecture to build the image for. The architectures that are actually supported depend on the distribution used and whether a bootable image is requested or not. When building for a foreign architecture, you'll also need to install and register a user mode emulator for that architecture.

: One of the following architectures can be specified per image built: `alpha`, `arc`, `arm`, `arm64`, `ia64`, `loongarch64`, `mips64-le`, `mips-le`, `parisc`, `ppc`, `ppc64`, `ppc64-le`, `riscv32`, `riscv64`, `s390`, `s390x`, `tilegx`, `x86`, `x86-64`.

`Mirror=`, `--mirror=`, `-m`
: The mirror to use for downloading the distribution packages. Expects a mirror URL as argument. If not provided, the default mirror for the distribution is used.
: The default mirrors for each distribution are as follows (unless specified otherwise, the same mirror is used for all architectures):

  |                | x86-64                            | aarch64                        |
  |----------------|-----------------------------------|--------------------------------|
  | `debian`       | http://deb.debian.org/debian      |                                |
  | `arch`         | https://geo.mirror.pkgbuild.com   | http://mirror.archlinuxarm.org |
  | `opensuse`     | http://download.opensuse.org      |                                |
  | `ubuntu`       | http://archive.ubuntu.com         | http://ports.ubuntu.com        |
  | `centos`       | https://mirrors.centos.org        |                                |
  | `rocky`        | https://mirrors.rockylinux.org    |                                |
  | `alma`         | https://mirrors.almalinux.org     |                                |
  | `fedora`       | https://mirrors.fedoraproject.org |                                |
  | `rhel-ubi`     | https://cdn-ubi.redhat.com        |                                |
  | `mageia`       | https://www.mageia.org            |                                |
  | `openmandriva` | http://mirrors.openmandriva.org   |                                |

`LocalMirror=`, `--local-mirror=`
: The mirror will be used as a local, plain and direct mirror instead of using it as a prefix for the full set of repositories normally supported by distributions. Useful for fully offline builds with a single repository. Supported on deb/rpm/arch based distributions. Overrides `--mirror=`, but only for the local mkosi build; it will not be configured inside the final image. `--mirror=` (or the default repository) will be configured inside the final image instead.

`RepositoryKeyCheck=`, `--repository-key-check=`
: Controls signature/key checks when using repositories, enabled by default. Useful to disable checks when combined with `--local-mirror=` and using only a repository from a local filesystem. Not used for DNF-based distros yet.

`Repositories=`, `--repositories=`
: Enable package repositories that are disabled by default. This can be used to enable the EPEL repos for CentOS or different components of the Debian/Ubuntu repositories.

`CacheOnly=`, `--cache-only=`
: If specified, the package manager is instructed not to contact the network for updating package data. This provides a minimal level of reproducibility, as long as the package cache is already fully populated.

`PackageManagerTrees=`, `--package-manager-tree=`
: This option mirrors the `SkeletonTrees=` option described in the `[Content]` section below and defaults to the same value if not configured otherwise, but installs the files to a subdirectory of the workspace directory instead of the OS tree. This subdirectory of the workspace is used to configure the package manager.

: `mkosi` will look for the package manager configuration and related files in the configured package manager trees. Unless specified otherwise, it will use the configuration files from their canonical locations in `/usr` or `/etc` in the package manager trees. For example, it will look for `etc/dnf/dnf.conf` in the package manager trees if `dnf` is used to install packages.

: `SkeletonTrees=` and `PackageManagerTrees=` fulfill similar roles. Use `SkeletonTrees=` if you want the files to be present in the final image. Use `PackageManagerTrees=` if you don't want the files to be present in the final image, e.g. when building an initrd or if you want to refer to paths outside of the image in your repository configuration.

### [Output] Section

`Format=`, `--format=`, `-t`
: The image format type to generate.
One of `directory` (for generating an OS image directly in a local directory), `tar` (similar, but a tarball of the OS image is generated), `cpio` (similar, but a cpio archive is generated), `disk` (a block device OS image with a GPT partition table), `uki` (a unified kernel image with the OS image in the `.initrd` PE section), `esp` (`uki` but wrapped in a disk image with only an ESP partition), `sysext`, `confext`, `portable` or `none` (the OS image is solely intended as a build image to produce another artifact).

: If the `disk` output format is used, the disk image is generated using `systemd-repart`. The repart partition definition files to use can be configured using the `RepartDirectories=` setting or via `mkosi.repart/`. When verity partitions are configured using systemd-repart's `Verity=` setting, mkosi will automatically parse the verity hash partition's roothash from systemd-repart's JSON output and include it in the kernel command line of every unified kernel image built by mkosi.

`ManifestFormat=`, `--manifest-format=`
: The manifest format type or types to generate. A comma-delimited list consisting of `json` (the standard JSON output format that describes the packages installed), `changelog` (a human-readable text format designed for diffing). By default no manifest is generated.

`Output=`, `--output=`, `-o`
: Name to use for the generated output image file or directory. All outputs will be prefixed with the given name. Defaults to `image` or, if `ImageId=` is specified, it is used as the default output name, optionally suffixed with the version set with `ImageVersion=`. Note that this option does not allow configuring the output directory; use `OutputDirectory=` for that.

: Note that this only specifies the output prefix; depending on the specific output format, compression and image version used, the full output name might be `image_7.8.raw.xz`.

`CompressOutput=`, `--compress-output=`
: Configure compression for the resulting image or archive. The argument can be either a boolean or a compression algorithm (`xz`, `zstd`). `zstd` compression is used by default, except CentOS and derivatives up to version 8, which default to `xz`. Note that when applied to block device image types, compression means the image cannot be started directly but needs to be decompressed first. This also means that the `shell`, `boot`, `qemu` verbs are not available when this option is used. Implied for `tar`, `cpio`, `uki`, and `esp`.

`OutputDirectory=`, `--output-dir=`, `-O`
: Path to a directory where to place all generated artifacts. If this is not specified and the directory `mkosi.output/` exists in the local directory, it is automatically used for this purpose.

`WorkspaceDirectory=`, `--workspace-dir=`
: Path to a directory where to store data required temporarily while building the image. This directory should have enough space to store the full OS image, though in most modes the actually used disk space is smaller. If not specified, a subdirectory of `$XDG_CACHE_HOME` (if set), `$HOME/.cache` (if set) or `/var/tmp` is used.

: The data in this directory is removed automatically after each build. It's safe to manually remove the contents of this directory should an `mkosi` invocation be aborted abnormally (for example, due to reboot/power failure).

`CacheDirectory=`, `--cache-dir=`
: Takes a path to a directory to use as package cache for the distribution package manager used.
If this option is not used, but a `mkosi.cache/` directory is found in the local directory it is automatically used for this purpose.

`BuildDirectory=`, `--build-dir=`
: Takes a path to a directory to use as the build directory for build systems that support out-of-tree builds (such as Meson). The directory used this way is shared between repeated builds, and allows the build system to reuse artifacts (such as object files, executables, …) generated on previous invocations. The build scripts can find the path to this directory in the `$BUILDDIR` environment variable. This directory is mounted into the image's root directory when `mkosi-chroot` is invoked during execution of the build scripts. If this option is not specified, but a directory `mkosi.builddir/` exists in the local directory it is automatically used for this purpose (also see the **Files** section below).

`ImageVersion=`, `--image-version=`
: Configure the image version. This accepts any string, but it is recommended to specify a series of dot separated components. The version may also be configured in a file `mkosi.version` in which case it may be conveniently managed via the `bump` verb or the `--auto-bump` option. When specified the image version is included in the default output file name, i.e. instead of `image.raw` the default will be `image_0.1.raw` for version `0.1` of the image, and similar. The version is also passed via the `$IMAGE_VERSION` environment variable to any build scripts invoked (which may be useful to patch it into `/etc/os-release` or similar, in particular the `IMAGE_VERSION=` field of it).

`ImageId=`, `--image-id=`
: Configure the image identifier. This accepts a freeform string that shall be used to identify the image with. If set the default output file will be named after it (possibly suffixed with the version). The identifier is also passed via the `$IMAGE_ID` environment variable to any build scripts invoked. The image ID is automatically added to `/usr/lib/os-release`.

`SplitArtifacts=`, `--split-artifacts`
: If specified and building a disk image, pass `--split=yes` to systemd-repart to have it write out split partition files for each configured partition. Read the [man](https://www.freedesktop.org/software/systemd/man/systemd-repart.html#--split=BOOL) page for more information. This is useful in A/B update scenarios where an existing disk image shall be augmented with a new version of a root or `/usr` partition along with its Verity partition and unified kernel.

`RepartDirectories=`, `--repart-dir=`
: Paths to directories containing systemd-repart partition definition files that are used when mkosi invokes systemd-repart when building a disk image. If `mkosi.repart/` exists in the local directory, it will be used for this purpose as well. Note that mkosi invokes repart with `--root=` set to the root of the image, so any `CopyFiles=` source paths in partition definition files will be relative to the image root directory.

`SectorSize=`, `--sector-size=`
: Override the default sector size that systemd-repart uses when building a disk image.

`RepartOffline=`, `--repart-offline=`
: Specifies whether to build disk images offline, i.e. without using loopback devices. Enabled by default. When enabled, `systemd-repart` will not use loopback devices to build disk images. When disabled, `systemd-repart` will always use loopback devices to build disk images.

: Note that when using `RepartOffline=no` mkosi cannot run unprivileged and the image build has to be done as the root user outside of any containers and with loopback devices available on the host system.
: There are currently two known scenarios where `RepartOffline=no` has to be used. The first is when using `Subvolumes=` in a repart partition definition file, as subvolumes cannot be created without using loopback devices. The second is when creating a system with SELinux and an XFS root partition. Because `mkfs.xfs` does not support populating an XFS filesystem with extended attributes, loopback devices have to be used to ensure the SELinux extended attributes end up in the generated XFS filesystem.

`Overlay=`, `--overlay`
: When used together with `BaseTrees=`, the output will consist only of changes to the specified base trees. Each base tree is attached as a lower layer in an overlayfs structure, and the output becomes the upper layer, initially empty. Thus files that are not modified compared to the base trees will not be present in the final output.

: This option may be used to create [systemd *system extensions* or *portable services*](https://uapi-group.org/specifications/specs/extension_image).

`UseSubvolumes=`, `--use-subvolumes=`
: Takes a boolean or `auto`. Enables or disables use of btrfs subvolumes for directory tree outputs. If enabled, mkosi will create the root directory as a btrfs subvolume and use btrfs subvolume snapshots where possible to copy base or cached trees, which is much faster than doing a recursive copy. If explicitly enabled and `btrfs` is not installed or subvolumes cannot be created, an error is raised. If `auto`, missing `btrfs` or failures to create subvolumes are ignored.

`Seed=`, `--seed=`
: Takes a UUID as argument or the special value `random`. Overrides the seed that [`systemd-repart(8)`](https://www.freedesktop.org/software/systemd/man/systemd-repart.service.html) uses when building a disk image. This is useful to achieve reproducible builds, where deterministic UUIDs and other partition metadata should be derived on each build.

`SourceDateEpoch=`, `--source-date-epoch=`
: Takes a timestamp as argument. Resets file modification times of all files to this timestamp. The variable is also propagated to systemd-repart and scripts executed by mkosi. If not set explicitly, `SOURCE_DATE_EPOCH` from `--environment` and from the host environment are tried in that order. This is useful to make builds reproducible. See [SOURCE_DATE_EPOCH](https://reproducible-builds.org/specs/source-date-epoch/) for more information.

### [Content] Section

`Packages=`, `--package=`, `-p`
: Install the specified distribution packages (i.e. RPM, DEB, …) in the image. Takes a comma separated list of package specifications. This option may be used multiple times in which case the specified package lists are combined. Use `BuildPackages=` to specify packages that shall only be installed in an overlay that is mounted when the prepare scripts are executed with the `build` argument and when the build scripts are executed.

: The types and syntax of *package specifications* that are allowed depend on the package installer (e.g. `dnf` for `rpm`-based distros or `apt` for `deb`-based distros), but may include package names, package names with version and/or architecture, package name globs, paths to packages in the file system, package groups, and virtual provides, including file paths.
: Example: when using a distro that uses `dnf`, the following configuration would install the `meson` package (in the latest version), the 32-bit version of the `libfdisk-devel` package, all available packages that start with the `git-` prefix, a `systemd` rpm from the local file system, one of the packages that provides `/usr/bin/ld`, the packages in the *Development Tools* group, and the package that contains the `mypy` python module.

```conf
Packages=meson
         libfdisk-devel.i686
         git-*
         prebuilt/rpms/systemd-249-rc1.local.rpm
         /usr/bin/ld
         @development-tools
         python3dist(mypy)
```

: Note that since mkosi runs in a sandbox with most of the host files unavailable, any local packages have to be mounted into the sandbox explicitly using `BuildSources=`. For example, let's say we have a local package located at `../my-packages/abc.rpm` relative to the mkosi working directory, then we'd be able to install it as follows:

```conf
BuildSources=../my-packages:my-packages-in-sandbox
Packages=my-packages-in-sandbox/abc.rpm
```

`BuildPackages=`, `--build-package=`
: Similar to `Packages=`, but configures packages to install only in an overlay that is made available on top of the image to the prepare scripts when executed with the `build` argument and the build scripts. This option should be used to list packages containing header files, compilers, build systems, linkers and other build tools the `mkosi.build` scripts require to operate. Note that packages listed here will be absent from the final image.

`PackageDirectories=`, `--package-directory=`
: Specify directories containing extra packages to be made available during the build. `mkosi` will create a local repository containing all packages in these directories and make it available when installing packages or running scripts.

: Note that this local repository is also made available when running scripts. Build scripts can add more packages to the local repository by placing the built packages in `$PACKAGEDIR`.

`WithRecommends=`, `--with-recommends=`
: Configures whether to install recommended or weak dependencies, depending on how they are named by the used package manager, or not. By default, recommended packages are not installed. This is only used for package managers that support the concept, which are currently apt, dnf and zypper.

`WithDocs=`, `--with-docs`
: Include documentation in the image. Enabled by default. When disabled, if the underlying distribution package manager supports it, documentation is not included in the image. The `$WITH_DOCS` environment variable passed to the `mkosi.build` scripts is set to `0` or `1` depending on whether this option is enabled or disabled.

`BaseTrees=`, `--base-tree=`
: Takes a comma separated list of paths to use as base trees. When used, these base trees are each copied into the OS tree and form the base distribution instead of installing the distribution from scratch. Only extra packages are installed on top of the ones already installed in the base trees. Note that for this to work properly, the base image still needs to contain the package manager metadata (see `CleanPackageMetadata=`).

: Instead of a directory, a tar file or a disk image may be provided. In this case it is unpacked into the OS tree. This mode of operation allows setting permissions and file ownership explicitly, in particular for projects stored in a version control system such as `git` which retain full file ownership and access mode metadata for committed files.
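: As an illustration of `BaseTrees=`, the following sketch builds a system extension on top of the output of an earlier build; the path `mkosi.output/base` and the `btop` package are hypothetical examples:

```conf
[Output]
Format=sysext
Overlay=yes

[Content]
BaseTrees=mkosi.output/base
Packages=btop
```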
`SkeletonTrees=`, `--skeleton-tree=` : Takes a comma separated list of colon separated path pairs. The first path of each pair refers to a directory to copy into the OS tree before invoking the package manager. The second path of each pair refers to the target directory inside the image. If the second path is not provided, the directory is copied on top of the root directory of the image. The second path is always interpreted as an absolute path. Use this to insert files and directories into the OS tree before the package manager installs any packages. If the `mkosi.skeleton/` directory is found in the local directory it is also used for this purpose with the root directory as target (also see the **Files** section below). : Note that skeleton trees are cached and any changes to skeleton trees after a cached image has been built (when using `Incremental=`) are only applied when the cached image is rebuilt (by using `-ff` or running `mkosi -f clean`). : As with the base tree logic above, instead of a directory, a tar file may be provided too. `mkosi.skeleton.tar` will be automatically used if found in the local directory. `ExtraTrees=`, `--extra-tree=` : Takes a comma separated list of colon separated path pairs. The first path of each pair refers to a directory to copy from the host into the image. The second path of each pair refers to the target directory inside the image. If the second path is not provided, the directory is copied on top of the root directory of the image. The second path is always interpreted as an absolute path. Use this to override any default configuration files shipped with the distribution. If the `mkosi.extra/` directory is found in the local directory it is also used for this purpose with the root directory as target. (also see the **Files** section below). : As with the base tree logic above, instead of a directory, a tar file may be provided too. `mkosi.extra.tar` will be automatically used if found in the local directory. `RemovePackages=`, `--remove-package=` : Takes a comma-separated list of package specifications for removal, in the same format as `Packages=`. The removal will be performed as one of the last steps. This step is skipped if `CleanPackageMetadata=no` is used. `RemoveFiles=`, `--remove-files=` : Takes a comma-separated list of globs. Files in the image matching the globs will be purged at the end. `CleanPackageMetadata=`, `--clean-package-metadata=` : Enable/disable removal of package manager databases at the end of installation. Can be specified as `true`, `false`, or `auto` (the default). With `auto`, files will be removed if the respective package manager executable is *not* present at the end of the installation. `PrepareScripts=`, `--prepare-script=` : Takes a comma-separated list of paths to executables that are used as the prepare scripts for this image. See the **Scripts** section for more information. `BuildScripts=`, `--build-script=` : Takes a comma-separated list of paths to executables that are used as the build scripts for this image. See the **Scripts** section for more information. `PostInstallationScripts=`, `--postinst-script=` : Takes a comma-separated list of paths to executables that are used as the post-installation scripts for this image. See the **Scripts** section for more information. `FinalizeScripts=`, `--finalize-script=` : Takes a comma-separated list of paths to executables that are used as the finalize scripts for this image. See the **Scripts** section for more information. 
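: As an illustration, the script settings above can also be configured explicitly instead of relying on the default `mkosi.prepare`, `mkosi.build`, `mkosi.postinst` and `mkosi.finalize` files; the `scripts/` paths below are hypothetical:

```conf
[Content]
PrepareScripts=scripts/prepare.sh
BuildScripts=scripts/build.sh
PostInstallationScripts=scripts/postinst.sh
FinalizeScripts=scripts/finalize.sh
```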
`BuildSources=`, `--build-sources=`
: Takes a comma separated list of colon separated path pairs. The first path of each pair refers to a directory to mount from the host. The second path of each pair refers to the directory where the source directory should be mounted when running scripts. Every target path is prefixed with `/work/src` and all build sources are sorted lexicographically by their target before mounting, so that top level paths are mounted first. If not configured explicitly, the current working directory is mounted to `/work/src`.

`BuildSourcesEphemeral=`, `--build-sources-ephemeral=`
: Takes a boolean. Disabled by default. Configures whether changes to source directories (the working directory and the directories configured with `BuildSources=`) are persisted. If enabled, all source directories will be reset to their original state after scripts finish executing.

`Environment=`, `--environment=`
: Adds variables to the environment that package managers and the prepare/build/postinstall/finalize scripts are executed with. Takes a space-separated list of variable assignments or just variable names. In the latter case, the values of those variables will be passed through from the environment in which `mkosi` was invoked. This option may be specified more than once, in which case all listed variables will be set. If the same variable is set twice, the later setting overrides the earlier one.

`EnvironmentFiles=`, `--env-file=`
: Takes a comma-separated list of paths to files that contain environment variable definitions to be added to the scripting environment. Uses `mkosi.env` if it is found in the local directory. The variables are first read from `mkosi.env` if it exists, then from the given list of files and then from the `Environment=` settings.

`WithTests=`, `--without-tests`, `-T`
: If set to false (or when the command-line option is used), the `$WITH_TESTS` environment variable is set to `0` when the `mkosi.build` scripts are invoked. This is supposed to be used by the build scripts to bypass any unit or integration tests that are normally run during the source build process. Note that this option has no effect unless the `mkosi.build` build scripts honor it.

`WithNetwork=`, `--with-network=`
: When true, enables network connectivity while the build scripts `mkosi.build` are invoked. By default, the build scripts run with networking turned off. The `$WITH_NETWORK` environment variable is passed to the `mkosi.build` build scripts indicating whether the build is done with or without network.

`Bootable=`, `--bootable=`
: Takes a boolean or `auto`. Enables or disables generation of a bootable image. If enabled, mkosi will install an EFI bootloader, and add an ESP partition when the disk image output is used. If the selected EFI bootloader (see `Bootloader=`) is not installed or no kernel images can be found, the build will fail. `auto` behaves as if the option was enabled, but the build won't fail if either no kernel images or the selected EFI bootloader can't be found. If disabled, no bootloader will be installed even if found inside the image, no unified kernel images will be generated and no ESP partition will be added to the image if the disk output format is used.

`Bootloader=`, `--bootloader=`
: Takes one of `none`, `systemd-boot`, `uki` or `grub`. Defaults to `systemd-boot`. If set to `none`, no EFI bootloader will be installed into the image.
If set to `systemd-boot`, systemd-boot will be installed and for each installed kernel, a UKI will be generated and stored in `EFI/Linux` in the ESP. If set to `uki`, a single UKI will be generated for the latest installed kernel (the one with the highest version) which is installed to `EFI/BOOT/BOOTX64.EFI` in the ESP. If set to `grub`, for each installed kernel, a UKI will be generated and stored in `EFI/Linux` in the ESP. For each generated UKI, a menu entry is appended to the grub configuration in `grub/grub.cfg` in the ESP which chainloads into the UKI. A shim grub.cfg is also written to `EFI//grub.cfg` in the ESP which loads `grub/grub.cfg` in the ESP for compatibility with signed versions of grub which load the grub configuration from this location.

: Note that we do not yet install grub to the ESP when `Bootloader=` is set to `grub`. This has to be done manually in a postinst or finalize script. The grub EFI binary should be installed to `/efi/EFI/BOOT/BOOTX64.EFI` (or similar depending on the architecture) and should be configured to load its configuration from `EFI//grub.cfg` in the ESP. Signed versions of grub shipped by distributions will load their configuration from this location by default.

`BiosBootloader=`, `--bios-bootloader=`
: Takes one of `none` or `grub`. Defaults to `none`. If set to `none`, no BIOS bootloader will be installed. If set to `grub`, grub is installed as the BIOS boot loader if a bootable image is requested with the `Bootable=` option. If no repart partition definition files are configured, mkosi will add a grub BIOS boot partition and an EFI system partition to the default partition definition files.

: Note that this option is not mutually exclusive with `Bootloader=`. It is possible to have an image that is both bootable on UEFI and BIOS by configuring both `Bootloader=` and `BiosBootloader=`.

: The grub BIOS boot partition should have UUID `21686148-6449-6e6f-744e-656564454649` and should be at least 1MB.

: Even if no EFI bootloader is installed, we still need an ESP for BIOS boot as that's where we store the kernel, initrd and grub modules.

`ShimBootloader=`, `--shim-bootloader=`
: Takes one of `none`, `unsigned`, or `signed`. Defaults to `none`. If set to `none`, shim and MokManager will not be installed to the ESP. If set to `unsigned`, mkosi will search for unsigned shim and MokManager EFI binaries and install them. If `SecureBoot=` is enabled, mkosi will sign the unsigned EFI binaries before installing them. If set to `signed`, mkosi will search for signed EFI binaries and install those. Even if `SecureBoot=` is enabled, mkosi won't sign these binaries again.

: Note that this option only takes effect when an image that is bootable on UEFI firmware is requested using other options (`Bootable=`, `Bootloader=`).

`Initrds=`, `--initrd`
: Use user-provided initrd(s). Takes a comma separated list of paths to initrd files. This option may be used multiple times in which case the initrd lists are combined. If no initrds are specified and a bootable image is requested, mkosi will automatically build a default initrd.

`InitrdPackages=`, `--initrd-package=`
: Extra packages to install into the default initrd. Takes a comma separated list of package specifications. This option may be used multiple times in which case the specified package lists are combined.

`KernelCommandLine=`, `--kernel-command-line=`
: Use the specified kernel command line when building images.
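: For example, a minimal sketch combining the boot-related settings above to produce a bootable disk image with systemd-boot and a custom kernel command line:

```conf
[Output]
Format=disk

[Content]
Bootable=yes
Bootloader=systemd-boot
KernelCommandLine=console=ttyS0 rw
```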
`KernelModulesInclude=`, `--kernel-modules-include=` : Takes a list of regex patterns that specify kernel modules to include in the image. Patterns should be relative to the `/usr/lib/modules//kernel` directory. mkosi checks for a match anywhere in the module path (e.g. `i915` will match against `drivers/gpu/drm/i915.ko`). All modules that match any of the specified patterns are included in the image. All module and firmware dependencies of the matched modules are included in the image as well. This setting takes priority over `KernelModulesExclude=` and only makes sense when used in combination with it because all kernel modules are included in the image by default. `KernelModulesExclude=`, `--kernel-modules-exclude=` : Takes a list of regex patterns that specify modules to exclude from the image. Behaves the same as `KernelModulesInclude=` except that all modules that match any of the specified patterns are excluded from the image. `KernelModulesIncludeHost=`, `--kernel-modules-include-host=` : Takes a boolean. Specifies whether to include the currently loaded modules on the host system in the image. This setting takes priority over `KernelModulesExclude=` and only makes sense when used in combination with it because all kernel modules are included in the image by default. `KernelModulesInitrd=`, `--kernel-modules-initrd=` : Enable/Disable generation of the kernel modules initrd when building a bootable image. Enabled by default. If enabled, when building a bootable image, for each kernel that we assemble a unified kernel image for we generate an extra initrd containing only the kernel modules for that kernel version and append it to the prebuilt initrd. This allows generating kernel independent initrds which are augmented with the necessary kernel modules when the UKI is assembled. `KernelModulesInitrdInclude=`, `--kernel-modules-initrd-include=` : Like `KernelModulesInclude=`, but applies to the kernel modules included in the kernel modules initrd. `KernelModulesInitrdExclude=`, `--kernel-modules-initrd-exclude=` : Like `KernelModulesExclude=`, but applies to the kernel modules included in the kernel modules initrd. `KernelModulesInitrdIncludeHost=`, `--kernel-modules-initrd-include-host=` : Like `KernelModulesIncludeHost=`, but applies to the kernel modules included in the kernel modules initrd. `Locale=`, `--locale=`, `LocaleMessages=`, `--locale-messages=`, `Keymap=`, `--keymap=`, `Timezone=`, `--timezone=`, `Hostname=`, `--hostname=`, `RootShell=`, `--root-shell=` : The settings `Locale=`, `--locale=`, `LocaleMessages=`, `--locale-messages=`, `Keymap=`, `--keymap=`, `Timezone=`, `--timezone=`, `Hostname=`, `--hostname=`, `RootShell=`, `--root-shell=` correspond to the identically named systemd-firstboot options. See the systemd firstboot [manpage](https://www.freedesktop.org/software/systemd/man/systemd-firstboot.html) for more information. Additionally, where applicable, the corresponding systemd credentials for these settings are written to `/usr/lib/credstore`, so that they apply even if only `/usr` is shipped in the image. `RootPassword=`, `--root-password=`, : Set the system root password. If this option is not used, but a `mkosi.rootpw` file is found in the local directory, the password is automatically read from it. If the password starts with `hashed:`, it is treated as an already hashed root password. The root password is also stored in `/usr/lib/credstore` under the appropriate systemd credential so that it applies even if only `/usr` is shipped in the image. 
To create an unlocked account without any password, use `hashed:` without a hash.

`Autologin=`, `--autologin`
: Enable autologin for the `root` user on `/dev/pts/0` (nspawn), `/dev/tty1` and `/dev/ttyS0`.

`MakeInitrd=`, `--make-initrd`
: Add `/etc/initrd-release` and `/init` to the image so that it can be used as an initramfs.

`Ssh=`, `--ssh`
: If specified, an sshd socket unit and matching service are installed in the final image that expose SSH over VSock. When building with this option and running the image using `mkosi qemu`, the `mkosi ssh` command can be used to connect to the container/VM via SSH. Note that you still have to make sure openssh is installed in the image to make this option behave correctly. Run `mkosi genkey` to automatically generate an X509 certificate and private key to be used by mkosi to enable SSH access to any virtual machines via `mkosi ssh`. To access images booted using `mkosi boot`, use `machinectl`.

`SELinuxRelabel=`, `--selinux-relabel=`
: Specifies whether to relabel files to match the image's SELinux policy. Takes a boolean value or `auto`. Defaults to `auto`. If disabled, files will not be relabeled. If enabled, an SELinux policy has to be installed in the image and `setfiles` has to be available to relabel files. If any errors occur during `setfiles`, the build will fail. If set to `auto`, files will be relabeled if an SELinux policy is installed in the image and if `setfiles` is available. Any errors that occur during `setfiles` will be ignored.

: Note that when running unprivileged, `setfiles` will fail to set any labels that are not in the host's SELinux policy. To ensure `setfiles` succeeds without errors, make sure to run mkosi as root or build from a host system with the same SELinux policy as the image you're building.

### [Validation] Section

`SecureBoot=`, `--secure-boot`
: Sign systemd-boot (if it is not signed yet) and any generated unified kernel images for UEFI SecureBoot.

`SecureBootAutoEnroll=`, `--secure-boot-auto-enroll=`
: Set up automatic enrollment of the secure boot keys in virtual machines as documented in the systemd-boot [man page](https://www.freedesktop.org/software/systemd/man/systemd-boot.html) if `SecureBoot=` is used. Note that systemd-boot will only do automatic secure boot key enrollment in virtual machines starting from systemd v253. To do auto enrollment on systemd v252 or on bare metal machines, write a systemd-boot configuration file to `/efi/loader/loader.conf` using an extra tree with `secure-boot-enroll force` or `secure-boot-enroll manual` in it. Auto enrollment is not supported on systemd versions older than v252. Defaults to `yes`.

`SecureBootKey=`, `--secure-boot-key=`
: Path to the PEM file containing the secret key for signing the UEFI kernel image, if `SecureBoot=` is used.

`SecureBootCertificate=`, `--secure-boot-certificate=`
: Path to the X.509 file containing the certificate for the signed UEFI kernel image, if `SecureBoot=` is used.

`SecureBootSignTool=`, `--secure-boot-sign-tool`
: Tool to use to sign secure boot PE binaries. Takes one of `sbsign`, `pesign` or `auto`. Defaults to `auto`. If set to `auto`, either sbsign or pesign are used if available, with sbsign being preferred if both are installed.

`VerityKey=`, `--verity-key=`
: Path to the PEM file containing the secret key for signing the verity signature, if a verity signature partition is added with systemd-repart.
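: For example, a sketch of a `[Validation]` section that enables SecureBoot signing with the `mkosi.key` and `mkosi.crt` files generated by `mkosi genkey` (these are picked up automatically and are spelled out here only for illustration):

```conf
[Validation]
SecureBoot=yes
SecureBootKey=mkosi.key
SecureBootCertificate=mkosi.crt
```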
`VerityCertificate=`, `--verity-certificate=`
: Path to the X.509 file containing the certificate for signing the verity signature, if a verity signature partition is added with systemd-repart.

`SignExpectedPcr=`, `--sign-expected-pcr`
: Measure the components of the unified kernel image (UKI) using `systemd-measure` and embed the PCR signature into the unified kernel image. This option takes a boolean value or the special value `auto`, which is the default, which is equal to a true value if the `systemd-measure` binary is in `PATH`.

`Passphrase=`, `--passphrase`
: Specify the path to a file containing the passphrase to use for LUKS encryption. It should contain the passphrase literally, and not end in a newline character (i.e. in the same format as cryptsetup and `/etc/crypttab` expect the passphrase files). The file must have an access mode of 0600 or less.

`Checksum=`, `--checksum`
: Generate a `SHA256SUMS` file of all generated artifacts after the build is complete.

`Sign=`, `--sign`
: Sign the generated `SHA256SUMS` using `gpg` after completion.

`Key=`, `--key=`
: Select the `gpg` key to use for signing `SHA256SUMS`. This key must be already present in the `gpg` keyring.

### [Host] Section

`Incremental=`, `--incremental=`, `-i`
: Enable incremental build mode. In this mode, a copy of the OS image is created immediately after all OS packages are installed and the prepare scripts have executed but before the `mkosi.build` scripts are invoked (or anything that happens after it). On subsequent invocations of `mkosi` with the `-i` switch this cached image may be used to skip the OS package installation, thus drastically speeding up repetitive build times. Note that while there is some rudimentary cache invalidation, it is definitely not perfect. In order to force rebuilding of the cached image, combine `-i` with `-ff` to ensure the cached image is first removed and then re-created.

`NSpawnSettings=`, `--settings=`
: Specifies a `.nspawn` settings file for `systemd-nspawn` to use in the `boot` and `shell` verbs, and to place next to the generated image file. This is useful to configure the `systemd-nspawn` environment when the image is run. If this setting is not used but an `mkosi.nspawn` file is found in the local directory it is automatically used for this purpose.

`ExtraSearchPaths=`, `--extra-search-path=`
: List of colon-separated paths to look for tools in, before using the regular `$PATH` search path.

`QemuGui=`, `--qemu-gui=`
: If enabled, qemu is executed with its graphical interface instead of with a serial console.

`QemuSmp=`, `--qemu-smp=`
: When used with the `qemu` verb, this option sets `qemu`'s `-smp` argument which controls the number of the guest's CPUs. Defaults to `2`.

`QemuMem=`, `--qemu-mem=`
: When used with the `qemu` verb, this option sets `qemu`'s `-m` argument which controls the amount of the guest's RAM. Defaults to `2G`.

`QemuKvm=`, `--qemu-kvm=`
: When used with the `qemu` verb, this option specifies whether QEMU should use KVM acceleration. Takes a boolean value or `auto`. Defaults to `auto`.

`QemuVsock=`, `--qemu-vsock=`
: When used with the `qemu` verb, this option specifies whether QEMU should be configured with a vsock. Takes a boolean value or `auto`. Defaults to `auto`.

`QemuVsockConnectionId=`, `--qemu-vsock-cid=`
: When used with the `qemu` verb, this option specifies the vsock connection ID to use. Takes a number in the interval `[3, 0xFFFFFFFF)` or `hash` or `auto`. Defaults to `hash`.
When set to `hash`, the connection ID will be derived from the full path to the image. When set to `auto`, `mkosi` will try to find a free connection ID automatically. Otherwise, the provided number will be used as is.

: Note that when set to `auto`, `mkosi ssh` cannot be used as we cannot figure out which free connection ID we found when booting the image earlier.

`QemuSwtpm=`, `--qemu-swtpm=`
: When used with the `qemu` verb, this option specifies whether to start an instance of swtpm to be used as a TPM with qemu. This requires swtpm to be installed on the host. Takes a boolean value or `auto`. Defaults to `auto`.

`QemuCdrom=`, `--qemu-cdrom=`
: When used with the `qemu` verb, this option specifies whether to attach the image to the virtual machine as a CD-ROM device. Takes a boolean. Defaults to `no`.

`QemuFirmware=`, `--qemu-firmware=`
: When used with the `qemu` verb, this option specifies which firmware to use. Takes one of `uefi`, `bios`, `linux`, or `auto`. Defaults to `auto`. When set to `uefi`, the OVMF firmware is used. When set to `bios`, the default SeaBIOS firmware is used. When set to `linux`, direct kernel boot is used. See the `QemuKernel=` option for more details on which kernel image is used with direct kernel boot. When set to `auto`, `linux` is used if a cpio image is being booted, `uefi` otherwise.

`QemuFirmwareVariables=`, `--qemu-firmware-variables=`
: When used with the `qemu` verb, this option specifies the path to the firmware variables file to use. Currently, this option is only taken into account when the `uefi` firmware is used. If not specified, mkosi will search for the default variables file and use that instead.

: `virt-fw-vars` from the [virt-firmware](https://gitlab.com/kraxel/virt-firmware) project can be used to customize OVMF variable files.

: Some distributions also provide variable files which already have Microsoft's certificates for secure boot enrolled. For Fedora and Debian these are `OVMF_VARS.secboot.fd` and `OVMF_VARS_4M.ms.fd` under `/usr/share/OVMF` respectively. You can use `locate` and look under `/usr/share/qemu/firmware` for hints on where to find these files if your distribution ships them.

`QemuKernel=`, `--qemu-kernel=`
: Set the kernel image to use for qemu direct kernel boot. If not specified, mkosi will use the kernel provided via the command line (`-kernel` option) or the latest kernel that was installed into the image (or fail if no kernel was installed into the image).

: Note that when the `cpio` output format is used, direct kernel boot is used regardless of the configured firmware. Depending on the configured firmware, qemu might boot the kernel itself or via the configured firmware.

`QemuDrives=`, `--qemu-drive=`
: Add a qemu drive. Takes a colon-delimited string of the format `id:size[:directory[:options]]`. `id` specifies the qemu id we assign to the drive. This can be used as the `drive=` property in various qemu devices. `size` specifies the size of the drive. This takes a size in bytes. Additionally, the suffixes `K`, `M` and `G` can be used to specify a size in kilobytes, megabytes and gigabytes respectively. `directory` optionally specifies the directory in which to create the file backing the drive. `options` optionally specifies extra comma-delimited properties which are passed verbatim to qemu's `-drive` option.

`QemuArgs=`
: Space-delimited list of additional arguments to pass when invoking qemu.
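: For example, a sketch of a `[Host]` section that gives the virtual machine more resources and attaches an extra scratch disk; the drive id `scratch` and the device line are illustrative:

```conf
[Host]
QemuSmp=4
QemuMem=4G
QemuDrives=scratch:2G
QemuArgs=-device virtio-blk-pci,drive=scratch
```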
`Ephemeral=`, `--ephemeral` : When used with the `shell`, `boot`, or `qemu` verbs, this option runs the specified verb on a temporary snapshot of the output image that is removed immediately when the container terminates. Taking the temporary snapshot is more efficient on file systems that support reflinks natively (btrfs or xfs) than on more traditional file systems that do not (ext4). `Credentials=`, `--credential=` : Set credentials to be passed to systemd-nspawn or qemu respectively when `mkosi shell/boot` or `mkosi qemu` are used. This option takes a space separated list of key=value assignments. `KernelCommandLineExtra=`, `--kernel-command-line-extra=` : Set extra kernel command line entries that are appended to the kernel command line at runtime when booting the image. When booting in a container, these are passed as extra arguments to systemd. When booting in a VM, these are appended to the kernel command line via the SMBIOS io.systemd.stub.kernel-cmdline-extra OEM string. This will only be picked up by systemd-boot/systemd-stub versions newer than or equal to v254. `Acl=`, `--acl=` : If specified, ACLs will be set on any generated root filesystem directories that allow the user running mkosi to remove them without needing privileges. `ToolsTree=`, `--tools-tree=` : If specified, programs executed by mkosi are looked up inside the given tree instead of in the host system. Use this option to make image builds more reproducible by always using the same versions of programs to build the final image instead of whatever version is installed on the host system. If this option is not used, but the `mkosi.tools/` directory is found in the local directory it is automatically used for this purpose with the root directory as target. Note that when looking up binaries in `--tools-tree=`, only `/usr/bin` and `/usr/sbin` are considered. Specifically, paths specified by `--extra-search-path=` are ignored when looking up binaries in the given tools tree. : If set to `default`, mkosi will automatically add an extra tools tree image and use it as the tools tree. 
The following table shows for which distributions default tools tree packages are defined and which packages are included in those default tools trees: | | Fedora | CentOS | Debian | Ubuntu | Arch | openSUSE | |-------------------------|--------|--------|--------|--------|------|----------| | `apt` | X | X | X | X | X | | | `archlinux-keyring` | X | | X | X | X | | | `bash` | X | X | X | X | X | X | | `btrfs-progs` | X | | X | X | X | X | | `bubblewrap` | X | X | X | X | X | X | | `ca-certificates` | X | X | X | X | X | X | | `coreutils` | X | X | X | X | X | X | | `cpio` | X | X | X | X | X | X | | `curl` | X | X | X | X | X | X | | `debian-keyring` | X | X | X | X | X | | | `diffutils` | X | X | X | X | X | X | | `distribution-gpg-keys` | X | X | | | | X | | `dnf` | X | X | X | X | X | X | | `dnf-plugins-core` | X | X | | | | X | | `dnf5` | X | | | | | | | `dnf5-plugins` | X | | | | | | | `dosfstools` | X | X | X | X | X | X | | `e2fsprogs` | X | X | X | X | X | X | | `edk2-ovmf` | X | X | X | X | X | X | | `erofs-utils` | X | | X | X | X | X | | `kmod` | X | X | X | X | X | X | | `less` | X | X | X | X | X | X | | `mtools` | X | X | X | X | X | X | | `nano` | X | X | X | X | X | X | | `openssh` | X | X | X | X | X | X | | `openssl` | X | X | X | X | X | X | | `pacman` | X | | X | X | X | | | `pesign` | X | X | X | X | X | X | | `policycoreutils` | X | X | X | X | | X | | `qemu` | X | X | X | X | X | X | | `sbsigntools` | X | | X | X | X | X | | `socat` | X | X | X | X | X | X | | `squashfs-tools` | X | X | X | X | X | X | | `strace` | X | X | X | X | X | X | | `swtpm` | X | X | X | X | X | X | | `systemd` | X | X | X | X | X | X | | `ukify` | X | | X | X | X | X | | `tar` | X | X | X | X | X | X | | `ubuntu-keyring` | X | X | X | X | X | | | `util-linux` | X | X | X | X | X | X | | `virtiofsd` | X | X | | | X | X | | `xfsprogs` | X | X | X | X | X | X | | `xz` | X | X | X | X | X | X | | `zstd` | X | X | X | X | X | X | | `zypper` | X | | X | X | X | | `ToolsTreeDistribution=`, `--tools-tree-distribution=` : Set the distribution to use for the default tools tree. By default, the same distribution as the image that's being built is used, except for CentOS and Ubuntu images, in which case Fedora and Debian are used respectively. `ToolsTreeRelease=`, `--tools-tree-release=` : Set the distribution release to use for the default tools tree. By default, the hardcoded default release in mkosi for the distribution is used. `ToolsTreeMirror=`, `--tools-tree-mirror=` : Set the mirror to use for the default tools tree. By default, the default mirror for the tools tree distribution is used. `ToolsTreePackages=`, `--tools-tree-packages=` : Extra packages to install into the default tools tree. Takes a comma separated list of package specifications. This option may be used multiple times in which case the specified package lists are combined. `RuntimeTrees=`, `--runtime-tree=` : Takes a colon separated pair of paths. The first path refers to a directory to mount into any machine (container or VM) started by mkosi. The second path refers to the target directory inside the machine. If the second path is not provided, the directory is mounted below `/root/src` in the machine. If the second path is relative, it is interpreted relative to `/root/src` in the machine. : For each mounted directory, the uid and gid of the user running mkosi are mapped to the root user in the machine. 
This means that all the files and directories will appear as if they're owned by root in the machine, and all new files and directories created by root in the machine in these directories will be owned by the user running mkosi on the host.

: Note that when using `mkosi qemu` with this feature, systemd v254 or newer has to be installed in the image.

`RuntimeSize=`, `--runtime-size=`
: If specified, disk images are grown to the specified size before they're booted with systemd-nspawn or qemu. Takes a size in bytes. Additionally, the suffixes `K`, `M` and `G` can be used to specify a size in kilobytes, megabytes and gigabytes respectively.

`RuntimeScratch=`, `--runtime-scratch=`
: Takes a boolean value or `auto`. Specifies whether to mount extra scratch space to `/var/tmp`. If enabled, practically unlimited scratch space is made available under `/var/tmp` when booting the image with `mkosi qemu`, `mkosi boot` or `mkosi shell`.

: Note that using this feature with `mkosi qemu` requires systemd v254 or newer in the guest.

`SshKey=`, `--ssh-key=`
: Path to the X.509 private key in PEM format to use to connect to a virtual machine started with `mkosi qemu` and built with the `Ssh=` option enabled via the `mkosi ssh` command. If not configured and `mkosi.key` exists in the working directory, it will automatically be used for this purpose. Run `mkosi genkey` to automatically generate a key in `mkosi.key`.

`SshCertificate=`, `--ssh-certificate=`
: Path to the X.509 certificate in PEM format to provision as the SSH public key in virtual machines started with `mkosi qemu`. If not configured and `mkosi.crt` exists in the working directory, it will automatically be used for this purpose. Run `mkosi genkey` to automatically generate a certificate in `mkosi.crt`.

## Specifiers

The current value of various settings can be accessed when parsing configuration files by using specifiers. To write a literal `%` character in a configuration file without treating it as a specifier, use `%%`. The following specifiers are understood:

| Setting            | Specifier |
|--------------------|-----------|
| `Distribution=`    | `%d`      |
| `Release=`         | `%r`      |
| `Architecture=`    | `%a`      |
| `Format=`          | `%t`      |
| `Output=`          | `%o`      |
| `OutputDirectory=` | `%O`      |
| `ImageId=`         | `%i`      |
| `ImageVersion=`    | `%v`      |

## Supported distributions

Images may be created containing installations of the following distributions:

* *Fedora Linux*
* *Debian*
* *Ubuntu*
* *Arch Linux*
* *openSUSE*
* *Mageia*
* *CentOS*
* *RHEL*
* *RHEL UBI*
* *OpenMandriva*
* *Rocky Linux*
* *Alma Linux*
* *Gentoo* (**Gentoo is experimental and unsupported. We make no guarantee that it will work at all and the core maintainers will generally not fix gentoo specific issues**)
* *None* (**Requires the user to provide a pre-built rootfs**)

In theory, any distribution may be used on the host for building images containing any other distribution, as long as the necessary tools are available. Specifically, any distribution that packages `apt` may be used to build *Debian* or *Ubuntu* images. Any distribution that packages `dnf` may be used to build images for any of the rpm-based distributions. Any distribution that packages `pacman` may be used to build *Arch Linux* images. Any distribution that packages `zypper` may be used to build *openSUSE* images.
Other distributions and build automation tools for embedded Linux systems such as Buildroot, OpenEmbedded and Yocto Project may be used by selecting the `custom` distribution, and populating the rootfs via a combination of base trees, skeleton trees, and prepare scripts.

Currently, *Fedora Linux* packages all relevant tools as of Fedora 28.

Note that when not using a custom mirror, `RHEL` images can only be built from a host system with a `RHEL` subscription (established using e.g. `subscription-manager`).

# Execution Flow

Execution flow for `mkosi build`. Default values/calls are shown in parentheses. When building with `--incremental` mkosi creates a cache of the distribution installation if not already existing and replaces the distribution installation in consecutive runs with data from the cached one.

1. Parse CLI options
1. Parse configuration files
1. If we're not running as root, unshare the user namespace and map the subuid and subgid ranges configured in `/etc/subuid` and `/etc/subgid` into it.
1. Unshare the mount namespace
1. Remount the following directories read-only if they exist:
   - `/usr`
   - `/etc`
   - `/opt`
   - `/srv`
   - `/boot`
   - `/efi`
   - `/media`
   - `/mnt`

Then, for each image, we execute the following steps:

1. Copy package manager trees into the workspace
1. Copy base trees (`--base-tree=`) into the image
1. Copy skeleton trees (`mkosi.skeleton`) into image
1. Install distribution and packages into image or use cache tree if available
1. Run prepare scripts on image with the `final` argument (`mkosi.prepare`)
1. Install build packages in overlay if any build scripts are configured
1. Run prepare scripts on overlay with the `build` argument if any build scripts are configured (`mkosi.prepare`)
1. Cache the image if configured (`--incremental`)
1. Run build scripts on image + overlay if any build scripts are configured (`mkosi.build`)
1. Finalize the build if the output format `none` is configured
1. Copy the build scripts outputs into the image
1. Copy the extra trees into the image (`mkosi.extra`)
1. Run post-install scripts (`mkosi.postinst`)
1. Write config files required for `Ssh=`, `Autologin=` and `MakeInitrd=`
1. Install systemd-boot and configure secure boot if configured (`--secure-boot`)
1. Run `systemd-sysusers`
1. Run `systemd-tmpfiles`
1. Run `systemctl preset-all`
1. Run `depmod`
1. Run `systemd-firstboot`
1. Run `systemd-hwdb`
1. Remove packages and files (`RemovePackages=`, `RemoveFiles=`)
1. Run SELinux relabel if a SELinux policy is installed
1. Run finalize scripts (`mkosi.finalize`)
1. Generate unified kernel image if configured to do so
1. Generate final output format

# Scripts

To allow for image customization that cannot be implemented using mkosi's builtin features, mkosi supports running scripts at various points during the image build process that can customize the image as needed. Scripts are executed on the host system as root (either real root or root within the user namespace that mkosi created when running unprivileged) with a customized environment to simplify modifying the image. For each script, the configured build sources (`BuildSources=`) are mounted into the current working directory before running the script in the current working directory. `$SRCDIR` is set to point to the current working directory. The following scripts are supported:

* If **`mkosi.prepare`** (`PrepareScripts=`) exists, it is first called with the `final` argument, right after the software packages are installed.
It is called a second time with the `build` command line parameter, right after the build packages are installed and the build overlay mounted on top of the image's root directory. This script has network access and may be used to install packages from other sources than the distro's package manager (e.g. `pip`, `npm`, ...), after all software packages are installed but before the image is cached (if incremental mode is enabled). In contrast to a general purpose installation, it is safe to install packages to the system (`pip install`, `npm install -g`) instead of in `$SRCDIR` itself because the build image is only used for a single project and can easily be thrown away and rebuilt so there's no risk of conflicting dependencies and no risk of polluting the host system.

* If **`mkosi.build`** (`BuildScripts=`) exists, it is executed with the build overlay mounted on top of the image's root directory. When running the build script, `$DESTDIR` points to a directory where the script should place any files it generates that should end up in the image. Note that `make`/`automake`/`meson` based build systems generally honor `$DESTDIR`, thus making it very natural to build *source* trees from the build script. After running the build script, the contents of `$DESTDIR` are copied into the image.

* If **`mkosi.postinst`** (`PostInstallationScripts=`) exists, it is executed after the (optional) build tree and extra trees have been installed. This script may be used to alter the images without any restrictions, after all software packages and built sources have been installed.

* If **`mkosi.finalize`** (`FinalizeScripts=`) exists, it is executed as the last step of preparing an image.

If a script uses the `.chroot` extension, mkosi will chroot into the image using `mkosi-chroot` (see below) before executing the script. For example, if `mkosi.postinst.chroot` exists, mkosi will chroot into the image and execute it as the post-installation script.

Scripts executed by mkosi receive the following environment variables:

* `$ARCHITECTURE` contains the architecture from the `Architecture=` setting. If `Architecture=` is not set, it will contain the native architecture of the host machine. See the documentation of `Architecture=` for possible values for this variable.

* `$CHROOT_SCRIPT` contains the path to the running script relative to the image root directory. The primary use case for this variable is in combination with the `mkosi-chroot` script. See the description of `mkosi-chroot` below for more information.

* `$SRCDIR` contains the path to the directory mkosi was invoked from, with any configured build sources mounted on top. `$CHROOT_SRCDIR` contains the value that `$SRCDIR` will have after invoking `mkosi-chroot`.

* `$BUILDDIR` is only defined if `mkosi.builddir` exists and points to the build directory to use. This is useful for all build systems that support out-of-tree builds to reuse already built artifacts from previous runs. `$CHROOT_BUILDDIR` contains the value that `$BUILDDIR` will have after invoking `mkosi-chroot`.

* `$DESTDIR` is a directory into which any installed software generated by a build script may be placed. This variable is only set when executing a build script. `$CHROOT_DESTDIR` contains the value that `$DESTDIR` will have after invoking `mkosi-chroot`.

* `$OUTPUTDIR` points to the staging directory used to store build artifacts generated during the build. `$CHROOT_OUTPUTDIR` contains the value that `$OUTPUTDIR` will have after invoking `mkosi-chroot`.
* `$PACKAGEDIR` points to the directory containing the local package repository. Build scripts can add more packages to the local repository by writing the packages to `$PACKAGEDIR`.

* `$BUILDROOT` is the root directory of the image being built, optionally with the build overlay mounted on top depending on the script that's being executed.

* `$WITH_DOCS` is either `0` or `1` depending on whether a build without or with installed documentation was requested (`WithDocs=yes`). A build script should suppress installation of any package documentation to `$DESTDIR` in case `$WITH_DOCS` is set to `0`.

* `$WITH_TESTS` is either `0` or `1` depending on whether a build without or with running the test suite was requested (`WithTests=no`). A build script should avoid running any unit or integration tests in case `$WITH_TESTS` is `0`.

* `$WITH_NETWORK` is either `0` or `1` depending on whether a build without or with networking is being executed (`WithNetwork=no`). A build script should avoid any network communication in case `$WITH_NETWORK` is `0`.

* `$SOURCE_DATE_EPOCH` is defined if requested (`SourceDateEpoch=TIMESTAMP`, `Environment=SOURCE_DATE_EPOCH=TIMESTAMP` or the host environment variable `$SOURCE_DATE_EPOCH`). This is useful to make builds reproducible. See [SOURCE_DATE_EPOCH](https://reproducible-builds.org/specs/source-date-epoch/) for more information.

* `$MKOSI_UID` and `$MKOSI_GID` are, respectively, the uid and gid of the user that invoked mkosi, potentially translated to a uid in the user namespace that mkosi is running in. These can be used in combination with `setpriv` to run commands as the user that invoked mkosi (e.g. `setpriv --reuid=$MKOSI_UID --regid=$MKOSI_GID --clear-groups <command>`).

Consult this table for which script receives which environment variables:

| Variable             | `mkosi.prepare` | `mkosi.build` | `mkosi.postinst` | `mkosi.finalize` |
|----------------------|-----------------|---------------|------------------|------------------|
| `$CHROOT_SCRIPT`     | X               | X             | X                | X                |
| `$SRCDIR`            | X               | X             | X                | X                |
| `$CHROOT_SRCDIR`     | X               | X             | X                | X                |
| `$BUILDDIR`          |                 | X             |                  |                  |
| `$CHROOT_BUILDDIR`   |                 | X             |                  |                  |
| `$DESTDIR`           |                 | X             |                  |                  |
| `$CHROOT_DESTDIR`    |                 | X             |                  |                  |
| `$OUTPUTDIR`         |                 | X             | X                | X                |
| `$CHROOT_OUTPUTDIR`  |                 | X             | X                | X                |
| `$BUILDROOT`         | X               | X             | X                | X                |
| `$WITH_DOCS`         | X               | X             |                  |                  |
| `$WITH_TESTS`        | X               | X             |                  |                  |
| `$WITH_NETWORK`      | X               | X             |                  |                  |
| `$SOURCE_DATE_EPOCH` | X               | X             | X                | X                |
| `$MKOSI_UID`         | X               | X             | X                | X                |
| `$MKOSI_GID`         | X               | X             | X                | X                |

Additionally, when a script is executed, a few scripts are made available via `$PATH` to simplify common use cases.

* `mkosi-chroot`: This script will chroot into the image and execute the given command. On top of chrooting into the image, it will also mount various files and directories (`$SRCDIR`, `$DESTDIR`, `$BUILDDIR`, `$OUTPUTDIR`, `$CHROOT_SCRIPT`) into the image and modify the corresponding environment variables to point to the locations inside the image. It will also mount APIVFS filesystems (`/proc`, `/dev`, ...) to make sure scripts and tools executed inside the chroot work properly. It also propagates `/etc/resolv.conf` from the host into the chroot if requested so that DNS resolution works inside the chroot. After the mkosi-chroot command exits, various mount points are cleaned up.

  For example, to invoke `ls` inside of the image, use the following:

  ```sh
  mkosi-chroot ls ...
  ```

  To execute the entire script inside the image, add a ".chroot" suffix to the name (`mkosi.build.chroot` instead of `mkosi.build`, etc.).
* For all of the supported package managers except portage (`dnf`, `rpm`, `apt`, `pacman`, `zypper`), scripts of the same name are put into `$PATH` that make sure these commands operate on the image's root directory with the configuration supplied by the user instead of on the host system. This means that from a script, you can do e.g. `dnf install vim` to install vim into the image. * `mkosi-as-caller`: This script uses `setpriv` to switch from the user `root` in the user namespace used for various build steps back to the original user that called mkosi. This is useful when we want to invoke build steps which will write to `$BUILDDIR` and we want to have the files owned by the calling user. For example, a complete `mkosi.build` script might be the following: ```sh set -ex mkosi-as-caller meson setup "$BUILDDIR/build" "$SRCDIR" mkosi-as-caller meson compile -C "$BUILDDIR/build" meson install -C "$BUILDDIR/build" --no-rebuild ``` * `git` is automatically invoked with `safe.directory=*` to avoid permissions errors when running as the root user in a user namespace. * `useradd` and `groupadd` are automatically invoked with `--root=$BUILDROOT` when executed outside of the image. When scripts are executed, any directories that are still writable are also made read-only (`/home`, `/var`, `/root`, ...) and only the minimal set of directories that need to be writable remain writable. This is to ensure that scripts can't mess with the host system when mkosi is running as root. Note that when executing scripts, all source directories are made ephemeral which means all changes made to source directories while running scripts are thrown away after the scripts finish executing. Use the output, build or cache directories if you need to persist data between builds. # Files To make it easy to build images for development versions of your projects, mkosi can read configuration data from the local directory, under the assumption that it is invoked from a *source* tree. Specifically, the following files are used if they exist in the local directory: * The **`mkosi.skeleton/`** directory or **`mkosi.skeleton.tar`** archive may be used to insert files into the image. The files are copied *before* the distribution packages are installed into the image. This allows creation of files that need to be provided early, for example to configure the package manager or set systemd presets. When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive. * The **`mkosi.extra/`** directory or **`mkosi.extra.tar`** archive may be used to insert additional files into the image, on top of what the distribution includes in its packages. They are similar to `mkosi.skeleton/` and `mkosi.skeleton.tar`, but the files are copied into the directory tree of the image *after* the OS was installed. When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive. * The **`mkosi.nspawn`** nspawn settings file will be copied into the same place as the output image file, if it exists. This is useful since nspawn looks for settings files next to image files it boots, for additional container runtime settings. * The **`mkosi.cache/`** directory, if it exists, is automatically used as package download cache, in order to speed repeated runs of the tool. 
* The **`mkosi.builddir/`** directory, if it exists, is automatically used as out-of-tree build directory, if the build commands in the `mkosi.build` scripts support it. Specifically, this directory will be mounted into the build container, and the `$BUILDDIR` environment variable will be set to it when the build scripts are invoked. A build script may then use this directory as build directory, for automake-style or ninja-style out-of-tree builds. This speeds up builds considerably, in particular when `mkosi` is used in incremental mode (`-i`): not only the image and build overlay, but also the build tree is reused between subsequent invocations. Note that if this directory does not exist the `$BUILDDIR` environment variable is not set, and it is up to the build scripts to decide whether to do an in-tree or an out-of-tree build, and which build directory to use.

* The **`mkosi.rootpw`** file can be used to provide the password for the root user of the image. If the password is prefixed with `hashed:` it is treated as an already hashed root password. The password may optionally be followed by a newline character which is implicitly removed. The file must have an access mode of 0600 or less. If this file does not exist, the distribution's default root password is set (which usually means access to the root user is blocked).

* The **`mkosi.passphrase`** file provides the passphrase to use when LUKS encryption is selected. It should contain the passphrase literally, and not end in a newline character (i.e. in the same format as cryptsetup and `/etc/crypttab` expect the passphrase files). The file must have an access mode of 0600 or less.

* The **`mkosi.crt`** and **`mkosi.key`** files contain an X.509 certificate and PEM private key to use when signing is required (UEFI SecureBoot, verity, ...).

* The **`mkosi.output/`** directory is used to store all build artifacts.

* The **`mkosi.credentials/`** directory is used as a source of extra credentials similar to the `Credentials=` option. For each file in the directory, the filename will be used as the credential name and the file contents become the credential value, or, if the file is executable, mkosi will execute the file and the command's output to stdout will be used as the credential value. Output to stderr will be ignored. Credentials configured with `Credentials=` take precedence over files in `mkosi.credentials`.

* The **`mkosi.repart/`** directory is used as the source for systemd-repart partition definition files which are passed to systemd-repart when building a disk image. If it does not exist and the `RepartDirectories=` setting is not configured, mkosi will default to the following partition definition files:

  `00-esp.conf` (if we're building a bootable image):
  ```
  [Partition]
  Type=esp
  Format=vfat
  CopyFiles=/boot:/
  CopyFiles=/efi:/
  SizeMinBytes=512M
  SizeMaxBytes=512M
  ```

  `05-bios.conf` (if we're building a BIOS bootable image):
  ```
  [Partition]
  # UUID of the grub BIOS boot partition which grub needs on GPT to
  # embed itself into.
  Type=21686148-6449-6e6f-744e-656564454649
  SizeMinBytes=1M
  SizeMaxBytes=1M
  ```

  `10-root.conf`:
  ```
  [Partition]
  Type=root
  Format=<distribution-default-filesystem>
  CopyFiles=/
  Minimize=guess
  ```

  Note that if either `mkosi.repart/` is found or `RepartDirectories=` is used, we will not use any of the default partition definitions.

All these files are optional.
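As an example of replacing these defaults, a hypothetical `mkosi.repart/10-root.conf` pinning the root partition to a fixed size and an explicit file system could look like this (the values shown are illustrative, not mkosi defaults):

```
[Partition]
Type=root
Format=ext4
CopyFiles=/
SizeMinBytes=2G
SizeMaxBytes=2G
```

Remember that as soon as `mkosi.repart/` exists, none of the default definitions above are used, so a bootable image then also needs its own ESP definition.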
Note that the location of all these files may also be configured during invocation via command line switches, and as settings in `mkosi.conf`, in case the default settings are not acceptable for a project.

# CACHING

`mkosi` supports three different caches for speeding up repetitive re-building of images. Specifically:

1. The package cache of the distribution package manager may be cached between builds. This is configured with the `--cache-dir=` option or the `mkosi.cache/` directory. This form of caching relies on the distribution's package manager, and caches distribution packages (RPM, DEB, …) after they are downloaded, but before they are unpacked.

2. If the incremental build mode is enabled with `--incremental`, cached copies of the final image and build overlay are made immediately before the build sources are copied in (for the build overlay) or the artifacts generated by `mkosi.build` are copied in (in case of the final image). This form of caching allows bypassing the time-consuming package unpacking step of the distribution package managers, but is only effective if the list of packages to use remains stable while the build sources and scripts change regularly. Note that this cache requires manual flushing: whenever the package list is modified the cached images need to be explicitly removed before the next re-build, using the `-f` switch.

3. Finally, between multiple builds the build artifact directory may be shared, using the `mkosi.builddir/` directory. This directory allows build systems such as Meson to reuse already compiled sources from a previous build, thus speeding up the build process of a `mkosi.build` build script.

The package cache and incremental mode are unconditionally useful. The final cache only applies to uses of `mkosi` with a source tree and build script. When all three are enabled together, turn-around times for complete image builds are minimal, as only changed source files need to be recompiled.

# Building multiple images

If the `mkosi.images/` directory exists, mkosi will load individual image configurations from it and build each of them. Image configurations can be either directories containing mkosi configuration files or regular files with the `.conf` extension.

When image configurations are found in `mkosi.images/`, mkosi will build the configured images and all of their dependencies (or all of them if no images were explicitly configured using `Images=`). To add dependencies between images, the `Dependencies=` setting can be used.

When images are defined, mkosi will first read the global configuration (configuration outside of the `mkosi.images/` directory), followed by the image specific configuration. This means that global configuration takes precedence over image specific configuration.

Images can refer to outputs of images they depend on. Specifically, for the following options, mkosi will only check whether the inputs exist just before building the image:

- `BaseTrees=`
- `PackageManagerTrees=`
- `SkeletonTrees=`
- `ExtraTrees=`
- `ToolsTree=`
- `Initrds=`

To refer to outputs of an image's dependencies, simply configure any of these options with a relative path to the output to use in the output directory of the dependency, or use the `%O` specifier to refer to the output directory.

A good example of how to build multiple images can be found in the [systemd](https://github.com/systemd/systemd/tree/main/mkosi.images) repository.

# ENVIRONMENT VARIABLES

* `$MKOSI_LESS` overrides options for `less` when it is invoked by `mkosi` to page output.
* `$MKOSI_DNF` can be used to override the executable used as `dnf`. This is particularly useful to select between `dnf` and `dnf5`.

# EXAMPLES

Create and run a raw *GPT* image with *ext4*, as `image.raw`:

```console
# mkosi -p systemd --incremental boot
```

Create and run a bootable *GPT* image, as `foobar.raw`:

```console
$ mkosi -d fedora -p kernel-core -p systemd -p systemd-boot -p udev -o foobar.raw
# mkosi --output foobar.raw boot
$ mkosi --output foobar.raw qemu
```

Create and run a *Fedora Linux* image in a plain directory:

```console
# mkosi --distribution fedora --format directory boot
```

Create a compressed image `image.raw.xz` with *SSH* installed and add a checksum file:

```console
$ mkosi --distribution fedora --format disk --checksum --compress-output --package=openssh-clients
```

Inside the source directory of an `automake`-based project, configure *mkosi* so that simply invoking `mkosi` without any parameters builds an OS image containing a built version of the project in its current state:

```console
$ cat >mkosi.conf <<EOF
...
EOF
$ cat >mkosi.build <<EOF
...
EOF
```

mkosi-20.2/mkosi/run.py
# SPDX-License-Identifier: LGPL-2.1+

import asyncio
import contextlib
import ctypes
import ctypes.util
import errno
import fcntl
import logging
import os
import pwd
import queue
import shlex
import shutil
import signal
import subprocess
import sys
import threading
from collections.abc import Awaitable, Callable, Collection, Iterator, Mapping, Sequence
from pathlib import Path
from types import TracebackType
from typing import Any, NoReturn, Optional

from mkosi.log import ARG_DEBUG, ARG_DEBUG_SHELL, die
from mkosi.types import _FILE, CompletedProcess, PathString, Popen
from mkosi.util import INVOKING_USER, flock

CLONE_NEWUSER = 0x10000000
SUBRANGE = 65536


def unshare(flags: int) -> None:
    libc_name = ctypes.util.find_library("c")
    if libc_name is None:
        die("Could not find libc")
    libc = ctypes.CDLL(libc_name, use_errno=True)

    if libc.unshare(ctypes.c_int(flags)) != 0:
        e = ctypes.get_errno()
        raise OSError(e, os.strerror(e))


def read_subrange(path: Path) -> int:
    uid = str(os.getuid())
    try:
        user = pwd.getpwuid(os.getuid()).pw_name
    except KeyError:
        user = None

    for line in path.read_text().splitlines():
        name, start, count = line.split(":")

        if name == uid or name == user:
            break
    else:
        die(f"No mapping found for {user or uid} in {path}")

    if int(count) < SUBRANGE:
        die(
            f"subuid/subgid range length must be at least {SUBRANGE}, "
            f"got {count} for {user or uid} from line '{line}'"
        )

    return int(start)


def become_root() -> None:
    """
    Set up a new user namespace mapping using /etc/subuid and /etc/subgid.

    The current user will be mapped to root and 65436 will be mapped to the UID/GID of the invoking user.
    The other IDs will be mapped through.

    The function modifies the uid, gid of the INVOKING_USER object to the uid, gid of the invoking user in the
    user namespace.
    """
    if os.getuid() == 0:
        return

    subuid = read_subrange(Path("/etc/subuid"))
    subgid = read_subrange(Path("/etc/subgid"))

    pid = os.getpid()

    # We map the private UID range configured in /etc/subuid and /etc/subgid into the container using
    # newuidmap and newgidmap. On top of that, we also make sure to map in the user running mkosi so that
    # we can still chown stuff to that user or run stuff as that user, which will make sure any
    # generated files are owned by that user. We don't map to the last user in the range as the last user
    # is sometimes used in tests as a default value and mapping to that user might break those tests.
    newuidmap = [
        "flock", "--exclusive", "--no-fork", "/etc/subuid", "newuidmap", pid,
        0, subuid, SUBRANGE - 100,
        SUBRANGE - 100, os.getuid(), 1,
        SUBRANGE - 100 + 1, subuid + SUBRANGE - 100 + 1, 99
    ]

    newgidmap = [
        "flock", "--exclusive", "--no-fork", "/etc/subuid", "newgidmap", pid,
        0, subgid, SUBRANGE - 100,
        SUBRANGE - 100, os.getgid(), 1,
        SUBRANGE - 100 + 1, subgid + SUBRANGE - 100 + 1, 99
    ]

    newuidmap = [str(x) for x in newuidmap]
    newgidmap = [str(x) for x in newgidmap]

    # newuidmap and newgidmap have to run from outside the user namespace to be able to assign a uid mapping
    # to the process in the user namespace. The mapping can only be assigned after the user namespace has
    # been unshared.
    # To make this work, we first lock /etc/subuid, then spawn the newuidmap and newgidmap
    # processes, which we execute using flock so they don't execute before they can get a lock on /etc/subuid,
    # then we unshare the user namespace and finally we unlock /etc/subuid, which allows the newuidmap and
    # newgidmap processes to execute. We then wait for the processes to finish before continuing.
    with flock(Path("/etc/subuid")) as fd, spawn(newuidmap) as uidmap, spawn(newgidmap) as gidmap:
        unshare(CLONE_NEWUSER)
        fcntl.flock(fd, fcntl.LOCK_UN)
        uidmap.wait()
        gidmap.wait()

    # By default, we're root in the user namespace because if we were our current user by default, we
    # wouldn't be able to chown stuff to be owned by root while the reverse is possible.
    os.setresuid(0, 0, 0)
    os.setresgid(0, 0, 0)
    os.setgroups([0])

    INVOKING_USER.uid = SUBRANGE - 100
    INVOKING_USER.gid = SUBRANGE - 100


def make_foreground_process(*, new_process_group: bool = True) -> None:
    """
    If we're connected to a terminal, put the process in a new process group and make that the foreground
    process group so that only this process receives SIGINT.
    """
    STDERR_FILENO = 2
    if os.isatty(STDERR_FILENO):
        if new_process_group:
            os.setpgrp()
        old = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
        try:
            os.tcsetpgrp(STDERR_FILENO, os.getpgrp())
        except OSError as e:
            if e.errno != errno.ENOTTY:
                raise e
        signal.signal(signal.SIGTTOU, old)


def ensure_exc_info() -> tuple[type[BaseException], BaseException, TracebackType]:
    exctype, exc, tb = sys.exc_info()
    assert exctype
    assert exc
    assert tb
    return (exctype, exc, tb)


@contextlib.contextmanager
def uncaught_exception_handler(exit: Callable[[int], NoReturn] = sys.exit) -> Iterator[None]:
    rc = 0
    try:
        yield
    except SystemExit as e:
        if ARG_DEBUG.get():
            sys.excepthook(*ensure_exc_info())

        rc = e.code if isinstance(e.code, int) else 1
    except KeyboardInterrupt:
        if ARG_DEBUG.get():
            sys.excepthook(*ensure_exc_info())
        else:
            logging.error("Interrupted")

        rc = 1
    except subprocess.CalledProcessError as e:
        # Failures from qemu, ssh and systemd-nspawn are expected and we won't log stacktraces for those.
        # Failures from self come from the forks we spawn to build images in a user namespace. We've already done all
        # the logging for those failures so we don't log stacktraces for those either.
        if (
            ARG_DEBUG.get()
            and e.cmd
            and e.cmd[0] not in ("self", "ssh", "systemd-nspawn")
            and not e.cmd[0].startswith("qemu")
        ):
            sys.excepthook(*ensure_exc_info())

        # We always log when subprocess.CalledProcessError is raised, so we don't log again here.
        rc = e.returncode
    except BaseException:
        sys.excepthook(*ensure_exc_info())
        rc = 1
    finally:
        sys.stdout.flush()
        sys.stderr.flush()
        exit(rc)


def fork_and_wait(target: Callable[[], None]) -> None:
    pid = os.fork()
    if pid == 0:
        with uncaught_exception_handler(exit=os._exit):
            make_foreground_process()
            target()

    try:
        _, status = os.waitpid(pid, 0)
    except BaseException:
        os.kill(pid, signal.SIGTERM)
        _, status = os.waitpid(pid, 0)
    finally:
        make_foreground_process(new_process_group=False)

    rc = os.waitstatus_to_exitcode(status)

    if rc != 0:
        raise subprocess.CalledProcessError(rc, ["self"])


@contextlib.contextmanager
def sigkill_to_sigterm() -> Iterator[None]:
    old = signal.SIGKILL
    signal.SIGKILL = signal.SIGTERM

    try:
        yield
    finally:
        signal.SIGKILL = old


def log_process_failure(cmdline: Sequence[str], returncode: int) -> None:
    if returncode < 0:
        logging.error(f"Interrupted by {signal.Signals(-returncode).name} signal")
    else:
        logging.error(f"\"{shlex.join(cmdline)}\" returned non-zero exit code {returncode}.")


def run(
    cmdline: Sequence[PathString],
    check: bool = True,
    stdin: _FILE = None,
    stdout: _FILE = None,
    stderr: _FILE = None,
    input: Optional[str] = None,
    user: Optional[int] = None,
    group: Optional[int] = None,
    env: Mapping[str, str] = {},
    cwd: Optional[Path] = None,
    log: bool = True,
    preexec_fn: Optional[Callable[[], None]] = None,
    sandbox: Sequence[PathString] = (),
) -> CompletedProcess:
    sandbox = [os.fspath(x) for x in sandbox]
    cmdline = [os.fspath(x) for x in cmdline]

    if ARG_DEBUG.get():
        logging.info(f"+ {shlex.join(sandbox + cmdline)}")

    if not stdout and not stderr:
        # Unless explicit redirection is done, print all subprocess
        # output on stderr, since we do so as well for mkosi's own
        # output.
        stdout = sys.stderr

    env = {
        "PATH": os.environ["PATH"],
        "TERM": os.getenv("TERM", "vt220"),
        "LANG": "C.UTF-8",
        **env,
    }

    if "TMPDIR" in os.environ:
        env["TMPDIR"] = os.environ["TMPDIR"]

    if ARG_DEBUG.get():
        env["SYSTEMD_LOG_LEVEL"] = "debug"

    if input is not None:
        assert stdin is None  # stdin and input cannot be specified together
    elif stdin is None:
        stdin = subprocess.DEVNULL

    def preexec() -> None:
        make_foreground_process()
        if preexec_fn:
            preexec_fn()

    if (
        sandbox
        and subprocess.run(sandbox + ["sh", "-c", "command -v setpgid"], stdout=subprocess.DEVNULL).returncode == 0
    ):
        cmdline = ["setpgid", "--foreground", "--"] + cmdline

    try:
        # subprocess.run() will use SIGKILL to kill processes when an exception is raised.
        # We'd prefer it to use SIGTERM instead but since we can't configure which signal
        # should be used, we override the constant in the signal module instead before we call
        # subprocess.run().
        with sigkill_to_sigterm():
            return subprocess.run(
                sandbox + cmdline,
                check=check,
                stdin=stdin,
                stdout=stdout,
                stderr=stderr,
                input=input,
                text=True,
                user=user,
                group=group,
                env=env,
                cwd=cwd,
                preexec_fn=preexec,
            )
    except FileNotFoundError as e:
        die(f"{e.filename} not found.")
    except subprocess.CalledProcessError as e:
        if log:
            log_process_failure(cmdline, e.returncode)
        if ARG_DEBUG_SHELL.get():
            subprocess.run(
                [*sandbox, "sh"],
                check=False,
                stdin=sys.stdin,
                text=True,
                user=user,
                group=group,
                env=env,
                cwd=cwd,
                preexec_fn=preexec,
            )
        # Remove the sandboxing stuff from the command line to show a more readable error to users.
e.cmd = cmdline raise finally: make_foreground_process(new_process_group=False) @contextlib.contextmanager def spawn( cmdline: Sequence[PathString], stdin: _FILE = None, stdout: _FILE = None, stderr: _FILE = None, user: Optional[int] = None, group: Optional[int] = None, pass_fds: Collection[int] = (), env: Mapping[str, str] = {}, log: bool = True, foreground: bool = False, preexec_fn: Optional[Callable[[], None]] = None, sandbox: Sequence[PathString] = (), ) -> Iterator[Popen]: sandbox = [os.fspath(x) for x in sandbox] cmdline = [os.fspath(x) for x in cmdline] if ARG_DEBUG.get(): logging.info(f"+ {shlex.join(sandbox + cmdline)}") if not stdout and not stderr: # Unless explicit redirection is done, print all subprocess # output on stderr, since we do so as well for mkosi's own # output. stdout = sys.stderr env = { "PATH": os.environ["PATH"], "TERM": os.getenv("TERM", "vt220"), "LANG": "C.UTF-8", **env, } def preexec() -> None: if foreground: make_foreground_process() if preexec_fn: preexec_fn() if ( foreground and sandbox and subprocess.run(sandbox + ["sh", "-c", "command -v setpgid"], stdout=subprocess.DEVNULL).returncode == 0 ): cmdline = ["setpgid", "--foreground", "--"] + cmdline try: with subprocess.Popen( sandbox + cmdline, stdin=stdin, stdout=stdout, stderr=stderr, text=True, user=user, group=group, pass_fds=pass_fds, env=env, preexec_fn=preexec, ) as proc: yield proc except FileNotFoundError as e: die(f"{e.filename} not found.") except subprocess.CalledProcessError as e: if log: log_process_failure(cmdline, e.returncode) raise e finally: if foreground: make_foreground_process(new_process_group=False) def find_binary(*names: PathString, root: Path = Path("/")) -> Optional[Path]: if root != Path("/"): path = ":".join(os.fspath(p) for p in (root / "usr/bin", root / "usr/sbin")) else: path = os.environ["PATH"] for name in names: if Path(name).is_absolute(): name = root / Path(name).relative_to("/") elif "/" in str(name): name = root / name if (binary := shutil.which(name, path=path)): if root != Path("/") and not Path(binary).is_relative_to(root): return Path(binary) else: return Path("/") / Path(binary).relative_to(root) return None class AsyncioThread(threading.Thread): """ The default threading.Thread() is not interruptable, so we make our own version by using the concurrency feature in python that is interruptable, namely asyncio. Additionally, we store any exception that the coroutine raises and re-raise it in join() if no other exception was raised before. 
""" def __init__(self, target: Awaitable[Any], *args: Any, **kwargs: Any) -> None: self.target = target self.loop: queue.SimpleQueue[asyncio.AbstractEventLoop] = queue.SimpleQueue() self.exc: queue.SimpleQueue[BaseException] = queue.SimpleQueue() super().__init__(*args, **kwargs) def run(self) -> None: async def wrapper() -> None: self.loop.put(asyncio.get_running_loop()) await self.target try: asyncio.run(wrapper()) except asyncio.CancelledError: pass except BaseException as e: self.exc.put(e) def cancel(self) -> None: loop = self.loop.get() for task in asyncio.tasks.all_tasks(loop): loop.call_soon_threadsafe(task.cancel) def __enter__(self) -> "AsyncioThread": self.start() return self def __exit__( self, type: Optional[type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: self.cancel() self.join() if type is None: try: raise self.exc.get_nowait() except queue.Empty: pass mkosi-20.2/mkosi/sandbox.py000066400000000000000000000176341455345632200157350ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import enum import logging import os import uuid from collections.abc import Sequence from pathlib import Path from typing import Optional from mkosi.types import PathString from mkosi.util import INVOKING_USER, flatten, one_zero # https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h class Capability(enum.Enum): CAP_NET_ADMIN = 12 def have_effective_cap(capability: Capability) -> bool: for line in Path("/proc/self/status").read_text().splitlines(): if line.startswith("CapEff:"): hexcap = line.removeprefix("CapEff:").strip() break else: logging.warning(f"\"CapEff:\" not found in /proc/self/status, assuming we don't have {capability}") return False return (int(hexcap, 16) & (1 << capability.value)) != 0 def finalize_passwd_mounts(root: Path) -> list[PathString]: """ If passwd or a related file exists in the apivfs directory, bind mount it over the host files while we run the command, to make sure that the command we run uses user/group information from the apivfs directory instead of from the host. """ options: list[PathString] = [] for f in ("passwd", "group", "shadow", "gshadow"): options += ["--ro-bind-try", root / "etc" / f, f"/etc/{f}"] return options def finalize_crypto_mounts(tools: Path = Path("/")) -> list[PathString]: mounts = [ (tools / subdir, Path("/") / subdir) for subdir in ( Path("etc/pki"), Path("etc/ssl"), Path("etc/crypto-policies"), Path("etc/ca-certificates"), Path("etc/pacman.d/gnupg"), Path("var/lib/ca-certificates"), ) if (tools / subdir).exists() ] return flatten( ["--ro-bind", src, target] for src, target in sorted(set(mounts), key=lambda s: s[1]) ) def sandbox_cmd( *, network: bool = False, devices: bool = False, scripts: Optional[Path] = None, tools: Path = Path("/"), relaxed: bool = False, options: Sequence[PathString] = (), ) -> list[PathString]: cmdline: list[PathString] = [] if not relaxed: # We want to use an empty subdirectory in the host's /var/tmp as the sandbox's /var/tmp. To make sure it only # gets created when we run the sandboxed command and cleaned up when the sandboxed command exits, we create it # using shell. 
vartmp = f"/var/tmp/mkosi-var-tmp-{uuid.uuid4().hex[:16]}" cmdline += ["sh", "-c", f"trap 'rm -rf {vartmp}' EXIT && mkdir --mode 1777 {vartmp} && $0 \"$@\""] else: vartmp = None cmdline += [ "bwrap", "--ro-bind", tools / "usr", "/usr", *(["--unshare-net"] if not network and have_effective_cap(Capability.CAP_NET_ADMIN) else []), "--die-with-parent", "--proc", "/proc", "--setenv", "SYSTEMD_OFFLINE", one_zero(network), ] if relaxed: cmdline += ["--bind", "/tmp", "/tmp"] else: cmdline += [ "--tmpfs", "/tmp", "--unshare-ipc", ] if (tools / "nix/store").exists(): cmdline += ["--bind", tools / "nix/store", "/nix/store"] if devices or relaxed: cmdline += [ "--bind", "/sys", "/sys", "--bind", "/run", "/run", "--dev-bind", "/dev", "/dev", ] else: cmdline += ["--dev", "/dev"] if relaxed: dirs = ("/etc", "/opt", "/srv", "/media", "/mnt", "/var", os.fspath(INVOKING_USER.home())) for d in dirs: if Path(d).exists(): cmdline += ["--bind", d, d] if len(Path.cwd().parents) >= 2: # `Path.parents` only supports slices and negative indexing from Python 3.10 onwards. # TODO: Remove list() when we depend on Python 3.10 or newer. d = os.fspath(list(Path.cwd().parents)[-2]) elif len(Path.cwd().parents) == 1: d = os.fspath(Path.cwd()) else: d = "" if d and d not in (*dirs, "/home", "/usr", "/nix", "/tmp"): cmdline += ["--bind", d, d] if vartmp: cmdline += ["--bind", vartmp, "/var/tmp"] for d in ("bin", "sbin", "lib", "lib32", "lib64"): if (p := tools / d).is_symlink(): cmdline += ["--symlink", p.readlink(), Path("/") / p.relative_to(tools)] path = "/usr/bin:/usr/sbin" if tools != Path("/") else os.environ["PATH"] cmdline += [ "--setenv", "PATH", f"{scripts or ''}:{path}", *options, ] if not relaxed: cmdline += ["--symlink", "../proc/self/mounts", "/etc/mtab"] # If we're using /usr from a tools tree, we have to use /etc/alternatives from the tools tree as well if it # exists since that points directly back to /usr. Apply this after the options so the caller can mount # something else to /etc without overriding this mount. In relaxed mode, we only do this if /etc/alternatives # already exists on the host as otherwise we'd modify the host's /etc by creating the mountpoint ourselves (or # fail when trying to create it). if (tools / "etc/alternatives").exists() and (not relaxed or Path("/etc/alternatives").exists()): cmdline += ["--ro-bind", tools / "etc/alternatives", "/etc/alternatives"] if scripts: cmdline += ["--ro-bind", scripts, scripts] if network and not relaxed: cmdline += ["--bind", "/etc/resolv.conf", "/etc/resolv.conf"] # bubblewrap creates everything with a restricted mode so relax stuff as needed. ops = [] if not devices: ops += ["chmod 1777 /dev/shm"] if not relaxed: ops += ["chmod 755 /etc"] ops += ["exec $0 \"$@\""] cmdline += ["sh", "-c", " && ".join(ops)] return cmdline def apivfs_cmd(root: Path) -> list[PathString]: return [ "bwrap", "--dev-bind", "/", "/", "--tmpfs", root / "run", "--tmpfs", root / "tmp", "--bind", "/var/tmp", root / "var/tmp", "--proc", root / "proc", "--dev", root / "dev", # APIVFS generally means chrooting is going to happen so unset TMPDIR just to be safe. "--unsetenv", "TMPDIR", # Make sure /etc/machine-id is not overwritten by any package manager post install scripts. "--ro-bind-try", root / "etc/machine-id", root / "etc/machine-id", *finalize_passwd_mounts(root), "sh", "-c", f"chmod 1777 {root / 'tmp'} {root / 'var/tmp'} {root / 'dev/shm'} && " f"chmod 755 {root / 'run'} && " # Make sure anything running in the root directory thinks it's in a container. 
$container can't always be # accessed so we write /run/host/container-manager as well which is always accessible. f"mkdir -m 755 {root}/run/host && echo mkosi >{root}/run/host/container-manager && " "exec $0 \"$@\"", ] def chroot_cmd(root: Path, *, resolve: bool = False, options: Sequence[PathString] = ()) -> list[PathString]: cmdline: list[PathString] = [ "sh", "-c", f"trap 'rm -rf {root / 'work'}' EXIT && " # /etc/resolv.conf can be a dangling symlink to /run/systemd/resolve/stub-resolv.conf. Bubblewrap tries to call # mkdir() on each component of the path which means it will try to call # mkdir(/run/systemd/resolve/stub-resolv.conf) which will fail unless /run/systemd/resolve exists already so # we make sure that it already exists. f"mkdir -p -m 755 {root / 'work'} {root / 'run/systemd'} {root / 'run/systemd/resolve'} && " # No exec here because we need to clean up the /work directory afterwards. f"$0 \"$@\"", "bwrap", "--dev-bind", root, "/", "--setenv", "container", "mkosi", "--setenv", "HOME", "/", "--setenv", "PATH", "/work/scripts:/usr/bin:/usr/sbin", ] if resolve: cmdline += ["--ro-bind-try", "/etc/resolv.conf", "/etc/resolv.conf"] cmdline += options return apivfs_cmd(root) + cmdline mkosi-20.2/mkosi/tree.py000066400000000000000000000114431455345632200152260ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import contextlib import errno import shutil import subprocess import tempfile from collections.abc import Iterator, Sequence from pathlib import Path from mkosi.config import ConfigFeature from mkosi.log import die from mkosi.run import find_binary, run from mkosi.types import PathString def statfs(path: Path, *, sandbox: Sequence[PathString] = ()) -> str: return run(["stat", "--file-system", "--format", "%T", path], sandbox=sandbox, stdout=subprocess.PIPE).stdout.strip() def is_subvolume(path: Path, *, sandbox: Sequence[PathString] = ()) -> bool: return path.is_dir() and statfs(path, sandbox=sandbox) == "btrfs" and path.stat().st_ino == 256 def make_tree( path: Path, *, use_subvolumes: ConfigFeature = ConfigFeature.disabled, tools: Path = Path("/"), sandbox: Sequence[PathString] = (), ) -> None: if use_subvolumes == ConfigFeature.enabled and not find_binary("btrfs", root=tools): die("Subvolumes requested but the btrfs command was not found") if statfs(path.parent, sandbox=sandbox) != "btrfs": if use_subvolumes == ConfigFeature.enabled: die(f"Subvolumes requested but {path} is not located on a btrfs filesystem") path.mkdir() return if use_subvolumes != ConfigFeature.disabled and find_binary("btrfs", root=tools) is not None: result = run(["btrfs", "subvolume", "create", path], sandbox=sandbox, check=use_subvolumes == ConfigFeature.enabled).returncode else: result = 1 if result != 0: path.mkdir() @contextlib.contextmanager def preserve_target_directories_stat(src: Path, dst: Path) -> Iterator[None]: dirs = [p for d in src.glob("**/") if (dst / (p := d.relative_to(src))).exists()] with tempfile.TemporaryDirectory() as tmp: for d in dirs: (tmp / d).mkdir(exist_ok=True) shutil.copystat(dst / d, tmp / d) yield for d in dirs: shutil.copystat(tmp / d, dst / d) def copy_tree( src: Path, dst: Path, *, preserve: bool = True, dereference: bool = False, use_subvolumes: ConfigFeature = ConfigFeature.disabled, tools: Path = Path("/"), sandbox: Sequence[PathString] = (), ) -> None: subvolume = (use_subvolumes == ConfigFeature.enabled or use_subvolumes == ConfigFeature.auto and find_binary("btrfs", root=tools) is not None) if use_subvolumes == ConfigFeature.enabled and not 
find_binary("btrfs", root=tools):
        die("Subvolumes requested but the btrfs command was not found")

    copy: list[PathString] = [
        "cp",
        "--recursive",
        "--dereference" if dereference else "--no-dereference",
        f"--preserve=mode,links{',timestamps,ownership,xattr' if preserve else ''}",
        "--reflink=auto",
        src, dst,
    ]

    # If the source and destination are both directories, we want to merge the source directory with the
    # destination directory. If the source is a file and the destination is a directory, we want to copy
    # the source inside the directory.
    if src.is_dir():
        copy += ["--no-target-directory"]

    # Subvolumes always have inode 256 so we can use that to check if a directory is a subvolume.
    if (
        not subvolume
        or not preserve
        or not is_subvolume(src, sandbox=sandbox)
        or not find_binary("btrfs", root=tools)
        or (dst.exists() and any(dst.iterdir()))
    ):
        with (
            preserve_target_directories_stat(src, dst)
            if not preserve
            else contextlib.nullcontext()
        ):
            run(copy, sandbox=sandbox)
        return

    # btrfs can't snapshot to an existing directory so make sure the destination does not exist.
    if dst.exists():
        dst.rmdir()

    result = run(["btrfs", "subvolume", "snapshot", src, dst],
                 check=use_subvolumes == ConfigFeature.enabled, sandbox=sandbox).returncode
    if result != 0:
        with (
            preserve_target_directories_stat(src, dst)
            if not preserve
            else contextlib.nullcontext()
        ):
            run(copy, sandbox=sandbox)


def rmtree(*paths: Path, sandbox: Sequence[PathString] = ()) -> None:
    if paths:
        run(["rm", "-rf", "--", *paths], sandbox=sandbox)


def move_tree(
    src: Path,
    dst: Path,
    *,
    use_subvolumes: ConfigFeature = ConfigFeature.disabled,
    tools: Path = Path("/"),
    sandbox: Sequence[PathString] = (),
) -> None:
    if src == dst:
        return

    if dst.is_dir():
        dst = dst / src.name

    try:
        src.rename(dst)
    except OSError as e:
        if e.errno != errno.EXDEV:
            raise e

        copy_tree(src, dst, use_subvolumes=use_subvolumes, tools=tools, sandbox=sandbox)
        rmtree(src, sandbox=sandbox)
mkosi-20.2/mkosi/types.py000066400000000000000000000021351455345632200154310ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+

import subprocess
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any, Protocol, TypeVar, Union

# These types are only generic during type checking and not at runtime, leading
# to a TypeError during compilation.
# Let's be as strict as we can with the description for the usage we have.
if TYPE_CHECKING:
    CompletedProcess = subprocess.CompletedProcess[str]
    Popen = subprocess.Popen[str]
else:
    CompletedProcess = subprocess.CompletedProcess
    Popen = subprocess.Popen

# Borrowed from https://github.com/python/typeshed/blob/3d14016085aed8bcf0cf67e9e5a70790ce1ad8ea/stdlib/3/subprocess.pyi#L24
_FILE = Union[None, int, IO[Any]]
PathString = Union[Path, str]

# Borrowed from
# https://github.com/python/typeshed/blob/ec52bf1adde1d3183d0595d2ba982589df48dff1/stdlib/_typeshed/__init__.pyi#L19
# and
# https://github.com/python/typeshed/blob/ec52bf1adde1d3183d0595d2ba982589df48dff1/stdlib/_typeshed/__init__.pyi#L224
_T_co = TypeVar("_T_co", covariant=True)

class SupportsRead(Protocol[_T_co]):
    def read(self, __length: int = ...) -> _T_co: ...
mkosi-20.2/mkosi/util.py000066400000000000000000000174001455345632200152430ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import ast import contextlib import copy import enum import fcntl import functools import importlib import importlib.resources import itertools import logging import os import pwd import re import resource import stat import tempfile from collections.abc import Iterable, Iterator, Mapping, Sequence from pathlib import Path from types import ModuleType from typing import Any, Callable, TypeVar, no_type_check from mkosi.types import PathString T = TypeVar("T") V = TypeVar("V") def dictify(f: Callable[..., Iterator[tuple[T, V]]]) -> Callable[..., dict[T, V]]: def wrapper(*args: Any, **kwargs: Any) -> dict[T, V]: return dict(f(*args, **kwargs)) return functools.update_wrapper(wrapper, f) @dictify def read_env_file(path: Path) -> Iterator[tuple[str, str]]: with path.open() as f: for line_number, line in enumerate(f, start=1): line = line.rstrip() if not line or line.startswith("#"): continue if (m := re.match(r"([A-Z][A-Z_0-9]+)=(.*)", line)): name, val = m.groups() if val and val[0] in "\"'": val = ast.literal_eval(val) yield name, val else: logging.info(f"{path}:{line_number}: bad line {line!r}") def read_os_release(root: Path = Path("/")) -> dict[str, str]: filename = root / "etc/os-release" if not filename.exists(): filename = root / "usr/lib/os-release" return read_env_file(filename) def format_rlimit(rlimit: int) -> str: limits = resource.getrlimit(rlimit) soft = "infinity" if limits[0] == resource.RLIM_INFINITY else str(limits[0]) hard = "infinity" if limits[1] == resource.RLIM_INFINITY else str(limits[1]) return f"{soft}:{hard}" def sort_packages(packages: Iterable[str]) -> list[str]: """Sorts packages: normal first, paths second, conditional third""" m = {"(": 2, "/": 1} return sorted(packages, key=lambda name: (m.get(name[0], 0), name)) def flatten(lists: Iterable[Iterable[T]]) -> list[T]: """Flatten a sequence of sequences into a single list.""" return list(itertools.chain.from_iterable(lists)) class INVOKING_USER: uid = int(os.getenv("SUDO_UID") or os.getenv("PKEXEC_UID") or os.getuid()) gid = int(os.getenv("SUDO_GID") or os.getgid()) @classmethod def init(cls) -> None: name = cls.name() home = cls.home() logging.debug(f"Running as user '{name}' ({cls.uid}:{cls.gid}) with home {home}.") @classmethod def is_running_user(cls) -> bool: return cls.uid == os.getuid() @classmethod @functools.lru_cache(maxsize=1) def name(cls) -> str: return pwd.getpwuid(cls.uid).pw_name @classmethod @functools.lru_cache(maxsize=1) def home(cls) -> Path: return Path(f"~{cls.name()}").expanduser() @contextlib.contextmanager def chdir(directory: PathString) -> Iterator[None]: old = Path.cwd() if old == directory: yield return try: os.chdir(directory) yield finally: os.chdir(old) def make_executable(*paths: Path) -> None: for path in paths: st = path.stat() os.chmod(path, st.st_mode | stat.S_IEXEC) @contextlib.contextmanager def flock(path: Path) -> Iterator[int]: fd = os.open(path, os.O_CLOEXEC|os.O_RDONLY) try: fcntl.fcntl(fd, fcntl.FD_CLOEXEC) fcntl.flock(fd, fcntl.LOCK_EX) yield fd finally: os.close(fd) @contextlib.contextmanager def scopedenv(env: Mapping[str, Any]) -> Iterator[None]: old = copy.deepcopy(os.environ) os.environ |= env # python caches the default temporary directory so when we might modify TMPDIR we have to make sure it # gets recalculated (see https://docs.python.org/3/library/tempfile.html#tempfile.tempdir). 
    tempfile.tempdir = None

    try:
        yield
    finally:
        os.environ = old
        tempfile.tempdir = None


class StrEnum(enum.Enum):
    def __str__(self) -> str:
        assert isinstance(self.value, str)
        return self.value

    # Used by enum.auto() to get the next value.
    @staticmethod
    def _generate_next_value_(name: str, start: int, count: int, last_values: Sequence[str]) -> str:
        return name.replace("_", "-")

    @classmethod
    def values(cls) -> list[str]:
        return list(map(str, cls))


def one_zero(b: bool) -> str:
    return "1" if b else "0"


@contextlib.contextmanager
def umask(mask: int) -> Iterator[None]:
    old = os.umask(mask)
    try:
        yield
    finally:
        os.umask(old)


def is_power_of_2(x: int) -> bool:
    return x > 0 and (x & x - 1 == 0)


@contextlib.contextmanager
def resource_path(mod: ModuleType, path: str) -> Iterator[Path]:
    # We backport as_file() from python 3.12 here temporarily since it added directory support.
    # TODO: Remove once minimum python version is 3.12.

    # SPDX-License-Identifier: PSF-2.0
    # Copied from https://github.com/python/cpython/blob/main/Lib/importlib/resources/_common.py

    @no_type_check
    @contextlib.contextmanager
    def _tempfile(
        reader,
        suffix='',
        # gh-93353: Keep a reference to call os.remove() in late Python
        # finalization.
        *,
        _os_remove=os.remove,
    ):
        # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
        # blocks due to the need to close the temporary file to work on Windows
        # properly.
        fd, raw_path = tempfile.mkstemp(suffix=suffix)
        try:
            try:
                os.write(fd, reader())
            finally:
                os.close(fd)

            del reader
            yield Path(raw_path)
        finally:
            try:
                _os_remove(raw_path)
            except FileNotFoundError:
                pass

    @no_type_check
    def _temp_file(path):
        return _tempfile(path.read_bytes, suffix=path.name)

    @no_type_check
    def _is_present_dir(path) -> bool:
        """
        Some Traversables implement ``is_dir()`` to raise an exception (i.e.
        ``FileNotFoundError``) when the directory doesn't exist. This function
        wraps that call to always return a boolean and only return True if
        there's a dir and it exists.
        """
        with contextlib.suppress(FileNotFoundError):
            return path.is_dir()
        return False

    @no_type_check
    @functools.singledispatch
    def as_file(path):
        """
        Given a Traversable object, return that object as a
        path on the local file system in a context manager.
        """
        return _temp_dir(path) if _is_present_dir(path) else _temp_file(path)

    @no_type_check
    @contextlib.contextmanager
    def _temp_path(dir: tempfile.TemporaryDirectory):
        """
        Wrap tempfile.TemporaryDirectory to return a pathlib object.
        """
        with dir as result:
            yield Path(result)

    @no_type_check
    @contextlib.contextmanager
    def _temp_dir(path):
        """
        Given a traversable dir, recursively replicate the whole tree
        to the file system in a context manager.
""" assert path.is_dir() with _temp_path(tempfile.TemporaryDirectory()) as temp_dir: yield _write_contents(temp_dir, path) @no_type_check def _write_contents(target, source): child = target.joinpath(source.name) if source.is_dir(): child.mkdir() for item in source.iterdir(): _write_contents(child, item) else: child.write_bytes(source.read_bytes()) return child t = importlib.resources.files(mod) with as_file(t.joinpath(path)) as p: yield p def round_up(x: int, blocksize: int = 4096) -> int: return (x + blocksize - 1) // blocksize * blocksize mkosi-20.2/mkosi/versioncomp.py000066400000000000000000000154511455345632200166360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import functools import itertools import string @functools.total_ordering class GenericVersion: # These constants follow the convention of the return value of rpmdev-vercmp that are followe # by systemd-analyze compare-versions when called with only two arguments (without a comparison # operator), recreated in the compare_versions method. _EQUAL = 0 _RIGHT_SMALLER = 1 _LEFT_SMALLER = -1 def __init__(self, version: str): self._version = version @classmethod def compare_versions(cls, v1: str, v2: str) -> int: """Implements comparison according to UAPI Group Version Format Specification""" def rstrip_invalid_version_chars(s: str) -> str: valid_version_chars = {*string.ascii_letters, *string.digits, "~", "-", "^", "."} for i, c in enumerate(s): if c in valid_version_chars: return s[i:] return "" def digit_prefix(s: str) -> str: return "".join(itertools.takewhile(lambda c: c in string.digits, s)) def letter_prefix(s: str) -> str: return "".join(itertools.takewhile(lambda c: c in string.ascii_letters, s)) while True: # Any characters which are outside of the set of listed above (a-z, A-Z, 0-9, -, ., ~, # ^) are skipped in both strings. In particular, this means that non-ASCII characters # that are Unicode digits or letters are skipped too. v1 = rstrip_invalid_version_chars(v1) v2 = rstrip_invalid_version_chars(v2) # If the remaining part of one of strings starts with "~": if other remaining part does # not start with ~, the string with ~ compares lower. Otherwise, both tilde characters # are skipped. if v1.startswith("~") and v2.startswith("~"): v1 = v1.removeprefix("~") v2 = v2.removeprefix("~") elif v1.startswith("~"): return cls._LEFT_SMALLER elif v2.startswith("~"): return cls._RIGHT_SMALLER # If one of the strings has ended: if the other string hasn’t, the string that has # remaining characters compares higher. Otherwise, the strings compare equal. if not v1 and not v2: return cls._EQUAL elif not v1 and v2: return cls._LEFT_SMALLER elif v1 and not v2: return cls._RIGHT_SMALLER # If the remaining part of one of strings starts with "-": if the other remaining part # does not start with -, the string with - compares lower. Otherwise, both minus # characters are skipped. if v1.startswith("-") and v2.startswith("-"): v1 = v1.removeprefix("-") v2 = v2.removeprefix("-") elif v1.startswith("-"): return cls._LEFT_SMALLER elif v2.startswith("-"): return cls._RIGHT_SMALLER # If the remaining part of one of strings starts with "^": if the other remaining part # does not start with ^, the string with ^ compares higher. Otherwise, both caret # characters are skipped. if v1.startswith("^") and v2.startswith("^"): v1 = v1.removeprefix("^") v2 = v2.removeprefix("^") elif v1.startswith("^"): # TODO: bug? 
                return cls._LEFT_SMALLER
            elif v2.startswith("^"):
                return cls._RIGHT_SMALLER

            # If the remaining part of one of strings starts with ".": if the other remaining part
            # does not start with ., the string with . compares lower. Otherwise, both dot
            # characters are skipped.
            if v1.startswith(".") and v2.startswith("."):
                v1 = v1.removeprefix(".")
                v2 = v2.removeprefix(".")
            elif v1.startswith("."):
                return cls._LEFT_SMALLER
            elif v2.startswith("."):
                return cls._RIGHT_SMALLER

            # If either of the remaining parts starts with a digit: numerical prefixes are compared
            # numerically. Any leading zeroes are skipped. The numerical prefixes (until the first
            # non-digit character) are evaluated as numbers. If one of the prefixes is empty, it
            # evaluates as 0. If the numbers are different, the string with the bigger number
            # compares higher. Otherwise, the comparison continues at the following characters at
            # point 1.
            v1_digit_prefix = digit_prefix(v1)
            v2_digit_prefix = digit_prefix(v2)

            if v1_digit_prefix or v2_digit_prefix:
                v1_digits = int(v1_digit_prefix) if v1_digit_prefix else 0
                v2_digits = int(v2_digit_prefix) if v2_digit_prefix else 0
                if v1_digits < v2_digits:
                    return cls._LEFT_SMALLER
                elif v1_digits > v2_digits:
                    return cls._RIGHT_SMALLER

                v1 = v1.removeprefix(v1_digit_prefix)
                v2 = v2.removeprefix(v2_digit_prefix)
                continue

            # Leading alphabetical prefixes are compared alphabetically. The substrings are
            # compared letter-by-letter. If both letters are the same, the comparison continues
            # with the next letter. Capital letters compare lower than lower-case letters (A <
            # a). When the end of one substring has been reached (a non-letter character or the end
            # of the whole string), if the other substring has remaining letters, it compares
            # higher. Otherwise, the comparison continues at the following characters at point 1.
v1_letter_prefix = letter_prefix(v1) v2_letter_prefix = letter_prefix(v2) if v1_letter_prefix < v2_letter_prefix: return cls._LEFT_SMALLER elif v1_letter_prefix > v2_letter_prefix: return cls._RIGHT_SMALLER v1 = v1.removeprefix(v1_letter_prefix) v2 = v2.removeprefix(v2_letter_prefix) def __eq__(self, other: object) -> bool: if isinstance(other, (str, int)): other = GenericVersion(str(other)) elif not isinstance(other, GenericVersion): return False return self.compare_versions(self._version, other._version) == self._EQUAL def __lt__(self, other: object) -> bool: if isinstance(other, (str, int)): other = GenericVersion(str(other)) elif not isinstance(other, GenericVersion): return False return self.compare_versions(self._version, other._version) == self._LEFT_SMALLER def __str__(self) -> str: return self._version mkosi-20.2/pyproject.toml000066400000000000000000000033471455345632200155130ustar00rootroot00000000000000[build-system] requires = ["setuptools", "setuptools-scm"] build-backend = "setuptools.build_meta" [project] name = "mkosi" authors = [ {name = "mkosi contributors", email = "systemd-devel@lists.freedesktop.org"}, ] version = "20.2" description = "Build Bespoke OS Images" readme = "README.md" requires-python = ">=3.9" license = {file = "LICENSE"} [project.optional-dependencies] bootable = [ "pefile >= 2021.9.3", ] [project.scripts] mkosi = "mkosi.__main__:main" [tool.setuptools] packages = [ "mkosi", "mkosi.distributions", "mkosi.installer", "mkosi.resources", ] [tool.setuptools.package-data] "mkosi.resources" = ["repart/**/*", "mkosi.md", "mkosi.1", "mkosi-initrd/**/*", "mkosi-tools/**/*"] [tool.isort] profile = "black" include_trailing_comma = true multi_line_output = 3 py_version = "39" [tool.pyright] pythonVersion = "3.9" [tool.mypy] python_version = 3.9 # belonging to --strict warn_unused_configs = true disallow_any_generics = true disallow_subclassing_any = true disallow_untyped_calls = true disallow_untyped_defs = true disallow_untyped_decorators = true disallow_incomplete_defs = true check_untyped_defs = true no_implicit_optional = true warn_redundant_casts = true warn_unused_ignores = false warn_return_any = true no_implicit_reexport = true # extra options not in --strict pretty = true show_error_codes = true show_column_numbers = true warn_unreachable = true allow_redefinition = true strict_equality = true [[tool.mypy.overrides]] module = ["argcomplete"] ignore_missing_imports = true [tool.ruff] target-version = "py39" line-length = 119 select = ["E", "F", "I", "UP"] [tool.pytest.ini_options] markers = [ "integration: mark a test as an integration test." 
] addopts = "-m \"not integration\"" mkosi-20.2/tests/000077500000000000000000000000001455345632200137325ustar00rootroot00000000000000mkosi-20.2/tests/.gitignore000066400000000000000000000000071455345632200157170ustar00rootroot00000000000000/*.pyc mkosi-20.2/tests/__init__.py000066400000000000000000000120151455345632200160420ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import os import subprocess import sys import tempfile from collections.abc import Iterator, Sequence from types import TracebackType from typing import Any, NamedTuple, Optional import pytest from mkosi.distributions import Distribution from mkosi.run import run from mkosi.types import _FILE, CompletedProcess, PathString from mkosi.util import INVOKING_USER class Image: class Config(NamedTuple): distribution: Distribution release: str tools_tree_distribution: Optional[Distribution] def __init__(self, config: Config, options: Sequence[PathString] = []) -> None: self.options = options self.config = config def __enter__(self) -> "Image": self.output_dir = tempfile.TemporaryDirectory(dir="/var/tmp") os.chown(self.output_dir.name, INVOKING_USER.uid, INVOKING_USER.gid) return self def __exit__( self, type: Optional[type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: self.mkosi("clean", user=INVOKING_USER.uid, group=INVOKING_USER.gid) def mkosi( self, verb: str, options: Sequence[PathString] = (), args: Sequence[str] = (), stdin: _FILE = None, user: Optional[int] = None, group: Optional[int] = None, check: bool = True, ) -> CompletedProcess: kcl = [ "console=ttyS0", "systemd.crash_shell", "systemd.log_level=debug", "udev.log_level=info", "systemd.log_ratelimit_kmsg=0", "systemd.journald.forward_to_console", "systemd.journald.max_level_console=warning", "printk.devkmsg=on", "systemd.early_core_pattern=/core", ] return run([ "python3", "-m", "mkosi", "--distribution", str(self.config.distribution), "--release", self.config.release, *(["--tools-tree=default"] if self.config.tools_tree_distribution else []), *( ["--tools-tree-distribution", str(self.config.tools_tree_distribution)] if self.config.tools_tree_distribution else [] ), *self.options, *options, "--output-dir", self.output_dir.name, "--cache-dir", "mkosi.cache", *(f"--kernel-command-line={i}" for i in kcl), "--qemu-vsock=yes", "--qemu-mem=4G", verb, *args, ], check=check, stdin=stdin, stdout=sys.stdout, user=user, group=group) def build(self, options: Sequence[str] = (), args: Sequence[str] = ()) -> CompletedProcess: return self.mkosi( "build", [*options, "--debug", "--force"], args, stdin=sys.stdin if sys.stdin.isatty() else None, user=INVOKING_USER.uid, group=INVOKING_USER.gid, ) def boot(self, options: Sequence[str] = (), args: Sequence[str] = ()) -> CompletedProcess: result = self.mkosi( "boot", [*options, "--debug"], args, stdin=sys.stdin if sys.stdin.isatty() else None, check=False, ) if result.returncode != 123: raise subprocess.CalledProcessError(result.returncode, result.args, result.stdout, result.stderr) return result def qemu(self, options: Sequence[str] = (), args: Sequence[str] = ()) -> CompletedProcess: result = self.mkosi( "qemu", [*options, "--debug"], args, stdin=sys.stdin if sys.stdin.isatty() else None, user=INVOKING_USER.uid, group=INVOKING_USER.gid, check=False, ) if self.config.distribution == Distribution.ubuntu or self.config.distribution.is_centos_variant(): rc = 0 else: rc = 123 if result.returncode != rc: raise subprocess.CalledProcessError(result.returncode, result.args, 
result.stdout, result.stderr) return result def summary(self, options: Sequence[str] = ()) -> CompletedProcess: return self.mkosi("summary", options, user=INVOKING_USER.uid, group=INVOKING_USER.gid) def genkey(self) -> CompletedProcess: return self.mkosi("genkey", ["--force"], user=INVOKING_USER.uid, group=INVOKING_USER.gid) @pytest.fixture(scope="session", autouse=True) def suspend_capture_stdin(pytestconfig: Any) -> Iterator[None]: """ When --capture=no (or -s) is specified, pytest will still intercept stdin. Let's explicitly make it not capture stdin when --capture=no is specified so we can debug image boot failures by logging into the emergency shell. """ capmanager: Any = pytestconfig.pluginmanager.getplugin("capturemanager") if pytestconfig.getoption("capture") == "no": capmanager.suspend_global_capture(in_=True) yield if pytestconfig.getoption("capture") == "no": capmanager.resume_global_capture() mkosi-20.2/tests/conftest.py000066400000000000000000000027141455345632200161350ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ from typing import Any, cast import pytest from mkosi.config import parse_config from mkosi.distributions import Distribution, detect_distribution from . import Image def pytest_addoption(parser: Any) -> None: parser.addoption( "-D", "--distribution", metavar="DISTRIBUTION", help="Run the integration tests for the given distribution.", default=detect_distribution()[0], type=Distribution, choices=[Distribution(d) for d in Distribution.values()], ) parser.addoption( "-R", "--release", metavar="RELEASE", help="Run the integration tests for the given release.", ) parser.addoption( "-T", "--tools-tree-distribution", metavar="DISTRIBUTION", help="Use the given tools tree distribution to build the integration test images", type=Distribution, choices=[Distribution(d) for d in Distribution.values()], ) @pytest.fixture(scope="session") def config(request: Any) -> Image.Config: distribution = cast(Distribution, request.config.getoption("--distribution")) release = cast(str, request.config.getoption("--release") or parse_config(["-d", str(distribution)])[1][0].release) return Image.Config( distribution=distribution, release=release, tools_tree_distribution=cast(Distribution, request.config.getoption("--tools-tree-distribution")), ) mkosi-20.2/tests/test_boot.py000066400000000000000000000043311455345632200163070ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import os import pytest from mkosi.config import OutputFormat from mkosi.distributions import Distribution from mkosi.qemu import find_virtiofsd from . import Image pytestmark = pytest.mark.integration @pytest.mark.parametrize("format", OutputFormat) def test_boot(config: Image.Config, format: OutputFormat) -> None: with Image( config, options=[ "--kernel-command-line=systemd.unit=mkosi-check-and-shutdown.service", "--incremental", "--ephemeral", ], ) as image: if image.config.distribution == Distribution.rhel_ubi and format in (OutputFormat.esp, OutputFormat.uki): pytest.skip("Cannot build RHEL-UBI images with format 'esp' or 'uki'") options = ["--format", str(format)] image.summary(options) image.genkey() image.build(options=options) if format in (OutputFormat.disk, OutputFormat.directory) and os.getuid() == 0: # systemd-resolved is enabled by default in Arch/Debian/Ubuntu (systemd default preset) but fails # to start in a systemd-nspawn container with --private-users so we mask it out here to avoid CI # failures. 
# FIXME: Remove when Arch/Debian/Ubuntu ship systemd v253 args = ["systemd.mask=systemd-resolved.service"] if format == OutputFormat.directory else [] image.boot(options=options, args=args) if ( image.config.distribution == Distribution.ubuntu and format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp) ): # https://bugs.launchpad.net/ubuntu/+source/linux-kvm/+bug/2045561 pytest.skip("Cannot boot Ubuntu UKI/cpio images in qemu until we switch back to linux-kvm") if image.config.distribution == Distribution.rhel_ubi: return if format in (OutputFormat.tar, OutputFormat.none) or format.is_extension_image(): return if format == OutputFormat.directory and not find_virtiofsd(): return image.qemu(options=options) if format != OutputFormat.disk: return image.qemu(options=options + ["--qemu-firmware=bios"]) mkosi-20.2/tests/test_config.py000066400000000000000000000543531455345632200166220ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import argparse import itertools import logging import operator import os from pathlib import Path from typing import Optional import pytest from mkosi.config import ( Architecture, Compression, Config, ConfigFeature, ConfigTree, OutputFormat, Verb, config_parse_bytes, parse_config, parse_ini, ) from mkosi.distributions import Distribution from mkosi.util import chdir def test_compression_enum_creation() -> None: assert Compression["none"] == Compression.none assert Compression["zstd"] == Compression.zstd assert Compression["zst"] == Compression.zstd assert Compression["xz"] == Compression.xz assert Compression["bz2"] == Compression.bz2 assert Compression["gz"] == Compression.gz assert Compression["lz4"] == Compression.lz4 assert Compression["lzma"] == Compression.lzma def test_compression_enum_bool() -> None: assert not bool(Compression.none) assert bool(Compression.zstd) assert bool(Compression.xz) assert bool(Compression.bz2) assert bool(Compression.gz) assert bool(Compression.lz4) assert bool(Compression.lzma) def test_compression_enum_str() -> None: assert str(Compression.none) == "none" assert str(Compression.zstd) == "zstd" assert str(Compression.zst) == "zstd" assert str(Compression.xz) == "xz" assert str(Compression.bz2) == "bz2" assert str(Compression.gz) == "gz" assert str(Compression.lz4) == "lz4" assert str(Compression.lzma) == "lzma" def test_parse_ini(tmp_path: Path) -> None: p = tmp_path / "ini" p.write_text( """\ [MySection] Value=abc Other=def ALLCAPS=txt # Comment ; Another comment [EmptySection] [AnotherSection] EmptyValue= Multiline=abc def qed ord """ ) g = parse_ini(p) assert next(g) == ("MySection", "Value", "abc") assert next(g) == ("MySection", "Other", "def") assert next(g) == ("MySection", "ALLCAPS", "txt") assert next(g) == ("", "", "") assert next(g) == ("", "", "") assert next(g) == ("AnotherSection", "EmptyValue", "") assert next(g) == ("AnotherSection", "Multiline", "abc\ndef\nqed\nord") def test_parse_config(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Distribution] @Distribution = ubuntu Architecture = arm64 [Content] Packages=abc [Output] @Format = cpio ImageId = base """ ) with chdir(d): _, [config] = parse_config() assert config.distribution == Distribution.ubuntu assert config.architecture == Architecture.arm64 assert config.packages == ["abc"] assert config.output_format == OutputFormat.cpio assert config.image_id == "base" with chdir(d): _, [config] = parse_config(["--distribution", "fedora"]) # mkosi.conf sets a default distribution, so the CLI should take priority. 
assert config.distribution == Distribution.fedora # Any architecture set on the CLI is overridden by the config file, and we should complain loudly about that. with chdir(d), pytest.raises(SystemExit): _, [config] = parse_config(["--architecture", "x86-64"]) (d / "mkosi.conf.d").mkdir() (d / "mkosi.conf.d/d1.conf").write_text( """\ [Distribution] Distribution = debian @Architecture = x86-64 [Content] Packages = qed def [Output] ImageId = 00-dropin ImageVersion = 0 """ ) with chdir(d): _, [config] = parse_config() # Setting a value explicitly in a dropin should override the default from mkosi.conf. assert config.distribution == Distribution.debian # Setting a default in a dropin should be ignored since mkosi.conf sets the architecture explicitly. assert config.architecture == Architecture.arm64 # Lists should be merged by appending the new values to the existing values. assert config.packages == ["abc", "qed", "def"] assert config.output_format == OutputFormat.cpio assert config.image_id == "00-dropin" assert config.image_version == "0" (d / "mkosi.version").write_text("1.2.3") (d / "mkosi.conf.d/d2.conf").write_text( """\ [Content] Packages= [Output] ImageId= """ ) with chdir(d): _, [config] = parse_config() # Test that empty assignment resets settings. assert config.packages == [] assert config.image_id is None # mkosi.version should only be used if no version is set explicitly. assert config.image_version == "0" (d / "mkosi.conf.d/d1.conf").unlink() with chdir(d): _, [config] = parse_config() # ImageVersion= is not set explicitly anymore, so now the version from mkosi.version should be used. assert config.image_version == "1.2.3" (d / "abc").mkdir() (d / "abc/mkosi.conf").write_text( """\ [Content] Bootable=yes BuildPackages=abc """ ) (d / "abc/mkosi.conf.d").mkdir() (d / "abc/mkosi.conf.d/abc.conf").write_text( """\ [Output] SplitArtifacts=yes """ ) with chdir(d): _, [config] = parse_config() assert config.bootable == ConfigFeature.auto assert config.split_artifacts is False # Passing the directory should include both the main config file and the dropin. _, [config] = parse_config(["--include", os.fspath(d / "abc")] * 2) assert config.bootable == ConfigFeature.enabled assert config.split_artifacts is True # The same extra config should not be parsed more than once. assert config.build_packages == ["abc"] # Passing the main config file should not include the dropin. 
_, [config] = parse_config(["--include", os.fspath(d / "abc/mkosi.conf")]) assert config.bootable == ConfigFeature.enabled assert config.split_artifacts is False def test_profiles(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.profiles").mkdir() (d / "mkosi.profiles/profile.conf").write_text( """\ [Distribution] Distribution=fedora [Host] QemuKvm=yes """ ) (d / "mkosi.conf").write_text( """\ [Config] Profile=profile """ ) (d / "mkosi.conf.d").mkdir() (d / "mkosi.conf.d/abc.conf").write_text( """\ [Distribution] Distribution=debian """ ) with chdir(d): _, [config] = parse_config() assert config.profile == "profile" # mkosi.conf.d/ should override the profile assert config.distribution == Distribution.debian assert config.qemu_kvm == ConfigFeature.enabled def test_override_default(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Host] @ToolsTree=default """ ) with chdir(d): _, [config] = parse_config(["--tools-tree", ""]) assert config.tools_tree is None def test_local_config(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.local.conf").write_text( """\ [Distribution] Distribution=debian """ ) with chdir(d): _, [config] = parse_config() assert config.distribution == Distribution.debian (d / "mkosi.conf").write_text( """\ [Distribution] Distribution=fedora """ ) with chdir(d): _, [config] = parse_config() assert config.distribution == Distribution.fedora def test_parse_load_verb(tmp_path: Path) -> None: with chdir(tmp_path): assert parse_config(["build"])[0].verb == Verb.build assert parse_config(["clean"])[0].verb == Verb.clean with pytest.raises(SystemExit): parse_config(["help"]) assert parse_config(["genkey"])[0].verb == Verb.genkey assert parse_config(["bump"])[0].verb == Verb.bump assert parse_config(["serve"])[0].verb == Verb.serve assert parse_config(["build"])[0].verb == Verb.build assert parse_config(["shell"])[0].verb == Verb.shell assert parse_config(["boot"])[0].verb == Verb.boot assert parse_config(["qemu"])[0].verb == Verb.qemu assert parse_config(["journalctl"])[0].verb == Verb.journalctl assert parse_config(["coredumpctl"])[0].verb == Verb.coredumpctl with pytest.raises(SystemExit): parse_config(["invalid"]) def test_os_distribution(tmp_path: Path) -> None: with chdir(tmp_path): for dist in Distribution: _, [config] = parse_config(["-d", dist.value]) assert config.distribution == dist with pytest.raises(tuple((argparse.ArgumentError, SystemExit))): parse_config(["-d", "invalidDistro"]) with pytest.raises(tuple((argparse.ArgumentError, SystemExit))): parse_config(["-d"]) for dist in Distribution: Path("mkosi.conf").write_text(f"[Distribution]\nDistribution={dist}") _, [config] = parse_config() assert config.distribution == dist def test_parse_config_files_filter(tmp_path: Path) -> None: with chdir(tmp_path): confd = Path("mkosi.conf.d") confd.mkdir() (confd / "10-file.conf").write_text("[Content]\nPackages=yes") (confd / "20-file.noconf").write_text("[Content]\nPackages=nope") _, [config] = parse_config() assert config.packages == ["yes"] def test_compression(tmp_path: Path) -> None: with chdir(tmp_path): _, [config] = parse_config(["--format", "disk", "--compress-output", "False"]) assert config.compress_output == Compression.none def test_match_multiple(tmp_path: Path) -> None: with chdir(tmp_path): Path("mkosi.conf").write_text( """\ [Match] Format=|disk Format=|directory [Match] Architecture=|x86-64 Architecture=|arm64 [Output] ImageId=abcde """ ) # Both sections are not matched, so image ID should not be "abcde". 
_, [config] = parse_config(["--format", "tar", "--architecture", "s390x"]) assert config.image_id != "abcde" # Only a single section is matched, so image ID should not be "abcde". _, [config] = parse_config(["--format", "disk", "--architecture", "s390x"]) assert config.image_id != "abcde" # Both sections are matched, so image ID should be "abcde". _, [config] = parse_config(["--format", "disk", "--architecture", "x86-64"]) assert config.image_id == "abcde" @pytest.mark.parametrize("dist1,dist2", itertools.combinations_with_replacement(Distribution, 2)) def test_match_distribution(tmp_path: Path, dist1: Distribution, dist2: Distribution) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( f"""\ [Distribution] Distribution={dist1} """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] Distribution={dist1} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] Distribution={dist2} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] Distribution=|{dist1} Distribution=|{dist2} [Content] Packages=testpkg3 """ ) _, [conf] = parse_config() assert "testpkg1" in conf.packages if dist1 == dist2: assert "testpkg2" in conf.packages else: assert "testpkg2" not in conf.packages assert "testpkg3" in conf.packages @pytest.mark.parametrize( "release1,release2", itertools.combinations_with_replacement([36, 37, 38], 2) ) def test_match_release(tmp_path: Path, release1: int, release2: int) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( f"""\ [Distribution] Distribution=fedora Release={release1} """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] Release={release1} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] Release={release2} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] Release=|{release1} Release=|{release2} [Content] Packages=testpkg3 """ ) _, [conf] = parse_config() assert "testpkg1" in conf.packages if release1 == release2: assert "testpkg2" in conf.packages else: assert "testpkg2" not in conf.packages assert "testpkg3" in conf.packages def test_match_build_sources(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Match] BuildSources=kernel BuildSources=/kernel [Output] Output=abc """ ) with chdir(d): _, [config] = parse_config(["--build-sources", ".:kernel"]) assert config.output == "abc" @pytest.mark.parametrize( "image1,image2", itertools.combinations_with_replacement( ["image_a", "image_b", "image_c"], 2 ) ) def test_match_imageid(tmp_path: Path, image1: str, image2: str) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( f"""\ [Distribution] Distribution=fedora [Output] ImageId={image1} """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] ImageId={image1} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] ImageId={image2} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] ImageId=|{image1} ImageId=|{image2} [Content] Packages=testpkg3 """ ) child4 = Path("mkosi.conf.d/child4.conf") child4.write_text( """\ [Match] ImageId=image* [Content] Packages=testpkg4 """ ) _, [conf] = 
parse_config() assert "testpkg1" in conf.packages if image1 == image2: assert "testpkg2" in conf.packages else: assert "testpkg2" not in conf.packages assert "testpkg3" in conf.packages assert "testpkg4" in conf.packages @pytest.mark.parametrize( "op,version", itertools.product( ["", "==", "<", ">", "<=", ">="], [122, 123, 124], ) ) def test_match_imageversion(tmp_path: Path, op: str, version: str) -> None: opfunc = { "==": operator.eq, "!=": operator.ne, "<": operator.lt, "<=": operator.le, ">": operator.gt, ">=": operator.ge, }.get(op, operator.eq,) with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( """\ [Output] ImageId=testimage ImageVersion=123 """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] ImageVersion={op}{version} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] ImageVersion=<200 ImageVersion={op}{version} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] ImageVersion=>9000 ImageVersion={op}{version} [Content] Packages=testpkg3 """ ) _, [conf] = parse_config() assert ("testpkg1" in conf.packages) == opfunc(123, version) assert ("testpkg2" in conf.packages) == opfunc(123, version) assert "testpkg3" not in conf.packages @pytest.mark.parametrize( "skel,pkgmngr", itertools.product( [None, Path("/foo"), Path("/bar")], [None, Path("/foo"), Path("/bar")], ) ) def test_package_manager_tree(tmp_path: Path, skel: Optional[Path], pkgmngr: Optional[Path]) -> None: with chdir(tmp_path): config = Path("mkosi.conf") with config.open("w") as f: f.write("[Content]\n") if skel is not None: f.write(f"SkeletonTrees={skel}\n") if pkgmngr is not None: f.write(f"PackageManagerTrees={pkgmngr}\n") _, [conf] = parse_config() skel_expected = [ConfigTree(skel, None)] if skel is not None else [] pkgmngr_expected = [ConfigTree(pkgmngr, None)] if pkgmngr is not None else skel_expected assert conf.skeleton_trees == skel_expected assert conf.package_manager_trees == pkgmngr_expected @pytest.mark.parametrize( "sections,args,warning_count", [ (["Output"], [], 0), (["Content"], [], 1), (["Content", "Output"], [], 1), (["Output", "Content"], [], 1), (["Output", "Content", "Distribution"], [], 2), (["Content"], ["--image-id=testimage"], 1), ], ) def test_wrong_section_warning( tmp_path: Path, caplog: pytest.LogCaptureFixture, sections: list[str], args: list[str], warning_count: int, ) -> None: with chdir(tmp_path): # Create a config with ImageId in the wrong section, # and sometimes in the correct section Path("mkosi.conf").write_text( "\n".join( f"""\ [{section}] ImageId=testimage """ for section in sections ) ) with caplog.at_level(logging.WARNING): # Parse the config, with --image-id sometimes given on the command line parse_config(args) assert len(caplog.records) == warning_count def test_config_parse_bytes() -> None: assert config_parse_bytes(None) is None assert config_parse_bytes("1") == 4096 assert config_parse_bytes("8000") == 8192 assert config_parse_bytes("8K") == 8192 assert config_parse_bytes("4097") == 8192 assert config_parse_bytes("1M") == 1024**2 assert config_parse_bytes("1.9M") == 1994752 assert config_parse_bytes("1G") == 1024**3 assert config_parse_bytes("7.3G") == 7838318592 with pytest.raises(SystemExit): config_parse_bytes("-1") with pytest.raises(SystemExit): config_parse_bytes("-2K") with pytest.raises(SystemExit): config_parse_bytes("-3M") with pytest.raises(SystemExit): config_parse_bytes("-4G") 
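
# A minimal sketch (not from the original suite) making the rule exercised above explicit:
# config_parse_bytes() rounds byte counts up to the next 4096-byte boundary, which is the
# same arithmetic as round_up() from mkosi.util with its default block size. The test name
# is hypothetical and only illustrates the relationship.
def test_config_parse_bytes_rounding_sketch() -> None:
    from mkosi.util import round_up

    for raw in (1, 4095, 4096, 4097, 8000, 12288):
        # round_up(x) == (x + 4095) // 4096 * 4096, matching the assertions above.
        assert config_parse_bytes(str(raw)) == round_up(raw)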
def test_specifiers(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Distribution] Distribution=ubuntu Release=lunar Architecture=arm64 [Output] ImageId=my-image-id ImageVersion=1.2.3 OutputDirectory=abcde Output=test [Content] Environment=Distribution=%d Release=%r Architecture=%a ImageId=%i ImageVersion=%v OutputDirectory=%O Output=%o """ ) with chdir(d): _, [config] = parse_config() expected = { "Distribution": "ubuntu", "Release": "lunar", "Architecture": "arm64", "ImageId": "my-image-id", "ImageVersion": "1.2.3", "OutputDirectory": str(Path.cwd() / "abcde"), "Output": "test", } assert {k: v for k, v in config.environment.items() if k in expected} == expected def test_output_id_version(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """ [Output] ImageId=output ImageVersion=1.2.3 """ ) with chdir(d): _, [config] = parse_config() assert config.output == "output_1.2.3" def test_deterministic() -> None: assert Config.default() == Config.default() def test_environment(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Content] Environment=TestValue2=300 TestValue3=400 EnvironmentFiles=other.env """ ) (d / "mkosi.env").write_text( """\ TestValue1=90 TestValue4=99 """ ) (d / "other.env").write_text( """\ TestValue1=100 TestValue2=200 """ ) with chdir(d): _, [config] = parse_config() expected = { "TestValue1": "100", # from other.env "TestValue2": "300", # from mkosi.conf "TestValue3": "400", # from mkosi.conf "TestValue4": "99", # from mkosi.env } # Only check values for keys from expected, as config.environment contains other items as well assert {k: config.environment[k] for k in expected.keys()} == expected assert config.environment_files == [Path.cwd() / "mkosi.env", Path.cwd() / "other.env"] mkosi-20.2/tests/test_initrd.py000066400000000000000000000226141455345632200166410ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import contextlib import os import subprocess import tempfile import textwrap import time from collections.abc import Iterator from pathlib import Path import pytest from mkosi.distributions import Distribution from mkosi.log import die from mkosi.mounts import mount from mkosi.run import find_binary, run from mkosi.tree import copy_tree from mkosi.types import PathString from mkosi.util import INVOKING_USER from mkosi.versioncomp import GenericVersion from . import Image pytestmark = pytest.mark.integration @pytest.fixture(scope="module") def passphrase() -> Iterator[Path]: # We can't use tmp_path fixture because pytest creates it in a nested directory we can't access using our # unprivileged user. # TODO: Use delete_on_close=False and close() instead of flush() when we require Python 3.12 or newer. 
with tempfile.NamedTemporaryFile(prefix="mkosi.passphrase", mode="w") as passphrase: passphrase.write("mkosi") passphrase.flush() os.fchown(passphrase.fileno(), INVOKING_USER.uid, INVOKING_USER.gid) os.fchmod(passphrase.fileno(), 0o600) yield Path(passphrase.name) @pytest.fixture(scope="module") def initrd(config: Image.Config) -> Iterator[Image]: with Image( config, options=[ "--directory", "", "--include=mkosi-initrd/", ], ) as initrd: if initrd.config.distribution == Distribution.rhel_ubi: pytest.skip("Cannot build RHEL-UBI initrds") initrd.build() yield initrd def test_initrd(initrd: Image) -> None: with Image( initrd.config, options=[ "--initrd", Path(initrd.output_dir.name) / "initrd", "--kernel-command-line=systemd.unit=mkosi-check-and-shutdown.service", "--incremental", "--ephemeral", "--format=disk", ] ) as image: image.build() image.qemu() def wait_for_device(device: PathString) -> None: if ( find_binary("udevadm") and GenericVersion(run(["udevadm", "--version"], stdout=subprocess.PIPE).stdout.strip()) >= 251 ): run(["udevadm", "wait", "--timeout=30", "/dev/vg_mkosi/lv0"]) return for i in range(30): if Path(device).exists(): return time.sleep(1) die(f"Device {device} did not appear within 30 seconds") @pytest.mark.skipif(os.getuid() != 0, reason="mkosi-initrd LVM test can only be executed as root") def test_initrd_lvm(initrd: Image) -> None: with Image( initrd.config, options=[ "--initrd", Path(initrd.output_dir.name) / "initrd", "--kernel-command-line=systemd.unit=mkosi-check-and-shutdown.service", "--kernel-command-line=root=LABEL=root", "--kernel-command-line=rw", "--incremental", "--ephemeral", "--qemu-firmware=linux", ] ) as image, contextlib.ExitStack() as stack: image.build(["--format", "directory"]) drive = Path(image.output_dir.name) / "image.raw" drive.touch() os.truncate(drive, 5000 * 1024**2) lodev = run(["losetup", "--show", "--find", "--partscan", drive], stdout=subprocess.PIPE).stdout.strip() stack.callback(lambda: run(["losetup", "--detach", lodev])) run(["sfdisk", "--label", "gpt", lodev], input="type=E6D6D379-F507-44C2-A23C-238F2A3DF928 bootable") run(["lvm", "pvcreate", f"{lodev}p1"]) run(["lvm", "pvs"]) run(["lvm", "vgcreate", "vg_mkosi", f"{lodev}p1"]) run(["lvm", "vgchange", "-ay", "vg_mkosi"]) run(["lvm", "vgs"]) stack.callback(lambda: run(["vgchange", "-an", "vg_mkosi"])) run(["lvm", "lvcreate", "-l", "100%FREE", "-n", "lv0", "vg_mkosi"]) run(["lvm", "lvs"]) wait_for_device("/dev/vg_mkosi/lv0") run([f"mkfs.{image.config.distribution.filesystem()}", "-L", "root", "/dev/vg_mkosi/lv0"]) with tempfile.TemporaryDirectory() as mnt, mount(Path("/dev/vg_mkosi/lv0"), Path(mnt)): # The image might have been built unprivileged so we need to fix the file ownership. Making all the # files owned by root isn't completely correct but good enough for the purposes of the test. copy_tree(Path(image.output_dir.name) / "image", Path(mnt), preserve=False) stack.close() image.qemu(["--format=disk"]) def test_initrd_luks(initrd: Image, passphrase: Path) -> None: with tempfile.TemporaryDirectory() as repartd: os.chown(repartd, INVOKING_USER.uid, INVOKING_USER.gid) (Path(repartd) / "00-esp.conf").write_text( textwrap.dedent( """\ [Partition] Type=esp Format=vfat CopyFiles=/boot:/ CopyFiles=/efi:/ SizeMinBytes=512M SizeMaxBytes=512M """ ) ) (Path(repartd) / "05-bios.conf").write_text( textwrap.dedent( """\ [Partition] # UUID of the grub BIOS boot partition which grubs needs on GPT to # embed itself into. 
Type=21686148-6449-6e6f-744e-656564454649 SizeMinBytes=1M SizeMaxBytes=1M """ ) ) (Path(repartd) / "10-root.conf").write_text( textwrap.dedent( f"""\ [Partition] Type=root Format={initrd.config.distribution.filesystem()} Minimize=guess Encrypt=key-file CopyFiles=/ """ ) ) with Image( initrd.config, options=[ "--initrd", Path(initrd.output_dir.name) / "initrd", "--repart-dir", repartd, "--passphrase", passphrase, "--kernel-command-line=systemd.unit=mkosi-check-and-shutdown.service", "--credential=cryptsetup.passphrase=mkosi", "--incremental", "--ephemeral", "--format=disk", ] ) as image: image.build() image.qemu() @pytest.mark.skipif(os.getuid() != 0, reason="mkosi-initrd LUKS+LVM test can only be executed as root") def test_initrd_luks_lvm(config: Image.Config, initrd: Image, passphrase: Path) -> None: with Image( config, options=[ "--initrd", Path(initrd.output_dir.name) / "initrd", "--kernel-command-line=systemd.unit=mkosi-check-and-shutdown.service", "--kernel-command-line=root=LABEL=root", "--kernel-command-line=rw", "--credential=cryptsetup.passphrase=mkosi", "--incremental", "--ephemeral", "--qemu-firmware=linux", ] ) as image, contextlib.ExitStack() as stack: image.build(["--format", "directory"]) drive = Path(image.output_dir.name) / "image.raw" drive.touch() os.truncate(drive, 5000 * 1024**2) lodev = run(["losetup", "--show", "--find", "--partscan", drive], stdout=subprocess.PIPE).stdout.strip() stack.callback(lambda: run(["losetup", "--detach", lodev])) run(["sfdisk", "--label", "gpt", lodev], input="type=E6D6D379-F507-44C2-A23C-238F2A3DF928 bootable") run( [ "cryptsetup", "--key-file", passphrase, "--use-random", "--pbkdf", "pbkdf2", "--pbkdf-force-iterations", "1000", "luksFormat", f"{lodev}p1", ] ) run(["cryptsetup", "--key-file", passphrase, "luksOpen", f"{lodev}p1", "lvm_root"]) stack.callback(lambda: run(["cryptsetup", "close", "lvm_root"])) luks_uuid = run(["cryptsetup", "luksUUID", f"{lodev}p1"], stdout=subprocess.PIPE).stdout.strip() run(["lvm", "pvcreate", "/dev/mapper/lvm_root"]) run(["lvm", "pvs"]) run(["lvm", "vgcreate", "vg_mkosi", "/dev/mapper/lvm_root"]) run(["lvm", "vgchange", "-ay", "vg_mkosi"]) run(["lvm", "vgs"]) stack.callback(lambda: run(["vgchange", "-an", "vg_mkosi"])) run(["lvm", "lvcreate", "-l", "100%FREE", "-n", "lv0", "vg_mkosi"]) run(["lvm", "lvs"]) wait_for_device("/dev/vg_mkosi/lv0") run([f"mkfs.{image.config.distribution.filesystem()}", "-L", "root", "/dev/vg_mkosi/lv0"]) with tempfile.TemporaryDirectory() as mnt, mount(Path("/dev/vg_mkosi/lv0"), Path(mnt)): # The image might have been built unprivileged so we need to fix the file ownership. Making all the # files owned by root isn't completely correct but good enough for the purposes of the test. copy_tree(Path(image.output_dir.name) / "image", Path(mnt), preserve=False) stack.close() image.qemu([ "--format=disk", f"--kernel-command-line=rd.luks.uuid={luks_uuid}", ]) def test_initrd_size(initrd: Image) -> None: # The fallback value is for CentOS and related distributions. 
maxsize = 1024**2 * { Distribution.fedora: 46, Distribution.debian: 40, Distribution.ubuntu: 36, Distribution.arch: 47, Distribution.opensuse: 39, }.get(initrd.config.distribution, 48) assert (Path(initrd.output_dir.name) / "initrd").stat().st_size <= maxsize mkosi-20.2/tests/test_json.py000066400000000000000000000316731455345632200163260ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import os import textwrap import uuid from pathlib import Path from typing import Optional import pytest from mkosi.config import ( Architecture, Args, BiosBootloader, Bootloader, Compression, Config, ConfigFeature, ConfigTree, DocFormat, ManifestFormat, OutputFormat, QemuDrive, QemuFirmware, QemuVsockCID, SecureBootSignTool, ShimBootloader, Verb, ) from mkosi.distributions import Distribution from mkosi.versioncomp import GenericVersion @pytest.mark.parametrize("path", [None, "/baz/qux"]) def test_args(path: Optional[Path]) -> None: dump = textwrap.dedent( f"""\ {{ "AutoBump": false, "Cmdline": [ "foo", "bar" ], "Debug": false, "DebugShell": false, "DebugWorkspace": false, "Directory": {f'"{os.fspath(path)}"' if path is not None else 'null'}, "DocFormat": "auto", "Force": 9001, "GenkeyCommonName": "test", "GenkeyValidDays": "100", "Json": false, "Pager": true, "Verb": "build" }} """ ) args = Args( auto_bump = False, cmdline = ["foo", "bar"], debug = False, debug_shell = False, debug_workspace = False, directory = Path(path) if path is not None else None, doc_format = DocFormat.auto, force = 9001, genkey_common_name = "test", genkey_valid_days = "100", json = False, pager = True, verb = Verb.build, ) assert args.to_json(indent=4, sort_keys=True) == dump.rstrip() assert Args.from_json(dump) == args def test_config() -> None: dump = textwrap.dedent( """\ { "Acl": true, "Architecture": "ia64", "Autologin": false, "BaseTrees": [ "/hello/world" ], "BiosBootloader": "none", "Bootable": "disabled", "Bootloader": "grub", "BuildDirectory": null, "BuildPackages": [ "pkg1", "pkg2" ], "BuildScripts": [ "/path/to/buildscript" ], "BuildSources": [ { "source": "/qux", "target": "/frob" } ], "BuildSourcesEphemeral": true, "CacheDirectory": "/is/this/the/cachedir", "CacheOnly": true, "Checksum": false, "CleanPackageMetadata": "auto", "CompressOutput": "bz2", "Credentials": { "credkey": "credval" }, "Dependencies": [ "dep1" ], "Distribution": "fedora", "Environment": {}, "EnvironmentFiles": [], "Ephemeral": true, "ExtraSearchPaths": [], "ExtraTrees": [], "FinalizeScripts": [], "Format": "uki", "Hostname": null, "Image": "default", "ImageId": "myimage", "ImageVersion": "5", "Images": [ "default", "initrd" ], "Include": [], "Incremental": false, "InitrdInclude": [ "/foo/bar" ], "InitrdPackages": [ "clevis" ], "Initrds": [ "/efi/initrd1", "/efi/initrd2" ], "KernelCommandLine": [], "KernelCommandLineExtra": [ "look", "im", "on", "the", "kernel", "command", "line" ], "KernelModulesExclude": [ "nvidia" ], "KernelModulesInclude": [ "loop" ], "KernelModulesIncludeHost": true, "KernelModulesInitrd": true, "KernelModulesInitrdExclude": [], "KernelModulesInitrdInclude": [], "KernelModulesInitrdIncludeHost": true, "Key": null, "Keymap": "wow, so much keymap", "LocalMirror": null, "Locale": "en_C.UTF-8", "LocaleMessages": "", "MakeInitrd": false, "ManifestFormat": [ "json", "changelog" ], "MinimumVersion": "123", "Mirror": null, "NSpawnSettings": null, "Output": "outfile", "OutputDirectory": "/your/output/here", "Overlay": true, "PackageDirectories": [], "PackageManagerTrees": [ { "source": "/foo/bar", "target": null } ], 
"Packages": [], "Passphrase": null, "PostInstallationScripts": [ "/bar/qux" ], "PrepareScripts": [ "/run/foo" ], "Profile": "profile", "QemuArgs": [], "QemuCdrom": false, "QemuDrives": [ { "directory": "/foo/bar", "id": "abc", "options": "abc,qed", "size": 200 }, { "directory": null, "id": "abc", "options": "", "size": 200 } ], "QemuFirmware": "linux", "QemuFirmwareVariables": "/foo/bar", "QemuGui": true, "QemuKernel": null, "QemuKvm": "auto", "QemuMem": "", "QemuSmp": "yes", "QemuSwtpm": "auto", "QemuVsock": "enabled", "QemuVsockConnectionId": -2, "Release": "53", "RemoveFiles": [], "RemovePackages": [ "all" ], "RepartDirectories": [], "RepartOffline": true, "Repositories": [], "RepositoryKeyCheck": false, "RootPassword": [ "test1234", false ], "RootShell": "/bin/tcsh", "RuntimeScratch": "enabled", "RuntimeSize": 8589934592, "RuntimeTrees": [ { "source": "/foo/bar", "target": "/baz" }, { "source": "/bar/baz", "target": "/qux" } ], "SELinuxRelabel": "disabled", "SectorSize": null, "SecureBoot": true, "SecureBootAutoEnroll": true, "SecureBootCertificate": null, "SecureBootKey": "/path/to/keyfile", "SecureBootSignTool": "pesign", "Seed": "7496d7d8-7f08-4a2b-96c6-ec8c43791b60", "ShimBootloader": "none", "Sign": false, "SignExpectedPcr": "disabled", "SkeletonTrees": [ { "source": "/foo/bar", "target": "/" }, { "source": "/bar/baz", "target": "/qux" } ], "SourceDateEpoch": 12345, "SplitArtifacts": true, "Ssh": false, "SshCertificate": "/path/to/cert", "SshKey": null, "Timezone": null, "ToolsTree": null, "ToolsTreeDistribution": null, "ToolsTreeMirror": null, "ToolsTreePackages": [], "ToolsTreeRelease": null, "UseSubvolumes": "auto", "VerityCertificate": "/path/to/cert", "VerityKey": null, "WithDocs": true, "WithNetwork": false, "WithRecommends": true, "WithTests": true, "WorkspaceDirectory": "/cwd" } """ ) args = Config( acl = True, architecture = Architecture.ia64, autologin = False, base_trees = [Path("/hello/world")], bios_bootloader = BiosBootloader.none, bootable = ConfigFeature.disabled, bootloader = Bootloader.grub, build_dir = None, build_packages = ["pkg1", "pkg2"], build_scripts = [Path("/path/to/buildscript")], build_sources = [ConfigTree(Path("/qux"), Path("/frob"))], build_sources_ephemeral = True, cache_dir = Path("/is/this/the/cachedir"), cache_only = True, checksum = False, clean_package_metadata = ConfigFeature.auto, compress_output = Compression.bz2, credentials = {"credkey": "credval"}, dependencies = ("dep1",), distribution = Distribution.fedora, environment = {}, environment_files = [], ephemeral = True, extra_search_paths = [], extra_trees = [], finalize_scripts = [], hostname = None, image = "default", image_id = "myimage", image_version = "5", images = ("default", "initrd"), include = [], incremental = False, initrd_include = [Path("/foo/bar"),], initrd_packages = ["clevis"], initrds = [Path("/efi/initrd1"), Path("/efi/initrd2")], kernel_command_line = [], kernel_command_line_extra = ["look", "im", "on", "the", "kernel", "command", "line"], kernel_modules_exclude = ["nvidia"], kernel_modules_include = ["loop"], kernel_modules_include_host = True, kernel_modules_initrd = True, kernel_modules_initrd_exclude = [], kernel_modules_initrd_include = [], kernel_modules_initrd_include_host = True, key = None, keymap = "wow, so much keymap", local_mirror = None, locale = "en_C.UTF-8", locale_messages = "", make_initrd = False, manifest_format = [ManifestFormat.json, ManifestFormat.changelog], minimum_version = GenericVersion("123"), mirror = None, nspawn_settings = None, output = 
"outfile", output_dir = Path("/your/output/here"), output_format = OutputFormat.uki, overlay = True, package_directories = [], package_manager_trees = [ConfigTree(Path("/foo/bar"), None)], packages = [], passphrase = None, postinst_scripts = [Path("/bar/qux")], prepare_scripts = [Path("/run/foo")], profile = "profile", qemu_args = [], qemu_cdrom = False, qemu_drives = [QemuDrive("abc", 200, Path("/foo/bar"), "abc,qed"), QemuDrive("abc", 200, None, "")], qemu_firmware = QemuFirmware.linux, qemu_firmware_variables = Path("/foo/bar"), qemu_gui = True, qemu_kernel = None, qemu_kvm = ConfigFeature.auto, qemu_mem = "", qemu_smp = "yes", qemu_swtpm = ConfigFeature.auto, qemu_vsock = ConfigFeature.enabled, qemu_vsock_cid = QemuVsockCID.hash, release = "53", remove_files = [], remove_packages = ["all"], repart_dirs = [], repart_offline = True, repositories = [], repository_key_check = False, root_password = ("test1234", False), root_shell = "/bin/tcsh", runtime_scratch = ConfigFeature.enabled, runtime_size = 8589934592, runtime_trees = [ConfigTree(Path("/foo/bar"), Path("/baz")), ConfigTree(Path("/bar/baz"), Path("/qux"))], sector_size = None, secure_boot = True, secure_boot_auto_enroll = True, secure_boot_certificate = None, secure_boot_key = Path("/path/to/keyfile"), secure_boot_sign_tool = SecureBootSignTool.pesign, seed = uuid.UUID("7496d7d8-7f08-4a2b-96c6-ec8c43791b60"), selinux_relabel = ConfigFeature.disabled, shim_bootloader = ShimBootloader.none, sign = False, sign_expected_pcr = ConfigFeature.disabled, skeleton_trees = [ConfigTree(Path("/foo/bar"), Path("/")), ConfigTree(Path("/bar/baz"), Path("/qux"))], source_date_epoch = 12345, split_artifacts = True, ssh = False, ssh_certificate = Path("/path/to/cert"), ssh_key = None, timezone = None, tools_tree = None, tools_tree_distribution = None, tools_tree_mirror = None, tools_tree_packages = [], tools_tree_release = None, use_subvolumes = ConfigFeature.auto, verity_certificate = Path("/path/to/cert"), verity_key = None, with_docs = True, with_network = False, with_recommends = True, with_tests = True, workspace_dir = Path("/cwd"), ) assert args.to_json() == dump.rstrip() assert Config.from_json(dump) == args mkosi-20.2/tests/test_sysext.py000066400000000000000000000013301455345632200166770ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ from pathlib import Path import pytest from . 
import Image pytestmark = pytest.mark.integration def test_sysext(config: Image.Config) -> None: with Image( config, options=[ "--incremental", "--clean-package-metadata=no", "--format=directory", ], ) as image: image.build() with Image( image.config, options=[ "--directory", "", "--base-tree", Path(image.output_dir.name) / "image", "--overlay", "--package=dnsmasq", "--format=disk", ], ) as sysext: sysext.build() mkosi-20.2/tests/test_versioncomp.py000066400000000000000000000205541455345632200177150ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import itertools import pytest from mkosi.versioncomp import GenericVersion def test_conversion() -> None: assert GenericVersion("1") < 2 assert GenericVersion("1") < "2" assert GenericVersion("2") > 1 assert GenericVersion("2") > "1" assert GenericVersion("1") == "1" def test_generic_version_systemd() -> None: """Same as the first block of systemd/test/test-compare-versions.sh""" assert GenericVersion("1") < GenericVersion("2") assert GenericVersion("1") <= GenericVersion("2") assert GenericVersion("1") != GenericVersion("2") assert not (GenericVersion("1") > GenericVersion("2")) assert not (GenericVersion("1") == GenericVersion("2")) assert not (GenericVersion("1") >= GenericVersion("2")) assert GenericVersion.compare_versions("1", "2") == -1 assert GenericVersion.compare_versions("2", "2") == 0 assert GenericVersion.compare_versions("2", "1") == 1 def test_generic_version_spec() -> None: """Examples from the uapi group version format spec""" assert GenericVersion("11") == GenericVersion("11") assert GenericVersion("systemd-123") == GenericVersion("systemd-123") assert GenericVersion("bar-123") < GenericVersion("foo-123") assert GenericVersion("123a") > GenericVersion("123") assert GenericVersion("123.a") > GenericVersion("123") assert GenericVersion("123.a") < GenericVersion("123.b") assert GenericVersion("123a") > GenericVersion("123.a") assert GenericVersion("11α") == GenericVersion("11β") assert GenericVersion("A") < GenericVersion("a") assert GenericVersion("") < GenericVersion("0") assert GenericVersion("0.") > GenericVersion("0") assert GenericVersion("0.0") > GenericVersion("0") assert GenericVersion("0") > GenericVersion("~") assert GenericVersion("") > GenericVersion("~") assert GenericVersion("1_") == GenericVersion("1") assert GenericVersion("_1") == GenericVersion("1") assert GenericVersion("1_") < GenericVersion("1.2") assert GenericVersion("1_2_3") > GenericVersion("1.3.3") assert GenericVersion("1+") == GenericVersion("1") assert GenericVersion("+1") == GenericVersion("1") assert GenericVersion("1+") < GenericVersion("1.2") assert GenericVersion("1+2+3") > GenericVersion("1.3.3") @pytest.mark.parametrize( "s1,s2", itertools.combinations_with_replacement( enumerate( [ GenericVersion("122.1"), GenericVersion("123~rc1-1"), GenericVersion("123"), GenericVersion("123-a"), GenericVersion("123-a.1"), GenericVersion("123-1"), GenericVersion("123-1.1"), GenericVersion("123^post1"), GenericVersion("123.a-1"), GenericVersion("123.1-1"), GenericVersion("123a-1"), GenericVersion("124-1"), ], ), 2 ) ) def test_generic_version_strverscmp_improved_doc( s1: tuple[int, GenericVersion], s2: tuple[int, GenericVersion], ) -> None: """Example from the doc string of strverscmp_improved. 
strverscmp_improved can be found in systemd/src/fundamental/string-util-fundamental.c """ i1, v1 = s1 i2, v2 = s2 assert (v1 == v2) == (i1 == i2) assert (v1 < v2) == (i1 < i2) assert (v1 <= v2) == (i1 <= i2) assert (v1 > v2) == (i1 > i2) assert (v1 >= v2) == (i1 >= i2) assert (v1 != v2) == (i1 != i2) def RPMVERCMP(a: str, b: str, expected: int) -> None: assert (GenericVersion(a) > GenericVersion(b)) - (GenericVersion(a) < GenericVersion(b)) == expected def test_generic_version_rpmvercmp() -> None: # Tests copied from rpm's rpmio test suite, under the LGPL license: # https://github.com/rpm-software-management/rpm/blob/master/tests/rpmvercmp.at. # The original form is retained as much as possible for easy comparisons and updates. RPMVERCMP("1.0", "1.0", 0) RPMVERCMP("1.0", "2.0", -1) RPMVERCMP("2.0", "1.0", 1) RPMVERCMP("2.0.1", "2.0.1", 0) RPMVERCMP("2.0", "2.0.1", -1) RPMVERCMP("2.0.1", "2.0", 1) RPMVERCMP("2.0.1a", "2.0.1a", 0) RPMVERCMP("2.0.1a", "2.0.1", 1) RPMVERCMP("2.0.1", "2.0.1a", -1) RPMVERCMP("5.5p1", "5.5p1", 0) RPMVERCMP("5.5p1", "5.5p2", -1) RPMVERCMP("5.5p2", "5.5p1", 1) RPMVERCMP("5.5p10", "5.5p10", 0) RPMVERCMP("5.5p1", "5.5p10", -1) RPMVERCMP("5.5p10", "5.5p1", 1) RPMVERCMP("10xyz", "10.1xyz", 1) # Note: this is reversed from rpm's vercmp */ RPMVERCMP("10.1xyz", "10xyz", -1) # Note: this is reversed from rpm's vercmp */ RPMVERCMP("xyz10", "xyz10", 0) RPMVERCMP("xyz10", "xyz10.1", -1) RPMVERCMP("xyz10.1", "xyz10", 1) RPMVERCMP("xyz.4", "xyz.4", 0) RPMVERCMP("xyz.4", "8", -1) RPMVERCMP("8", "xyz.4", 1) RPMVERCMP("xyz.4", "2", -1) RPMVERCMP("2", "xyz.4", 1) RPMVERCMP("5.5p2", "5.6p1", -1) RPMVERCMP("5.6p1", "5.5p2", 1) RPMVERCMP("5.6p1", "6.5p1", -1) RPMVERCMP("6.5p1", "5.6p1", 1) RPMVERCMP("6.0.rc1", "6.0", 1) RPMVERCMP("6.0", "6.0.rc1", -1) RPMVERCMP("10b2", "10a1", 1) RPMVERCMP("10a2", "10b2", -1) RPMVERCMP("1.0aa", "1.0aa", 0) RPMVERCMP("1.0a", "1.0aa", -1) RPMVERCMP("1.0aa", "1.0a", 1) RPMVERCMP("10.0001", "10.0001", 0) RPMVERCMP("10.0001", "10.1", 0) RPMVERCMP("10.1", "10.0001", 0) RPMVERCMP("10.0001", "10.0039", -1) RPMVERCMP("10.0039", "10.0001", 1) RPMVERCMP("4.999.9", "5.0", -1) RPMVERCMP("5.0", "4.999.9", 1) RPMVERCMP("20101121", "20101121", 0) RPMVERCMP("20101121", "20101122", -1) RPMVERCMP("20101122", "20101121", 1) RPMVERCMP("2_0", "2_0", 0) RPMVERCMP("2.0", "2_0", -1) # Note: in rpm those compare equal RPMVERCMP("2_0", "2.0", 1) # Note: in rpm those compare equal # RhBug:178798 case */ RPMVERCMP("a", "a", 0) RPMVERCMP("a+", "a+", 0) RPMVERCMP("a+", "a_", 0) RPMVERCMP("a_", "a+", 0) RPMVERCMP("+a", "+a", 0) RPMVERCMP("+a", "_a", 0) RPMVERCMP("_a", "+a", 0) RPMVERCMP("+_", "+_", 0) RPMVERCMP("_+", "+_", 0) RPMVERCMP("_+", "_+", 0) RPMVERCMP("+", "_", 0) RPMVERCMP("_", "+", 0) # Basic testcases for tilde sorting RPMVERCMP("1.0~rc1", "1.0~rc1", 0) RPMVERCMP("1.0~rc1", "1.0", -1) RPMVERCMP("1.0", "1.0~rc1", 1) RPMVERCMP("1.0~rc1", "1.0~rc2", -1) RPMVERCMP("1.0~rc2", "1.0~rc1", 1) RPMVERCMP("1.0~rc1~git123", "1.0~rc1~git123", 0) RPMVERCMP("1.0~rc1~git123", "1.0~rc1", -1) RPMVERCMP("1.0~rc1", "1.0~rc1~git123", 1) # Basic testcases for caret sorting RPMVERCMP("1.0^", "1.0^", 0) RPMVERCMP("1.0^", "1.0", 1) RPMVERCMP("1.0", "1.0^", -1) RPMVERCMP("1.0^git1", "1.0^git1", 0) RPMVERCMP("1.0^git1", "1.0", 1) RPMVERCMP("1.0", "1.0^git1", -1) RPMVERCMP("1.0^git1", "1.0^git2", -1) RPMVERCMP("1.0^git2", "1.0^git1", 1) RPMVERCMP("1.0^git1", "1.01", -1) RPMVERCMP("1.01", "1.0^git1", 1) RPMVERCMP("1.0^20160101", "1.0^20160101", 0) RPMVERCMP("1.0^20160101", "1.0.1", -1) 
RPMVERCMP("1.0.1", "1.0^20160101", 1) RPMVERCMP("1.0^20160101^git1", "1.0^20160101^git1", 0) RPMVERCMP("1.0^20160102", "1.0^20160101^git1", 1) RPMVERCMP("1.0^20160101^git1", "1.0^20160102", -1) # Basic testcases for tilde and caret sorting */ RPMVERCMP("1.0~rc1^git1", "1.0~rc1^git1", 0) RPMVERCMP("1.0~rc1^git1", "1.0~rc1", 1) RPMVERCMP("1.0~rc1", "1.0~rc1^git1", -1) RPMVERCMP("1.0^git1~pre", "1.0^git1~pre", 0) RPMVERCMP("1.0^git1", "1.0^git1~pre", 1) RPMVERCMP("1.0^git1~pre", "1.0^git1", -1) # These are included here to document current, arguably buggy behaviors # for reference purposes and for easy checking against unintended # behavior changes. */ print("/* RPM version comparison oddities */") # RhBug:811992 case RPMVERCMP("1b.fc17", "1b.fc17", 0) RPMVERCMP("1b.fc17", "1.fc17", 1) # Note: this is reversed from rpm's vercmp, WAT! */ RPMVERCMP("1.fc17", "1b.fc17", -1) RPMVERCMP("1g.fc17", "1g.fc17", 0) RPMVERCMP("1g.fc17", "1.fc17", 1) RPMVERCMP("1.fc17", "1g.fc17", -1) # Non-ascii characters are considered equal so these are all the same, eh… */ RPMVERCMP("1.1.α", "1.1.α", 0) RPMVERCMP("1.1.α", "1.1.β", 0) RPMVERCMP("1.1.β", "1.1.α", 0) RPMVERCMP("1.1.αα", "1.1.α", 0) RPMVERCMP("1.1.α", "1.1.ββ", 0) RPMVERCMP("1.1.ββ", "1.1.αα", 0) mkosi-20.2/tools/000077500000000000000000000000001455345632200137305ustar00rootroot00000000000000mkosi-20.2/tools/do-a-release.sh000077500000000000000000000007021455345632200165240ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1+ if [ -z "$1" ] ; then echo "Version number not specified." exit 1 fi if ! git diff-index --quiet HEAD; then echo "Repo has modified files." exit 1 fi sed -r -i "s/^version = \".*\"$/version = \"$1\"/" pyproject.toml sed -r -i "s/^__version__ = \".*\"$/__version__ = \"$1\"/" mkosi/config.py git add -p pyproject.toml mkosi git commit -m "Release $1" git tag -s "v$1" -m "mkosi $1" mkosi-20.2/tools/generate-zipapp.sh000077500000000000000000000004261455345632200173640ustar00rootroot00000000000000#!/bin/bash BUILDDIR=$(mktemp -d -q) cleanup() { rm -rf "$BUILDDIR" } trap cleanup EXIT mkdir -p builddir cp -r mkosi "${BUILDDIR}/" python3 -m zipapp \ -p "/usr/bin/env python3" \ -o builddir/mkosi \ -m mkosi.__main__:main \ "$BUILDDIR" mkosi-20.2/tools/make-man-page.sh000077500000000000000000000001751455345632200166720ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1+ set -ex pandoc -t man -s -o mkosi/resources/mkosi.1 mkosi/resources/mkosi.md