pax_global_header00006660000000000000000000000064141513614760014522gustar00rootroot0000000000000052 comment=bf884c3f92fb19019c7252bfb09360a24adfc667 mkosi-12/000077500000000000000000000000001415136147600124305ustar00rootroot00000000000000mkosi-12/.editorconfig000066400000000000000000000002331415136147600151030ustar00rootroot00000000000000root = true [*] end_of_line = lf insert_final_newline = true trim_trailing_whitespace = true charset = utf-8 [*.py] indent_style = space indent_size = 4 mkosi-12/.git-blame-ignore-revs000066400000000000000000000001101415136147600165200ustar00rootroot00000000000000# Migrate code style to Black 9dfda52fffb564d99264c09e0f0182ab8540d7cd mkosi-12/.github/000077500000000000000000000000001415136147600137705ustar00rootroot00000000000000mkosi-12/.github/workflows/000077500000000000000000000000001415136147600160255ustar00rootroot00000000000000mkosi-12/.github/workflows/ci-unit-test.yml000066400000000000000000000050121415136147600210730ustar00rootroot00000000000000name: CI Unit Test on: push: branches: - main pull_request: branches: - main jobs: unit-test: runs-on: ubuntu-20.04 concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true steps: - uses: actions/checkout@v2 - name: Install run: | python3 -m pip install pytest mypy types-cryptography isort pyflakes npm install -g pyright - name: Check that imports are sorted run: python3 -m isort --verbose --check-only mkosi/ - name: Run pyflakes run: python3 -m pyflakes mkosi/ tests/ - name: Check that tabs are not used in code run: sh -c '! git grep -P "\\t" "*.py"' - name: Type Checking (mypy) run: python3 -m mypy mkosi - name: Type Checking (pyright) run: pyright . - name: Unit Tests run: python3 -m pytest - name: Test execution from current working directory run: python3 -m mkosi -h - name: Test execution from current working directory (sudo call) run: sudo python3 -m mkosi -h - name: Test system installation run: | sudo python3 -m pip install . sudo mkosi -h sudo python3 -m pip uninstall --yes mkosi - name: Test user installation run: | python3 -m pip install --user . $HOME/.local/bin/mkosi -h python3 -m pip uninstall --yes mkosi - name: Test user installation (sudo call) run: | python3 -m pip install --user . sudo $HOME/.local/bin/mkosi -h python3 -m pip uninstall --yes mkosi - name: Test editable user installation run: | python3 -m pip install --user --no-use-pep517 --editable . $HOME/.local/bin/mkosi -h python3 -m pip uninstall --yes mkosi - name: Test editable user installation (sudo call) run: | python3 -m pip install --user --no-use-pep517 --editable . sudo $HOME/.local/bin/mkosi -h python3 -m pip uninstall --yes mkosi - name: Test venv installation run: | python3 -m venv testvenv testvenv/bin/python3 -m pip install . testvenv/bin/mkosi -h - name: Test venv installation (sudo call) run: | sudo testvenv/bin/mkosi -h - name: Test zipapp creation run: | ./tools/generate-zipapp.sh ./builddir/mkosi -h - name: Test shell scripts run: | sudo apt-get update && sudo apt-get install --no-install-recommends shellcheck bash -c 'set globstar; shellcheck mkosi/**/*.sh' mkosi-12/.github/workflows/ci.yml000066400000000000000000000162301415136147600171450ustar00rootroot00000000000000name: CI on: push: branches: - main pull_request: branches: - main jobs: ci: runs-on: ubuntu-20.04 concurrency: group: ${{ github.workflow }}-${{ matrix.distro }}-${{ matrix.format }}-${{ github.ref }} cancel-in-progress: true strategy: fail-fast: false matrix: distro: - arch - centos_epel - debian - ubuntu - fedora - rocky_epel - alma_epel - gentoo format: - directory - tar - cpio - gpt_ext4 - gpt_xfs - gpt_btrfs - gpt_squashfs - plain_squashfs exclude: # CentOS 8/Rocky/Alma Linux does not support btrfs. - distro: centos format: gpt_btrfs - distro: centos_epel format: gpt_btrfs - distro: rocky format: gpt_btrfs - distro: rocky_epel format: gpt_btrfs - distro: alma format: gpt_btrfs - distro: alma_epel format: gpt_btrfs steps: - uses: actions/checkout@v2 - uses: ./ - name: Install dependencies run: sudo apt-get update && sudo apt-get install --no-install-recommends python3-pexpect - name: Install Gentoo dependencies (portage) if: ${{ matrix.distro == 'gentoo' }} run: | sudo tee /usr/lib/sysusers.d/acct-user-portage.conf > /dev/null <<- EOF # /usr/lib/sysusers.d/portage.conf u portage - "Portage system user" /var/lib/portage/home - EOF sudo systemd-sysusers --no-pager sudo install --owner=portage --group=portage --mode=0755 --directory /var/db/repos sudo install --owner=portage --group=portage --mode=0755 --directory /etc/portage/repos.conf sudo install --owner=portage --group=portage --mode=0755 --directory /var/cache/binpkgs sudo tee /etc/portage/repos.conf/eselect-repo.conf > /dev/null <<- EOF [gentoo] location = /var/db/repos/gentoo sync-type = git sync-uri = https://anongit.gentoo.org/git/repo/gentoo.git EOF git clone https://anongit.gentoo.org/git/proj/portage.git --depth=1 cd portage tee setup.cfg > /dev/null <<- EOF [build_ext] portage-ext-modules=true EOF sudo python setup.py install sudo ln -s --relative /var/db/repos/gentoo/profiles/default/linux/amd64/17.1/no-multilib/systemd /etc/portage/make.profile # Do a manual install so we have the latest changes from the pull request available. - name: Install run: sudo python3 -m pip install . - name: Write shared mkosi config run: | mkdir -p mkosi.default.d tee mkosi.default.d/mkosi.default <<- EOF [Distribution] Distribution=${{ matrix.distro }} [Output] Format=${{ matrix.format }} [Validation] Password= [Host] Ssh=yes NetworkVeth=yes EOF mkdir -p mkosi.skeleton/etc/portage tee mkosi.skeleton/etc/portage/binrepos.conf <<- EOF [binhost] sync-uri = https://raw.githubusercontent.com/257/binpkgs/main EOF # Ubuntu's systemd-nspawn doesn't support faccessat2() syscall, which is # required, since current Arch's glibc implements faccessat() via faccessat2(). - name: Update systemd-nspawn if: ${{ matrix.distro == 'arch' }} run: | echo "deb-src http://archive.ubuntu.com/ubuntu/ $(lsb_release -cs) main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list sudo apt update sudo apt build-dep systemd git clone https://github.com/systemd/systemd --depth=1 && cd systemd meson build ninja -C build sudo ln -svf $PWD/build/systemd-nspawn `which systemd-nspawn` systemd-nspawn --version - name: Build ${{ matrix.distro }}/${{ matrix.format }} run: | sudo python3 -m mkosi build - name: Boot ${{ matrix.distro }}/${{ matrix.format }} if: matrix.format != 'tar' && matrix.format != 'cpio' run: sudo ./tests/pexpect/boot.py python3 -m mkosi boot - name: Build ${{ matrix.distro }}/${{ matrix.format }} UEFI UKI if: matrix.format != 'directory' && matrix.format != 'plain_squashfs' && matrix.format != 'tar' && matrix.format != 'cpio' run: | tee mkosi.default <<- EOF [Output] Bootable=yes BootProtocols=uefi [Host] QemuHeadless=yes EOF sudo python3 -m mkosi --force build - name: Boot ${{ matrix.distro }}/${{ matrix.format }} UEFI UKI if: matrix.format != 'directory' && matrix.format != 'plain_squashfs' && matrix.format != 'gpt_squashfs' && matrix.format != 'tar' && matrix.format != 'cpio' && !(matrix.distro == 'ubuntu' && matrix.format == 'gpt_squashfs') run: sudo ./tests/pexpect/boot.py python3 -m mkosi qemu - name: Build ${{ matrix.distro }}/${{ matrix.format }} UEFI if: matrix.format != 'directory' && matrix.format != 'plain_squashfs' && matrix.format != 'tar' && matrix.format != 'cpio' && ( matrix.distro == 'arch' || matrix.distro == 'centos' || matrix.distro == 'rocky' || matrix.distro == 'alma' || matrix.distro == 'fedora' || matrix.distro == 'gentoo') run: | tee mkosi.default <<- EOF [Output] Bootable=yes BootProtocols=uefi WithUnifiedKernelImages=no # Boot with systemd.volatile=overlay so squashfs images can write to the filesystem KernelCommandLine=systemd.volatile=overlay [Host] QemuHeadless=yes EOF sudo python3 -m mkosi --force build - name: Boot ${{ matrix.distro }}/${{ matrix.format }} UEFI if: matrix.format != 'directory' && matrix.format != 'plain_squashfs' && matrix.format != 'cpio' && matrix.format != 'tar' && ( matrix.distro == 'arch' || matrix.distro == 'centos' || matrix.distro == 'rocky' || matrix.distro == 'alma' || matrix.distro == 'fedora' || matrix.distro == 'gentoo') run: sudo ./tests/pexpect/boot.py python3 -m mkosi qemu - name: Build ${{ matrix.distro }}/${{ matrix.format }} BIOS if: matrix.format != 'directory' && matrix.format != 'gpt_squashfs' && matrix.format != 'plain_squashfs' && matrix.format != 'tar' && matrix.format != 'cpio' run: | tee mkosi.default <<- EOF [Output] Bootable=yes BootProtocols=bios WithUnifiedKernelImages=no [Host] QemuHeadless=yes EOF sudo python3 -m mkosi --force build - name: Boot ${{ matrix.distro }}/${{ matrix.format }} BIOS if: matrix.format != 'directory' && matrix.format != 'gpt_squashfs' && matrix.format != 'plain_squashfs' && matrix.format != 'cpio' && matrix.format != 'tar' run: sudo ./tests/pexpect/boot.py python3 -m mkosi qemu mkosi-12/.gitignore000066400000000000000000000004351415136147600144220ustar00rootroot00000000000000*.cache-pre-dev *.cache-pre-inst .cache .mypy_cache/ .project .pydevproject .pytest_cache/ /.mkosi-* /SHA256SUMS /SHA256SUMS.gpg /build /dist /mkosi.build /mkosi.egg-info /mkosi.extra /mkosi.nspawn /mkosi.rootpw /mkosi.default /mkosi.secure-boot.key /mkosi.secure-boot.crt __pycache__ mkosi-12/LICENSE000066400000000000000000000636421415136147600134500ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! mkosi-12/MANIFEST.in000066400000000000000000000000201415136147600141560ustar00rootroot00000000000000include LICENSE mkosi-12/NEWS.md000066400000000000000000000233121415136147600135270ustar00rootroot00000000000000# mkosi Changelog ## v12 - Fix handling of baselayout in Gentoo installations. ## v11 - Support for Rocky Linux, Alma Linux, and Gentoo has been added! - A new `ManifestFormat=` option can be used to generate "manifest" files that describe what packages were installed. With `json`, a JSON file that shows the names and versions of all installed packages will be created. With `changelog`, a longer human-readable file that shows package descriptions and changelogs will be generated. This latter format should be considered experimental and likely to change in later versions. - A new `RemovePackages=` option can be used to uninstall packages after the build and finalize scripts have been done. This is useful for the case where packages are required by the build scripts, or pulled in as dependencies for scriptlets of other packages, but are not necessary in the final image. - A new `BaseImage=` option can be used to build "system extensions" a.k.a. "sysexts" — partial images which are mounted on top of an existing system to provide additional files under `/usr/`. See the [systemd-sysext man page](https://www.freedesktop.org/software/systemd/man/systemd-sysext.html) for more information. - A new `CleanPackageMetadata=` option can be used to force or disable the removal of package manager files. When this option is not used, they are removed when the package manager is not installed in the final image. - A new `UseHostRepositories=` option instructs mkosi to use repository configuration from the host system, instead of the internal list. - A new `SshAgent=` option configures the path to the ssh agent. - A new `SshPort=` option overrides the port used for ssh. - The `Verity=` setting supports a new value `signed`. When set, verity data will be signed and the result inserted as an additional partition in the image. See https://systemd.io/DISCOVERABLE_PARTITIONS for details about signed disk images. This information is used by `systemd-nspawn`, `systemd-dissect`, `systemd-sysext`, `systemd-portabled` and `systemd`'s `RootImage=` setting (among others) to cryptographically validate the image file systems before use. - The `--build-environment=` option was renamed to `--environment=` and extended to cover *all* invoked scripts, not just the `mkosi.build`. The old name is still understood. - With `--with-network=never`, `dnf` is called with `--cacheonly`, so that the package lists are not refreshed. This gives a degree of reproducibility when doing repeated installs with the same package set (and also makes installs significantly faster). - The `--debug=` option gained a new value `disk` to show information about disk sized and partition allocations. - Some sections and settings have been renamed for clarity: [Packages] is now [Content], `Password=`, `PasswordIsHashed=`, and `Autologin=` are now in [Content]. The old names are still supported, but not documented. - When `--prepare-script=`/`--build-script=`/`--finalize-script=` is used with an empty argument, the corresponding script will not be called. - Python 3.7 is the minimal supported version. - Note to packagers: the Python `cryptography` module is needed for signing of verity data. ## v10 - Minimum supported Python version is now 3.7. - Automatic configuration of the network for Arch Linux was removed to bring different distros more in line with each other. To add it back, add a postinstall script to configure your network manager of choice. - The `--default` option was changed to not affect the search location of `mkosi.default.d/`. mkosi now always searches for `mkosi.default.d/` in the working directory. - `quiet` was dropped from the default kernel command line. - `--source-file-transfer` and `--source-file-transfer-final` now accept an empty value as the argument which can be used to override a previous setting. - A new command `mkosi serve` can be used to serve build artifacts using a small embedded HTTP server. This is useful for `machinectl pull-raw …` and `machinectl pull-tar …`. - A new command `mkosi genkey` can be used to generate secure boot keys for use with mkosi's `--secure-boot` options. The number of days the keys should remain valid can be specified via `--secure-boot-valid-days=` and their CN via `--secure-boot-common-name=`. - When booting images with `qemu`, firmware that supports Secure Boot will be used if available. - `--source-resolve-symlinks` and `--source-resolve-symlinks-final` options are added to control how symlinks in the build sources are handled when `--source-file-transfer[-final]=copy-all` is used. - `--build-environment=` option was added to set variables for the build script. - `--usr-only` option was added to build images that comprise only the `/usr/` directory, instead of the whole root file system. This is useful for stateless systems where `/etc/` and `/var/` are populated by `systemd-tmpfiles`/`systemd-sysusers` and related calls at boot, or systems that are originally shipped without a root file system, but where `systemd-repart` adds one on the first boot. - Support for "image versions" has been added. The version number can be set with `--version-number=`. It is included in the default output filename and passed as `$IMAGE_VERSION` to the build script. In addition, `mkosi bump` can be used to increase the version number by one, and `--auto-bump` can be used to increase it automatically after successful builds. - Support for "image identifiers" has been added. The id can be set with `--image=id` and is passed to the build script as `$IMAGE_ID`. - The list of packages to install can be configured with `--base-packages=`. With `--base-packages=no`, only packages specified with `--packages=` will be installed. With `--base-packages=conditional`, various packages will be installed "conditionally", i.e. only if some other package is otherwise pulled in. For example, `systemd-udev` may be installed only if `systemd` is listed in `--packages=`. - CPIO output format has been added. This is useful for kernel initramfs images. - Output compression can be configured with `--compress-fs=` and `--compress-output=`, and support for `zstd` has been added. - `--ssh-key=` option was added to control the ssh key used to connect to the image. - `--remove-files=` option was added to remove file from the generated images. - Inline comments are now allowed in config files (anything from `#` until the end of the line will be ignored). - The development branch was renamed from `master` to `main`. ## v9 ### Highlighted Changes - The mkosi Github action now defaults to the current release of mkosi instead of the tip of the master branch. - Add a `ssh` verb and accompanying `--ssh` option. The latter sets up SSH keys for direct SSH access into a booted image, whereas the former can be used to start an SSH connection to the image. - Allow for distribution specific `mkosi.*` files in subdirectories of `mkosi.default.d/`. These files are only processed if a subdirectory named after the target distribution of the image is found in `mkosi.default.d/`. - The summary of used options for the image is now only printed when building the image for the first time or when the `summary` verb is used. - All of mkosi's output, except for the build script, will now go to stderr. There was no clear policy on this before and this choice makes it easier to use images generated and booted via mkosi with language servers using stdin and stdout for communication. - `--source-file-transfer` now defaults to `copy-git-others` to also include untracked files. - [black](https://github.com/psf/black) is now used as a code style and conformance with it is checked in CI. - Add a new `--ephemeral` option to boot into a temporary snapshot of the image that will be thrown away on shutdown. - Add a new option `--network-veth` to set up a virtual Ethernet link between the host and the image for usage with nspawn or QEMU - Add a new `--autologin` option to automatically log into the root account upon boot of the image. This is useful when using mkosi for boot tests. - Add a new `--hostonly` option to generate host specific initrds. This is useful when using mkosi for boot tests. - Add a new `--install-directory` option and special directory `mkosi.installdir/` that will be used as `$DESTDIR` for the build script, so that the contents of this directory can be shared between builds. - Add a new `--include-directory` option and special directory `mkosi.includedir/` that will be mounted at `/usr/include` during the build. This way headers files installed during the build can be made available to the host system, which is useful for usage with language servers. - Add a new `--source-file-transfer-final` option to complement `--source-file-transfer`. It does the same `--source-file-transfer` does for the build image, but for the final one. - Add a new `--tar-strip-selinux-context` option to remove SELinux xattrs. This is useful when an image with a target distribution not using SELinux is generated on a host that is using it. - Document the `--no-chown` option. Using this option, artifacts generated by mkosi are not chowned to the user invoking mkosi when it is invoked via sudo. It has been with as for a while, but hasn't been documented until now. ### Fixed Issues - [#506](https://github.com/systemd/mkosi/issues/506) - [#559](https://github.com/systemd/mkosi/issues/559) - [#561](https://github.com/systemd/mkosi/issues/561) - [#562](https://github.com/systemd/mkosi/issues/562) - [#575](https://github.com/systemd/mkosi/issues/575) - [#580](https://github.com/systemd/mkosi/issues/580) - [#593](https://github.com/systemd/mkosi/issues/593) ### Authors - Daan De Meyer - Joerg Behrmann - Luca Boccassi - Peter Hutterer - ValdikSS mkosi-12/README000077700000000000000000000000001415136147600145622README.mdustar00rootroot00000000000000mkosi-12/README.md000066400000000000000000000047711415136147600137200ustar00rootroot00000000000000# mkosi — Build Bespoke OS Images A fancy wrapper around `dnf --installroot`, `debootstrap`, `pacstrap` and `zypper` that generates customized disk images with a number of bells and whistles. For a longer description and available features and options, see the [man page](mkosi.md). Packaging status # Installation Installing mkosi is easy, as it has no runtime Python dependencies (you will need all the tools to format filesystems and bootstrap the distribution appropriate for your image, though). If you just want the main branch you can run ```shell python3 -m pip install --user git+https://github.com/systemd/mkosi.git ``` If you want to hack on mkosi do ```shell # clone either this repository or your fork of it git clone https://github.com/systemd/mkosi/ cd mkosi python3 -m pip install --user --no-use-pep517 --editable . ``` This will install mkosi in editable mode to `~/.local/bin/mkosi`, allowing you to use your own changes right away. For development you also need [mypy](https://github.com/python/mypy), for type checking, [pytest](https://github.com/pytest-dev/pytest), to run tests, and [black](https://github.com/psf/black), for code formatting. We check tests, typing and code formatting in CI (see `.github/workflows`), but you can run the tests locally as well. ## zipapp You can also package mkosi as a [zipapp](https://docs.python.org/3/library/zipapp.html) that you can deploy anywhere in your `PATH`. Running this will leave a `mkosi` binary in `builddir/` ```shell tools/generate-zipapp.sh ``` ## Python module Besides the mkosi binary, you can also call mkosi via ```shell python -m mkosi ``` when not installed as a zipapp. Please note, that the python module exists solely for the usage of the mkosi binary and is not to be considered a public API. ## git blame When using git blame, be sure to add `--ignore-revs-file .git-blame-ignore-revs` to the arguments to ignore irrelevant code formatting commits. This can be set permanently via the `blame.ignoreRevsFile` git option. # References * [Primary mkosi git repository on GitHub](https://github.com/systemd/mkosi/) * [mkosi — A Tool for Generating OS Images](http://0pointer.net/blog/mkosi-a-tool-for-generating-os-images.html) indroductory blog post by Lennart Poettering * [The mkosi OS generation tool](https://lwn.net/Articles/726655/) story on LWN mkosi-12/TODO.md000066400000000000000000000020011415136147600135100ustar00rootroot00000000000000# TODO * volatile images * work on device nodes * mkosi --all (for building everything in mkosi.files/) * --architecture= is chaos: we need to define a clear vocabulary of architectures that can deal with the different names of architectures in debian, fedora and uname. * squashfs root images with /home and /srv on ext4 * optionally output the root partition (+ verity) and the unified kernel image as additional artifacts, so that they can be used in automatic updating schemes (i.e. take an old image that is currently in use, add a root partition with the new root image (+ verity), and drop the new kernel into the ESP, and an update is complete. * minimization with gpt_btrfs doesn't seem to take fs compression into account. The resulting device is half-empty. * --format gpt_mksquashfs --minimize throws an error. It should just silently ignore --minimize, since it's implied. * --debug=help should list known options and exit. Same for other options which accept a fixed list of choices. mkosi-12/action.yaml000066400000000000000000000015761415136147600146020ustar00rootroot00000000000000name: setup-mkosi description: Install mkosi and all its dependencies runs: using: composite steps: - name: Dependencies shell: bash run: | mkdir $BUILDDIR sudo ${{ github.action_path }}/action/setup-github-actions.sh sudo pacman-key --init sudo pacman-key --populate archlinux sudo tee /etc/systemd/network/80-vm-vt.network > /dev/null <<- EOF [Match] Name=vt-* Driver=tun [Network] # Default to using a /28 prefix, giving up to 13 addresses per VM. Address=0.0.0.0/28 LinkLocalAddressing=yes DHCPServer=yes IPMasquerade=yes LLDP=yes EmitLLDP=customer-bridge IPv6PrefixDelegation=yes EOF sudo systemctl restart systemd-networkd env: BUILDDIR: build - name: Install shell: bash run: sudo python3 -m pip install ${{ github.action_path }} mkosi-12/action/000077500000000000000000000000001415136147600137055ustar00rootroot00000000000000mkosi-12/action/mkosi.default000066400000000000000000000017211415136147600163760ustar00rootroot00000000000000[Distribution] Distribution=ubuntu Release=focal [Output] HostonlyInitrd=yes [Content] Cache=../mkosi.cache/ubuntu~focal BuildScript=setup-github-actions.sh WithNetwork=yes Repositories=main,universe Autologin=yes Bootable=yes Password= Packages=bzip2 debootstrap e2fsprogs git libarchive13 libcap2 libcppunit-1.15-0 libcurl4 libdb5.3 libgcrypt20 libglib2.0.0 libgpgme11 libjson-c4 liblua5.3-0 liblzma5 libmagic1 libpopt0 libpython3.8 libsmartcols1 libsqlite3-0 libssl1.1 libtool libxml2 libyaml-0-2 libzstd1 python3 python3-distutils python3-gpg python3-setuptools squashfs-tools systemd-container xfsprogs zlib1g zypper [Validation] QemuHeadless=yes [Host] NetworkVeth=yes mkosi-12/action/setup-github-actions.sh000077500000000000000000000212361415136147600203260ustar00rootroot00000000000000#!/usr/bin/env bash set -e PACMAN_VERSION="6.0.1" ARCHLINUX_KEYRING_VERSION="20210902" RPM_VERSION="4.17.0" LIBCOMPS_VERSION="0.1.17" LIBREPO_VERSION="1.14.2" LIBMODULEMD_VERSION="2.13.0" LIBSOLV_VERSION="0.7.19" LIBDNF_VERSION="0.63.1" DNF_VERSION="4.8.0" export CMAKE_GENERATOR=Ninja # All built libraries are installed to both $DESTDIR and /usr so they appear in # the final image and can be found by the build scripts of the libraries and # binaries that depend on them. If every library/binary used CMake as the build # systemd we'd just use CMAKE_PREFIX_PATH to allow CMake to find libraries in # $DESTDIR but unfortunately meson and autotools don't have an equivalent # feature. apt-get update apt-get --assume-yes --no-install-recommends install \ asciidoc \ autoconf \ automake \ autopoint \ check \ cmake \ debootstrap \ docbook-xsl \ e2fsprogs \ g++ \ gcc \ gettext \ gobject-introspection \ libarchive-dev \ libbz2-dev \ libcap-dev \ libcppunit-dev \ libcurl4-openssl-dev \ libdb-dev \ libgcrypt-dev \ libgirepository1.0-dev \ libglib2.0-dev \ libgpgme-dev \ libjson-c-dev \ liblua5.3-dev \ liblzma-dev \ libmagic-dev \ libpopt-dev \ libsmartcols-dev \ libsqlite3-dev \ libssl-dev \ libtool \ libxml2-dev \ libyaml-dev \ libzstd-dev \ m4 \ make \ meson \ ninja-build \ ovmf \ pandoc \ pkgconf \ python3 \ python3-dev \ python3-gpg \ python3-sphinx \ python3-setuptools \ qemu-system-x86-64 \ squashfs-tools \ swig \ systemd-container \ xfsprogs \ xsltproc \ zlib1g-dev \ zypper cd "$BUILDDIR" if [ ! -f pacman-$PACMAN_VERSION.tar.xz ]; then wget https://sources.archlinux.org/other/pacman/pacman-$PACMAN_VERSION.tar.xz tar xf pacman-$PACMAN_VERSION.tar.xz fi if [ ! -f pacman-$PACMAN_VERSION-build/build.ninja ]; then meson \ --buildtype=release \ --prefix /usr \ --libdir lib/x86_64-linux-gnu \ -Ddoc=disabled \ -Dscriptlet-shell=/usr/bin/bash \ -Dldconfig=/usr/bin/ldconfig \ pacman-$PACMAN_VERSION-build \ pacman-$PACMAN_VERSION fi meson install -C pacman-$PACMAN_VERSION-build if [ ! -f archlinux-keyring-$ARCHLINUX_KEYRING_VERSION.tar.gz ]; then wget https://sources.archlinux.org/other/archlinux-keyring/archlinux-keyring-$ARCHLINUX_KEYRING_VERSION.tar.gz tar xf archlinux-keyring-$ARCHLINUX_KEYRING_VERSION.tar.gz fi make -C archlinux-keyring-$ARCHLINUX_KEYRING_VERSION PREFIX=/usr install if [ ! -f rpm-$RPM_VERSION-release.tar.gz ]; then wget https://github.com/rpm-software-management/rpm/archive/refs/tags/rpm-$RPM_VERSION-release.tar.gz tar xf rpm-$RPM_VERSION-release.tar.gz fi pushd rpm-rpm-$RPM_VERSION-release if [ ! -f Makefile ]; then ./autogen.sh \ --prefix=/usr \ --libdir=/usr/lib/x86_64-linux-gnu \ --sysconfdir=/etc \ --localstatedir=/var \ --enable-python \ --with-external-db \ --with-lua \ --with-cap \ LUA_CFLAGS="$(pkg-config --cflags lua5.3)" \ LUA_LIBS="$(pkg-config --libs lua5.3)" fi make -j 2 make install make DESTDIR="" install pushd python python3 setup.py install --root="$DESTDIR" --optimize=1 python3 setup.py install --optimize=1 popd popd if [ ! -f $LIBCOMPS_VERSION.tar.gz ]; then wget https://github.com/rpm-software-management/libcomps/archive/$LIBCOMPS_VERSION.tar.gz tar xf $LIBCOMPS_VERSION.tar.gz fi if [ ! -f libcomps-$LIBCOMPS_VERSION-build/build.ninja ]; then cmake \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_PREFIX=/usr \ -DCMAKE_INSTALL_LIBDIR=lib/x86_64-linux-gnu \ -DENABLE_TESTS=OFF \ -DENABLE_DOCS=OFF \ -DBUILD_SHARED_LIBS=ON \ -Wno-dev \ -B libcomps-$LIBCOMPS_VERSION-build \ -S libcomps-$LIBCOMPS_VERSION/libcomps fi cmake --build libcomps-$LIBCOMPS_VERSION-build cmake --install libcomps-$LIBCOMPS_VERSION-build DESTDIR="" cmake --install libcomps-$LIBCOMPS_VERSION-build if [ ! -f $LIBREPO_VERSION.tar.gz ]; then wget https://github.com/rpm-software-management/librepo/archive/$LIBREPO_VERSION.tar.gz tar xf $LIBREPO_VERSION.tar.gz fi if [ ! -f librepo-$LIBREPO_VERSION-build/build.ninja ]; then cmake \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_PREFIX=/usr \ -DCMAKE_INSTALL_LIBDIR=lib/x86_64-linux-gnu \ -DENABLE_TESTS=OFF \ -DENABLE_DOCS=OFF \ -DWITH_ZCHUNK=OFF \ -DBUILD_SHARED_LIBS=ON \ -Wno-dev \ -B librepo-$LIBREPO_VERSION-build \ -S librepo-$LIBREPO_VERSION fi cmake --build librepo-$LIBREPO_VERSION-build cmake --install librepo-$LIBREPO_VERSION-build DESTDIR="" cmake --install librepo-$LIBREPO_VERSION-build if [ ! -f libmodulemd-$LIBMODULEMD_VERSION.tar.gz ]; then wget https://github.com/fedora-modularity/libmodulemd/archive/libmodulemd-$LIBMODULEMD_VERSION.tar.gz tar xf libmodulemd-$LIBMODULEMD_VERSION.tar.gz fi if [ ! -f libmodulemd-$LIBMODULEMD_VERSION-build/build.ninja ]; then meson \ --buildtype=release \ --prefix /usr \ --libdir lib/x86_64-linux-gnu \ --pkg-config-path /usr/lib/x86_64-linux-gnu/pkgconfig \ -Ddeveloper_build=false \ -Dwith_docs=false \ -Dwith_manpages=disabled \ -Dskip_introspection=false \ -Dgobject_overrides_dir_py3=override \ libmodulemd-$LIBMODULEMD_VERSION-build \ libmodulemd-libmodulemd-$LIBMODULEMD_VERSION fi meson install -C libmodulemd-$LIBMODULEMD_VERSION-build DESTDIR="" meson install -C libmodulemd-$LIBMODULEMD_VERSION-build if [ ! -f $LIBSOLV_VERSION.tar.gz ]; then wget https://github.com/openSUSE/libsolv/archive/$LIBSOLV_VERSION.tar.gz tar xf $LIBSOLV_VERSION.tar.gz fi if [ ! -f libsolv-$LIBSOLV_VERSION-build/build.ninja ]; then cmake \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_PREFIX=/usr \ -DCMAKE_INSTALL_LIBDIR=lib/x86_64-linux-gnu \ -DENABLE_RPMDB=ON \ -DENABLE_RPMPKG=ON \ -DENABLE_PUBKEY=ON \ -DENABLE_RPMDB_BYRPMHEADER=ON \ -DENABLE_RPMDB_LIBRPM=ON \ -DENABLE_RPMPKG_LIBRPM=ON \ -DENABLE_RPMMD=ON \ -DENABLE_COMPS=ON \ -DENABLE_MDKREPO=ON \ -DENABLE_COMPLEX_DEPS=ON \ -DENABLE_APPDATA=ON \ -DENABLE_LZMA_COMPRESSION=ON \ -DENABLE_BZIP2_COMPRESSION=ON \ -DENABLE_ZSTD_COMPRESSION=ON \ -Wno-dev \ -B libsolv-$LIBSOLV_VERSION-build \ -S libsolv-$LIBSOLV_VERSION fi cmake --build libsolv-$LIBSOLV_VERSION-build cmake --install libsolv-$LIBSOLV_VERSION-build DESTDIR="" cmake --install libsolv-$LIBSOLV_VERSION-build if [ ! -f $LIBDNF_VERSION.tar.gz ]; then wget https://github.com/rpm-software-management/libdnf/archive/$LIBDNF_VERSION.tar.gz tar xf $LIBDNF_VERSION.tar.gz fi if [ ! -f libdnf-$LIBDNF_VERSION-build/build.ninja ]; then cp /usr/share/cmake/Modules/FindLibSolv.cmake libdnf-$LIBDNF_VERSION/cmake/modules cmake \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_PREFIX=/usr \ -DCMAKE_INSTALL_LIBDIR=lib/x86_64-linux-gnu \ -DWITH_GTKDOC=OFF \ -DWITH_HTML=OFF \ -DWITH_MAN=OFF \ -DWITH_ZCHUNK=OFF \ -DBUILD_SHARED_LIBS=ON \ -DCMAKE_CXX_FLAGS="-pthread" \ -DPYTHON_DESIRED=3 \ -Wno-dev \ -B libdnf-$LIBDNF_VERSION-build \ -S libdnf-$LIBDNF_VERSION fi cmake --build libdnf-$LIBDNF_VERSION-build cmake --install libdnf-$LIBDNF_VERSION-build DESTDIR="" cmake --install libdnf-$LIBDNF_VERSION-build if [ ! -f $DNF_VERSION.tar.gz ]; then wget https://github.com/rpm-software-management/dnf/archive/$DNF_VERSION.tar.gz tar xf $DNF_VERSION.tar.gz fi if [ ! -f dnf-$DNF_VERSION-build/build.ninja ]; then cmake \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_PREFIX=/usr \ -DPYTHON_DESIRED=3 \ -DWITH_MAN=0 \ -Wno-dev \ -B dnf-$DNF_VERSION-build \ -S dnf-$DNF_VERSION fi cmake --build dnf-$DNF_VERSION-build cmake --install dnf-$DNF_VERSION-build # All python libraries are installed to a location that's not in the default # search path so let's fix that by moving those python files to a location that # is in the default search path. mkdir -p "$DESTDIR"/usr/lib/python3/dist-packages mv "$DESTDIR"/usr/lib/python3.8/site-packages/* "$DESTDIR"/usr/lib/python3/dist-packages ln -sf /usr/bin/dnf-3 "$DESTDIR"/usr/bin/dnf mkosi-12/bin/000077500000000000000000000000001415136147600132005ustar00rootroot00000000000000mkosi-12/bin/mkosi000077500000000000000000000043631415136147600142560ustar00rootroot00000000000000#!/usr/bin/env bash # This script calls the main function of the internal mkosi module, which is # functionally equivalent to calling "python3 -m mkosi". # # The reason for eschewing the setuptools entrypoint mechanism for a custom # script, is that scripts generated via the console_scripts entrypoint are # unable to be called via sudo, which mkosi needs, when installed into a user's # home directory via "python3 -m pip install --user" # # We support installation via: # sudo python3 -m pip install # python3 -m pip install --user # python3 -m pip install --user --editable # /path/to/venv/bin/python3 -m pip install # and running directly from the source checkout. # # In the first and the next-to-last cases this script is a noop because we leave # it up to the python binary to set up its path. In the case of '--user' # installation we prepend PYTHON_PATH with the original user's # ~/.local/lib/pythonX.Y/site-packages or the directory where mkosi has been # cloned to, for "--editable" installations, when this script is run via sudo. # When running from a source checkout, we prepend that directory to the path. PYROOT=$(dirname "$0") if [[ -x "${PYROOT}/python3" ]] then PYTHON="${PYROOT}/python3" SYSTEM_PYTHON_OR_VENV=true else PYTHON=$(command -v python3) SYSTEM_PYTHON_OR_VENV=false fi PYVERSION=$($PYTHON --version | cut -d ' ' -f 2 | cut -d '.' -f 1-2) declare PREPEND_PYTHONPATH if [[ -e "${PYROOT}/../setup.py" ]] then PREPEND_PYTHONPATH="$(dirname ${PYROOT})" elif [[ -n $SUDO_USER ]] && \ [[ -z $VIRTUAL_ENV ]] && \ [[ "$SYSTEM_PYTHON_OR_VENV" == false ]] then OTHER_HOME=$(getent passwd "$SUDO_USER" | cut -d: -f6) SITEDIR="${OTHER_HOME}/.local/lib/python${PYVERSION}/site-packages" if [[ -r "${SITEDIR}/mkosi.egg-link" ]] then PREPEND_PYTHONPATH="$(head -n1 "${SITEDIR}/mkosi.egg-link")" else PREPEND_PYTHONPATH="$SITEDIR" fi export PYTHONDONTWRITEBYTECODE=1 fi if [[ -n "$PREPEND_PYTHONPATH" ]] && [[ -n "$PYTHONPATH" ]] then PYTHONPATH="${PREPEND_PYTHONPATH}:${PYTHONPATH}" export PYTHONPATH elif [[ -n "$PREPEND_PYTHONPATH" ]] then PYTHONPATH="${PREPEND_PYTHONPATH}" export PYTHONPATH fi exec "$PYTHON" -m mkosi "$@" mkosi-12/man/000077500000000000000000000000001415136147600132035ustar00rootroot00000000000000mkosi-12/man/mkosi.1000066400000000000000000002501311415136147600144110ustar00rootroot00000000000000.\" Automatically generated by Pandoc 2.14.0.3 .\" .TH "mkosi" "1" "2016-" "" "" .hy .SH NAME .PP mkosi \[em] Build Bespoke OS Images .SH SYNOPSIS .PP \f[C]mkosi [options\&...] build\f[R] .PP \f[C]mkosi [options\&...] clean\f[R] .PP \f[C]mkosi [options\&...] summary\f[R] .PP \f[C]mkosi [options\&...] shell [command line\&...]\f[R] .PP \f[C]mkosi [options\&...] boot [nspawn settings\&...]\f[R] .PP \f[C]mkosi [options\&...] qemu\f[R] .SH DESCRIPTION .PP \f[C]mkosi\f[R] is a tool for easily building customized OS images. It\[cq]s a fancy wrapper around \f[C]dnf --installroot\f[R], \f[C]debootstrap\f[R], \f[C]pacstrap\f[R] and \f[C]zypper\f[R] that may generate disk images with a number of bells and whistles. .SS Supported output formats .PP The following output formats are supported: .IP \[bu] 2 Raw \f[I]GPT\f[R] disk image, with ext4 as root (\f[I]gpt_ext4\f[R]) .IP \[bu] 2 Raw \f[I]GPT\f[R] disk image, with xfs as root (\f[I]gpt_xfs\f[R]) .IP \[bu] 2 Raw \f[I]GPT\f[R] disk image, with btrfs as root (\f[I]gpt_btrfs\f[R]) .IP \[bu] 2 Raw \f[I]GPT\f[R] disk image, with squashfs as read-only root (\f[I]gpt_squashfs\f[R]) .IP \[bu] 2 Plain squashfs image, without partition table, as read-only root (\f[I]plain_squashfs\f[R]) .IP \[bu] 2 Plain directory, containing the OS tree (\f[I]directory\f[R]) .IP \[bu] 2 btrfs subvolume, with separate subvolumes for \f[C]/var\f[R], \f[C]/home\f[R], \f[C]/srv\f[R], \f[C]/var/tmp\f[R] (\f[I]subvolume\f[R]) .IP \[bu] 2 Tar archive (\f[I]tar\f[R]) .IP \[bu] 2 CPIO archive (\f[I]cpio\f[R]) in the format appropriate for a kernel initrd .PP When a \f[I]GPT\f[R] disk image is created, the following additional options are available: .IP \[bu] 2 A swap partition may be added in .IP \[bu] 2 The image may be made bootable on \f[I]EFI\f[R] and \f[I]BIOS\f[R] systems .IP \[bu] 2 Separate partitions for \f[C]/srv\f[R] and \f[C]/home\f[R] may be added in .IP \[bu] 2 The root, \f[C]/srv\f[R] and \f[C]/home\f[R] partitions may optionally be encrypted with LUKS. .IP \[bu] 2 A dm-verity partition may be added in that adds runtime integrity data for the root partition .SS Other features .IP \[bu] 2 Optionally, create an \f[I]SHA256SUMS\f[R] checksum file for the result, possibly even signed via \f[C]gpg\f[R]. .IP \[bu] 2 Optionally, place a specific \f[C].nspawn\f[R] settings file along with the result. .IP \[bu] 2 Optionally, build a local project\[cq]s \f[I]source\f[R] tree in the image and add the result to the generated image. .IP \[bu] 2 Optionally, share \f[I]RPM\f[R]/\f[I]DEB\f[R] package cache between multiple runs, in order to optimize build speeds. .IP \[bu] 2 Optionally, the resulting image may be compressed with \f[I]XZ\f[R]. .IP \[bu] 2 Optionally, the resulting image may be converted into a \f[I]QCOW2\f[R] file suitable for \f[C]qemu\f[R] storage. .IP \[bu] 2 Optionally, btrfs\[cq] read-only flag for the root subvolume may be set. .IP \[bu] 2 Optionally, btrfs\[cq] compression may be enabled for all created subvolumes. .IP \[bu] 2 By default images are created without all files marked as documentation in the packages, on distributions where the package manager supports this. Use the \f[C]WithDocs=yes\f[R] flag to build an image with docs added. .SS Command Line Verbs .PP The following command line verbs are known: .TP \f[B]\f[CB]build\f[B]\f[R] This builds the image, based on the settings passed in on the command line or read from a \f[C]mkosi.default\f[R] file. This verb is the default if no verb is explicitly specified. This command must be executed as \f[C]root\f[R]. Any arguments passed after \f[C]build\f[R] are passed as arguments to the build script (if there is one). .TP \f[B]\f[CB]clean\f[B]\f[R] Remove build artifacts generated on a previous build. If combined with \f[C]-f\f[R], also removes incremental build cache images. If \f[C]-f\f[R] is specified twice, also removes any package cache. .TP \f[B]\f[CB]summary\f[B]\f[R] Outputs a human-readable summary of all options used for building an image. This will parse the command line and \f[C]mkosi.default\f[R] file as it would do on \f[C]build\f[R], but only output what it is configured for and not actually build anything.\[ga] .TP \f[B]\f[CB]shell\f[B]\f[R] This builds the image if it is not built yet, and then invokes \f[C]systemd-nspawn\f[R] to acquire an interactive shell prompt in it. If this verb is used an optional command line may be specified which is then invoked in place of the shell in the container. Combine this with \f[C]-f\f[R] in order to rebuild the image unconditionally before acquiring the shell, see below. This command must be executed as \f[C]root\f[R]. .TP \f[B]\f[CB]boot\f[B]\f[R] Similar to \f[C]shell\f[R] but boots the image up using \f[C]systemd-nspawn\f[R]. If this verb is used an optional command line may be specified which is passed as \[lq]kernel command line\[rq] to the init system in the image. .TP \f[B]\f[CB]qemu\f[B]\f[R] Similar to \f[C]boot\f[R] but uses \f[C]qemu\f[R] to boot up the image, i.e.\ instead of container virtualization VM virtualization is used. This verb is only supported on images that contain a boot loader, i.e.\ those built with \f[C]Bootable=yes\f[R] (see below). This command must be executed as \f[C]root\f[R] unless the image already exists and \f[C]-f\f[R] is not specified. .TP \f[B]\f[CB]ssh\f[B]\f[R] When the image is built with the \f[C]Ssh=yes\f[R] option, this command connects to a booted (\f[C]boot\f[R], \f[C]qemu\f[R] verbs) container/VM via SSH. Make sure to run \f[C]mkosi ssh\f[R] with the same config as \f[C]mkosi build\f[R] was run with so that it has the necessary information available to connect to the running container/VM via SSH. .TP \f[B]\f[CB]serve\f[B]\f[R] This builds the image if it is not built yet, and then serves the output directory (i.e.\ usually \f[C]mkosi.output/\f[R], see below) via a small embedded HTTP server, listening on port 8081. Combine with \f[C]-f\f[R] in order to rebuild the image unconditionally before serving it. This command is useful for testing network based acquisition of OS images, for example via \f[C]machinectl pull-raw \&...\f[R] and \f[C]machinectl pull-tar \&...\f[R]. .TP \f[B]\f[CB]bump\f[B]\f[R] Determines the current image version string (as configured with \f[C]ImageVersion=\f[R]/\f[C]--image-version=\f[R]), increases its last dot-separated component by one and writes the resulting version string to \f[C]mkosi.version\f[R]. This is useful for implementing a simple versioning scheme: each time this verb is called the version is bumped in preparation for the subsequent build. Note that \f[C]--auto-bump\f[R]/\f[C]-B\f[R] may be used to automatically bump the version after each successful build. .TP \f[B]\f[CB]help\f[B]\f[R] This verb is equivalent to the \f[C]--help\f[R] switch documented below: it shows a brief usage explanation. .SS Execution flow .PP Execution flow for \f[C]mkosi build\f[R]. Columns represent the execution context. Default values/calls are shown in parentheses. When building with \f[C]--incremental\f[R] mkosi creates a cache of the distribution installation for both images if not already existing and replaces the distribution installation in consecutive runs with data from the cached one. .IP .nf \f[C] HOST . BUILD . FINAL . IMAGE . IMAGE . . start . . | . . v . . build script? -------exists-----> copy . | . skeleton trees . | . (mkosi.skeleton/) . none . | . | . v . v . install . skip . distribution, . build image . packages and . | . build packages, . | . run . | . prepare script . | . (mkosi.prepare build) . | . or if incremental . | . use cached build image . | . | . | . v . | . copy . | . build sources . | . (./) . | . | . | . v . | . copy . | . extra trees . | . (mkosi.extra/) . | . | . | . v . | . run . | . postinstall script . | . (mkosi.postinst build) . | . | . | .-------------------------\[aq] . | | . . | v . . | run . . | finalize script . . |(mkosi.finalize build). . | | . . | \[aq]-------------------------. . | . | . | . v . | . run . | . build script . | . (mkosi.build) . | . | . \[aq]-----------------------------------+------------------------. . . | . . v . . copy . . skeleton trees . . (mkosi.skeleton/) . . | . . v . . install . . distribution . . and packages, . . run . . prepare script . . (mkosi.prepare final) . . or if incremental . . use cached final image . . | . . v . . copy . . build results . . | . . v . . copy . . extra trees . . (mkosi.extra/) . . | . . v . . run . . postinstall script . . (mkosi.postinst final) . . | . . v . . | . . perform cleanup . . (remove files, packages, . . package metadata) . . | .--------------------------------------------------\[aq] | . . v . . run . . finalize script . . (mkosi.finalize final) . . | . . .---------\[aq] . . | . . v . . end . . . . HOST . BUILD . FINAL . IMAGE . IMAGE . . \f[R] .fi .SS Configuration Settings .PP The following settings can be set through configuration files (the syntax with \f[C]SomeSetting=value\f[R]) and on the command line (the syntax with \f[C]--some-setting=value\f[R]). For some command line parameters, a single-letter shortcut is also allowed. In the configuration files, the setting must be in the appropriate section, so the settings are grouped by section below. .PP Command line options that take no argument are shown without \[lq]=\[rq] in their long version. In the config files, they should be specified with a boolean argument: either \[lq]1\[rq], \[lq]yes\[rq], or \[lq]true\[rq] to enable, or \[lq]0\[rq], \[lq]no\[rq], \[lq]false\[rq] to disable. .SS [Distribution] Section .TP \f[B]\f[CB]Distribution=\f[B]\f[R], \f[B]\f[CB]--distribution=\f[B]\f[R], \f[B]\f[CB]-d\f[B]\f[R] The distribution to install in the image. Takes one of the following arguments: \f[C]fedora\f[R], \f[C]debian\f[R], \f[C]ubuntu\f[R], \f[C]arch\f[R], \f[C]opensuse\f[R], \f[C]mageia\f[R], \f[C]centos\f[R], \f[C]centos_epel\f[R], \f[C]clear\f[R], \f[C]photon\f[R], \f[C]openmandriva\f[R], \f[C]rocky\f[R], \f[C]rocky_epel\f[R], \f[C]alma\f[R], \f[C]alma_epel\f[R]. If not specified, defaults to the distribution of the host. .TP \f[B]\f[CB]Release=\f[B]\f[R], \f[B]\f[CB]--release=\f[B]\f[R], \f[B]\f[CB]-r\f[B]\f[R] The release of the distribution to install in the image. The precise syntax of the argument this takes depends on the distribution used, and is either a numeric string (in case of Fedora Linux, CentOS, \&..., e.g.\ \f[C]29\f[R]), or a distribution version name (in case of Debian, Ubuntu, \&..., e.g.\ \f[C]artful\f[R]). If neither this option, nor \f[C]Distribution=\f[R] is specified, defaults to the distribution version of the host. If the distribution is specified, defaults to a recent version of it. .TP \f[B]\f[CB]Mirror=\f[B]\f[R], \f[B]\f[CB]--mirror=\f[B]\f[R], \f[B]\f[CB]-m\f[B]\f[R] The mirror to use for downloading the distribution packages. Expects a mirror URL as argument. .TP \f[B]\f[CB]Repositories=\f[B]\f[R], \f[B]\f[CB]--repositories=\f[B]\f[R] Additional package repositories to use during installation. Expects one or more URLs as argument, separated by commas. This option may be used multiple times, in which case the list of repositories to use is combined. Use \[lq]!*\[rq] to remove all repositories from to the list or use e.g.\ \[lq]!repo-url\[rq] to remove just one specific repository. For Arch Linux, additional repositories must be passed in the form \f[C]::\f[R] (e.g.\ \f[C]myrepo::https://myrepo.net\f[R]). .TP \f[B]\f[CB]UseHostRepositories=\f[B]\f[R], \f[B]\f[CB]--use-host-repositories\f[B]\f[R] This option is only applicable for dnf-based distributions: \f[I]CentOS\f[R], \f[I]Fedora Linux\f[R], \f[I]Mageia\f[R], \f[I]Photon\f[R], \f[I]Rocky Linux\f[R], \f[I]Alma Linux\f[R] and \f[I]OpenMandriva\f[R]. Allows use of the host\[cq]s existing dnf repositories. By default, a hardcoded set of default dnf repositories is generated and used. Use \f[C]--repositories=\f[R] to identify a custom set of repositories to be enabled and used for the build. .TP \f[B]\f[CB]Architecture=\f[B]\f[R], \f[B]\f[CB]--architecture=\f[B]\f[R] The architecture to build the image for. Note that this currently only works for architectures compatible with the host\[cq]s architecture. .SS [Output] Section .TP \f[B]\f[CB]Format=\f[B]\f[R], \f[B]\f[CB]--format=\f[B]\f[R], \f[B]\f[CB]-t\f[B]\f[R] The image format type to generate. One of \f[C]directory\f[R] (for generating OS images inside a local directory), \f[C]subvolume\f[R] (similar, but as a btrfs subvolume), \f[C]tar\f[R] (similar, but a tarball of the image is generated), \f[C]cpio\f[R] (similar, but a cpio archive is generated), \f[C]gpt_ext4\f[R] (a block device image with an ext4 file system inside a GPT partition table), \f[C]gpt_xfs\f[R] (similar, but with an xfs file system), \f[C]gpt_btrfs\f[R] (similar, but with an btrfs file system), \f[C]gpt_squashfs\f[R] (similar, but with a squashfs file system), \f[C]plain_squashfs\f[R] (a plain squashfs file system without a partition table). .TP \f[B]\f[CB]ManifestFormat=\f[B]\f[R], \f[B]\f[CB]--manifest-format=\f[B]\f[R] The manifest format type or types to generate. A comma-delimited list consisting of \f[C]json\f[R] (the standard JSON output format that describes the packages installed), \f[C]changelog\f[R] (a human-readable text format designed for diffing). Defaults to \f[C]json\f[R]. .TP \f[B]\f[CB]Output=\f[B]\f[R], \f[B]\f[CB]--output=\f[B]\f[R], \f[B]\f[CB]-o\f[B]\f[R] Path for the output image file to generate. Takes a relative or absolute path where the generated image will be placed. If neither this option nor \f[C]OutputDirectory=\f[R] is used, the image is generated under the name \f[C]image\f[R], but its name suffixed with an appropriate file suffix (e.g.\ \f[C]image.raw.xz\f[R] in case \f[C]gpt_ext4\f[R] is used in combination with \f[C]xz\f[R] compression). If the \f[C]ImageId=\f[R] option is configured it is used instead of \f[C]image\f[R] in the default output name. If an image version is specified via \f[C]ImageVersion=\f[R], it is included in the default name, e.g.\ a specified image version of \f[C]7.8\f[R] might result in an image file name of \f[C]image_7.8.raw.xz\f[R]. .TP \f[B]\f[CB]OutputSplitRoot=\f[B]\f[R], \f[B]\f[CB]--output-split-root=\f[B]\f[R], \f[B]\f[CB]OutputSplitVerify=\f[B]\f[R], \f[B]\f[CB]--output-split-verity=\f[B]\f[R], \f[B]\f[CB]OutputSplitKernel=\f[B]\f[R], \f[B]\f[CB]--output-split-kernel=\f[B]\f[R] Paths for the split-out output image files, when \f[C]SplitArtifacts=yes\f[R] is used. If unspecified, the relevant split artifact files will be named like the main image, but with \f[C].root\f[R], \f[C].verity\f[R], and \f[C].efi\f[R] suffixes inserted (and in turn possibly suffixed by compression suffix, if compression is enabled). .TP \f[B]\f[CB]OutputDirectory=\f[B]\f[R], \f[B]\f[CB]--output-dir=\f[B]\f[R], \f[B]\f[CB]-O\f[B]\f[R] Path to a directory where to place all generated artifacts (i.e.\ the generated image when an output path is not given, \f[C]SHA256SUMS\f[R] file, etc.). If this is not specified and the directory \f[C]mkosi.output/\f[R] exists in the local directory, it is automatically used for this purpose. If the setting is not used and \f[C]mkosi.output/\f[R] does not exist, all output artifacts are placed adjacent to the output image file. .TP \f[B]\f[CB]WorkspaceDirectory=\f[B]\f[R], \f[B]\f[CB]--workspace-dir=\f[B]\f[R] Path to a directory where to store data required temporarily while building the image. This directory should have enough space to store the full OS image, though in most modes the actually used disk space is smaller. If not specified, and \f[C]mkosi.workspace/\f[R] exists in the local directory, it is used for this purpose. Otherwise, a subdirectory in the temporary storage area is used (\f[C]$TMPDIR\f[R] if set, \f[C]/var/tmp/\f[R] otherwise). The data in this directory is removed automatically after each build. It\[cq]s safe to manually remove the contents of this directory should an \f[C]mkosi\f[R] invocation be aborted abnormally (for example, due to reboot/power failure). If the \f[C]btrfs\f[R] output modes are selected this directory must be backed by \f[C]btrfs\f[R] too. .TP \f[B]\f[CB]Force=\f[B]\f[R], \f[B]\f[CB]--force\f[B]\f[R], \f[B]\f[CB]-f\f[B]\f[R] Replace the output file if it already exists, when building an image. By default when building an image and an output artifact already exists \f[C]mkosi\f[R] will refuse operation. Specify this option once to delete all build artifacts from a previous run before re-building the image. If incremental builds are enabled, specifying this option twice will ensure the intermediary cache files are removed, too, before the re-build is initiated. If a package cache is used (also see the \[lq]Files\[rq] section below), specifying this option thrice will ensure the package cache is removed too, before the re-build is initiated. For the \f[C]clean\f[R] operation this option has a slightly different effect: by default the verb will only remove build artifacts from a previous run, when specified once the incremental cache files are deleted too, and when specified twice the package cache is also removed. .PP .TP \f[B]\f[CB]GPTFirstLBA=\f[B]\f[R], \f[B]\f[CB]--gpt-first-lba=\f[B]\f[R] Override the first usable LBA (Logical Block Address) within the GPT header. This defaults to \f[C]2048\f[R], which is actually the desired value. However, some tools, e.g.\ the \f[C]prl_disk_tool\f[R] utility from the Parallels virtualization suite require this to be set to \f[C]34\f[R], otherwise they might fail to resize the disk image and/or partitions inside it. .TP \f[B]\f[CB]Bootable=\f[B]\f[R], \f[B]\f[CB]--bootable\f[B]\f[R], \f[B]\f[CB]-b\f[B]\f[R] Generate a bootable image. By default this will generate an image bootable on UEFI systems. Use \f[C]BootProtocols=\f[R] to select support for a different boot protocol. .TP \f[B]\f[CB]BootProtocols=\f[B]\f[R], \f[B]\f[CB]--boot-protocols=\f[B]\f[R] Pick one or more boot protocols to support when generating a bootable image, as enabled with \f[C]Bootable=\f[R]. Takes a comma-separated list of \f[C]uefi\f[R] or \f[C]bios\f[R]. May be specified more than once in which case the specified lists are merged. If \f[C]uefi\f[R] is specified the \f[C]sd-boot\f[R] UEFI boot loader is used, if \f[C]bios\f[R] is specified the GNU Grub boot loader is used. Use \[lq]!*\[rq] to remove all previously added protocols or \[lq]!protocol\[rq] to remove one protocol. .TP \f[B]\f[CB]KernelCommandLine=\f[B]\f[R], \f[B]\f[CB]--kernel-command-line=\f[B]\f[R] Use the specified kernel command line when building bootable images. By default command line arguments get appended. To remove all arguments from the current list pass \[lq]!*\[rq]. To remove specific arguments add a space separated list of \[lq]!\[rq] prefixed arguments. For example adding \[lq]!* console=ttyS0 rw\[rq] to a \f[C]mkosi.default\f[R] file or the command line arguments passes \[lq]console=ttyS0 rw\[rq] to the kernel in any case. Just adding \[lq]console=ttyS0 rw\[rq] would append these two arguments to the kernel command line created by lower priority configuration files or previous \f[C]KernelCommandLine=\f[R] command line arguments. .TP \f[B]\f[CB]SecureBoot=\f[B]\f[R], \f[B]\f[CB]--secure-boot\f[B]\f[R] Sign the resulting kernel/initrd image for UEFI SecureBoot. .TP \f[B]\f[CB]SecureBootKey=\f[B]\f[R], \f[B]\f[CB]--secure-boot-key=\f[B]\f[R] Path to the PEM file containing the secret key for signing the UEFI kernel image, if \f[C]SecureBoot=\f[R] is used. .TP \f[B]\f[CB]SecureBootCertificate=\f[B]\f[R], \f[B]\f[CB]--secure-boot-certificate=\f[B]\f[R] Path to the X.509 file containing the certificate for the signed UEFI kernel image, if \f[C]SecureBoot=\f[R] is used. .TP \f[B]\f[CB]SecureBootCommonName=\f[B]\f[R], \f[B]\f[CB]--secure-boot-common-name=\f[B]\f[R] Common name to be used when generating SecureBoot keys via mkosi\[cq]s \f[C]genkey\f[R] command. Defaults to \f[C]mkosi of %u\f[R], where \f[C]%u\f[R] expands to the username of the user invoking mkosi. .TP \f[B]\f[CB]SecureBootValidDays=\f[B]\f[R], \f[B]\f[CB]--secure-boot-valid-days=\f[B]\f[R] Number of days that the keys should remain valid when generating SecureBoot keys via mkosi\[cq]s \f[C]genkey\f[R] command. Defaults to two years (730 days). .TP \f[B]\f[CB]ReadOnly=\f[B]\f[R], \f[B]\f[CB]--read-only\f[B]\f[R] Set the read-only flag on the root partition in the partition table. Only applies to \f[C]gpt_ext4\f[R], \f[C]gpt_xfs\f[R], \f[C]gpt_btrfs\f[R], \f[C]subvolume\f[R] output formats, and is implied on \f[C]gpt_squashfs\f[R] and \f[C]plain_squashfs\f[R]. The read-only flag is essentially a hint to tools using the image (see https://systemd.io/DISCOVERABLE_PARTITIONS/). In particular, all systemd tools like \f[C]systemd-nspawn\f[R] and \f[C]systemd-gpt-auto-generator\f[R] will mount such partitions read-only, but tools from other project may ignore the flag. .TP \f[B]\f[CB]Minimize=\f[B]\f[R], \f[B]\f[CB]--minimize\f[B]\f[R] Attempt to make the resulting root file system as small as possible by removing free space from the file system. Only supported for \f[C]gpt_ext4\f[R] and \f[C]gpt_btrfs\f[R]. For ext4 this relies on \f[C]resize2fs -M\f[R], which reduces the free disk space but is not perfect and generally leaves some free space. For btrfs the results are optimal and no free space is left. .TP \f[B]\f[CB]Encrypt=\f[B]\f[R], \f[B]\f[CB]--encrypt\f[B]\f[R] Encrypt all partitions in the file system or just the root file system. Takes either \f[C]all\f[R] or \f[C]data\f[R] as argument. If \f[C]all\f[R], the root, \f[C]/home\f[R] and \f[C]/srv\f[R] file systems will be encrypted using dm-crypt/LUKS (with its default settings). If \f[C]data\f[R], the root file system will be left unencrypted, but \f[C]/home\f[R] and \f[C]/srv\f[R] will be encrypted. The passphrase to use is read from the \f[C]mkosi.passphrase\f[R] file in the current working directory. Note that the UEFI System Partition (ESP) containing the boot loader and kernel to boot is never encrypted since it needs to be accessible by the firmware. .TP \f[B]\f[CB]Verity=\f[B]\f[R], \f[B]\f[CB]--verity\f[B]\f[R] Add a \[lq]Verity\[rq] integrity partition to the image. Takes a boolean or the special value \f[C]signed\f[R], and defaults to disabled. If enabled, the root partition (or \f[C]/usr/\f[R] partition, in case \f[C]UsrOnly=\f[R] is enabled) is protected with \f[C]dm-verity\f[R] against offline modification, the verification data is placed in an additional GPT partition. Implies \f[C]ReadOnly=yes\f[R]. If this is enabled, the Verity root hash is written to an output file with \f[C].roothash\f[R] or \f[C].usrhash\f[R] suffix. If set to \f[C]signed\f[R], Verity is also enabled, but the resulting root hash is then also signed (in PKCS#7 format) with the signature key configured with \f[C]SecureBootKey=\f[R]. Or in other words: the SecureBoot key pair is then used to both sign the kernel, if that is enabled, and the root/\f[C]/usr/\f[R] file system. This signature is then stored in an additional output file with the \f[C].roothash.p7s\f[R] or \f[C].usrhash.p7s\f[R] suffix in DER format. It is also written to an additional partition in the image. The latter allows generating self-contained signed disk images, implementing the Verity provisions described in the Discoverable Partitions Specification (https://systemd.io/DISCOVERABLE_PARTITIONS). .TP \f[B]\f[CB]CompressFs=\f[B]\f[R], \f[B]\f[CB]--compress-fs=\f[B]\f[R] Enable or disable internal compression in the file system. Only applies to output formats with squashfs or btrfs. Takes one of \f[C]zlib\f[R], \f[C]lzo\f[R], \f[C]zstd\f[R], \f[C]lz4\f[R], \f[C]xz\f[R] or a boolean value as argument. If the latter is used compression is enabled/disabled and the default algorithm is used. In case of the \f[C]squashfs\f[R] output formats compression is implied, but this option may be used to select the algorithm. .TP \f[B]\f[CB]CompressOutput=\f[B]\f[R], \f[B]\f[CB]--compress-output=\f[B]\f[R] Configure compression for the resulting image or archive. The argument can be either a boolean or a compression algorithm (\f[C]xz\f[R], \f[C]zstd\f[R]). \f[C]xz\f[R] compression is used by default. Note that when applied to block device image types this means the image cannot be started directly but needs to be decompressed first. This also means that the \f[C]shell\f[R], \f[C]boot\f[R], \f[C]qemu\f[R] verbs are not available when this option is used. Implied for \f[C]tar\f[R] and \f[C]cpio\f[R]. .TP \f[B]\f[CB]Compress=\f[B]\f[R], \f[B]\f[CB]--compress=\f[B]\f[R] Enable compression. Using this option is equivalent to either \f[C]CompressFs=\f[R] or \f[C]CompressOutput=\f[R]; the appropriate type of compression is selected automatically. .TP \f[B]\f[CB]Mksquashfs=\f[B]\f[R], \f[B]\f[CB]--mksquashfs=\f[B]\f[R] Set the path to the \f[C]mksquashfs\f[R] executable to use. This is useful in case the parameters for the tool shall be augmented, as the tool may be replaced by a script invoking it with the right parameters, this way. .TP \f[B]\f[CB]QCow2=\f[B]\f[R], \f[B]\f[CB]--qcow2\f[B]\f[R] Encode the resulting image as QEMU QCOW2 image. This only applies to \f[C]gpt_ext4\f[R], \f[C]gpt_xfs\f[R], \f[C]gpt_btrfs\f[R], \f[C]gpt_squashfs\f[R]. QCOW2 images can be read natively by \f[C]qemu\f[R], but not by the Linux kernel. This means the \f[C]shell\f[R] and \f[C]boot\f[R] verbs are not available when this option is used, however \f[C]qemu\f[R] will work. .TP \f[B]\f[CB]Hostname=\f[B]\f[R], \f[B]\f[CB]--hostname=\f[B]\f[R] Set the image\[cq]s hostname to the specified name. .TP \f[B]\f[CB]ImageVersion=\f[B]\f[R], \f[B]\f[CB]--image-version=\f[B]\f[R] Configure the image version. This accepts any string, but it is recommended to specify a series of dot separated components. The version may also be configured in a file \f[C]mkosi.version\f[R] in which case it may be conveniently managed via the \f[C]bump\f[R] verb or the \f[C]--auto-bump\f[R] switch. When specified the image version is included in the default output file name, i.e.\ instead of \f[C]image.raw\f[R] the default will be \f[C]image_0.1.raw\f[R] for version \f[C]0.1\f[R] of the image, and similar. The version is also passed via the \f[C]$IMAGE_VERSION\f[R] to any build scripts invoked (which may be useful to patch it into \f[C]/etc/os-release\f[R] or similar, in particular the \f[C]IMAGE_VERSION=\f[R] field of it). .TP \f[B]\f[CB]ImageId=\f[B]\f[R], \f[B]\f[CB]--image-id=\f[B]\f[R] Configure the image identifier. This accepts a freeform string that shall be used to identify the image with. If set the default output file will be named after it (possibly suffixed with the version). If this option is used the root, \f[C]/usr/\f[R] and Verity partitions in the image will have their labels set to this (possibly suffixed by the image version). The identifier is also passed via the \f[C]$IMAGE_ID\f[R] to any build scripts invoked (which may be useful to patch it into \f[C]/etc/os-release\f[R] or similar, in particular the \f[C]IMAGE_ID=\f[R] field of it). .TP \f[B]\f[CB]WithUnifiedKernelImages=\f[B]\f[R], \f[B]\f[CB]--without-unified-kernel-images\f[B]\f[R] If specified, mkosi does not build unified kernel images and instead installs kernels with a separate initrd and boot loader config to the efi or bootloader partition. .TP \f[B]\f[CB]HostonlyInitrd=\f[B]\f[R], \f[B]\f[CB]--hostonly-initrd\f[B]\f[R] If specified, mkosi will run the tool to create the initrd such that a non-generic initrd is created that will only be able to run on the system mkosi is run on. Currently mkosi uses dracut for all supported distributions except Clear Linux and this option translates to enabling dracut\[cq]s hostonly option. .TP \f[B]\f[CB]UsrOnly=\f[B]\f[R], \f[B]\f[CB]--usr-only\f[B]\f[R] If specified, \f[C]mkosi\f[R] will only add the \f[C]/usr/\f[R] directory tree (instead of the whole root file system) to the image. This is useful for fully stateless systems that come up pristine on every single boot, where \f[C]/etc/\f[R] and \f[C]/var/\f[R] are populated by \f[C]systemd-tmpfiles\f[R]/\f[C]systemd-sysusers\f[R] and related calls, or systems that are originally shipped without a root file system, but where \f[C]systemd-repart\f[R] adds one on the first boot. .TP \f[B]\f[CB]SplitArtifacts=\f[B]\f[R], \f[B]\f[CB]--split-artifacts\f[B]\f[R] If specified and building an image with a partition table, also write out the root file system partition, its Verity partition (if configured) and the generated unified kernel (if configured) into separate output files. This is useful in A/B update scenarios where an existing disk image shall be augmented with a new version of a root or \f[C]/usr\f[R] partition along with its Verity partition and unified kernel. .TP \f[B]\f[CB]NoChown=\f[B]\f[R], \f[B]\f[CB]--no-chown\f[B]\f[R] By default, if \f[C]mkosi\f[R] is run inside a \f[C]sudo\f[R] environment all generated artifacts have their UNIX user/group ownership changed to the user which invoked \f[C]sudo\f[R]. With this option this may be turned off and all generated files are owned by \f[C]root\f[R]. .TP \f[B]\f[CB]TarStripSELinuxContext=\f[B]\f[R], \f[B]\f[CB]--tar-strip-selinux-context\f[B]\f[R] If running on a SELinux-enabled system (Fedora Linux, CentOS, Rocky Linux, Alma Linux), files inside the container are tagged with SELinux context extended attributes (\f[C]xattrs\f[R]), which may interfere with host SELinux rules in building or further container import stages. This option strips SELinux context attributes from the resulting tar archive. .SS [Content] Section .TP \f[B]\f[CB]BasePackages=\f[B]\f[R], \f[B]\f[CB]--base-packages\f[B]\f[R] Takes a boolean or the special value \f[C]conditional\f[R]. If true, automatically install packages to ensure basic functionality, as appropriate for the given image type. For example, \f[C]systemd\f[R] is always included, \f[C]systemd-udev\f[R] and \f[C]dracut\f[R] if the image is bootable, and so on. If false, only packages specified with \f[C]Packages=\f[R] will be installed. If \f[C]conditional\f[R], the list of packages to install will be extended with boolean dependencies (c.f. https://rpm.org/user_doc/boolean_dependencies.html), to install specific packages when \f[I]other\f[R] packages are in the list. For example, \f[C]systemd-udev\f[R] may be automatically included if the image is bootable and \f[C]systemd\f[R] is installed. With this, various \[lq]base\[rq] packages still need to be specified if they should be included, but the corresponding \[lq]extension\[rq] packages will be added automatically when appropriate. This feature depends on support in the package manager, so it is not implemented for all distributions. .TP \f[B]\f[CB]Packages=\f[B]\f[R], \f[B]\f[CB]--package=\f[B]\f[R], \f[B]\f[CB]-p\f[B]\f[R] Install the specified distribution packages (i.e.\ RPM, DEB, \&...) in the image. Takes a comma separated list of package specifications. This option may be used multiple times in which case the specified package lists are combined. Packages specified this way will be installed both in the development and the final image. Use \f[C]BuildPackages=\f[R] to specify packages that shall only be used for the image generated in the build image, but that shall not appear in the final image. The types and syntax of \[lq]package specifications\[rq] that are allowed depend on the package installer (e.g.\ \f[C]dnf\f[R] or \f[C]yum\f[R] for \f[C]rpm\f[R]-based distros or \f[C]apt\f[R] for \f[C]deb\f[R]-based distros), but may include package names, package names with version and/or architecture, package name globs, paths to packages in the file system, package groups, and virtual provides, including file paths. To remove a package e.g.\ added by a \f[C]mkosi.default\f[R] configuration file prepend the package name with \f[C]!\f[R]. For example -p \[lq]!apache2\[rq] would remove the apache2 package. To replace the apache2 package by the httpd package just add -p \[lq]!apache2,httpd\[rq] to the command line arguments. To remove all packages use \[lq]!*\[rq]. Example: when using an distro that uses \f[C]dnf\f[R], \f[C]Packages=meson libfdisk-devel.i686 git-* prebuilt/rpms/systemd-249-rc1.local.rpm /usr/bin/ld \[at]development-tools python3dist(mypy)\f[R] would install the \f[C]meson\f[R] package (in the latest version), the 32-bit version of the \f[C]libfdisk-devel\f[R] package, all available packages that start with the \f[C]git-\f[R] prefix, a \f[C]systemd\f[R] rpm from the local file system, one of the packages that provides \f[C]/usr/bin/ld\f[R], the packages in the \[lq]Development Tools\[rq] group, and the package that contains the \f[C]mypy\f[R] python module. .TP \f[B]\f[CB]WithDocs=\f[B]\f[R], \f[B]\f[CB]--with-docs\f[B]\f[R] Include documentation in the image built. By default if the underlying distribution package manager supports it documentation is not included in the image built. The \f[C]$WITH_DOCS\f[R] environment variable passed to the \f[C]mkosi.build\f[R] script indicates whether this option was used or not. .TP \f[B]\f[CB]WithTests=\f[B]\f[R], \f[B]\f[CB]--without-tests\f[B]\f[R], \f[B]\f[CB]-T\f[B]\f[R] If set to false (or when the command-line option is used), the \f[C]$WITH_TESTS\f[R] environment variable is set to \f[C]0\f[R] when the \f[C]mkosi.build\f[R] script is invoked. This is supposed to be used by the build script to bypass any unit or integration tests that are normally run during the source build process. Note that this option has no effect unless the \f[C]mkosi.build\f[R] build script honors it. .TP \f[B]\f[CB]Cache=\f[B]\f[R], \f[B]\f[CB]--cache=\f[B]\f[R] Takes a path to a directory to use as package cache for the distribution package manager used. If this option is not used, but a \f[C]mkosi.cache/\f[R] directory is found in the local directory it is automatically used for this purpose. The directory configured this way is mounted into both the development and the final image while the package manager is running. .TP \f[B]\f[CB]SkeletonTree=\f[B]\f[R], \f[B]\f[CB]--skeleton-tree=\f[B]\f[R] Takes a path to a directory to copy into the OS tree before invoking the package manager. Use this to insert files and directories into the OS tree before the package manager installs any packages. If this option is not used, but the \f[C]mkosi.skeleton/\f[R] directory is found in the local directory it is automatically used for this purpose (also see the \[lq]Files\[rq] section below). Instead of a directory, a tar file may be provided. In this case it is unpacked into the OS tree before the package manager is invoked. This mode of operation allows setting permissions and file ownership explicitly, in particular for projects stored in a version control system such as \f[C]git\f[R] which retain full file ownership and access mode metadata for committed files. If the tar file \f[C]mkosi.skeleton.tar\f[R] is found in the local directory it will be automatically used for this purpose. .TP \f[B]\f[CB]ExtraTree=\f[B]\f[R], \f[B]\f[CB]--extra-tree=\f[B]\f[R] Takes a path to a directory to copy on top of the OS tree the package manager generated. Use this to override any default configuration files shipped with the distribution. If this option is not used, but the \f[C]mkosi.extra/\f[R] directory is found in the local directory it is automatically used for this purpose (also see the \[lq]Files\[rq] section below). As with the skeleton tree logic above, instead of a directory, a tar file may be provided too. \f[C]mkosi.skeleton.tar\f[R] will be automatically used if found in the local directory. .TP \f[B]\f[CB]CleanPackageMetadata=\f[B]\f[R], \f[B]\f[CB]--clean-package-metadata=\f[B]\f[R] Enable/disable removal of package manager databases, caches, and logs at the end of installation. Can be specified as true, false, or \[lq]\f[C]auto\f[R]\[rq] (the default). With \[lq]\f[C]auto\f[R]\[rq], files will be removed if the respective package manager executable is \f[I]not\f[R] present at the end of the installation. .TP \f[B]\f[CB]RemoveFiles=\f[B]\f[R], \f[B]\f[CB]--remove-files=\f[B]\f[R] Takes a comma-separated list of globs. Files in the image matching the globs will be purged at the end. .TP \f[B]\f[CB]RemovePackages=\f[B]\f[R], \f[B]\f[CB]--remove-package=\f[B]\f[R] Takes a comma-separated list of package specifications for removal, in the same format as \f[C]Packages=\f[R]. The removal will be performed as one of the last steps. This step is skipped if \f[C]CleanPackageMetadata=no\f[R] is used. This option is currently only implemented for distributions using \f[C]dnf\f[R]. .TP \f[B]\f[CB]Environment=\f[B]\f[R], \f[B]\f[CB]--environment=\f[B]\f[R] Adds variables to the environment that the build/prepare/postinstall/finalize scripts are executed with. Takes a space-separated list of variable assignments or just variable names. In the latter case, the values of those variables will be passed through from the environment in which \f[C]mkosi\f[R] was invoked. This option may be specified more than once, in which case all listed variables will be set. If the same variable is set twice, the later setting overrides the earlier one. .TP \f[B]\f[CB]BuildSources=\f[B]\f[R], \f[B]\f[CB]--build-sources=\f[B]\f[R] Takes a path to a source tree to copy into the development image, if the build script is used. This only applies if a build script is used, and defaults to the local directory. Use \f[C]SourceFileTransfer=\f[R] to configure how the files are transferred from the host to the container image. .TP \f[B]\f[CB]BuildDirectory=\f[B]\f[R], \f[B]\f[CB]--build-dir=\f[B]\f[R] Takes a path of a directory to use as build directory for build systems that support out-of-tree builds (such as Meson). The directory used this way is shared between repeated builds, and allows the build system to reuse artifacts (such as object files, executable, \&...) generated on previous invocations. This directory is mounted into the development image when the build script is invoked. The build script can find the path to this directory in the \f[C]$BUILDDIR\f[R] environment variable. If this option is not specified, but a directory \f[C]mkosi.builddir/\f[R] exists in the local directory it is automatically used for this purpose (also see the \[lq]Files\[rq] section below). .TP \f[B]\f[CB]IncludeDirectory=\f[B]\f[R], \f[B]\f[CB]--include-directory=\f[B]\f[R] Takes a path of a directory to use as the include directory. This directory is mounted at \f[C]/usr/include\f[R] when building the build image and running the build script. This means all include files installed to \f[C]/usr/include\f[R] will be stored in this directory. This is useful to make include files available on the host system for use by language servers to provide code completion. If this option is not specified, but a directory \f[C]mkosi.includedir/\f[R] exists in the local directory, it is automatically used for this purpose (also see the \[lq]Files\[rq] section below). .TP \f[B]\f[CB]InstallDirectory=\f[B]\f[R], \f[B]\f[CB]--install-directory=\f[B]\f[R] Takes a path of a directory to use as the install directory. The directory used this way is shared between builds and allows the build system to not have to reinstall files that were already installed by a previous build and didn\[cq]t change. The build script can find the path to this directory in the \f[C]$DESTDIR\f[R] environment variable. If this option is not specified, but a directory \f[C]mkosi.installdir\f[R] exists in the local directory, it is automatically used for this purpose (also see the \[lq]Files\[rq] section below). .TP \f[B]\f[CB]BuildPackages=\f[B]\f[R], \f[B]\f[CB]--build-package=\f[B]\f[R] Similar to \f[C]Packages=\f[R], but configures packages to install only in the first phase of the build, into the development image. This option should be used to list packages containing header files, compilers, build systems, linkers and other build tools the \f[C]mkosi.build\f[R] script requires to operate. Note that packages listed here are only included in the image created during the first phase of the build, and are absent in the final image. Use \f[C]Packages=\f[R] to list packages that shall be included in both. Packages are appended to the list. Packages prefixed with \[lq]!\[rq] are removed from the list. \[lq]!*\[rq] removes all packages from the list. .TP \f[B]\f[CB]Password=\f[B]\f[R], \f[B]\f[CB]--password=\f[B]\f[R] Set the password of the \f[C]root\f[R] user. By default the \f[C]root\f[R] account is locked. If this option is not used, but a file \f[C]mkosi.rootpw\f[R] exists in the local directory, the root password is automatically read from it. .TP \f[B]\f[CB]PasswordIsHashed=\f[B]\f[R], \f[B]\f[CB]--password-is-hashed\f[B]\f[R] Indicate that the password supplied for the \f[C]root\f[R] user has already been hashed, so that the string supplied with \f[C]Password=\f[R] or \f[C]mkosi.rootpw\f[R] will be written to \f[C]/etc/shadow\f[R] literally. .TP \f[B]\f[CB]Autologin=\f[B]\f[R], \f[B]\f[CB]--autologin\f[B]\f[R] Enable autologin for the \f[C]root\f[R] user on \f[C]/dev/pts/0\f[R] (nspawn), \f[C]/dev/tty1\f[R] (QEMU) and \f[C]/dev/ttyS0\f[R] (QEMU with \f[C]QemuHeadless=yes\f[R]) by patching \f[C]/etc/pam.d/login\f[R]. .TP \f[B]\f[CB]SkipFinalPhase=\f[B]\f[R], \f[B]\f[CB]--skip-final-phase=\f[B]\f[R] Causes the (second) final image build stage to be skipped. This is useful in combination with a build script, for when you care about the artifacts that were created locally in \f[C]$BUILDDIR\f[R], but ultimately plan to discard the final image. .TP \f[B]\f[CB]BuildScript=\f[B]\f[R], \f[B]\f[CB]--build-script=\f[B]\f[R] Takes a path to an executable that is used as build script for this image. If this option is used the build process will be two-phased instead of single-phased. The specified script is copied onto the development image and executed inside an \f[C]systemd-nspawn\f[R] container environment. If this option is not used, but the \f[C]mkosi.build\f[R] file found in the local directory it is automatically used for this purpose (also see the \[lq]Files\[rq] section below). Specify an empty value to disable automatic detection. .TP \f[B]\f[CB]PrepareScript=\f[B]\f[R], \f[B]\f[CB]--prepare-script=\f[B]\f[R] Takes a path to an executable that is invoked inside the image right after installing the software packages. It is the last step before the image is cached (if incremental mode is enabled). This script is invoked inside a \f[C]systemd-nspawn\f[R] container environment, and thus does not have access to host resources. If this option is not used, but an executable script \f[C]mkosi.prepare\f[R] is found in the local directory, it is automatically used for this purpose. Specify an empty value to disable automatic detection. .TP \f[B]\f[CB]PostInstallationScript=\f[B]\f[R], \f[B]\f[CB]--postinst-script=\f[B]\f[R] Takes a path to an executable that is invoked inside the final image right after copying in the build artifacts generated in the first phase of the build. This script is invoked inside a \f[C]systemd-nspawn\f[R] container environment, and thus does not have access to host resources. If this option is not used, but an executable \f[C]mkosi.postinst\f[R] is found in the local directory, it is automatically used for this purpose. Specify an empty value to disable automatic detection. .TP \f[B]\f[CB]FinalizeScript=\f[B]\f[R], \f[B]\f[CB]--finalize-script=\f[B]\f[R] Takes a path to an executable that is invoked outside the final image right after copying in the build artifacts generated in the first phase of the build, and after having executed the \f[C]mkosi.postinst\f[R] script (see \f[C]PostInstallationScript=\f[R]). This script is invoked directly in the host environment, and hence has full access to the host\[cq]s resources. If this option is not used, but an executable \f[C]mkosi.finalize\f[R] is found in the local directory, it is automatically used for this purpose. Specify an empty value to disable automatic detection. .TP \f[B]\f[CB]SourceFileTransfer=\f[B]\f[R], \f[B]\f[CB]--source-file-transfer=\f[B]\f[R] Configures how the source file tree (as configured with \f[C]BuildSources=\f[R]) is transferred into the container image during the first phase of the build. Takes one of \f[C]copy-all\f[R] (to copy all files from the source tree), \f[C]copy-git-cached\f[R] (to copy only those files \f[C]git-ls-files --cached\f[R] lists), \f[C]copy-git-others\f[R] (to copy only those files \f[C]git-ls-files --others\f[R] lists), \f[C]mount\f[R] to bind mount the source tree directly. Defaults to \f[C]copy-git-cached\f[R] if a \f[C]git\f[R] source tree is detected, otherwise \f[C]copy-all\f[R]. When you specify \f[C]copy-git-more\f[R], it is the same as \f[C]copy-git-cached\f[R], except it also includes the \f[C].git/\f[R] directory. .TP \f[B]\f[CB]SourceFileTransferFinal=\f[B]\f[R], \f[B]\f[CB]--source-file-transfer-final=\f[B]\f[R] Same as \f[C]SourceFileTransfer=\f[R], but for the final image instead of the build image. Takes the same values as \f[C]SourceFileFransfer=\f[R] except \f[C]mount\f[R]. By default, sources are not copied into the final image. .TP \f[B]\f[CB]SourceResolveSymlinks=\f[B]\f[R], \f[B]\f[CB]--source-resolve-symlinks\f[B]\f[R] If given, any symbolic links in the source file tree are resolved and the file contents are copied to the build image. If not given, they are left as symbolic links. This only applies if \f[C]SourceFileTransfer=\f[R] is \f[C]copy-all\f[R]. Defaults to leaving them as symbolic links. .TP \f[B]\f[CB]SourceResolveSymlinksFinal=\f[B]\f[R], \f[B]\f[CB]--source-resolve-symlinks-final\f[B]\f[R] Same as \f[C]SourceResolveSymlinks=\f[R], but for the final image instead of the build image. .TP \f[B]\f[CB]WithNetwork=\f[B]\f[R], \f[B]\f[CB]--with-network\f[B]\f[R] When true, enables network connectivity while the build script \f[C]mkosi.build\f[R] is invoked. By default, the build script runs with networking turned off. The \f[C]$WITH_NETWORK\f[R] environment variable is passed to the \f[C]mkosi.build\f[R] build script indicating whether the build is done with or without network. If specified as \f[C]never\f[R], the package manager is instructed not to contact the network for updating package data. This provides a minimal level of reproducibility, as long as the package data cache is already fully populated. .TP \f[B]\f[CB]Settings=\f[B]\f[R], \f[B]\f[CB]--settings=\f[B]\f[R] Specifies a \f[C].nspawn\f[R] settings file for \f[C]systemd-nspawn\f[R] to use in the \f[C]boot\f[R] and \f[C]shell\f[R] verbs, and to place next to the generated image file. This is useful to configure the \f[C]systemd-nspawn\f[R] environment when the image is run. If this setting is not used but an \f[C]mkosi.nspawn\f[R] file found in the local directory it is automatically used for this purpose. .SS [Partitions] Section .TP \f[B]\f[CB]BaseImage=\f[B]\f[R], \f[B]\f[CB]--base-image=\f[B]\f[R] Use the specified directory or file system image as the base image, and create the output image that consists only of changes from this base. The base image is attached as the lower file system in an overlayfs structure, and the output filesystem becomes the upper layer, initially empty. Thus files that are not modified compared to the base image are not present in the output image. This option may be used to create systemd \[lq]system extensions\[rq] or portable services. See https://systemd.io/PORTABLE_SERVICES/#extension-images for more information. .TP \f[B]\f[CB]RootSize=\f[B]\f[R], \f[B]\f[CB]--root-size=\f[B]\f[R] Takes a size in bytes for the root file system. The specified numeric value may be suffixed with \f[C]K\f[R], \f[C]M\f[R], \f[C]G\f[R] to indicate kilo-, mega- and gigabytes (all to the base of 1024). This applies to output formats \f[C]gpt_ext4\f[R], \f[C]gpt_xfs\f[R], \f[C]gpt_btrfs\f[R]. Defaults to 3G. .TP \f[B]\f[CB]ESPSize=\f[B]\f[R], \f[B]\f[CB]--esp-size=\f[B]\f[R] Similar to \f[C]RootSize=\f[R], configures the size of the UEFI System Partition (ESP). This is only relevant if the \f[C]Bootable=\f[R] option is used to generate a bootable image. Defaults to 256 MB. .TP \f[B]\f[CB]SwapSize=\f[B]\f[R], \f[B]\f[CB]--swap-size=\f[B]\f[R] Similar to \f[C]RootSize=\f[R], configures the size of a swap partition on the image. If omitted, no swap partition is created. .TP \f[B]\f[CB]HomeSize=\f[B]\f[R], \f[B]\f[CB]--home-size=\f[B]\f[R] Similar to \f[C]RootSize=\f[R], configures the size of the \f[C]/home\f[R] partition. If omitted, no separate \f[C]/home\f[R] partition is created. .TP \f[B]\f[CB]SrvSize=\f[B]\f[R], \f[B]\f[CB]--srv-size=\f[B]\f[R] Similar to \f[C]RootSize=\f[R], configures the size of the \f[C]/srv\f[R] partition. If omitted, no separate \f[C]/srv\f[R] partition is created. .SS [Validation] Section .TP \f[B]\f[CB]Checksum=\f[B]\f[R], \f[B]\f[CB]--checksum\f[B]\f[R] Generate a \f[C]SHA256SUMS\f[R] file of all generated artifacts after the build is complete. .TP \f[B]\f[CB]Sign=\f[B]\f[R], \f[B]\f[CB]--sign\f[B]\f[R] Sign the generated \f[C]SHA256SUMS\f[R] using \f[C]gpg\f[R] after completion. .TP \f[B]\f[CB]Key=\f[B]\f[R], \f[B]\f[CB]--key=\f[B]\f[R] Select the \f[C]gpg\f[R] key to use for signing \f[C]SHA256SUMS\f[R]. This key must be already present in the \f[C]gpg\f[R] keyring. .TP \f[B]\f[CB]BMap=\f[B]\f[R], \f[B]\f[CB]--bmap\f[B]\f[R] Generate a \f[C]bmap\f[R] file for usage with \f[C]bmaptool\f[R] from the generated image file. .SS [Host] Section .TP \f[B]\f[CB]ExtraSearchPaths=\f[B]\f[R], \f[B]\f[CB]--extra-search-paths=\f[B]\f[R] List of colon-separated paths to look for tools in, before using the regular \f[C]$PATH\f[R] search path. .TP \f[B]\f[CB]QemuHeadless=\f[B]\f[R], \f[B]\f[CB]--qemu-headless=\f[B]\f[R] When used with the \f[C]build\f[R] verb, this option adds \f[C]console=ttyS0\f[R] to the image\[cq]s kernel command line and sets the terminal type of the serial console in the image to the terminal type of the host (more specifically, the value of the \f[C]$TERM\f[R] environment variable passed to mkosi). This makes sure that all terminal features such as colors and shortcuts still work as expected when connecting to the qemu VM over the serial console (for example via \f[C]-nographic\f[R]). When used with the \f[C]qemu\f[R] verb, this option adds the \f[C]-nographic\f[R] option to \f[C]qemu\f[R]\[cq]s command line so qemu starts a headless vm and connects to its serial console from the current terminal instead of launching the VM in a separate window. .TP \f[B]\f[CB]QemuSmp=\f[B]\f[R], \f[B]\f[CB]--qemu-smp=\f[B]\f[R] When used with the \f[C]qemu\f[R] verb, this options sets \f[C]qemu\f[R]\[cq]s \f[C]-smp\f[R] argument which controls the number of guest\[cq]s CPUs. Defaults to \f[C]2\f[R]. .TP \f[B]\f[CB]QemuMem=\f[B]\f[R], \f[B]\f[CB]--qemu-mem=\f[B]\f[R] When used with the \f[C]qemu\f[R] verb, this options sets \f[C]qemu\f[R]\[cq]s \f[C]-m\f[R] argument which controls the amount of guest\[cq]s RAM. Defaults to \f[C]1G\f[R]. .TP \f[B]\f[CB]NetworkVeth=\f[B]\f[R], \f[B]\f[CB]--network-veth\f[B]\f[R] When used with the boot or qemu verbs, this option creates a virtual ethernet link between the host and the container/VM. The host interface is automatically picked up by systemd-networkd as documented in systemd-nspawn\[cq]s man page: https://www.freedesktop.org/software/systemd/man/systemd-nspawn.html#-n .TP \f[B]\f[CB]Ephemeral=\f[B]\f[R], \f[B]\f[CB]--ephemeral\f[B]\f[R] When used with the \f[C]shell\f[R], \f[C]boot\f[R], or \f[C]qemu\f[R] verbs, this option runs the specified verb on a temporary snapshot of the output image that is removed immediately when the container terminates. Taking the temporary snapshot is more efficient on file systems that support subvolume snapshots or `reflinks' natively (\[lq]btrfs\[rq] or new \[lq]xfs\[rq]) than on more traditional file systems that do not (\[lq]ext4\[rq]). .TP \f[B]\f[CB]Ssh=\f[B]\f[R], \f[B]\f[CB]--ssh\f[B]\f[R] If specified, installs and enables \f[C]sshd\f[R] in the final image and generates a SSH keypair and adds the public key to root\[cq]s \f[C]authorized_keys\f[R] in the final image. The private key is stored in mkosi\[cq]s output directory. When building with this option and running the image using \f[C]mkosi boot\f[R] or \f[C]mkosi qemu\f[R], the \f[C]mkosi ssh\f[R] command can be used to connect to the container/VM via SSH. .TP \f[B]\f[CB]SshKey=\f[B]\f[R], \f[B]\f[CB]--ssh-key=\f[B]\f[R] If specified, use the given private key when connecting to the guest machine via \f[C]mkosi ssh\f[R]. This requires the public key counterpart to be present in the same location, suffixed with \f[C].pub\f[R] (as done by \f[C]ssh-keygen\f[R]). If this option is not present, \f[C]mkosi\f[R] generates a new key pair automatically. .TP \f[B]\f[CB]SshAgent=\f[B]\f[R], \f[B]\f[CB]--ssh-agent=\f[B]\f[R] If specified as a path, use the given socket to connect to the ssh agent when building an image and when connecting via \f[C]mkosi ssh\f[R] instead of hard-coding a key. If specified as \f[C]true\f[R], \f[C]$SSH_AUTH_SOCK\f[R] will be parsed instead (hint: use \f[C]sudo\f[R] with \f[C]-E\f[R]). The keys listed by \f[C]ssh-add -L\f[R] will be installed as authorized keys in the built image. The \f[C]ssh\f[R] invocation done by \f[C]mkosi ssh\f[R] will inherit \f[C]$SSH_AUTH_SOCK\f[R] for authentication purposes. .TP \f[B]\f[CB]SshPort=\f[B]\f[R], \f[B]\f[CB]--ssh-port=\f[B]\f[R] In the image, sshd will be configured to listen on this port. \f[C]mkosi ssh\f[R] will connect to this port. .TP \f[B]\f[CB]SshTimeout=\f[B]\f[R], \f[B]\f[CB]--ssh-timeout=\f[B]\f[R] When used with the \f[C]ssh\f[R] verb, \f[C]mkosi\f[R] will attempt to retry the SSH connection up to given timeout (in seconds) in case it fails. This option is useful mainly in scripted environments where the \f[C]qemu\f[R] and \f[C]ssh\f[R] verbs are used in a quick succession and the veth device might not get enough time to configure itself. .SS Commandline-only Options .PP Those settings cannot be configured in the configuration files. .TP \f[B]\f[CB]--directory=\f[B]\f[R], \f[B]\f[CB]-C\f[B]\f[R] Takes a path to a directory. \f[C]mkosi\f[R] switches to this directory before doing anything. Note that the various \f[C]mkosi.*\f[R] files are searched for only after changing to this directory, hence using this option is an effective way to build a project located in a specific directory. .TP \f[B]\f[CB]--default=\f[B]\f[R] Loads additional settings from the specified settings file. Most command line options may also be configured in a settings file. See the table below to see which command line options match which settings file option. If this option is not used, but a file \f[C]mkosi.default\f[R] is found in the local directory it is automatically used for this purpose. If a setting is configured both on the command line and in the settings file, the command line generally wins, except for options taking lists in which case both lists are combined. .TP \f[B]\f[CB]--all\f[B]\f[R], \f[B]\f[CB]-a\f[B]\f[R] Iterate through all files \f[C]mkosi.*\f[R] in the \f[C]mkosi.files/\f[R] subdirectory, and build each as if \f[C]--default=mkosi.files/mkosi.\&...\f[R] was invoked. This is a quick way to build a large number of images in one go. Any additional specified command line arguments override the relevant options in all files processed this way. .TP \f[B]\f[CB]--all-directory=\f[B]\f[R] If specified, overrides the directory the \f[C]--all\f[R] logic described above looks for settings files in. If unspecified, defaults to \f[C]mkosi.files/\f[R] in the current working directory. .TP \f[B]\f[CB]--incremental\f[B]\f[R], \f[B]\f[CB]-i\f[B]\f[R] Enable incremental build mode. This only applies if the two-phase \f[C]mkosi.build\f[R] build script logic is used. In this mode, a copy of the OS image is created immediately after all OS packages are unpacked but before the \f[C]mkosi.build\f[R] script is invoked in the development container. Similarly, a copy of the final image is created immediately before the build artifacts from the \f[C]mkosi.build\f[R] script are copied in. On subsequent invocations of \f[C]mkosi\f[R] with the \f[C]-i\f[R] switch these cached images may be used to skip the OS package unpacking, thus drastically speeding up repetitive build times. Note that when this is used and a pair of cached incremental images exists they are not automatically regenerated, even if options such as \f[C]Packages=\f[R] are modified. In order to force rebuilding of these cached images, combine \f[C]-i\f[R] with \f[C]-ff\f[R] to ensure cached images are first removed and then re-created. .TP \f[B]\f[CB]--debug=\f[B]\f[R] Enable additional debugging output. Takes a comma-separated list of arguments specifying the area of interest. Pass any invalid value (e.g.\ empty) to list currently accepted values. .TP \f[B]\f[CB]--version\f[B]\f[R] Show package version. .TP \f[B]\f[CB]--help\f[B]\f[R], \f[B]\f[CB]-h\f[B]\f[R] Show brief usage information. .TP \f[B]\f[CB]--auto-bump\f[B]\f[R], \f[B]\f[CB]-B\f[B]\f[R] If specified, after each successful build the the version is bumped in a fashion equivalent to the \f[C]bump\f[R] verb, in preparation for the next build. This is useful for simple, linear version management: each build in a series will have a version number one higher then the previous one. .SS Supported distributions .PP Images may be created containing installations of the following operating systems: .IP \[bu] 2 \f[I]Fedora Linux\f[R] .IP \[bu] 2 \f[I]Debian\f[R] .IP \[bu] 2 \f[I]Ubuntu\f[R] .IP \[bu] 2 \f[I]Arch Linux\f[R] .IP \[bu] 2 \f[I]openSUSE\f[R] .IP \[bu] 2 \f[I]Mageia\f[R] .IP \[bu] 2 \f[I]CentOS\f[R] .IP \[bu] 2 \f[I]Clear Linux\f[R] .IP \[bu] 2 \f[I]Photon\f[R] .IP \[bu] 2 \f[I]OpenMandriva\f[R] .IP \[bu] 2 \f[I]Rocky Linux\f[R] .IP \[bu] 2 \f[I]Alma Linux\f[R] .IP \[bu] 2 \f[I]Gentoo\f[R] .PP In theory, any distribution may be used on the host for building images containing any other distribution, as long as the necessary tools are available. Specifically, any distribution that packages \f[C]debootstrap\f[R] may be used to build \f[I]Debian\f[R] or \f[I]Ubuntu\f[R] images. Any distribution that packages \f[C]dnf\f[R] may be used to build \f[I]Fedora Linux\f[R], \f[I]Mageia\f[R] or \f[I]OpenMandriva\f[R] images. Any distro that packages \f[C]pacstrap\f[R] may be used to build \f[I]Arch Linux\f[R] images. Any distribution that packages \f[C]zypper\f[R] may be used to build \f[I]openSUSE\f[R] images. Any distribution that packages \f[C]yum\f[R] (or the newer replacement \f[C]dnf\f[R]) may be used to build \f[I]CentOS\f[R], \f[I]Rocky Linux\f[R], or \f[I]Alma Linux\f[R] images. Any distribution that packages \f[C]emerge\f[R] may be used to build \f[I]Gentoo\f[R] images. .PP Currently, \f[I]Fedora Linux\f[R] packages all relevant tools as of Fedora 28. .SS Compatibility .PP Legacy concepts are avoided: generated images use \f[I]GPT\f[R] disk labels (and no \f[I]MBR\f[R] labels), and only systemd-based images may be generated. .PP All generated \f[I]GPT\f[R] disk images may be booted in a local container directly with: .IP .nf \f[C] systemd-nspawn -bi image.raw \f[R] .fi .PP Additionally, bootable \f[I]GPT\f[R] disk images (as created with the \f[C]--bootable\f[R] flag) work when booted directly by \f[I]EFI\f[R] and \f[I]BIOS\f[R] systems, for example in \f[I]KVM\f[R] via: .IP .nf \f[C] qemu-kvm -m 512 -smp 2 -bios /usr/share/edk2/ovmf/OVMF_CODE.fd -drive format=raw,file=image.raw \f[R] .fi .PP \f[I]EFI\f[R] bootable \f[I]GPT\f[R] images are larger than plain \f[I]GPT\f[R] images, as they additionally carry an \f[I]EFI\f[R] system partition containing a boot loader, as well as a kernel, kernel modules, udev and more. .PP All directory or btrfs subvolume images may be booted directly with: .IP .nf \f[C] systemd-nspawn -bD image \f[R] .fi .SH Files .PP To make it easy to build images for development versions of your projects, mkosi can read configuration data from the local directory, under the assumption that it is invoked from a \f[I]source\f[R] tree. Specifically, the following files are used if they exist in the local directory: .IP \[bu] 2 The \f[B]\f[CB]mkosi.default\f[B]\f[R] file provides the default configuration for the image building process. For example, it may specify the distribution to use (\f[C]fedora\f[R], \f[C]ubuntu\f[R], \f[C]debian\f[R], \f[C]arch\f[R], \f[C]opensuse\f[R], \f[C]mageia\f[R], \f[C]openmandriva\f[R], \f[C]gentoo\f[R]) for the image, or additional distribution packages to install. Note that all options encoded in this configuration file may also be set on the command line, and this file is hence little more than a way to make sure invoking \f[C]mkosi\f[R] without further parameters in your \f[I]source\f[R] tree is enough to get the right image of your choice set up. .RS 2 .PP Additionally, if a \f[I]\f[CI]mkosi.default.d/\f[I]\f[R] directory exists, each file in it is loaded in the same manner adding/overriding the values specified in \f[C]mkosi.default\f[R]. If \f[C]mkosi.default.d/\f[R] contains a directory named after the distribution being built, each file in that directory is also processed. .PP The file format is inspired by Windows \f[C].ini\f[R] files and supports multi-line assignments: any line with initial whitespace is considered a continuation line of the line before. Command-line arguments, as shown in the help description, have to be included in a configuration block (e.g.\ \[lq]\f[C][Content]\f[R]\[rq]) corresponding to the argument group (e.g.\ \[lq]\f[C]Content\f[R]\[rq]), and the argument gets converted as follows: \[lq]\f[C]--with-network\f[R]\[rq] becomes \[lq]\f[C]WithNetwork=yes\f[R]\[rq]. For further details see the table above. .RE .IP \[bu] 2 The \f[B]\f[CB]mkosi.skeleton/\f[B]\f[R] directory or \f[B]\f[CB]mkosi.skeleton.tar\f[B]\f[R] archive may be used to insert files into the image. The files are copied \f[I]before\f[R] the distribution packages are installed into the image. This allows creation of files that need to be provided early, for example to configure the package manager or set systemd presets. .RS 2 .PP When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive. .RE .IP \[bu] 2 The \f[B]\f[CB]mkosi.extra/\f[B]\f[R] directory or \f[B]\f[CB]mkosi.extra.tar\f[B]\f[R] archive may be used to insert additional files into the image, on top of what the distribution includes in its packages. They are similar to \f[C]mkosi.skeleton/\f[R] and \f[C]mkosi.skeleton.tar\f[R], but the files are copied into the directory tree of the image \f[I]after\f[R] the OS was installed. .RS 2 .PP When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive. .RE .IP \[bu] 2 \f[B]\f[CB]mkosi.build\f[B]\f[R] may be an executable script. If it exists, the image will be built twice: the first iteration will be the \f[I]development\f[R] image, the second iteration will be the \f[I]final\f[R] image. The \f[I]development\f[R] image is used to build the project in the current working directory (the \f[I]source\f[R] tree). For that the whole directory is copied into the image, along with the \f[C]mkosi.build\f[R] script. The script is then invoked inside the image (via \f[C]systemd-nspawn\f[R]), with \f[C]$SRCDIR\f[R] pointing to the \f[I]source\f[R] tree. \f[C]$DESTDIR\f[R] points to a directory where the script should place any files generated it would like to end up in the \f[I]final\f[R] image. Note that \f[C]make\f[R]/\f[C]automake\f[R]/\f[C]meson\f[R] based build systems generally honor \f[C]$DESTDIR\f[R], thus making it very natural to build \f[I]source\f[R] trees from the build script. After the \f[I]development\f[R] image was built and the build script ran inside of it, it is removed again. After that the \f[I]final\f[R] image is built, without any \f[I]source\f[R] tree or build script copied in. However, this time the contents of \f[C]$DESTDIR\f[R] are added into the image. .RS 2 .PP When the source tree is copied into the \f[I]build\f[R] image, all files are copied, except for \f[C]mkosi.builddir/\f[R], \f[C]mkosi.cache/\f[R] and \f[C]mkosi.output/\f[R]. That said, \f[C].gitignore\f[R] is respected if the source tree is a \f[C]git\f[R] checkout. If multiple different images shall be built from the same source tree it is essential to exclude their output files from this copy operation, as otherwise a version of an image built earlier might be included in a later build, which is usually not intended. An alternative to excluding these built images via \f[C].gitignore\f[R] entries is to use the \f[C]mkosi.output/\f[R] directory, which is an easy way to exclude all build artifacts. .PP The \f[C]$MKOSI_DEFAULT\f[R] environment variable will be set inside of this script so that you know which \f[C]mkosi.default\f[R] (if any) was passed in. .RE .IP \[bu] 2 The \f[B]\f[CB]mkosi.prepare\f[B]\f[R] script is invoked directly after the software packages are installed, from within the image context, if it exists. It is once called for the \f[I]development\f[R] image (if this is enabled, see above) with the \[lq]build\[rq] command line parameter, right before copying the extra tree. It is called a second time for the \f[I]final\f[R] image with the \[lq]final\[rq] command line parameter. This script has network access and may be used to install packages from other sources than the distro\[cq]s package manager (e.g.\ \f[C]pip\f[R], \f[C]npm\f[R], \&...), after all software packages are installed but before the image is cached (if incremental mode is enabled). This script is executed within \f[C]$SRCDIR\f[R]. In contrast to a general purpose installation, it is safe to install packages to the system (\f[C]pip install\f[R], \f[C]npm install -g\f[R]) instead of in \f[C]$SRCDIR\f[R] itself because the build image is only used for a single project and can easily be thrown away and rebuilt so there\[cq]s no risk of conflicting dependencies and no risk of polluting the host system. .IP \[bu] 2 The \f[B]\f[CB]mkosi.postinst\f[B]\f[R] script is invoked as the penultimate step of preparing an image, from within the image context, if it exists. It is called first for the \f[I]development\f[R] image (if this is enabled, see above) with the \[lq]build\[rq] command line parameter, right before invoking the build script. It is called a second time for the \f[I]final\f[R] image with the \[lq]final\[rq] command line parameter, right before the image is considered complete. This script may be used to alter the images without any restrictions, after all software packages and built sources have been installed. Note that this script is executed directly in the image context with the final root directory in place, without any \f[C]$SRCDIR\f[R]/\f[C]$DESTDIR\f[R] setup. .IP \[bu] 2 The \f[B]\f[CB]mkosi.finalize\f[B]\f[R] script, if it exists, is invoked as last step of preparing an image, from the host system. It is once called for the \f[I]development\f[R] image (if this is enabled, see above) with the \[lq]build\[rq] command line parameter, as the last step before invoking the build script, after the \f[C]mkosi.postinst\f[R] script is invoked. It is called the second time with the \[lq]final\[rq] command line parameter as the last step before the image is considered complete. The environment variable \f[C]$BUILDROOT\f[R] points to the root directory of the installation image. Additional verbs may be added in the future, the script should be prepared for that. This script may be used to alter the images without any restrictions, after all software packages and built sources have been installed. This script is more flexible than \f[C]mkosi.postinst\f[R] in two regards: it has access to the host file system so it\[cq]s easier to copy in additional files or to modify the image based on external configuration, and the script is run in the host, so it can be used even without emulation even if the image has a foreign architecture. .IP \[bu] 2 The \f[B]\f[CB]mkosi.mksquashfs-tool\f[B]\f[R] script, if it exists, will be called wherever \f[C]mksquashfs\f[R] would be called. .IP \[bu] 2 The \f[B]\f[CB]mkosi.nspawn\f[B]\f[R] nspawn settings file will be copied into the same place as the output image file, if it exists. This is useful since nspawn looks for settings files next to image files it boots, for additional container runtime settings. .IP \[bu] 2 The \f[B]\f[CB]mkosi.cache/\f[B]\f[R] directory, if it exists, is automatically used as package download cache, in order to speed repeated runs of the tool. .IP \[bu] 2 The \f[B]\f[CB]mkosi.builddir/\f[B]\f[R] directory, if it exists, is automatically used as out-of-tree build directory, if the build commands in the \f[C]mkosi.build\f[R] script support it. Specifically, this directory will be mounted into the build container, and the \f[C]$BUILDDIR\f[R] environment variable will be set to it when the build script is invoked. The build script may then use this directory as build directory, for automake-style or ninja-style out-of-tree builds. This speeds up builds considerably, in particular when \f[C]mkosi\f[R] is used in incremental mode (\f[C]-i\f[R]): not only the disk images, but also the build tree is reused between subsequent invocations. Note that if this directory does not exist the \f[C]$BUILDDIR\f[R] environment variable is not set, and it is up to build script to decide whether to do in in-tree or an out-of-tree build, and which build directory to use. .IP \[bu] 2 The \f[B]\f[CB]mkosi.includedir/\f[B]\f[R] directory, if it exists, is automatically used as an out-of-tree include directory for header files. Specifically, it will be mounted in the build container at \f[C]/usr/include/\f[R] when building the build image and when running the build script. After building the (cached) build image, this directory will contain all the files installed to \f[C]/usr/include\f[R]. Language servers or other tools can use these files to provide a better editing experience for developers working on a project. .IP \[bu] 2 The \f[B]\f[CB]mkosi.installdir/\f[B]\f[R] directory, if it exists, is automatically used as the install directory. Specifically, this directory will be mounted into the container at \f[C]/root/dest\f[R] when running the build script. After running the build script, the contents of this directory are installed into the final image. This is useful to cache the install step of the build. If used, subsequent builds will only have to reinstall files that have changed since the previous build. .IP \[bu] 2 The \f[B]\f[CB]mkosi.rootpw\f[B]\f[R] file can be used to provide the password or hashed password (if \f[C]--password-is-hashed\f[R] is set) for the root user of the image. The password may optionally be followed by a newline character which is implicitly removed. The file must have an access mode of 0600 or less. If this file does not exist, the distribution\[cq]s default root password is set (which usually means access to the root user is blocked). .IP \[bu] 2 The \f[B]\f[CB]mkosi.passphrase\f[B]\f[R] file provides the passphrase to use when LUKS encryption is selected. It should contain the passphrase literally, and not end in a newline character (i.e.\ in the same format as cryptsetup and \f[C]/etc/crypttab\f[R] expect the passphrase files). The file must have an access mode of 0600 or less. If this file does not exist and encryption is requested, the user is queried instead. .IP \[bu] 2 The \f[B]\f[CB]mkosi.secure-boot.crt\f[B]\f[R] and \f[B]\f[CB]mkosi.secure-boot.key\f[B]\f[R] files contain an X.509 certificate and PEM private key to use when UEFI SecureBoot support is enabled. All EFI binaries included in the image\[cq]s ESP are signed with this key, as a late step in the build process. .IP \[bu] 2 The \f[B]\f[CB]mkosi.output/\f[B]\f[R] directory will be used for all build artifacts, if the image output path is not configured (i.e.\ no \f[C]--output=\f[R] setting specified), or configured to a filename (i.e.\ a path containing no \f[C]/\f[R] character). This includes the image itself, the root hash file in case Verity is used, the checksum and its signature if that\[cq]s enabled, and the nspawn settings file if there is any. Note that this directory is not used if the image output path contains at least one slash, and has no effect in that case. This setting is particularly useful if multiple different images shall be built from the same working directory, as otherwise the build result of a preceding run might be copied into a build image as part of the source tree (see above). .PP All these files are optional. .PP Note that the location of all these files may also be configured during invocation via command line switches, and as settings in \f[C]mkosi.default\f[R], in case the default settings are not acceptable for a project. .SH BUILD PHASES .PP If no build script \f[C]mkosi.build\f[R] (see above) is used the build consists of a single phase only: the final image is generated as the combination of \f[C]mkosi.skeleton/\f[R] (see above), the unpacked distribution packages and \f[C]mkosi.extra/\f[R]. .PP If a build script \f[C]mkosi.build\f[R] is used the build consists of two phases: in the the first \f[C]development\f[R] phase an image that includes necessary build tools (i.e.\ the combination of \f[C]Packages=\f[R] and \f[C]BuildPackages=\f[R] is installed) is generated (i.e.\ the combination of \f[C]mkosi.skeleton/\f[R] and unpacked distribution packages). Into this image the source tree is copied and \f[C]mkosi.build\f[R] executed. The artifacts the \f[C]mkosi.build\f[R] generates are saved. Then, the second \f[C]final\f[R] phase starts: an image that excludes the build tools (i.e.\ only \f[C]Packages=\f[R] is installed, \f[C]BuildPackages=\f[R] is not) is generated. This time the build artifacts saved from the first phase are copied in, and \f[C]mkosi.extra\f[R] copied on top, thus generating the final image. .PP The two-phased approach ensures that source tree is executed in a clean and comprehensive environment, while at the same the final image remains minimal and contains only those packages necessary at runtime, but avoiding those necessary at build-time. .PP Note that only the package cache \f[C]mkosi.cache/\f[R] is shared between the two phases. The distribution package manager is executed exactly once in each phase, always starting from a directory tree that is populated with \f[C]mkosi.skeleton\f[R] but nothing else. .SH CACHING .PP \f[C]mkosi\f[R] supports three different caches for speeding up repetitive re-building of images. Specifically: .IP "1." 3 The package cache of the distribution package manager may be cached between builds. This is configured with the \f[C]--cache=\f[R] option or the \f[C]mkosi.cache/\f[R] directory. This form of caching relies on the distribution\[cq]s package manager, and caches distribution packages (RPM, DEB, \&...) after they are downloaded, but before they are unpacked. .IP "2." 3 If an \f[C]mkosi.build\f[R] script is used, by enabling incremental build mode with \f[C]--incremental\f[R], a cached copy of the development and final images can be made immediately before the build sources are copied in (for the development image) or the artifacts generated by \f[C]mkosi.build\f[R] are copied in (in case of the final image). This form of caching allows bypassing the time-consuming package unpacking step of the distribution package managers, but is only effective if the list of packages to use remains stable, but the build sources and its scripts change regularly. Note that this cache requires manual flushing: whenever the package list is modified the cached images need to be explicitly removed before the next re-build, using the \f[C]-f\f[R] switch. .IP "3." 3 Finally, between multiple builds the build artifact directory may be shared, using the \f[C]mkosi.builddir/\f[R] directory. This directory allows build systems such as Meson to reuse already compiled sources from a previous built, thus speeding up the build process of the \f[C]mkosi.build\f[R] build script. .PP The package cache (i.e.\ the first item above) is unconditionally useful. The latter two caches only apply to uses of \f[C]mkosi\f[R] with a source tree and build script. When all three are enabled together turn-around times for complete image builds are minimal, as only changed source files need to be recompiled: an OS image rebuilt will be almost as quick to build the source tree only. .SH ENVIRONMENT VARIABLES .PP The build script \f[C]mkosi.build\f[R] receives the following environment variables: .IP \[bu] 2 \f[C]$SRCDIR\f[R] contains the path to the sources to build. .IP \[bu] 2 \f[C]$DESTDIR\f[R] is a directory into which any artifacts generated by the build script shall be placed. .IP \[bu] 2 \f[C]$BUILDDIR\f[R] is only defined if \f[C]mkosi.builddir\f[R] and points to the build directory to use. This is useful for all build systems that support out-of-tree builds to reuse already built artifacts from previous runs. .IP \[bu] 2 \f[C]$WITH_DOCS\f[R] is either \f[C]0\f[R] or \f[C]1\f[R] depending on whether a build without or with installed documentation was requested (\f[C]WithDocs=yes\f[R]). The build script should suppress installation of any package documentation to \f[C]$DESTDIR\f[R] in case \f[C]$WITH_DOCS\f[R] is set to \f[C]0\f[R]. .IP \[bu] 2 \f[C]$WITH_TESTS\f[R] is either \f[C]0\f[R]or \f[C]1\f[R] depending on whether a build without or with running the test suite was requested (\f[C]WithTests=no\f[R]). The build script should avoid running any unit or integration tests in case \f[C]$WITH_TESTS\f[R] is \f[C]0\f[R]. .IP \[bu] 2 \f[C]$WITH_NETWORK\f[R] is either \f[C]0\f[R]or \f[C]1\f[R] depending on whether a build without or with networking is being executed (\f[C]WithNetwork=no\f[R]). The build script should avoid any network communication in case \f[C]$WITH_NETWORK\f[R] is \f[C]0\f[R]. .SH EXAMPLES .PP Create and run a raw \f[I]GPT\f[R] image with \f[I]ext4\f[R], as \f[C]image.raw\f[R]: .IP .nf \f[C] # mkosi # systemd-nspawn -b -i image.raw \f[R] .fi .PP Create and run a bootable btrfs \f[I]GPT\f[R] image, as \f[C]foobar.raw\f[R]: .IP .nf \f[C] # mkosi -t gpt_btrfs --bootable -o foobar.raw # systemd-nspawn -b -i foobar.raw # qemu-kvm -m 512 -smp 2 -bios /usr/share/edk2/ovmf/OVMF_CODE.fd -drive format=raw,file=foobar.raw \f[R] .fi .PP Create and run a \f[I]Fedora Linux\f[R] image into a plain directory: .IP .nf \f[C] # mkosi -d fedora -t directory -o quux # systemd-nspawn -b -D quux \f[R] .fi .PP Create a compressed image \f[C]image.raw.xz\f[R] and add a checksum file, and install \f[I]SSH\f[R] into it: .IP .nf \f[C] # mkosi -d fedora -t gpt_squashfs --checksum --compress --package=openssh-clients \f[R] .fi .PP Inside the source directory of an \f[C]automake\f[R]-based project, configure \f[I]mkosi\f[R] so that simply invoking \f[C]mkosi\f[R] without any parameters builds an OS image containing a built version of the project in its current state: .IP .nf \f[C] # cat >mkosi.default <mkosi.build < copy . | . skeleton trees . | . (mkosi.skeleton/) . none . | . | . v . v . install . skip . distribution, . build image . packages and . | . build packages, . | . run . | . prepare script . | . (mkosi.prepare build) . | . or if incremental . | . use cached build image . | . | . | . v . | . copy . | . build sources . | . (./) . | . | . | . v . | . copy . | . extra trees . | . (mkosi.extra/) . | . | . | . v . | . run . | . postinstall script . | . (mkosi.postinst build) . | . | . | .-------------------------' . | | . . | v . . | run . . | finalize script . . |(mkosi.finalize build). . | | . . | '-------------------------. . | . | . | . v . | . run . | . build script . | . (mkosi.build) . | . | . '-----------------------------------+------------------------. . . | . . v . . copy . . skeleton trees . . (mkosi.skeleton/) . . | . . v . . install . . distribution . . and packages, . . run . . prepare script . . (mkosi.prepare final) . . or if incremental . . use cached final image . . | . . v . . copy . . build results . . | . . v . . copy . . extra trees . . (mkosi.extra/) . . | . . v . . run . . postinstall script . . (mkosi.postinst final) . . | . . v . . | . . perform cleanup . . (remove files, packages, . . package metadata) . . | .--------------------------------------------------' | . . v . . run . . finalize script . . (mkosi.finalize final) . . | . . .---------' . . | . . v . . end . . . . HOST . BUILD . FINAL . IMAGE . IMAGE . . ``` ## Configuration Settings The following settings can be set through configuration files (the syntax with `SomeSetting=value`) and on the command line (the syntax with `--some-setting=value`). For some command line parameters, a single-letter shortcut is also allowed. In the configuration files, the setting must be in the appropriate section, so the settings are grouped by section below. Command line options that take no argument are shown without "=" in their long version. In the config files, they should be specified with a boolean argument: either "1", "yes", or "true" to enable, or "0", "no", "false" to disable. ### [Distribution] Section `Distribution=`, `--distribution=`, `-d` : The distribution to install in the image. Takes one of the following arguments: `fedora`, `debian`, `ubuntu`, `arch`, `opensuse`, `mageia`, `centos`, `centos_epel`, `clear`, `photon`, `openmandriva`, `rocky`, `rocky_epel`, `alma`, `alma_epel`. If not specified, defaults to the distribution of the host. `Release=`, `--release=`, `-r` : The release of the distribution to install in the image. The precise syntax of the argument this takes depends on the distribution used, and is either a numeric string (in case of Fedora Linux, CentOS, …, e.g. `29`), or a distribution version name (in case of Debian, Ubuntu, …, e.g. `artful`). If neither this option, nor `Distribution=` is specified, defaults to the distribution version of the host. If the distribution is specified, defaults to a recent version of it. `Mirror=`, `--mirror=`, `-m` : The mirror to use for downloading the distribution packages. Expects a mirror URL as argument. `Repositories=`, `--repositories=` : Additional package repositories to use during installation. Expects one or more URLs as argument, separated by commas. This option may be used multiple times, in which case the list of repositories to use is combined. Use "!\*" to remove all repositories from to the list or use e.g. "!repo-url" to remove just one specific repository. For Arch Linux, additional repositories must be passed in the form `::` (e.g. `myrepo::https://myrepo.net`). `UseHostRepositories=`, `--use-host-repositories` : This option is only applicable for dnf-based distributions: *CentOS*, *Fedora Linux*, *Mageia*, *Photon*, *Rocky Linux*, *Alma Linux* and *OpenMandriva*. Allows use of the host's existing dnf repositories. By default, a hardcoded set of default dnf repositories is generated and used. Use `--repositories=` to identify a custom set of repositories to be enabled and used for the build. `Architecture=`, `--architecture=` : The architecture to build the image for. Note that this currently only works for architectures compatible with the host's architecture. ### [Output] Section `Format=`, `--format=`, `-t` : The image format type to generate. One of `directory` (for generating OS images inside a local directory), `subvolume` (similar, but as a btrfs subvolume), `tar` (similar, but a tarball of the image is generated), `cpio` (similar, but a cpio archive is generated), `gpt_ext4` (a block device image with an ext4 file system inside a GPT partition table), `gpt_xfs` (similar, but with an xfs file system), `gpt_btrfs` (similar, but with an btrfs file system), `gpt_squashfs` (similar, but with a squashfs file system), `plain_squashfs` (a plain squashfs file system without a partition table). `ManifestFormat=`, `--manifest-format=` : The manifest format type or types to generate. A comma-delimited list consisting of `json` (the standard JSON output format that describes the packages installed), `changelog` (a human-readable text format designed for diffing). Defaults to `json`. `Output=`, `--output=`, `-o` : Path for the output image file to generate. Takes a relative or absolute path where the generated image will be placed. If neither this option nor `OutputDirectory=` is used, the image is generated under the name `image`, but its name suffixed with an appropriate file suffix (e.g. `image.raw.xz` in case `gpt_ext4` is used in combination with `xz` compression). If the `ImageId=` option is configured it is used instead of `image` in the default output name. If an image version is specified via `ImageVersion=`, it is included in the default name, e.g. a specified image version of `7.8` might result in an image file name of `image_7.8.raw.xz`. `OutputSplitRoot=`, `--output-split-root=`, `OutputSplitVerify=`, `--output-split-verity=`, `OutputSplitKernel=`, `--output-split-kernel=` : Paths for the split-out output image files, when `SplitArtifacts=yes` is used. If unspecified, the relevant split artifact files will be named like the main image, but with `.root`, `.verity`, and `.efi` suffixes inserted (and in turn possibly suffixed by compression suffix, if compression is enabled). `OutputDirectory=`, `--output-dir=`, `-O` : Path to a directory where to place all generated artifacts (i.e. the generated image when an output path is not given, `SHA256SUMS` file, etc.). If this is not specified and the directory `mkosi.output/` exists in the local directory, it is automatically used for this purpose. If the setting is not used and `mkosi.output/` does not exist, all output artifacts are placed adjacent to the output image file. `WorkspaceDirectory=`, `--workspace-dir=` : Path to a directory where to store data required temporarily while building the image. This directory should have enough space to store the full OS image, though in most modes the actually used disk space is smaller. If not specified, and `mkosi.workspace/` exists in the local directory, it is used for this purpose. Otherwise, a subdirectory in the temporary storage area is used (`$TMPDIR` if set, `/var/tmp/` otherwise). : The data in this directory is removed automatically after each build. It's safe to manually remove the contents of this directory should an `mkosi` invocation be aborted abnormally (for example, due to reboot/power failure). If the `btrfs` output modes are selected this directory must be backed by `btrfs` too. `Force=`, `--force`, `-f` : Replace the output file if it already exists, when building an image. By default when building an image and an output artifact already exists `mkosi` will refuse operation. Specify this option once to delete all build artifacts from a previous run before re-building the image. If incremental builds are enabled, specifying this option twice will ensure the intermediary cache files are removed, too, before the re-build is initiated. If a package cache is used (also see the "Files" section below), specifying this option thrice will ensure the package cache is removed too, before the re-build is initiated. For the `clean` operation this option has a slightly different effect: by default the verb will only remove build artifacts from a previous run, when specified once the incremental cache files are deleted too, and when specified twice the package cache is also removed. `GPTFirstLBA=`, `--gpt-first-lba=` : Override the first usable LBA (Logical Block Address) within the GPT header. This defaults to `2048`, which is actually the desired value. However, some tools, e.g. the `prl_disk_tool` utility from the Parallels virtualization suite require this to be set to `34`, otherwise they might fail to resize the disk image and/or partitions inside it. `Bootable=`, `--bootable`, `-b` : Generate a bootable image. By default this will generate an image bootable on UEFI systems. Use `BootProtocols=` to select support for a different boot protocol. `BootProtocols=`, `--boot-protocols=` : Pick one or more boot protocols to support when generating a bootable image, as enabled with `Bootable=`. Takes a comma-separated list of `uefi` or `bios`. May be specified more than once in which case the specified lists are merged. If `uefi` is specified the `sd-boot` UEFI boot loader is used, if `bios` is specified the GNU Grub boot loader is used. Use "!\*" to remove all previously added protocols or "!protocol" to remove one protocol. `KernelCommandLine=`, `--kernel-command-line=` : Use the specified kernel command line when building bootable images. By default command line arguments get appended. To remove all arguments from the current list pass "!\*". To remove specific arguments add a space separated list of "!" prefixed arguments. For example adding "!\* console=ttyS0 rw" to a `mkosi.default` file or the command line arguments passes "console=ttyS0 rw" to the kernel in any case. Just adding "console=ttyS0 rw" would append these two arguments to the kernel command line created by lower priority configuration files or previous `KernelCommandLine=` command line arguments. `SecureBoot=`, `--secure-boot` : Sign the resulting kernel/initrd image for UEFI SecureBoot. `SecureBootKey=`, `--secure-boot-key=` : Path to the PEM file containing the secret key for signing the UEFI kernel image, if `SecureBoot=` is used. `SecureBootCertificate=`, `--secure-boot-certificate=` : Path to the X.509 file containing the certificate for the signed UEFI kernel image, if `SecureBoot=` is used. `SecureBootCommonName=`, `--secure-boot-common-name=` : Common name to be used when generating SecureBoot keys via mkosi's `genkey` command. Defaults to `mkosi of %u`, where `%u` expands to the username of the user invoking mkosi. `SecureBootValidDays=`, `--secure-boot-valid-days=` : Number of days that the keys should remain valid when generating SecureBoot keys via mkosi's `genkey` command. Defaults to two years (730 days). `ReadOnly=`, `--read-only` : Set the read-only flag on the root partition in the partition table. Only applies to `gpt_ext4`, `gpt_xfs`, `gpt_btrfs`, `subvolume` output formats, and is implied on `gpt_squashfs` and `plain_squashfs`. : The read-only flag is essentially a hint to tools using the image (see https://systemd.io/DISCOVERABLE_PARTITIONS/). In particular, all systemd tools like `systemd-nspawn` and `systemd-gpt-auto-generator` will mount such partitions read-only, but tools from other project may ignore the flag. [//]: # (Please add external tools to the list here.) `Minimize=`, `--minimize` : Attempt to make the resulting root file system as small as possible by removing free space from the file system. Only supported for `gpt_ext4` and `gpt_btrfs`. For ext4 this relies on `resize2fs -M`, which reduces the free disk space but is not perfect and generally leaves some free space. For btrfs the results are optimal and no free space is left. `Encrypt=`, `--encrypt` : Encrypt all partitions in the file system or just the root file system. Takes either `all` or `data` as argument. If `all`, the root, `/home` and `/srv` file systems will be encrypted using dm-crypt/LUKS (with its default settings). If `data`, the root file system will be left unencrypted, but `/home` and `/srv` will be encrypted. The passphrase to use is read from the `mkosi.passphrase` file in the current working directory. Note that the UEFI System Partition (ESP) containing the boot loader and kernel to boot is never encrypted since it needs to be accessible by the firmware. `Verity=`, `--verity` : Add a "Verity" integrity partition to the image. Takes a boolean or the special value `signed`, and defaults to disabled. If enabled, the root partition (or `/usr/` partition, in case `UsrOnly=` is enabled) is protected with `dm-verity` against offline modification, the verification data is placed in an additional GPT partition. Implies `ReadOnly=yes`. If this is enabled, the Verity root hash is written to an output file with `.roothash` or `.usrhash` suffix. If set to `signed`, Verity is also enabled, but the resulting root hash is then also signed (in PKCS#7 format) with the signature key configured with `SecureBootKey=`. Or in other words: the SecureBoot key pair is then used to both sign the kernel, if that is enabled, and the root/`/usr/` file system. This signature is then stored in an additional output file with the `.roothash.p7s` or `.usrhash.p7s` suffix in DER format. It is also written to an additional partition in the image. The latter allows generating self-contained signed disk images, implementing the Verity provisions described in the [Discoverable Partitions Specification](https://systemd.io/DISCOVERABLE_PARTITIONS). `CompressFs=`, `--compress-fs=` : Enable or disable internal compression in the file system. Only applies to output formats with squashfs or btrfs. Takes one of `zlib`, `lzo`, `zstd`, `lz4`, `xz` or a boolean value as argument. If the latter is used compression is enabled/disabled and the default algorithm is used. In case of the `squashfs` output formats compression is implied, but this option may be used to select the algorithm. `CompressOutput=`, `--compress-output=` : Configure compression for the resulting image or archive. The argument can be either a boolean or a compression algorithm (`xz`, `zstd`). `xz` compression is used by default. Note that when applied to block device image types this means the image cannot be started directly but needs to be decompressed first. This also means that the `shell`, `boot`, `qemu` verbs are not available when this option is used. Implied for `tar` and `cpio`. `Compress=`, `--compress=` : Enable compression. Using this option is equivalent to either `CompressFs=` or `CompressOutput=`; the appropriate type of compression is selected automatically. `Mksquashfs=`, `--mksquashfs=` : Set the path to the `mksquashfs` executable to use. This is useful in case the parameters for the tool shall be augmented, as the tool may be replaced by a script invoking it with the right parameters, this way. `QCow2=`, `--qcow2` : Encode the resulting image as QEMU QCOW2 image. This only applies to `gpt_ext4`, `gpt_xfs`, `gpt_btrfs`, `gpt_squashfs`. QCOW2 images can be read natively by `qemu`, but not by the Linux kernel. This means the `shell` and `boot` verbs are not available when this option is used, however `qemu` will work. `Hostname=`, `--hostname=` : Set the image's hostname to the specified name. `ImageVersion=`, `--image-version=` : Configure the image version. This accepts any string, but it is recommended to specify a series of dot separated components. The version may also be configured in a file `mkosi.version` in which case it may be conveniently managed via the `bump` verb or the `--auto-bump` switch. When specified the image version is included in the default output file name, i.e. instead of `image.raw` the default will be `image_0.1.raw` for version `0.1` of the image, and similar. The version is also passed via the `$IMAGE_VERSION` to any build scripts invoked (which may be useful to patch it into `/etc/os-release` or similar, in particular the `IMAGE_VERSION=` field of it). `ImageId=`, `--image-id=` : Configure the image identifier. This accepts a freeform string that shall be used to identify the image with. If set the default output file will be named after it (possibly suffixed with the version). If this option is used the root, `/usr/` and Verity partitions in the image will have their labels set to this (possibly suffixed by the image version). The identifier is also passed via the `$IMAGE_ID` to any build scripts invoked (which may be useful to patch it into `/etc/os-release` or similar, in particular the `IMAGE_ID=` field of it). `WithUnifiedKernelImages=`, `--without-unified-kernel-images` : If specified, mkosi does not build unified kernel images and instead installs kernels with a separate initrd and boot loader config to the efi or bootloader partition. `HostonlyInitrd=`, `--hostonly-initrd` : If specified, mkosi will run the tool to create the initrd such that a non-generic initrd is created that will only be able to run on the system mkosi is run on. Currently mkosi uses dracut for all supported distributions except Clear Linux and this option translates to enabling dracut's hostonly option. `UsrOnly=`, `--usr-only` : If specified, `mkosi` will only add the `/usr/` directory tree (instead of the whole root file system) to the image. This is useful for fully stateless systems that come up pristine on every single boot, where `/etc/` and `/var/` are populated by `systemd-tmpfiles`/`systemd-sysusers` and related calls, or systems that are originally shipped without a root file system, but where `systemd-repart` adds one on the first boot. `SplitArtifacts=`, `--split-artifacts` : If specified and building an image with a partition table, also write out the root file system partition, its Verity partition (if configured) and the generated unified kernel (if configured) into separate output files. This is useful in A/B update scenarios where an existing disk image shall be augmented with a new version of a root or `/usr` partition along with its Verity partition and unified kernel. `NoChown=`, `--no-chown` : By default, if `mkosi` is run inside a `sudo` environment all generated artifacts have their UNIX user/group ownership changed to the user which invoked `sudo`. With this option this may be turned off and all generated files are owned by `root`. `TarStripSELinuxContext=`, `--tar-strip-selinux-context` : If running on a SELinux-enabled system (Fedora Linux, CentOS, Rocky Linux, Alma Linux), files inside the container are tagged with SELinux context extended attributes (`xattrs`), which may interfere with host SELinux rules in building or further container import stages. This option strips SELinux context attributes from the resulting tar archive. ### [Content] Section `BasePackages=`, `--base-packages` : Takes a boolean or the special value `conditional`. If true, automatically install packages to ensure basic functionality, as appropriate for the given image type. For example, `systemd` is always included, `systemd-udev` and `dracut` if the image is bootable, and so on. : If false, only packages specified with `Packages=` will be installed. : If `conditional`, the list of packages to install will be extended with boolean dependencies (c.f. https://rpm.org/user_doc/boolean_dependencies.html), to install specific packages when *other* packages are in the list. For example, `systemd-udev` may be automatically included if the image is bootable and `systemd` is installed. With this, various "base" packages still need to be specified if they should be included, but the corresponding "extension" packages will be added automatically when appropriate. This feature depends on support in the package manager, so it is not implemented for all distributions. `Packages=`, `--package=`, `-p` : Install the specified distribution packages (i.e. RPM, DEB, …) in the image. Takes a comma separated list of package specifications. This option may be used multiple times in which case the specified package lists are combined. Packages specified this way will be installed both in the development and the final image. Use `BuildPackages=` to specify packages that shall only be used for the image generated in the build image, but that shall not appear in the final image. : The types and syntax of "package specifications" that are allowed depend on the package installer (e.g. `dnf` or `yum` for `rpm`-based distros or `apt` for `deb`-based distros), but may include package names, package names with version and/or architecture, package name globs, paths to packages in the file system, package groups, and virtual provides, including file paths. : To remove a package e.g. added by a `mkosi.default` configuration file prepend the package name with `!`. For example -p "!apache2" would remove the apache2 package. To replace the apache2 package by the httpd package just add -p "!apache2,httpd" to the command line arguments. To remove all packages use "!\*". : Example: when using an distro that uses `dnf`, `Packages=meson libfdisk-devel.i686 git-* prebuilt/rpms/systemd-249-rc1.local.rpm /usr/bin/ld @development-tools python3dist(mypy)` would install the `meson` package (in the latest version), the 32-bit version of the `libfdisk-devel` package, all available packages that start with the `git-` prefix, a `systemd` rpm from the local file system, one of the packages that provides `/usr/bin/ld`, the packages in the "Development Tools" group, and the package that contains the `mypy` python module. `WithDocs=`, `--with-docs` : Include documentation in the image built. By default if the underlying distribution package manager supports it documentation is not included in the image built. The `$WITH_DOCS` environment variable passed to the `mkosi.build` script indicates whether this option was used or not. `WithTests=`, `--without-tests`, `-T` : If set to false (or when the command-line option is used), the `$WITH_TESTS` environment variable is set to `0` when the `mkosi.build` script is invoked. This is supposed to be used by the build script to bypass any unit or integration tests that are normally run during the source build process. Note that this option has no effect unless the `mkosi.build` build script honors it. `Cache=`, `--cache=` : Takes a path to a directory to use as package cache for the distribution package manager used. If this option is not used, but a `mkosi.cache/` directory is found in the local directory it is automatically used for this purpose. The directory configured this way is mounted into both the development and the final image while the package manager is running. `SkeletonTree=`, `--skeleton-tree=` : Takes a path to a directory to copy into the OS tree before invoking the package manager. Use this to insert files and directories into the OS tree before the package manager installs any packages. If this option is not used, but the `mkosi.skeleton/` directory is found in the local directory it is automatically used for this purpose (also see the "Files" section below). : Instead of a directory, a tar file may be provided. In this case it is unpacked into the OS tree before the package manager is invoked. This mode of operation allows setting permissions and file ownership explicitly, in particular for projects stored in a version control system such as `git` which retain full file ownership and access mode metadata for committed files. If the tar file `mkosi.skeleton.tar` is found in the local directory it will be automatically used for this purpose. `ExtraTree=`, `--extra-tree=` : Takes a path to a directory to copy on top of the OS tree the package manager generated. Use this to override any default configuration files shipped with the distribution. If this option is not used, but the `mkosi.extra/` directory is found in the local directory it is automatically used for this purpose (also see the "Files" section below). : As with the skeleton tree logic above, instead of a directory, a tar file may be provided too. `mkosi.skeleton.tar` will be automatically used if found in the local directory. `CleanPackageMetadata=`, `--clean-package-metadata=` : Enable/disable removal of package manager databases, caches, and logs at the end of installation. Can be specified as true, false, or "`auto`" (the default). With "`auto`", files will be removed if the respective package manager executable is *not* present at the end of the installation. `RemoveFiles=`, `--remove-files=` : Takes a comma-separated list of globs. Files in the image matching the globs will be purged at the end. `RemovePackages=`, `--remove-package=` : Takes a comma-separated list of package specifications for removal, in the same format as `Packages=`. The removal will be performed as one of the last steps. This step is skipped if `CleanPackageMetadata=no` is used. : This option is currently only implemented for distributions using `dnf`. `Environment=`, `--environment=` : Adds variables to the environment that the build/prepare/postinstall/finalize scripts are executed with. Takes a space-separated list of variable assignments or just variable names. In the latter case, the values of those variables will be passed through from the environment in which `mkosi` was invoked. This option may be specified more than once, in which case all listed variables will be set. If the same variable is set twice, the later setting overrides the earlier one. `BuildSources=`, `--build-sources=` : Takes a path to a source tree to copy into the development image, if the build script is used. This only applies if a build script is used, and defaults to the local directory. Use `SourceFileTransfer=` to configure how the files are transferred from the host to the container image. `BuildDirectory=`, `--build-dir=` : Takes a path of a directory to use as build directory for build systems that support out-of-tree builds (such as Meson). The directory used this way is shared between repeated builds, and allows the build system to reuse artifacts (such as object files, executable, …) generated on previous invocations. This directory is mounted into the development image when the build script is invoked. The build script can find the path to this directory in the `$BUILDDIR` environment variable. If this option is not specified, but a directory `mkosi.builddir/` exists in the local directory it is automatically used for this purpose (also see the "Files" section below). `IncludeDirectory=`, `--include-directory=` : Takes a path of a directory to use as the include directory. This directory is mounted at `/usr/include` when building the build image and running the build script. This means all include files installed to `/usr/include` will be stored in this directory. This is useful to make include files available on the host system for use by language servers to provide code completion. If this option is not specified, but a directory `mkosi.includedir/` exists in the local directory, it is automatically used for this purpose (also see the "Files" section below). `InstallDirectory=`, `--install-directory=` : Takes a path of a directory to use as the install directory. The directory used this way is shared between builds and allows the build system to not have to reinstall files that were already installed by a previous build and didn't change. The build script can find the path to this directory in the `$DESTDIR` environment variable. If this option is not specified, but a directory `mkosi.installdir` exists in the local directory, it is automatically used for this purpose (also see the "Files" section below). `BuildPackages=`, `--build-package=` : Similar to `Packages=`, but configures packages to install only in the first phase of the build, into the development image. This option should be used to list packages containing header files, compilers, build systems, linkers and other build tools the `mkosi.build` script requires to operate. Note that packages listed here are only included in the image created during the first phase of the build, and are absent in the final image. Use `Packages=` to list packages that shall be included in both. : Packages are appended to the list. Packages prefixed with "!" are removed from the list. "!\*" removes all packages from the list. `Password=`, `--password=` : Set the password of the `root` user. By default the `root` account is locked. If this option is not used, but a file `mkosi.rootpw` exists in the local directory, the root password is automatically read from it. `PasswordIsHashed=`, `--password-is-hashed` : Indicate that the password supplied for the `root` user has already been hashed, so that the string supplied with `Password=` or `mkosi.rootpw` will be written to `/etc/shadow` literally. `Autologin=`, `--autologin` : Enable autologin for the `root` user on `/dev/pts/0` (nspawn), `/dev/tty1` (QEMU) and `/dev/ttyS0` (QEMU with `QemuHeadless=yes`) by patching `/etc/pam.d/login`. `SkipFinalPhase=`, `--skip-final-phase=` : Causes the (second) final image build stage to be skipped. This is useful in combination with a build script, for when you care about the artifacts that were created locally in `$BUILDDIR`, but ultimately plan to discard the final image. `BuildScript=`, `--build-script=` : Takes a path to an executable that is used as build script for this image. If this option is used the build process will be two-phased instead of single-phased. The specified script is copied onto the development image and executed inside an `systemd-nspawn` container environment. If this option is not used, but the `mkosi.build` file found in the local directory it is automatically used for this purpose (also see the "Files" section below). Specify an empty value to disable automatic detection. `PrepareScript=`, `--prepare-script=` : Takes a path to an executable that is invoked inside the image right after installing the software packages. It is the last step before the image is cached (if incremental mode is enabled). This script is invoked inside a `systemd-nspawn` container environment, and thus does not have access to host resources. If this option is not used, but an executable script `mkosi.prepare` is found in the local directory, it is automatically used for this purpose. Specify an empty value to disable automatic detection. `PostInstallationScript=`, `--postinst-script=` : Takes a path to an executable that is invoked inside the final image right after copying in the build artifacts generated in the first phase of the build. This script is invoked inside a `systemd-nspawn` container environment, and thus does not have access to host resources. If this option is not used, but an executable `mkosi.postinst` is found in the local directory, it is automatically used for this purpose. Specify an empty value to disable automatic detection. `FinalizeScript=`, `--finalize-script=` : Takes a path to an executable that is invoked outside the final image right after copying in the build artifacts generated in the first phase of the build, and after having executed the `mkosi.postinst` script (see `PostInstallationScript=`). This script is invoked directly in the host environment, and hence has full access to the host's resources. If this option is not used, but an executable `mkosi.finalize` is found in the local directory, it is automatically used for this purpose. Specify an empty value to disable automatic detection. `SourceFileTransfer=`, `--source-file-transfer=` : Configures how the source file tree (as configured with `BuildSources=`) is transferred into the container image during the first phase of the build. Takes one of `copy-all` (to copy all files from the source tree), `copy-git-cached` (to copy only those files `git-ls-files --cached` lists), `copy-git-others` (to copy only those files `git-ls-files --others` lists), `mount` to bind mount the source tree directly. Defaults to `copy-git-cached` if a `git` source tree is detected, otherwise `copy-all`. When you specify `copy-git-more`, it is the same as `copy-git-cached`, except it also includes the `.git/` directory. `SourceFileTransferFinal=`, `--source-file-transfer-final=` : Same as `SourceFileTransfer=`, but for the final image instead of the build image. Takes the same values as `SourceFileFransfer=` except `mount`. By default, sources are not copied into the final image. `SourceResolveSymlinks=`, `--source-resolve-symlinks` : If given, any symbolic links in the source file tree are resolved and the file contents are copied to the build image. If not given, they are left as symbolic links. This only applies if `SourceFileTransfer=` is `copy-all`. Defaults to leaving them as symbolic links. `SourceResolveSymlinksFinal=`, `--source-resolve-symlinks-final` : Same as `SourceResolveSymlinks=`, but for the final image instead of the build image. `WithNetwork=`, `--with-network` : When true, enables network connectivity while the build script `mkosi.build` is invoked. By default, the build script runs with networking turned off. The `$WITH_NETWORK` environment variable is passed to the `mkosi.build` build script indicating whether the build is done with or without network. If specified as `never`, the package manager is instructed not to contact the network for updating package data. This provides a minimal level of reproducibility, as long as the package data cache is already fully populated. `Settings=`, `--settings=` : Specifies a `.nspawn` settings file for `systemd-nspawn` to use in the `boot` and `shell` verbs, and to place next to the generated image file. This is useful to configure the `systemd-nspawn` environment when the image is run. If this setting is not used but an `mkosi.nspawn` file found in the local directory it is automatically used for this purpose. ### [Partitions] Section `BaseImage=`, `--base-image=` : Use the specified directory or file system image as the base image, and create the output image that consists only of changes from this base. The base image is attached as the lower file system in an overlayfs structure, and the output filesystem becomes the upper layer, initially empty. Thus files that are not modified compared to the base image are not present in the output image. : This option may be used to create systemd "system extensions" or portable services. See https://systemd.io/PORTABLE_SERVICES/#extension-images for more information. `RootSize=`, `--root-size=` : Takes a size in bytes for the root file system. The specified numeric value may be suffixed with `K`, `M`, `G` to indicate kilo-, mega- and gigabytes (all to the base of 1024). This applies to output formats `gpt_ext4`, `gpt_xfs`, `gpt_btrfs`. Defaults to 3G. `ESPSize=`, `--esp-size=` : Similar to `RootSize=`, configures the size of the UEFI System Partition (ESP). This is only relevant if the `Bootable=` option is used to generate a bootable image. Defaults to 256 MB. `SwapSize=`, `--swap-size=` : Similar to `RootSize=`, configures the size of a swap partition on the image. If omitted, no swap partition is created. `HomeSize=`, `--home-size=` : Similar to `RootSize=`, configures the size of the `/home` partition. If omitted, no separate `/home` partition is created. `SrvSize=`, `--srv-size=` : Similar to `RootSize=`, configures the size of the `/srv` partition. If omitted, no separate `/srv` partition is created. ### [Validation] Section `Checksum=`, `--checksum` : Generate a `SHA256SUMS` file of all generated artifacts after the build is complete. `Sign=`, `--sign` : Sign the generated `SHA256SUMS` using `gpg` after completion. `Key=`, `--key=` : Select the `gpg` key to use for signing `SHA256SUMS`. This key must be already present in the `gpg` keyring. `BMap=`, `--bmap` : Generate a `bmap` file for usage with `bmaptool` from the generated image file. ### [Host] Section `ExtraSearchPaths=`, `--extra-search-paths=` : List of colon-separated paths to look for tools in, before using the regular `$PATH` search path. `QemuHeadless=`, `--qemu-headless=` : When used with the `build` verb, this option adds `console=ttyS0` to the image's kernel command line and sets the terminal type of the serial console in the image to the terminal type of the host (more specifically, the value of the `$TERM` environment variable passed to mkosi). This makes sure that all terminal features such as colors and shortcuts still work as expected when connecting to the qemu VM over the serial console (for example via `-nographic`). : When used with the `qemu` verb, this option adds the `-nographic` option to `qemu`'s command line so qemu starts a headless vm and connects to its serial console from the current terminal instead of launching the VM in a separate window. `QemuSmp=`, `--qemu-smp=` : When used with the `qemu` verb, this options sets `qemu`'s `-smp` argument which controls the number of guest's CPUs. Defaults to `2`. `QemuMem=`, `--qemu-mem=` : When used with the `qemu` verb, this options sets `qemu`'s `-m` argument which controls the amount of guest's RAM. Defaults to `1G`. `NetworkVeth=`, `--network-veth` : When used with the boot or qemu verbs, this option creates a virtual ethernet link between the host and the container/VM. The host interface is automatically picked up by systemd-networkd as documented in systemd-nspawn's man page: https://www.freedesktop.org/software/systemd/man/systemd-nspawn.html#-n `Ephemeral=`, `--ephemeral` : When used with the `shell`, `boot`, or `qemu` verbs, this option runs the specified verb on a temporary snapshot of the output image that is removed immediately when the container terminates. Taking the temporary snapshot is more efficient on file systems that support subvolume snapshots or 'reflinks' natively ("btrfs" or new "xfs") than on more traditional file systems that do not ("ext4"). `Ssh=`, `--ssh` : If specified, installs and enables `sshd` in the final image and generates a SSH keypair and adds the public key to root's `authorized_keys` in the final image. The private key is stored in mkosi's output directory. When building with this option and running the image using `mkosi boot` or `mkosi qemu`, the `mkosi ssh` command can be used to connect to the container/VM via SSH. `SshKey=`, `--ssh-key=` : If specified, use the given private key when connecting to the guest machine via `mkosi ssh`. This requires the public key counterpart to be present in the same location, suffixed with `.pub` (as done by `ssh-keygen`). If this option is not present, `mkosi` generates a new key pair automatically. `SshAgent=`, `--ssh-agent=` : If specified as a path, use the given socket to connect to the ssh agent when building an image and when connecting via `mkosi ssh` instead of hard-coding a key. If specified as `true`, `$SSH_AUTH_SOCK` will be parsed instead (hint: use `sudo` with `-E`). The keys listed by `ssh-add -L` will be installed as authorized keys in the built image. The `ssh` invocation done by `mkosi ssh` will inherit `$SSH_AUTH_SOCK` for authentication purposes. `SshPort=`, `--ssh-port=` : In the image, sshd will be configured to listen on this port. `mkosi ssh` will connect to this port. `SshTimeout=`, `--ssh-timeout=` : When used with the `ssh` verb, `mkosi` will attempt to retry the SSH connection up to given timeout (in seconds) in case it fails. This option is useful mainly in scripted environments where the `qemu` and `ssh` verbs are used in a quick succession and the veth device might not get enough time to configure itself. ### Commandline-only Options Those settings cannot be configured in the configuration files. `--directory=`, `-C` : Takes a path to a directory. `mkosi` switches to this directory before doing anything. Note that the various `mkosi.*` files are searched for only after changing to this directory, hence using this option is an effective way to build a project located in a specific directory. `--default=` : Loads additional settings from the specified settings file. Most command line options may also be configured in a settings file. See the table below to see which command line options match which settings file option. If this option is not used, but a file `mkosi.default` is found in the local directory it is automatically used for this purpose. If a setting is configured both on the command line and in the settings file, the command line generally wins, except for options taking lists in which case both lists are combined. `--all`, `-a` : Iterate through all files `mkosi.*` in the `mkosi.files/` subdirectory, and build each as if `--default=mkosi.files/mkosi.…` was invoked. This is a quick way to build a large number of images in one go. Any additional specified command line arguments override the relevant options in all files processed this way. `--all-directory=` : If specified, overrides the directory the `--all` logic described above looks for settings files in. If unspecified, defaults to `mkosi.files/` in the current working directory. `--incremental`, `-i` : Enable incremental build mode. This only applies if the two-phase `mkosi.build` build script logic is used. In this mode, a copy of the OS image is created immediately after all OS packages are unpacked but before the `mkosi.build` script is invoked in the development container. Similarly, a copy of the final image is created immediately before the build artifacts from the `mkosi.build` script are copied in. On subsequent invocations of `mkosi` with the `-i` switch these cached images may be used to skip the OS package unpacking, thus drastically speeding up repetitive build times. Note that when this is used and a pair of cached incremental images exists they are not automatically regenerated, even if options such as `Packages=` are modified. In order to force rebuilding of these cached images, combine `-i` with `-ff` to ensure cached images are first removed and then re-created. `--debug=` : Enable additional debugging output. Takes a comma-separated list of arguments specifying the area of interest. Pass any invalid value (e.g. empty) to list currently accepted values. `--version` : Show package version. `--help`, `-h` : Show brief usage information. `--auto-bump`, `-B` : If specified, after each successful build the the version is bumped in a fashion equivalent to the `bump` verb, in preparation for the next build. This is useful for simple, linear version management: each build in a series will have a version number one higher then the previous one. ## Supported distributions Images may be created containing installations of the following operating systems: * *Fedora Linux* * *Debian* * *Ubuntu* * *Arch Linux* * *openSUSE* * *Mageia* * *CentOS* * *Clear Linux* * *Photon* * *OpenMandriva* * *Rocky Linux* * *Alma Linux* * *Gentoo* In theory, any distribution may be used on the host for building images containing any other distribution, as long as the necessary tools are available. Specifically, any distribution that packages `debootstrap` may be used to build *Debian* or *Ubuntu* images. Any distribution that packages `dnf` may be used to build *Fedora Linux*, *Mageia* or *OpenMandriva* images. Any distro that packages `pacstrap` may be used to build *Arch Linux* images. Any distribution that packages `zypper` may be used to build *openSUSE* images. Any distribution that packages `yum` (or the newer replacement `dnf`) may be used to build *CentOS*, *Rocky Linux*, or *Alma Linux* images. Any distribution that packages `emerge` may be used to build *Gentoo* images. Currently, *Fedora Linux* packages all relevant tools as of Fedora 28. ## Compatibility Legacy concepts are avoided: generated images use *GPT* disk labels (and no *MBR* labels), and only systemd-based images may be generated. All generated *GPT* disk images may be booted in a local container directly with: ```bash systemd-nspawn -bi image.raw ``` Additionally, bootable *GPT* disk images (as created with the `--bootable` flag) work when booted directly by *EFI* and *BIOS* systems, for example in *KVM* via: ```bash qemu-kvm -m 512 -smp 2 -bios /usr/share/edk2/ovmf/OVMF_CODE.fd -drive format=raw,file=image.raw ``` *EFI* bootable *GPT* images are larger than plain *GPT* images, as they additionally carry an *EFI* system partition containing a boot loader, as well as a kernel, kernel modules, udev and more. All directory or btrfs subvolume images may be booted directly with: ```bash systemd-nspawn -bD image ``` # Files To make it easy to build images for development versions of your projects, mkosi can read configuration data from the local directory, under the assumption that it is invoked from a *source* tree. Specifically, the following files are used if they exist in the local directory: * The **`mkosi.default`** file provides the default configuration for the image building process. For example, it may specify the distribution to use (`fedora`, `ubuntu`, `debian`, `arch`, `opensuse`, `mageia`, `openmandriva`, `gentoo`) for the image, or additional distribution packages to install. Note that all options encoded in this configuration file may also be set on the command line, and this file is hence little more than a way to make sure invoking `mkosi` without further parameters in your *source* tree is enough to get the right image of your choice set up. Additionally, if a *`mkosi.default.d/`* directory exists, each file in it is loaded in the same manner adding/overriding the values specified in `mkosi.default`. If `mkosi.default.d/` contains a directory named after the distribution being built, each file in that directory is also processed. The file format is inspired by Windows `.ini` files and supports multi-line assignments: any line with initial whitespace is considered a continuation line of the line before. Command-line arguments, as shown in the help description, have to be included in a configuration block (e.g. "`[Content]`") corresponding to the argument group (e.g. "`Content`"), and the argument gets converted as follows: "`--with-network`" becomes "`WithNetwork=yes`". For further details see the table above. * The **`mkosi.skeleton/`** directory or **`mkosi.skeleton.tar`** archive may be used to insert files into the image. The files are copied *before* the distribution packages are installed into the image. This allows creation of files that need to be provided early, for example to configure the package manager or set systemd presets. When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive. * The **`mkosi.extra/`** directory or **`mkosi.extra.tar`** archive may be used to insert additional files into the image, on top of what the distribution includes in its packages. They are similar to `mkosi.skeleton/` and `mkosi.skeleton.tar`, but the files are copied into the directory tree of the image *after* the OS was installed. When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive. * **`mkosi.build`** may be an executable script. If it exists, the image will be built twice: the first iteration will be the *development* image, the second iteration will be the *final* image. The *development* image is used to build the project in the current working directory (the *source* tree). For that the whole directory is copied into the image, along with the `mkosi.build` script. The script is then invoked inside the image (via `systemd-nspawn`), with `$SRCDIR` pointing to the *source* tree. `$DESTDIR` points to a directory where the script should place any files generated it would like to end up in the *final* image. Note that `make`/`automake`/`meson` based build systems generally honor `$DESTDIR`, thus making it very natural to build *source* trees from the build script. After the *development* image was built and the build script ran inside of it, it is removed again. After that the *final* image is built, without any *source* tree or build script copied in. However, this time the contents of `$DESTDIR` are added into the image. When the source tree is copied into the *build* image, all files are copied, except for `mkosi.builddir/`, `mkosi.cache/` and `mkosi.output/`. That said, `.gitignore` is respected if the source tree is a `git` checkout. If multiple different images shall be built from the same source tree it is essential to exclude their output files from this copy operation, as otherwise a version of an image built earlier might be included in a later build, which is usually not intended. An alternative to excluding these built images via `.gitignore` entries is to use the `mkosi.output/` directory, which is an easy way to exclude all build artifacts. The `$MKOSI_DEFAULT` environment variable will be set inside of this script so that you know which `mkosi.default` (if any) was passed in. * The **`mkosi.prepare`** script is invoked directly after the software packages are installed, from within the image context, if it exists. It is once called for the *development* image (if this is enabled, see above) with the "build" command line parameter, right before copying the extra tree. It is called a second time for the *final* image with the "final" command line parameter. This script has network access and may be used to install packages from other sources than the distro's package manager (e.g. `pip`, `npm`, ...), after all software packages are installed but before the image is cached (if incremental mode is enabled). This script is executed within `$SRCDIR`. In contrast to a general purpose installation, it is safe to install packages to the system (`pip install`, `npm install -g`) instead of in `$SRCDIR` itself because the build image is only used for a single project and can easily be thrown away and rebuilt so there's no risk of conflicting dependencies and no risk of polluting the host system. * The **`mkosi.postinst`** script is invoked as the penultimate step of preparing an image, from within the image context, if it exists. It is called first for the *development* image (if this is enabled, see above) with the "build" command line parameter, right before invoking the build script. It is called a second time for the *final* image with the "final" command line parameter, right before the image is considered complete. This script may be used to alter the images without any restrictions, after all software packages and built sources have been installed. Note that this script is executed directly in the image context with the final root directory in place, without any `$SRCDIR`/`$DESTDIR` setup. * The **`mkosi.finalize`** script, if it exists, is invoked as last step of preparing an image, from the host system. It is once called for the *development* image (if this is enabled, see above) with the "build" command line parameter, as the last step before invoking the build script, after the `mkosi.postinst` script is invoked. It is called the second time with the "final" command line parameter as the last step before the image is considered complete. The environment variable `$BUILDROOT` points to the root directory of the installation image. Additional verbs may be added in the future, the script should be prepared for that. This script may be used to alter the images without any restrictions, after all software packages and built sources have been installed. This script is more flexible than `mkosi.postinst` in two regards: it has access to the host file system so it's easier to copy in additional files or to modify the image based on external configuration, and the script is run in the host, so it can be used even without emulation even if the image has a foreign architecture. * The **`mkosi.mksquashfs-tool`** script, if it exists, will be called wherever `mksquashfs` would be called. * The **`mkosi.nspawn`** nspawn settings file will be copied into the same place as the output image file, if it exists. This is useful since nspawn looks for settings files next to image files it boots, for additional container runtime settings. * The **`mkosi.cache/`** directory, if it exists, is automatically used as package download cache, in order to speed repeated runs of the tool. * The **`mkosi.builddir/`** directory, if it exists, is automatically used as out-of-tree build directory, if the build commands in the `mkosi.build` script support it. Specifically, this directory will be mounted into the build container, and the `$BUILDDIR` environment variable will be set to it when the build script is invoked. The build script may then use this directory as build directory, for automake-style or ninja-style out-of-tree builds. This speeds up builds considerably, in particular when `mkosi` is used in incremental mode (`-i`): not only the disk images, but also the build tree is reused between subsequent invocations. Note that if this directory does not exist the `$BUILDDIR` environment variable is not set, and it is up to build script to decide whether to do in in-tree or an out-of-tree build, and which build directory to use. * The **`mkosi.includedir/`** directory, if it exists, is automatically used as an out-of-tree include directory for header files. Specifically, it will be mounted in the build container at `/usr/include/` when building the build image and when running the build script. After building the (cached) build image, this directory will contain all the files installed to `/usr/include`. Language servers or other tools can use these files to provide a better editing experience for developers working on a project. * The **`mkosi.installdir/`** directory, if it exists, is automatically used as the install directory. Specifically, this directory will be mounted into the container at `/root/dest` when running the build script. After running the build script, the contents of this directory are installed into the final image. This is useful to cache the install step of the build. If used, subsequent builds will only have to reinstall files that have changed since the previous build. * The **`mkosi.rootpw`** file can be used to provide the password or hashed password (if `--password-is-hashed` is set) for the root user of the image. The password may optionally be followed by a newline character which is implicitly removed. The file must have an access mode of 0600 or less. If this file does not exist, the distribution's default root password is set (which usually means access to the root user is blocked). * The **`mkosi.passphrase`** file provides the passphrase to use when LUKS encryption is selected. It should contain the passphrase literally, and not end in a newline character (i.e. in the same format as cryptsetup and `/etc/crypttab` expect the passphrase files). The file must have an access mode of 0600 or less. If this file does not exist and encryption is requested, the user is queried instead. * The **`mkosi.secure-boot.crt`** and **`mkosi.secure-boot.key`** files contain an X.509 certificate and PEM private key to use when UEFI SecureBoot support is enabled. All EFI binaries included in the image's ESP are signed with this key, as a late step in the build process. * The **`mkosi.output/`** directory will be used for all build artifacts, if the image output path is not configured (i.e. no `--output=` setting specified), or configured to a filename (i.e. a path containing no `/` character). This includes the image itself, the root hash file in case Verity is used, the checksum and its signature if that's enabled, and the nspawn settings file if there is any. Note that this directory is not used if the image output path contains at least one slash, and has no effect in that case. This setting is particularly useful if multiple different images shall be built from the same working directory, as otherwise the build result of a preceding run might be copied into a build image as part of the source tree (see above). All these files are optional. Note that the location of all these files may also be configured during invocation via command line switches, and as settings in `mkosi.default`, in case the default settings are not acceptable for a project. # BUILD PHASES If no build script `mkosi.build` (see above) is used the build consists of a single phase only: the final image is generated as the combination of `mkosi.skeleton/` (see above), the unpacked distribution packages and `mkosi.extra/`. If a build script `mkosi.build` is used the build consists of two phases: in the the first `development` phase an image that includes necessary build tools (i.e. the combination of `Packages=` and `BuildPackages=` is installed) is generated (i.e. the combination of `mkosi.skeleton/` and unpacked distribution packages). Into this image the source tree is copied and `mkosi.build` executed. The artifacts the `mkosi.build` generates are saved. Then, the second `final` phase starts: an image that excludes the build tools (i.e. only `Packages=` is installed, `BuildPackages=` is not) is generated. This time the build artifacts saved from the first phase are copied in, and `mkosi.extra` copied on top, thus generating the final image. The two-phased approach ensures that source tree is executed in a clean and comprehensive environment, while at the same the final image remains minimal and contains only those packages necessary at runtime, but avoiding those necessary at build-time. Note that only the package cache `mkosi.cache/` is shared between the two phases. The distribution package manager is executed exactly once in each phase, always starting from a directory tree that is populated with `mkosi.skeleton` but nothing else. # CACHING `mkosi` supports three different caches for speeding up repetitive re-building of images. Specifically: 1. The package cache of the distribution package manager may be cached between builds. This is configured with the `--cache=` option or the `mkosi.cache/` directory. This form of caching relies on the distribution's package manager, and caches distribution packages (RPM, DEB, …) after they are downloaded, but before they are unpacked. 2. If an `mkosi.build` script is used, by enabling incremental build mode with `--incremental`, a cached copy of the development and final images can be made immediately before the build sources are copied in (for the development image) or the artifacts generated by `mkosi.build` are copied in (in case of the final image). This form of caching allows bypassing the time-consuming package unpacking step of the distribution package managers, but is only effective if the list of packages to use remains stable, but the build sources and its scripts change regularly. Note that this cache requires manual flushing: whenever the package list is modified the cached images need to be explicitly removed before the next re-build, using the `-f` switch. 3. Finally, between multiple builds the build artifact directory may be shared, using the `mkosi.builddir/` directory. This directory allows build systems such as Meson to reuse already compiled sources from a previous built, thus speeding up the build process of the `mkosi.build` build script. The package cache (i.e. the first item above) is unconditionally useful. The latter two caches only apply to uses of `mkosi` with a source tree and build script. When all three are enabled together turn-around times for complete image builds are minimal, as only changed source files need to be recompiled: an OS image rebuilt will be almost as quick to build the source tree only. # ENVIRONMENT VARIABLES The build script `mkosi.build` receives the following environment variables: * `$SRCDIR` contains the path to the sources to build. * `$DESTDIR` is a directory into which any artifacts generated by the build script shall be placed. * `$BUILDDIR` is only defined if `mkosi.builddir` and points to the build directory to use. This is useful for all build systems that support out-of-tree builds to reuse already built artifacts from previous runs. * `$WITH_DOCS` is either `0` or `1` depending on whether a build without or with installed documentation was requested (`WithDocs=yes`). The build script should suppress installation of any package documentation to `$DESTDIR` in case `$WITH_DOCS` is set to `0`. * `$WITH_TESTS` is either `0`or `1` depending on whether a build without or with running the test suite was requested (`WithTests=no`). The build script should avoid running any unit or integration tests in case `$WITH_TESTS` is `0`. * `$WITH_NETWORK` is either `0`or `1` depending on whether a build without or with networking is being executed (`WithNetwork=no`). The build script should avoid any network communication in case `$WITH_NETWORK` is `0`. # EXAMPLES Create and run a raw *GPT* image with *ext4*, as `image.raw`: ```bash # mkosi # systemd-nspawn -b -i image.raw ``` Create and run a bootable btrfs *GPT* image, as `foobar.raw`: ```bash # mkosi -t gpt_btrfs --bootable -o foobar.raw # systemd-nspawn -b -i foobar.raw # qemu-kvm -m 512 -smp 2 -bios /usr/share/edk2/ovmf/OVMF_CODE.fd -drive format=raw,file=foobar.raw ``` Create and run a *Fedora Linux* image into a plain directory: ```bash # mkosi -d fedora -t directory -o quux # systemd-nspawn -b -D quux ``` Create a compressed image `image.raw.xz` and add a checksum file, and install *SSH* into it: ```bash # mkosi -d fedora -t gpt_squashfs --checksum --compress --package=openssh-clients ``` Inside the source directory of an `automake`-based project, configure *mkosi* so that simply invoking `mkosi` without any parameters builds an OS image containing a built version of the project in its current state: ```bash # cat >mkosi.default <mkosi.build < None: text = importlib.resources.read_text(resource, key) where.write_text(text) if mode is not None: where.chmod(mode) elif executable: make_executable(where) def add_dropin_config(root: Path, unit: str, name: str, content: str) -> None: """Add a dropin config `name.conf` in /etc/systemd/system for `unit`.""" dropin = root / f"etc/systemd/system/{unit}.d/{name}.conf" dropin.parent.mkdir(mode=0o755, parents=True, exist_ok=True) dropin.write_text(dedent(content)) dropin.chmod(0o644) def add_dropin_config_from_resource( root: Path, unit: str, name: str, resource: str, key: str ) -> None: dropin = root / f"etc/systemd/system/{unit}.d/{name}.conf" dropin.parent.mkdir(mode=0o755, parents=True, exist_ok=True) write_resource(dropin, resource, key, mode=0o644) T = TypeVar("T") V = TypeVar("V") def dictify(f: Callable[..., Iterator[Tuple[T, V]]]) -> Callable[..., Dict[T, V]]: def wrapper(*args: Any, **kwargs: Any) -> Dict[T, V]: return dict(f(*args, **kwargs)) return functools.update_wrapper(wrapper, f) @dictify def read_os_release() -> Iterator[Tuple[str, str]]: try: filename = "/etc/os-release" f = open(filename) except FileNotFoundError: filename = "/usr/lib/os-release" f = open(filename) for line_number, line in enumerate(f, start=1): line = line.rstrip() if not line or line.startswith("#"): continue m = re.match(r"([A-Z][A-Z_0-9]+)=(.*)", line) if m: name, val = m.groups() if val and val[0] in "\"'": val = ast.literal_eval(val) yield name, val else: print(f"{filename}:{line_number}: bad line {line!r}", file=sys.stderr) def print_running_cmd(cmdline: Iterable[str]) -> None: MkosiPrinter.print_step("Running command:") MkosiPrinter.print_step(" ".join(shlex.quote(x) for x in cmdline) + "\n") GPT_ROOT_ALPHA = uuid.UUID("6523f8ae3eb14e2aa05a18b695ae656f") # NOQA: E221 GPT_ROOT_ARC = uuid.UUID("d27f46ed29194cb8bd259531f3c16534") # NOQA: E221 GPT_ROOT_ARM = uuid.UUID("69dad7102ce44e3cb16c21a1d49abed3") # NOQA: E221 GPT_ROOT_ARM64 = uuid.UUID("b921b0451df041c3af444c6f280d3fae") # NOQA: E221 GPT_ROOT_IA64 = uuid.UUID("993d8d3df80e4225855a9daf8ed7ea97") # NOQA: E221 GPT_ROOT_LOONGARCH64 = uuid.UUID("77055800792c4f94b39a98c91b762bb6") # NOQA: E221 GPT_ROOT_MIPS_LE = uuid.UUID("37c58c8ad9134156a25f48b1b64e07f0") # NOQA: E221 GPT_ROOT_MIPS64_LE = uuid.UUID("700bda437a344507b179eeb93d7a7ca3") # NOQA: E221 GPT_ROOT_PPC = uuid.UUID("1de3f1effa9847b58dcd4a860a654d78") # NOQA: E221 GPT_ROOT_PPC64 = uuid.UUID("912ade1da83949138964a10eee08fbd2") # NOQA: E221 GPT_ROOT_PPC64LE = uuid.UUID("c31c45e63f39412e80fb4809c4980599") # NOQA: E221 GPT_ROOT_RISCV32 = uuid.UUID("60d5a7fe8e7d435cb7143dd8162144e1") # NOQA: E221 GPT_ROOT_RISCV64 = uuid.UUID("72ec70a6cf7440e6bd494bda08e8f224") # NOQA: E221 GPT_ROOT_S390 = uuid.UUID("08a7acea624c4a2091e86e0fa67d23f9") # NOQA: E221 GPT_ROOT_S390X = uuid.UUID("5eead9a9fe094a1ea1d7520d00531306") # NOQA: E221 GPT_ROOT_TILEGX = uuid.UUID("c50cdd7038624cc390e1809a8c93ee2c") # NOQA: E221 GPT_ROOT_X86 = uuid.UUID("44479540f29741b29af7d131d5f0458a") # NOQA: E221 GPT_ROOT_X86_64 = uuid.UUID("4f68bce3e8cd4db196e7fbcaf984b709") # NOQA: E221 GPT_USR_ALPHA = uuid.UUID("e18cf08c33ec4c0d8246c6c6fb3da024") # NOQA: E221 GPT_USR_ARC = uuid.UUID("7978a68363164922bbee38bff5a2fecc") # NOQA: E221 GPT_USR_ARM = uuid.UUID("7d0359a302b34f0a865c654403e70625") # NOQA: E221 GPT_USR_ARM64 = uuid.UUID("b0e01050ee5f4390949a9101b17104e9") # NOQA: E221 GPT_USR_IA64 = uuid.UUID("4301d2a64e3b4b2abb949e0b2c4225ea") # NOQA: E221 GPT_USR_LOONGARCH64 = uuid.UUID("e611c702575c4cbe9a46434fa0bf7e3f") # NOQA: E221 GPT_USR_MIPS_LE = uuid.UUID("0f4868e999524706979f3ed3a473e947") # NOQA: E221 GPT_USR_MIPS64_LE = uuid.UUID("c97c1f32ba0640b49f22236061b08aa8") # NOQA: E221 GPT_USR_PPC = uuid.UUID("7d14fec5cc71415d9d6c06bf0b3c3eaf") # NOQA: E221 GPT_USR_PPC64 = uuid.UUID("2c9739e2f06846b39fd001c5a9afbcca") # NOQA: E221 GPT_USR_PPC64LE = uuid.UUID("15bb03af77e74d4ab12bc0d084f7491c") # NOQA: E221 GPT_USR_RISCV32 = uuid.UUID("b933fb225c3f4f91af90e2bb0fa50702") # NOQA: E221 GPT_USR_RISCV64 = uuid.UUID("beaec34b8442439ba40b984381ed097d") # NOQA: E221 GPT_USR_S390 = uuid.UUID("cd0f869bd0fb4ca0b1419ea87cc78d66") # NOQA: E221 GPT_USR_S390X = uuid.UUID("8a4f577050aa4ed3874a99b710db6fea") # NOQA: E221 GPT_USR_TILEGX = uuid.UUID("55497029c7c144ccaa39815ed1558630") # NOQA: E221 GPT_USR_X86 = uuid.UUID("75250d768cc6458ebd66bd47cc81a812") # NOQA: E221 GPT_USR_X86_64 = uuid.UUID("8484680c952148c69c11b0720656f69e") # NOQA: E221 GPT_ROOT_ALPHA_VERITY = uuid.UUID("fc56d9e9e6e54c06be32e74407ce09a5") # NOQA: E221 GPT_ROOT_ARC_VERITY = uuid.UUID("24b2d9750f974521afa1cd531e421b8d") # NOQA: E221 GPT_ROOT_ARM_VERITY = uuid.UUID("7386cdf2203c47a9a498f2ecce45a2d6") # NOQA: E221 GPT_ROOT_ARM64_VERITY = uuid.UUID("df3300ced69f4c92978c9bfb0f38d820") # NOQA: E221 GPT_ROOT_IA64_VERITY = uuid.UUID("86ed10d5b60745bb8957d350f23d0571") # NOQA: E221 GPT_ROOT_LOONGARCH64_VERITY = uuid.UUID("f3393b22e9af4613a9489d3bfbd0c535") # NOQA: E221 GPT_ROOT_MIPS_LE_VERITY = uuid.UUID("d7d150d22a044a338f1216651205ff7b") # NOQA: E221 GPT_ROOT_MIPS64_LE_VERITY = uuid.UUID("16b417f83e064f578dd29b5232f41aa6") # NOQA: E221 GPT_ROOT_PPC64LE_VERITY = uuid.UUID("906bd94445894aaea4e4dd983917446a") # NOQA: E221 GPT_ROOT_PPC64_VERITY = uuid.UUID("9225a9a33c194d89b4f6eeff88f17631") # NOQA: E221 GPT_ROOT_PPC_VERITY = uuid.UUID("98cfe649158846dcb2f0add147424925") # NOQA: E221 GPT_ROOT_RISCV32_VERITY = uuid.UUID("ae0253be11674007ac6843926c14c5de") # NOQA: E221 GPT_ROOT_RISCV64_VERITY = uuid.UUID("b6ed5582440b4209b8da5ff7c419ea3d") # NOQA: E221 GPT_ROOT_S390X_VERITY = uuid.UUID("b325bfbec7be4ab88357139e652d2f6b") # NOQA: E221 GPT_ROOT_S390_VERITY = uuid.UUID("7ac63b47b25c463b8df8b4a94e6c90e1") # NOQA: E221 GPT_ROOT_TILEGX_VERITY = uuid.UUID("966061ec28e44b2eb4a51f0a825a1d84") # NOQA: E221 GPT_ROOT_X86_64_VERITY = uuid.UUID("2c7357edebd246d9aec123d437ec2bf5") # NOQA: E221 GPT_ROOT_X86_VERITY = uuid.UUID("d13c5d3bb5d1422ab29f9454fdc89d76") # NOQA: E221 GPT_USR_ALPHA_VERITY = uuid.UUID("8cce0d25c0d04a44bd8746331bf1df67") # NOQA: E221 GPT_USR_ARC_VERITY = uuid.UUID("fca0598cd88045918c164eda05c7347c") # NOQA: E221 GPT_USR_ARM_VERITY = uuid.UUID("c215d7517bcd4649be906627490a4c05") # NOQA: E221 GPT_USR_ARM64_VERITY = uuid.UUID("6e11a4e7fbca4dedb9e9e1a512bb664e") # NOQA: E221 GPT_USR_IA64_VERITY = uuid.UUID("6a491e033be745458e3883320e0ea880") # NOQA: E221 GPT_USR_LOONGARCH64_VERITY = uuid.UUID("f46b2c2659ae48f09106c50ed47f673d") # NOQA: E221 GPT_USR_MIPS_LE_VERITY = uuid.UUID("46b98d8db55c4e8faab337fca7f80752") # NOQA: E221 GPT_USR_MIPS64_LE_VERITY = uuid.UUID("3c3d61feb5f3414dbb718739a694a4ef") # NOQA: E221 GPT_USR_PPC64LE_VERITY = uuid.UUID("ee2b998321e8415386d9b6901a54d1ce") # NOQA: E221 GPT_USR_PPC64_VERITY = uuid.UUID("bdb528a5a259475fa87dda53fa736a07") # NOQA: E221 GPT_USR_PPC_VERITY = uuid.UUID("df765d00270e49e5bc75f47bb2118b09") # NOQA: E221 GPT_USR_RISCV32_VERITY = uuid.UUID("cb1ee4e38cd04136a0a4aa61a32e8730") # NOQA: E221 GPT_USR_RISCV64_VERITY = uuid.UUID("8f1056be9b0547c481d6be53128e5b54") # NOQA: E221 GPT_USR_S390X_VERITY = uuid.UUID("31741cc41a2a4111a581e00b447d2d06") # NOQA: E221 GPT_USR_S390_VERITY = uuid.UUID("b663c618e7bc4d6d90aa11b756bb1797") # NOQA: E221 GPT_USR_TILEGX_VERITY = uuid.UUID("2fb4bf5607fa42da81326b139f2026ae") # NOQA: E221 GPT_USR_X86_64_VERITY = uuid.UUID("77ff5f63e7b64633acf41565b864c0e6") # NOQA: E221 GPT_USR_X86_VERITY = uuid.UUID("8f461b0d14ee4e819aa9049b6fb97abd") # NOQA: E221 GPT_ROOT_ALPHA_VERITY_SIG = uuid.UUID("d46495b7a053414f80f7700c99921ef8") # NOQA: E221 GPT_ROOT_ARC_VERITY_SIG = uuid.UUID("143a70bacbd34f06919f6c05683a78bc") # NOQA: E221 GPT_ROOT_ARM_VERITY_SIG = uuid.UUID("42b0455feb11491d98d356145ba9d037") # NOQA: E221 GPT_ROOT_ARM64_VERITY_SIG = uuid.UUID("6db69de629f44758a7a5962190f00ce3") # NOQA: E221 GPT_ROOT_IA64_VERITY_SIG = uuid.UUID("e98b36ee32ba48829b120ce14655f46a") # NOQA: E221 GPT_ROOT_LOONGARCH64_VERITY_SIG = uuid.UUID("5afb67ebecc84f85ae8eac1e7c50e7d0") # NOQA: E221 GPT_ROOT_MIPS_LE_VERITY_SIG = uuid.UUID("c919cc1f44564eff918cf75e94525ca5") # NOQA: E221 GPT_ROOT_MIPS64_LE_VERITY_SIG = uuid.UUID("904e58ef5c654a319c576af5fc7c5de7") # NOQA: E221 GPT_ROOT_PPC64LE_VERITY_SIG = uuid.UUID("d4a236e7e8734c07bf1dbf6cf7f1c3c6") # NOQA: E221 GPT_ROOT_PPC64_VERITY_SIG = uuid.UUID("f5e2c20c45b24ffabce92a60737e1aaf") # NOQA: E221 GPT_ROOT_PPC_VERITY_SIG = uuid.UUID("1b31b5aaadd9463ab2edbd467fc857e7") # NOQA: E221 GPT_ROOT_RISCV32_VERITY_SIG = uuid.UUID("3a112a7587294380b4cf764d79934448") # NOQA: E221 GPT_ROOT_RISCV64_VERITY_SIG = uuid.UUID("efe0f087ea8d4469821a4c2a96a8386a") # NOQA: E221 GPT_ROOT_S390X_VERITY_SIG = uuid.UUID("c80187a573a3491a901a017c3fa953e9") # NOQA: E221 GPT_ROOT_S390_VERITY_SIG = uuid.UUID("3482388e4254435aa241766a065f9960") # NOQA: E221 GPT_ROOT_TILEGX_VERITY_SIG = uuid.UUID("b367143997b04a5390f72d5a8f3ad47b") # NOQA: E221 GPT_ROOT_X86_64_VERITY_SIG = uuid.UUID("41092b059fc84523994f2def0408b176") # NOQA: E221 GPT_ROOT_X86_VERITY_SIG = uuid.UUID("5996fc05109c48de808b23fa0830b676") # NOQA: E221 GPT_USR_ALPHA_VERITY_SIG = uuid.UUID("5c6e1c76076a457aa0fef3b4cd21ce6e") # NOQA: E221 GPT_USR_ARC_VERITY_SIG = uuid.UUID("94f9a9a19971427aa40050cb297f0f35") # NOQA: E221 GPT_USR_ARM_VERITY_SIG = uuid.UUID("d7ff812f37d14902a810d76ba57b975a") # NOQA: E221 GPT_USR_ARM64_VERITY_SIG = uuid.UUID("c23ce4ff44bd4b00b2d4b41b3419e02a") # NOQA: E221 GPT_USR_IA64_VERITY_SIG = uuid.UUID("8de58bc22a43460db14ea76e4a17b47f") # NOQA: E221 GPT_USR_LOONGARCH64_VERITY_SIG = uuid.UUID("b024f315d330444c846144bbde524e99") # NOQA: E221 GPT_USR_MIPS_LE_VERITY_SIG = uuid.UUID("3e23ca0ba4bc4b4e80875ab6a26aa8a9") # NOQA: E221 GPT_USR_MIPS64_LE_VERITY_SIG = uuid.UUID("f2c2c7eeadcc4351b5c6ee9816b66e16") # NOQA: E221 GPT_USR_PPC64LE_VERITY_SIG = uuid.UUID("c8bfbd1e268e45218bbabf314c399557") # NOQA: E221 GPT_USR_PPC64_VERITY_SIG = uuid.UUID("0b888863d7f84d9e9766239fce4d58af") # NOQA: E221 GPT_USR_PPC_VERITY_SIG = uuid.UUID("7007891dd3714a8086a45cb875b9302e") # NOQA: E221 GPT_USR_RISCV32_VERITY_SIG = uuid.UUID("c3836a13313745bab583b16c50fe5eb4") # NOQA: E221 GPT_USR_RISCV64_VERITY_SIG = uuid.UUID("d2f9000a7a18453fb5cd4d32f77a7b32") # NOQA: E221 GPT_USR_S390X_VERITY_SIG = uuid.UUID("3f324816667b46ae86ee9b0c0c6c11b4") # NOQA: E221 GPT_USR_S390_VERITY_SIG = uuid.UUID("17440e4fa8d0467fa46e3912ae6ef2c5") # NOQA: E221 GPT_USR_TILEGX_VERITY_SIG = uuid.UUID("4ede75e26ccc4cc8b9c770334b087510") # NOQA: E221 GPT_USR_X86_64_VERITY_SIG = uuid.UUID("e7bb33fb06cf4e818273e543b413e2e2") # NOQA: E221 GPT_USR_X86_VERITY_SIG = uuid.UUID("974a71c0de4143c3be5d5c5ccd1ad2c0") # NOQA: E221 GPT_ESP = uuid.UUID("c12a7328f81f11d2ba4b00a0c93ec93b") # NOQA: E221 GPT_XBOOTLDR = uuid.UUID("bc13c2ff59e64262a352b275fd6f7172") # NOQA: E221 GPT_SWAP = uuid.UUID("0657fd6da4ab43c484e50933c84b4f4f") # NOQA: E221 GPT_HOME = uuid.UUID("933ac7e12eb44f13b8440e14e2aef915") # NOQA: E221 GPT_SRV = uuid.UUID("3b8f842520e04f3b907f1a25a76f98e8") # NOQA: E221 GPT_VAR = uuid.UUID("4d21b016b53445c2a9fb5c16e091fd2d") # NOQA: E221 GPT_TMP = uuid.UUID("7ec6f5573bc54acab29316ef5df639d1") # NOQA: E221 GPT_USER_HOME = uuid.UUID("773f91ef66d449b5bd83d683bf40ad16") # NOQA: E221 GPT_LINUX_GENERIC = uuid.UUID("0fc63daf848347728e793d69d8477de4") # NOQA: E221 # Mkosi specific addition to support BIOS images GPT_BIOS = uuid.UUID("2168614864496e6f744e656564454649") # NOQA: E221 # This is a non-formatted partition used to store the second stage # part of the bootloader because it doesn't necessarily fits the MBR # available space. 1MiB is more than enough for our usages and there's # little reason for customization since it only stores the bootloader and # not user-owned configuration files or kernels. See # https://en.wikipedia.org/wiki/BIOS_boot_partition # and https://www.gnu.org/software/grub/manual/grub/html_node/BIOS-installation.html BIOS_PARTITION_SIZE = 1024 * 1024 CLONE_NEWNS = 0x00020000 FEDORA_KEYS_MAP = { "7": "CAB44B996F27744E86127CDFB44269D04F2A6FD2", "8": "4FFF1F04010DEDCAE203591D62AEC3DC6DF2196F", "9": "4FFF1F04010DEDCAE203591D62AEC3DC6DF2196F", "10": "61A8ABE091FF9FBBF4B07709BF226FCC4EBFC273", "11": "AEE40C04E34560A71F043D7C1DC5C758D22E77F2", "12": "6BF178D28A789C74AC0DC63B9D1CC34857BBCCBA", "13": "8E5F73FF2A1817654D358FCA7EDC6AD6E8E40FDE", "14": "235C2936B4B70E61B373A020421CADDB97A1071F", "15": "25DBB54BDED70987F4C10042B4EBF579069C8460", "16": "05A912AC70457C3DBC82D352067F00B6A82BA4B7", "17": "CAC43FB774A4A673D81C5DE750E94C991ACA3465", "18": "7EFB8811DD11E380B679FCEDFF01125CDE7F38BD", "19": "CA81B2C85E4F4D4A1A3F723407477E65FB4B18E6", "20": "C7C9A9C89153F20183CE7CBA2EB161FA246110C1", "21": "6596B8FBABDA5227A9C5B59E89AD4E8795A43F54", "22": "C527EA07A9349B589C35E1BF11ADC0948E1431D5", "23": "EF45510680FB02326B045AFB32474CF834EC9CBA", "24": "5048BDBBA5E776E547B09CCC73BDE98381B46521", "25": "C437DCCD558A66A37D6F43724089D8F2FDB19C98", "26": "E641850B77DF435378D1D7E2812A6B4B64DAB85D", "27": "860E19B0AFA800A1751881A6F55E7430F5282EE4", "28": "128CF232A9371991C8A65695E08E7E629DB62FB1", "29": "5A03B4DD8254ECA02FDA1637A20AA56B429476B4", "30": "F1D8EC98F241AAF20DF69420EF3C111FCFC659B9", "31": "7D22D5867F2A4236474BF7B850CB390B3C3359C4", "32": "97A1AE57C3A2372CCA3A4ABA6C13026D12C944D0", "33": "963A2BEB02009608FE67EA4249FD77499570FF31", "34": "8C5BA6990BDB26E19F2A1A801161AE6945719A39", "35": "787EA6AE1147EEE56C40B30CDB4639719867C58F", "36": "53DED2CB922D8B8D9E63FD18999F7CBF38AB71F4", } def fedora_release_cmp(a: str, b: str) -> int: """Return negative if a GPTRootTypeTriplet: """The type UUID for the native GPT root partition for the given architecture Returns a tuple of three UUIDs: for the root partition, for the matching verity partition, and for the matching Verity signature partition. """ if arch is None: arch = platform.machine() if usr_only: if arch == "alpha": return GPTRootTypeTriplet(GPT_USR_ALPHA, GPT_USR_ALPHA_VERITY, GPT_USR_ALPHA_VERITY_SIG) elif arch == "arc": return GPTRootTypeTriplet(GPT_USR_ARC, GPT_USR_ARC_VERITY, GPT_USR_ARC_VERITY_SIG) elif arch.startswith("armv"): return GPTRootTypeTriplet(GPT_USR_ARM, GPT_USR_ARM_VERITY, GPT_USR_ARM_VERITY_SIG) elif arch == "aarch64": return GPTRootTypeTriplet(GPT_USR_ARM64, GPT_USR_ARM64_VERITY, GPT_USR_ARM64_VERITY_SIG) elif arch == "ia64": return GPTRootTypeTriplet(GPT_USR_IA64, GPT_USR_IA64_VERITY, GPT_USR_IA64_VERITY_SIG) elif arch == "loongarch64": return GPTRootTypeTriplet(GPT_USR_LOONGARCH64, GPT_USR_LOONGARCH64_VERITY, GPT_USR_LOONGARCH64_VERITY_SIG) elif arch == "mipsel": return GPTRootTypeTriplet(GPT_USR_MIPS_LE, GPT_USR_MIPS_LE_VERITY, GPT_USR_MIPS_LE_VERITY_SIG) elif arch == "mipsel64": return GPTRootTypeTriplet(GPT_USR_MIPS64_LE, GPT_USR_MIPS64_LE_VERITY, GPT_USR_MIPS64_LE_VERITY_SIG) elif arch == "ppc": return GPTRootTypeTriplet(GPT_USR_PPC, GPT_USR_PPC_VERITY, GPT_USR_PPC_VERITY_SIG) elif arch == "ppc64": return GPTRootTypeTriplet(GPT_USR_PPC64, GPT_USR_PPC64_VERITY, GPT_USR_PPC64_VERITY_SIG) elif arch == "ppc64le": return GPTRootTypeTriplet(GPT_USR_PPC64LE, GPT_USR_PPC64LE_VERITY, GPT_USR_PPC64LE_VERITY_SIG) elif arch == "riscv32": return GPTRootTypeTriplet(GPT_USR_RISCV32, GPT_USR_RISCV32_VERITY, GPT_USR_RISCV32_VERITY_SIG) elif arch == "riscv64": return GPTRootTypeTriplet(GPT_USR_RISCV64, GPT_USR_RISCV64_VERITY, GPT_USR_RISCV64_VERITY_SIG) elif arch == "s390": return GPTRootTypeTriplet(GPT_USR_S390, GPT_USR_S390_VERITY, GPT_USR_S390X) elif arch == "s390x": return GPTRootTypeTriplet(GPT_USR_S390X, GPT_USR_S390X_VERITY, GPT_USR_S390X_VERITY_SIG) elif arch == "tilegx": return GPTRootTypeTriplet(GPT_USR_TILEGX, GPT_USR_TILEGX_VERITY, GPT_USR_TILEGX_VERITY_SIG) elif arch in ("i386", "i486", "i586", "i686"): return GPTRootTypeTriplet(GPT_USR_X86, GPT_USR_X86_VERITY, GPT_USR_X86_VERITY_SIG) elif arch == "x86_64": return GPTRootTypeTriplet(GPT_USR_X86_64, GPT_USR_X86_64_VERITY, GPT_USR_X86_64_VERITY_SIG) else: die(f"Unknown architecture {arch}.") else: if arch == "alpha": return GPTRootTypeTriplet(GPT_ROOT_ALPHA, GPT_ROOT_ALPHA_VERITY, GPT_ROOT_ALPHA_VERITY_SIG) elif arch == "arc": return GPTRootTypeTriplet(GPT_ROOT_ARC, GPT_ROOT_ARC_VERITY, GPT_ROOT_ARC_VERITY_SIG) elif arch.startswith("armv"): return GPTRootTypeTriplet(GPT_ROOT_ARM, GPT_ROOT_ARM_VERITY, GPT_ROOT_ARM_VERITY_SIG) elif arch == "aarch64": return GPTRootTypeTriplet(GPT_ROOT_ARM64, GPT_ROOT_ARM64_VERITY, GPT_ROOT_ARM64_VERITY_SIG) elif arch == "ia64": return GPTRootTypeTriplet(GPT_ROOT_IA64, GPT_ROOT_IA64_VERITY, GPT_ROOT_IA64_VERITY_SIG) elif arch == "loongarch64": return GPTRootTypeTriplet(GPT_ROOT_LOONGARCH64, GPT_ROOT_LOONGARCH64_VERITY, GPT_ROOT_LOONGARCH64_VERITY_SIG) elif arch == "mipsel": return GPTRootTypeTriplet(GPT_ROOT_MIPS_LE, GPT_ROOT_MIPS_LE_VERITY, GPT_ROOT_MIPS_LE_VERITY_SIG) elif arch == "mipsel64": return GPTRootTypeTriplet(GPT_ROOT_MIPS64_LE, GPT_ROOT_MIPS64_LE_VERITY, GPT_ROOT_MIPS64_LE_VERITY_SIG) elif arch == "ppc": return GPTRootTypeTriplet(GPT_ROOT_PPC, GPT_ROOT_PPC_VERITY, GPT_ROOT_PPC_VERITY_SIG) elif arch == "ppc64": return GPTRootTypeTriplet(GPT_ROOT_PPC64, GPT_ROOT_PPC64_VERITY, GPT_ROOT_PPC64_VERITY_SIG) elif arch == "ppc64le": return GPTRootTypeTriplet(GPT_ROOT_PPC64LE, GPT_ROOT_PPC64LE_VERITY, GPT_ROOT_PPC64LE_VERITY_SIG) elif arch == "riscv32": return GPTRootTypeTriplet(GPT_ROOT_RISCV32, GPT_ROOT_RISCV32_VERITY, GPT_ROOT_RISCV32_VERITY_SIG) elif arch == "riscv64": return GPTRootTypeTriplet(GPT_ROOT_RISCV64, GPT_ROOT_RISCV64_VERITY, GPT_ROOT_RISCV64_VERITY_SIG) elif arch == "s390": return GPTRootTypeTriplet(GPT_ROOT_S390, GPT_ROOT_S390_VERITY, GPT_ROOT_S390X) elif arch == "s390x": return GPTRootTypeTriplet(GPT_ROOT_S390X, GPT_ROOT_S390X_VERITY, GPT_ROOT_S390X_VERITY_SIG) elif arch == "tilegx": return GPTRootTypeTriplet(GPT_ROOT_TILEGX, GPT_ROOT_TILEGX_VERITY, GPT_ROOT_TILEGX_VERITY_SIG) elif arch in ("i386", "i486", "i586", "i686"): return GPTRootTypeTriplet(GPT_ROOT_X86, GPT_ROOT_X86_VERITY, GPT_ROOT_X86_VERITY_SIG) elif arch == "x86_64": return GPTRootTypeTriplet(GPT_ROOT_X86_64, GPT_ROOT_X86_64_VERITY, GPT_ROOT_X86_64_VERITY_SIG) else: die(f"Unknown architecture {arch}.") def roothash_suffix(usr_only: bool = False) -> str: if usr_only: return ".usrhash" return ".roothash" def roothash_p7s_suffix(usr_only: bool = False) -> str: return roothash_suffix(usr_only) + ".p7s" def unshare(flags: int) -> None: libc_name = ctypes.util.find_library("c") if libc_name is None: die("Could not find libc") libc = ctypes.CDLL(libc_name, use_errno=True) if libc.unshare(ctypes.c_int(flags)) != 0: e = ctypes.get_errno() raise OSError(e, os.strerror(e)) def format_bytes(num_bytes: int) -> str: if num_bytes >= 1024 * 1024 * 1024: return f"{num_bytes/1024**3 :0.1f}G" if num_bytes >= 1024 * 1024: return f"{num_bytes/1024**2 :0.1f}M" if num_bytes >= 1024: return f"{num_bytes/1024 :0.1f}K" return f"{num_bytes}B" _IOC_NRBITS = 8 # NOQA: E221,E222 _IOC_TYPEBITS = 8 # NOQA: E221,E222 _IOC_SIZEBITS = 14 # NOQA: E221,E222 _IOC_DIRBITS = 2 # NOQA: E221,E222 _IOC_NRSHIFT = 0 # NOQA: E221 _IOC_TYPESHIFT = _IOC_NRSHIFT + _IOC_NRBITS # NOQA: E221 _IOC_SIZESHIFT = _IOC_TYPESHIFT + _IOC_TYPEBITS # NOQA: E221 _IOC_DIRSHIFT = _IOC_SIZESHIFT + _IOC_SIZEBITS # NOQA: E221 _IOC_NONE = 0 # NOQA: E221 _IOC_WRITE = 1 # NOQA: E221 _IOC_READ = 2 # NOQA: E221 def _IOC(dir_rw: int, type_drv: int, nr: int, argtype: str) -> int: size = {"int": 4, "size_t": 8}[argtype] return dir_rw << _IOC_DIRSHIFT | type_drv << _IOC_TYPESHIFT | nr << _IOC_NRSHIFT | size << _IOC_SIZESHIFT def _IOW(type_drv: int, nr: int, size: str) -> int: return _IOC(_IOC_WRITE, type_drv, nr, size) FICLONE = _IOW(0x94, 9, "int") @contextlib.contextmanager def open_close(path: PathString, flags: int, mode: int = 0o664) -> Iterator[int]: fd = os.open(path, flags | os.O_CLOEXEC, mode) try: yield fd finally: os.close(fd) def _reflink(oldfd: int, newfd: int) -> None: fcntl.ioctl(newfd, FICLONE, oldfd) def copy_fd(oldfd: int, newfd: int) -> None: try: _reflink(oldfd, newfd) except OSError as e: if e.errno not in {errno.EXDEV, errno.EOPNOTSUPP}: raise # While mypy handles this correctly, Pyright doesn't yet. shutil.copyfileobj(open(oldfd, "rb", closefd=False), cast(Any, open(newfd, "wb", closefd=False))) def copy_file_object(oldobject: BinaryIO, newobject: BinaryIO) -> None: try: _reflink(oldobject.fileno(), newobject.fileno()) except OSError as e: if e.errno not in {errno.EXDEV, errno.EOPNOTSUPP}: raise shutil.copyfileobj(oldobject, newobject) def copy_file(oldpath: PathString, newpath: PathString) -> None: oldpath = Path(oldpath) newpath = Path(newpath) if oldpath.is_symlink(): src = os.readlink(oldpath) # TODO: use oldpath.readlink() with python3.9+ newpath.symlink_to(src) return with open_close(oldpath, os.O_RDONLY) as oldfd: st = os.stat(oldfd) try: with open_close(newpath, os.O_WRONLY | os.O_CREAT | os.O_EXCL, st.st_mode) as newfd: copy_fd(oldfd, newfd) except FileExistsError: newpath.unlink() with open_close(newpath, os.O_WRONLY | os.O_CREAT, st.st_mode) as newfd: copy_fd(oldfd, newfd) shutil.copystat(oldpath, newpath, follow_symlinks=False) def symlink_f(target: str, path: Path) -> None: try: path.symlink_to(target) except FileExistsError: os.unlink(path) path.symlink_to(target) def copy_path(oldpath: PathString, newpath: Path) -> None: try: newpath.mkdir(exist_ok=True) except FileExistsError: # something that is not a directory already exists newpath.unlink() newpath.mkdir() for entry in os.scandir(oldpath): newentry = newpath / entry.name if entry.is_dir(follow_symlinks=False): copy_path(entry.path, newentry) elif entry.is_symlink(): target = os.readlink(entry.path) symlink_f(target, newentry) shutil.copystat(entry.path, newentry, follow_symlinks=False) else: st = entry.stat(follow_symlinks=False) if stat.S_ISREG(st.st_mode): copy_file(entry.path, newentry) else: print("Ignoring", entry.path) continue shutil.copystat(oldpath, newpath, follow_symlinks=True) @complete_step("Detaching namespace") def init_namespace(args: CommandLineArguments) -> None: unshare(CLONE_NEWNS) run(["mount", "--make-rslave", "/"]) def setup_workspace(args: CommandLineArguments) -> TempDir: with complete_step("Setting up temporary workspace.", "Temporary workspace set up in {.name}") as output: if args.workspace_dir is not None: d = tempfile.TemporaryDirectory(dir=args.workspace_dir, prefix="") elif args.output_format in (OutputFormat.directory, OutputFormat.subvolume): d = tempfile.TemporaryDirectory(dir=args.output.parent, prefix=".mkosi-") else: d = tempfile.TemporaryDirectory(dir=tmp_dir(), prefix="mkosi-") output.append(d) return d def btrfs_subvol_create(path: Path, mode: int = 0o755) -> None: with set_umask(~mode & 0o7777): run(["btrfs", "subvol", "create", path]) def btrfs_subvol_delete(path: Path) -> None: # Extract the path of the subvolume relative to the filesystem c = run(["btrfs", "subvol", "show", path], stdout=PIPE, stderr=DEVNULL, text=True) subvol_path = c.stdout.splitlines()[0] # Make the subvolume RW again if it was set RO by btrfs_subvol_delete run(["btrfs", "property", "set", path, "ro", "false"]) # Recursively delete the direct children of the subvolume c = run(["btrfs", "subvol", "list", "-o", path], stdout=PIPE, stderr=DEVNULL, text=True) for line in c.stdout.splitlines(): if not line: continue child_subvol_path = line.split(" ", 8)[-1] child_path = path / cast(str, os.path.relpath(child_subvol_path, subvol_path)) btrfs_subvol_delete(child_path) # Delete the subvolume now that all its descendants have been deleted run(["btrfs", "subvol", "delete", path], stdout=DEVNULL, stderr=DEVNULL) def btrfs_subvol_make_ro(path: Path, b: bool = True) -> None: run(["btrfs", "property", "set", path, "ro", "true" if b else "false"]) @contextlib.contextmanager def btrfs_forget_stale_devices(args: CommandLineArguments) -> Iterator[None]: # When using cached images (-i), mounting btrfs images would sometimes fail # with EEXIST. This is likely because a stale device is leftover somewhere # from the previous run. To fix this, we make sure to always clean up stale # btrfs devices after unmounting the image. try: yield finally: if args.output_format.is_btrfs() and shutil.which("btrfs"): run(["btrfs", "device", "scan", "-u"]) def is_generated_root(args: Union[argparse.Namespace, CommandLineArguments]) -> bool: """Returns whether this configuration means we need to generate a file system from a prepared tree This is needed for anything squashfs and when root minimization is required.""" return args.minimize or args.output_format.is_squashfs() or args.usr_only def disable_cow(path: PathString) -> None: """Disable copy-on-write if applicable on filesystem""" run(["chattr", "+C", path], stdout=DEVNULL, stderr=DEVNULL, check=False) def root_partition_description( args: Optional[CommandLineArguments], suffix: Optional[str] = None, image_id: Optional[str] = None, image_version: Optional[str] = None, usr_only: Optional[bool] = False, ) -> str: # Support invocation with "args" or with separate parameters (which is useful when invoking it before we allocated a CommandLineArguments object) if args is not None: image_id = args.image_id image_version = args.image_version usr_only = args.usr_only # We implement two naming regimes for the partitions. If image_id # is specified we assume that there's a naming and maybe # versioning regime for the image in place, and thus use that to # generate the image. If not we pick descriptive names instead. # If an image id is specified, let's generate the root, /usr/ or # verity partition name from it, in a uniform way for all three # types. The image ID is after all a great way to identify what is # *in* the image, while the partition type UUID indicates what # *kind* of data it is. If we also have a version we include it # too. The latter is particularly useful for systemd's image # dissection logic, which will always pick the newest root or # /usr/ partition if multiple exist. if image_id is not None: if image_version is not None: return f"{image_id}_{image_version}" else: return image_id # If no image id is specified we just return a descriptive string # for the partition. prefix = "System Resources" if usr_only else "Root" return prefix + ' ' + (suffix if suffix is not None else 'Partition') def initialize_partition_table(args: CommandLineArguments) -> None: if args.partition_table is not None: return if not args.output_format.is_disk(): return table = PartitionTable(first_lba=args.gpt_first_lba) no_btrfs = args.output_format != OutputFormat.gpt_btrfs for condition, label, size, type_uuid, name, read_only in ( (args.bootable and "uefi" in args.boot_protocols, PartitionIdentifier.esp, args.esp_size, GPT_ESP, "ESP System Partition", False), (args.bootable and "bios" in args.boot_protocols, PartitionIdentifier.bios, BIOS_PARTITION_SIZE, GPT_BIOS, "BIOS Boot Partition", False), (args.xbootldr_size is not None, PartitionIdentifier.xbootldr, args.xbootldr_size, GPT_XBOOTLDR, "Boot Loader Partition", False), (args.swap_size is not None, PartitionIdentifier.swap, args.swap_size, GPT_SWAP, "Swap Partition", False), (no_btrfs and args.home_size is not None, PartitionIdentifier.home, args.home_size, GPT_HOME, "Home Partition", False), (no_btrfs and args.srv_size is not None, PartitionIdentifier.srv, args.srv_size, GPT_SRV, "Server Data Partition", False), (no_btrfs and args.var_size is not None, PartitionIdentifier.var, args.var_size, GPT_VAR, "Variable Data Partition", False), (no_btrfs and args.tmp_size is not None, PartitionIdentifier.tmp, args.tmp_size, GPT_TMP, "Temporary Data Partition", False), (not is_generated_root(args), PartitionIdentifier.root, args.root_size, gpt_root_native(args.architecture, args.usr_only).root, root_partition_description(args), args.read_only)): if condition and size is not None: table.add(label, size, type_uuid, name, read_only=read_only) args.partition_table = table def create_image(args: CommandLineArguments, for_cache: bool) -> Optional[BinaryIO]: initialize_partition_table(args) if args.partition_table is None: return None with complete_step("Creating image with partition table…", "Created image with partition table as {.name}") as output: f: BinaryIO = cast( BinaryIO, tempfile.NamedTemporaryFile(prefix=".mkosi-", delete=not for_cache, dir=args.output.parent), ) output.append(f) disable_cow(f.name) disk_size = args.partition_table.disk_size() f.truncate(disk_size) if args.partition_table.partitions: args.partition_table.run_sfdisk(f.name) return f def refresh_partition_table(args: CommandLineArguments, f: BinaryIO) -> None: initialize_partition_table(args) if args.partition_table is None: return # Let's refresh all UUIDs and labels to match the new build. This # is called whenever we reuse a cached image, to ensure that the # UUIDs/labels of partitions are generated the same way as for # non-cached builds. Note that we refresh the UUIDs/labels simply # by invoking sfdisk again. If the build parameters didn't change # this should have the effect that offsets and sizes should remain # identical, and we thus only update the UUIDs and labels. # # FIXME: One of those days we should generate the UUIDs as hashes # of the used configuration, so that they remain stable as the # configuration is identical. with complete_step("Refreshing partition table…", "Refreshed partition table."): if args.partition_table.partitions: args.partition_table.run_sfdisk(f.name, quiet=True) def refresh_file_system(args: CommandLineArguments, dev: Optional[Path], cached: bool) -> None: if dev is None: return if not cached: return # Similar to refresh_partition_table() but refreshes the UUIDs of # the file systems themselves. We want that build artifacts from # cached builds are as similar as possible to those from uncached # builds, and hence we want to randomize UUIDs explicitly like # they are for uncached builds. This is particularly relevant for # btrfs since it prohibits mounting multiple file systems at the # same time that carry the same UUID. # # FIXME: One of those days we should generate the UUIDs as hashes # of the used configuration, so that they remain stable as the # configuration is identical. with complete_step(f"Refreshing file system {dev}…"): if args.output_format == OutputFormat.gpt_btrfs: # We use -M instead of -m here, for compatibility with # older btrfs, where -M didn't exist yet. run(["btrfstune", "-M", str(uuid.uuid4()), dev]) elif args.output_format == OutputFormat.gpt_ext4: # We connect stdin to /dev/null since tune2fs otherwise # asks an unnecessary safety question on stdin, and we # don't want that, our script doesn't operate on essential # file systems anyway, but just our build images. run(["tune2fs", "-U", "random", dev], stdin=subprocess.DEVNULL) elif args.output_format == OutputFormat.gpt_xfs: run(["xfs_admin", "-U", "generate", dev]) def copy_image_temporary(src: Path, dir: Path) -> BinaryIO: with src.open("rb") as source: f: BinaryIO = cast(BinaryIO, tempfile.NamedTemporaryFile(prefix=".mkosi-", dir=dir)) # So on one hand we want CoW off, since this stuff will # have a lot of random write accesses. On the other we # want the copy to be snappy, hence we do want CoW. Let's # ask for both, and let the kernel figure things out: # let's turn off CoW on the file, but start with a CoW # copy. On btrfs that works: the initial copy is made as # CoW but later changes do not result in CoW anymore. disable_cow(f.name) copy_file_object(source, f) return f def copy_file_temporary(src: PathString, dir: Path) -> BinaryIO: with open(src, "rb") as source: f = cast(BinaryIO, tempfile.NamedTemporaryFile(prefix=".mkosi-", dir=dir)) copy_file_object(source, f) return f def reuse_cache_image( args: CommandLineArguments, do_run_build_script: bool, for_cache: bool ) -> Tuple[Optional[BinaryIO], bool]: if not args.incremental: return None, False if not args.output_format.is_disk_rw(): return None, False fname = args.cache_pre_dev if do_run_build_script else args.cache_pre_inst if for_cache: if fname and os.path.exists(fname): # Cache already generated, skip generation, note that manually removing the exising cache images is # necessary if Packages or BuildPackages change return None, True else: return None, False if fname is None: return None, False with complete_step(f"Basing off cached image {fname}", "Copied cached image as {.name}") as output: try: f = copy_image_temporary(src=fname, dir=args.output.parent) except FileNotFoundError: return None, False output.append(f) return f, True @contextlib.contextmanager def attach_image_loopback(image: Optional[BinaryIO]) -> Iterator[Optional[Path]]: if image is None: yield None return with complete_step(f"Attaching {image.name} as loopback…", "Attached {}") as output: c = run(["losetup", "--find", "--show", "--partscan", image.name], stdout=PIPE, text=True) loopdev = Path(c.stdout.strip()) output += [loopdev] try: yield loopdev finally: with complete_step(f"Detaching {loopdev}"): run(["losetup", "--detach", loopdev]) @contextlib.contextmanager def attach_base_image(base_image: Optional[Path]) -> Iterator[Optional[Path]]: """Context manager that attaches/detaches the base image directory or device""" if base_image is None: yield None return with complete_step(f"Using {base_image} as the base image"): if base_image.is_dir(): yield base_image else: with base_image.open('rb') as f, \ attach_image_loopback(f) as loopdev: yield loopdev def prepare_swap(args: CommandLineArguments, loopdev: Optional[Path], cached: bool) -> None: if loopdev is None: return if cached: return part = args.get_partition(PartitionIdentifier.swap) if not part: return with complete_step("Formatting swap partition"): run(["mkswap", "-Lswap", part.blockdev(loopdev)]) def prepare_esp(args: CommandLineArguments, loopdev: Optional[Path], cached: bool) -> None: if loopdev is None: return if cached: return part = args.get_partition(PartitionIdentifier.esp) if not part: return with complete_step("Formatting ESP partition"): run(["mkfs.fat", "-nEFI", "-F32", part.blockdev(loopdev)]) def prepare_xbootldr(args: CommandLineArguments, loopdev: Optional[Path], cached: bool) -> None: if loopdev is None: return if cached: return part = args.get_partition(PartitionIdentifier.xbootldr) if not part: return with complete_step("Formatting XBOOTLDR partition"): run(["mkfs.fat", "-nXBOOTLDR", "-F32", part.blockdev(loopdev)]) def mkfs_ext4_cmd(label: str, mount: PathString) -> List[str]: return ["mkfs.ext4", "-I", "256", "-L", label, "-M", str(mount)] def mkfs_xfs_cmd(label: str) -> List[str]: return ["mkfs.xfs", "-n", "ftype=1", "-L", label] def mkfs_btrfs_cmd(label: str) -> List[str]: return ["mkfs.btrfs", "-L", label, "-d", "single", "-m", "single"] def mkfs_generic(args: CommandLineArguments, label: str, mount: PathString, dev: Path) -> None: cmdline: Sequence[PathString] if args.output_format == OutputFormat.gpt_btrfs: cmdline = mkfs_btrfs_cmd(label) elif args.output_format == OutputFormat.gpt_xfs: cmdline = mkfs_xfs_cmd(label) else: cmdline = mkfs_ext4_cmd(label, mount) if args.output_format == OutputFormat.gpt_ext4: if (args.distribution in (Distribution.centos, Distribution.centos_epel) and is_older_than_centos8(args.release)): # e2fsprogs in centos7 is too old and doesn't support this feature cmdline += ["-O", "^metadata_csum"] if args.architecture in ("x86_64", "aarch64"): # enable 64bit filesystem feature on supported architectures cmdline += ["-O", "64bit"] run([*cmdline, dev]) def luks_format(dev: Path, passphrase: Dict[str, str]) -> None: if passphrase["type"] == "stdin": passphrase_content = (passphrase["content"] + "\n").encode("utf-8") run( [ "cryptsetup", "luksFormat", "--force-password", "--pbkdf-memory=64", "--pbkdf-parallel=1", "--pbkdf-force-iterations=1000", "--batch-mode", dev, ], input=passphrase_content, ) else: assert passphrase["type"] == "file" run( [ "cryptsetup", "luksFormat", "--force-password", "--pbkdf-memory=64", "--pbkdf-parallel=1", "--pbkdf-force-iterations=1000", "--batch-mode", dev, passphrase["content"], ] ) def luks_format_root( args: CommandLineArguments, loopdev: Path, do_run_build_script: bool, cached: bool, inserting_generated_root: bool = False, ) -> None: if args.encrypt != "all": return part = args.get_partition(PartitionIdentifier.root) if not part: return if is_generated_root(args) and not inserting_generated_root: return if do_run_build_script: return if cached: return assert args.passphrase is not None with complete_step(f"Setting up LUKS on {part.description}…"): luks_format(part.blockdev(loopdev), args.passphrase) def luks_format_home(args: CommandLineArguments, loopdev: Path, do_run_build_script: bool, cached: bool) -> None: if args.encrypt is None: return part = args.get_partition(PartitionIdentifier.home) if not part: return if do_run_build_script: return if cached: return assert args.passphrase is not None with complete_step(f"Setting up LUKS on {part.description}…"): luks_format(part.blockdev(loopdev), args.passphrase) def luks_format_srv(args: CommandLineArguments, loopdev: Path, do_run_build_script: bool, cached: bool) -> None: if args.encrypt is None: return part = args.get_partition(PartitionIdentifier.srv) if not part: return if do_run_build_script: return if cached: return assert args.passphrase is not None with complete_step(f"Setting up LUKS on {part.description}…"): luks_format(part.blockdev(loopdev), args.passphrase) def luks_format_var(args: CommandLineArguments, loopdev: Path, do_run_build_script: bool, cached: bool) -> None: if args.encrypt is None: return part = args.get_partition(PartitionIdentifier.var) if not part: return if do_run_build_script: return if cached: return assert args.passphrase is not None with complete_step(f"Setting up LUKS on {part.description}…"): luks_format(part.blockdev(loopdev), args.passphrase) def luks_format_tmp(args: CommandLineArguments, loopdev: Path, do_run_build_script: bool, cached: bool) -> None: if args.encrypt is None: return part = args.get_partition(PartitionIdentifier.tmp) if not part: return if do_run_build_script: return if cached: return assert args.passphrase is not None with complete_step(f"Setting up LUKS on {part.description}…"): luks_format(part.blockdev(loopdev), args.passphrase) @contextlib.contextmanager def luks_open(part: Partition, loopdev: Path, passphrase: Dict[str, str]) -> Iterator[Path]: name = str(uuid.uuid4()) dev = part.blockdev(loopdev) with complete_step(f"Setting up LUKS on {part.description}…"): if passphrase["type"] == "stdin": passphrase_content = (passphrase["content"] + "\n").encode("utf-8") run(["cryptsetup", "open", "--type", "luks", dev, name], input=passphrase_content) else: assert passphrase["type"] == "file" run(["cryptsetup", "--key-file", passphrase["content"], "open", "--type", "luks", dev, name]) path = Path("/dev/mapper", name) try: yield path finally: with complete_step(f"Closing LUKS on {part.description}"): run(["cryptsetup", "close", path]) def luks_setup_root( args: CommandLineArguments, loopdev: Path, do_run_build_script: bool, inserting_generated_root: bool = False ) -> ContextManager[Optional[Path]]: if args.encrypt != "all": return contextlib.nullcontext() part = args.get_partition(PartitionIdentifier.root) if not part: return contextlib.nullcontext() if is_generated_root(args) and not inserting_generated_root: return contextlib.nullcontext() if do_run_build_script: return contextlib.nullcontext() assert args.passphrase is not None return luks_open(part, loopdev, args.passphrase) def luks_setup_home( args: CommandLineArguments, loopdev: Path, do_run_build_script: bool ) -> ContextManager[Optional[Path]]: if args.encrypt is None: return contextlib.nullcontext() part = args.get_partition(PartitionIdentifier.home) if not part: return contextlib.nullcontext() if do_run_build_script: return contextlib.nullcontext() assert args.passphrase is not None return luks_open(part, loopdev, args.passphrase) def luks_setup_srv( args: CommandLineArguments, loopdev: Path, do_run_build_script: bool ) -> ContextManager[Optional[Path]]: if args.encrypt is None: return contextlib.nullcontext() part = args.get_partition(PartitionIdentifier.srv) if not part: return contextlib.nullcontext() if do_run_build_script: return contextlib.nullcontext() assert args.passphrase is not None return luks_open(part, loopdev, args.passphrase) def luks_setup_var( args: CommandLineArguments, loopdev: Path, do_run_build_script: bool ) -> ContextManager[Optional[Path]]: if args.encrypt is None: return contextlib.nullcontext() part = args.get_partition(PartitionIdentifier.var) if not part: return contextlib.nullcontext() if do_run_build_script: return contextlib.nullcontext() assert args.passphrase is not None return luks_open(part, loopdev, args.passphrase) def luks_setup_tmp( args: CommandLineArguments, loopdev: Path, do_run_build_script: bool ) -> ContextManager[Optional[Path]]: if args.encrypt is None: return contextlib.nullcontext() part = args.get_partition(PartitionIdentifier.tmp) if not part: return contextlib.nullcontext() if do_run_build_script: return contextlib.nullcontext() assert args.passphrase is not None return luks_open(part, loopdev, args.passphrase) class LuksSetupOutput(NamedTuple): root: Optional[Path] home: Optional[Path] srv: Optional[Path] var: Optional[Path] tmp: Optional[Path] @classmethod def empty(cls) -> LuksSetupOutput: return cls(None, None, None, None, None) def without_generated_root(self, args: CommandLineArguments) -> LuksSetupOutput: "A copy of self with .root optionally supressed" return LuksSetupOutput( None if is_generated_root(args) else self.root, *self[1:], ) @contextlib.contextmanager def luks_setup_all( args: CommandLineArguments, loopdev: Optional[Path], do_run_build_script: bool ) -> Iterator[LuksSetupOutput]: if not args.output_format.is_disk(): yield LuksSetupOutput.empty() return assert loopdev is not None assert args.partition_table is not None with luks_setup_root(args, loopdev, do_run_build_script) as root, \ luks_setup_home(args, loopdev, do_run_build_script) as home, \ luks_setup_srv(args, loopdev, do_run_build_script) as srv, \ luks_setup_var(args, loopdev, do_run_build_script) as var, \ luks_setup_tmp(args, loopdev, do_run_build_script) as tmp: yield LuksSetupOutput( root or args.partition_table.partition_path(PartitionIdentifier.root, loopdev), home or args.partition_table.partition_path(PartitionIdentifier.home, loopdev), srv or args.partition_table.partition_path(PartitionIdentifier.srv, loopdev), var or args.partition_table.partition_path(PartitionIdentifier.var, loopdev), tmp or args.partition_table.partition_path(PartitionIdentifier.tmp, loopdev)) def prepare_root(args: CommandLineArguments, dev: Optional[Path], cached: bool) -> None: if dev is None: return if is_generated_root(args): return if cached: return label, path = ("usr", "/usr") if args.usr_only else ("root", "/") with complete_step(f"Formatting {label} partition…"): mkfs_generic(args, label, path, dev) def prepare_home(args: CommandLineArguments, dev: Optional[Path], cached: bool) -> None: if dev is None: return if cached: return with complete_step("Formatting home partition…"): mkfs_generic(args, "home", "/home", dev) def prepare_srv(args: CommandLineArguments, dev: Optional[Path], cached: bool) -> None: if dev is None: return if cached: return with complete_step("Formatting server data partition…"): mkfs_generic(args, "srv", "/srv", dev) def prepare_var(args: CommandLineArguments, dev: Optional[Path], cached: bool) -> None: if dev is None: return if cached: return with complete_step("Formatting variable data partition…"): mkfs_generic(args, "var", "/var", dev) def prepare_tmp(args: CommandLineArguments, dev: Optional[Path], cached: bool) -> None: if dev is None: return if cached: return with complete_step("Formatting temporary data partition…"): mkfs_generic(args, "tmp", "/var/tmp", dev) def stat_is_whiteout(st: os.stat_result) -> bool: return stat.S_ISCHR(st.st_mode) and st.st_rdev == 0 def delete_whiteout_files(path: Path) -> None: """Delete any char(0,0) device nodes underneath @path Overlayfs uses such files to mark "whiteouts" (files present in the lower layers, but removed in the upper one). """ with complete_step("Removing overlay whiteout files…"): for entry in cast(Iterator[os.DirEntry[str]], scandir_recursive(path)): if stat_is_whiteout(entry.stat(follow_symlinks=False)): os.unlink(entry.path) def do_mount( what: PathString, where: Path, options: Sequence[str] = (), type: Optional[str] = None, read_only: bool = False, ) -> None: os.makedirs(where, 0o755, True) if read_only: options = ["ro", *options] cmd: List[PathString] = ["mount", "-n", what, where] if type: cmd += ["-t", type] if options: cmd += ["-o", ",".join(options)] run(cmd) def mount_loop(args: CommandLineArguments, dev: Path, where: Path, read_only: bool = False) -> None: options = [] if not args.output_format.is_squashfs(): options += ["discard"] compress = should_compress_fs(args) if compress and args.output_format == OutputFormat.gpt_btrfs and where.name not in {"efi", "boot"}: options += ["compress" if compress is True else f"compress={compress}"] do_mount(dev, where, options, read_only=read_only) def mount_bind(what: Path, where: Optional[Path] = None) -> Path: if where is None: where = what os.makedirs(what, 0o755, True) os.makedirs(where, 0o755, True) run(["mount", "--bind", what, where]) return where def mount_tmpfs(where: Path) -> None: do_mount("tmpfs", where, type="tmpfs") def mount_overlay( args: CommandLineArguments, base_image: Path, # the path to the mounted base image root root: Path, # the path to the destination image root read_only: bool = False, ) -> Tuple[Path, TempDir]: """Set up the overlay mount on `root` with `base_image` as the lower layer. Sadly the overlay cannot be mounted onto the root directly, because the workdir must be on the same filesystem as "upperdir", but cannot be its subdirectory. Thus, we set up the overlay and then bind-mount the overlay structure into the expected location. """ workdir = tempfile.TemporaryDirectory(dir=root, prefix='overlayfs-workdir') realroot = root / 'mkosi-real-root' options = [f'lowerdir={base_image}', f'upperdir={realroot}', f'workdir={workdir.name}'] do_mount("overlay", realroot, options, type="overlay", read_only=read_only) mount_bind(realroot, root) return realroot, workdir @complete_step("Cleaning up overlayfs") def clean_up_overlay(root: Path, realroot: Path, workdir: TempDir) -> None: """Destroy the overlayfs structure set up by `mount_overlay`. When this function returns, the contents of the root file system have been moved into root, and `realroot` and `workdir` are gone. If `realroot` is set, it means we mounted `root` twice: the first mount is the overlayfs mount, and the second is a bind-mount to adjust the location one level up. Thus we need unmount twice too; after the first unmount here, the image remains mounted at `root`. """ umount(root) umount(realroot) workdir.cleanup() # Let's now move the contents of realroot into root for entry in os.scandir(realroot): os.rename(realroot / entry.name, root / entry.name) realroot.rmdir() delete_whiteout_files(root) @contextlib.contextmanager def mount_image( args: CommandLineArguments, root: Path, base_image: Optional[Path], # the path to the mounted base image root loopdev: Optional[Path], image: LuksSetupOutput, root_read_only: bool = False, ) -> Iterator[None]: with complete_step("Mounting image…"): realroot: Optional[Path] = None workdir: Optional[TempDir] = None if base_image is not None: mount_bind(root) realroot, workdir = mount_overlay(args, base_image, root, root_read_only) elif image.root is not None: if args.usr_only: # In UsrOnly mode let's have a bind mount at the top so that umount --recursive works nicely later mount_bind(root) mount_loop(args, image.root, root / "usr", root_read_only) else: mount_loop(args, image.root, root, root_read_only) else: # always have a root of the tree as a mount point so we can # recursively unmount anything that ends up mounted there mount_bind(root) if image.home is not None: mount_loop(args, image.home, root / "home") if image.srv is not None: mount_loop(args, image.srv, root / "srv") if image.var is not None: mount_loop(args, image.var, root / "var") if image.tmp is not None: mount_loop(args, image.tmp, root / "var/tmp") if loopdev is not None: assert args.partition_table is not None path = args.partition_table.partition_path(PartitionIdentifier.esp, loopdev) if path: mount_loop(args, path, root / "efi") path = args.partition_table.partition_path(PartitionIdentifier.xbootldr, loopdev) if path: mount_loop(args, path, root / "boot") # Make sure /tmp and /run are not part of the image mount_tmpfs(root / "run") mount_tmpfs(root / "tmp") try: yield finally: if realroot is not None: assert workdir is not None clean_up_overlay(root, realroot, workdir) with complete_step("Unmounting image"): umount(root) def install_etc_hostname(args: CommandLineArguments, root: Path, cached: bool) -> None: if cached: return etc_hostname = root / "etc/hostname" # Always unlink first, so that we don't get in trouble due to a # symlink or suchlike. Also if no hostname is configured we really # don't want the file to exist, so that systemd's implicit # hostname logic can take effect. try: os.unlink(etc_hostname) except FileNotFoundError: pass if args.hostname: with complete_step("Assigning hostname"): etc_hostname.write_text(args.hostname + "\n") @contextlib.contextmanager def mount_api_vfs(args: CommandLineArguments, root: Path) -> Iterator[None]: subdirs = ("proc", "dev", "sys") with complete_step("Mounting API VFS"): for subdir in subdirs: mount_bind(Path("/") / subdir, root / subdir) try: yield finally: with complete_step("Unmounting API VFS"): for subdir in subdirs: umount(root / subdir) @contextlib.contextmanager def mount_cache(args: CommandLineArguments, root: Path) -> Iterator[None]: if args.cache_path is None: yield return caches = [] # We can't do this in mount_image() yet, as /var itself might have to be created as a subvolume first with complete_step("Mounting Package Cache"): if args.distribution in (Distribution.fedora, Distribution.mageia, Distribution.openmandriva): caches = [mount_bind(args.cache_path, root / "var/cache/dnf")] elif args.distribution in ( Distribution.centos, Distribution.centos_epel, Distribution.rocky, Distribution.rocky_epel, Distribution.alma, Distribution.alma_epel, ): # We mount both the YUM and the DNF cache in this case, as # YUM might just be redirected to DNF even if we invoke # the former caches = [ mount_bind(args.cache_path / "yum", root / "var/cache/yum"), mount_bind(args.cache_path / "dnf", root / "var/cache/dnf"), ] elif args.distribution in (Distribution.debian, Distribution.ubuntu): caches = [mount_bind(args.cache_path, root / "var/cache/apt/archives")] elif args.distribution == Distribution.arch: caches = [mount_bind(args.cache_path, root / "var/cache/pacman/pkg")] elif args.distribution == Distribution.gentoo: caches = [mount_bind(args.cache_path, root / "var/cache/binpkgs")] elif args.distribution == Distribution.opensuse: caches = [mount_bind(args.cache_path, root / "var/cache/zypp/packages")] elif args.distribution == Distribution.photon: caches = [mount_bind(args.cache_path / "tdnf", root / "var/cache/tdnf")] try: yield finally: with complete_step("Unmounting Package Cache"): for d in caches: # NOQA: E501 umount(d) def umount(where: Path) -> None: run(["umount", "--recursive", "-n", where]) def configure_dracut(args: CommandLineArguments, packages: Set[str], root: Path) -> None: if "dracut" not in packages: return dracut_dir = root / "etc/dracut.conf.d" dracut_dir.mkdir(mode=0o755) dracut_dir.joinpath('30-mkosi-hostonly.conf').write_text( f'hostonly={yes_no(args.hostonly_initrd)}\n' 'hostonly_default_device=no\n' ) dracut_dir.joinpath("30-mkosi-qemu.conf").write_text('add_dracutmodules+=" qemu "\n') with dracut_dir.joinpath("30-mkosi-systemd-extras.conf").open("w") as f: for extra in DRACUT_SYSTEMD_EXTRAS: f.write(f'install_optional_items+=" {extra} "\n') if args.hostonly_initrd: dracut_dir.joinpath("30-mkosi-filesystem.conf").write_text( f'filesystems+=" {(args.output_format.needed_kernel_module())} "\n' ) if args.get_partition(PartitionIdentifier.esp): # These distros need uefi_stub configured explicitly for dracut to find the systemd-boot uefi stub. if args.distribution in (Distribution.ubuntu, Distribution.debian, Distribution.mageia, Distribution.openmandriva, Distribution.gentoo): dracut_dir.joinpath("30-mkosi-uefi-stub.conf").write_text( "uefi_stub=/usr/lib/systemd/boot/efi/linuxx64.efi.stub\n" ) # efivarfs must be present in order to GPT root discovery work dracut_dir.joinpath("30-mkosi-efivarfs.conf").write_text( '[[ $(modinfo -k "$kernel" -F filename efivarfs 2>/dev/null) == /* ]] && add_drivers+=" efivarfs "\n' ) def prepare_tree_root(args: CommandLineArguments, root: Path) -> None: if args.output_format == OutputFormat.subvolume and not is_generated_root(args): with complete_step("Setting up OS tree root…"): btrfs_subvol_create(root) def root_home(args: CommandLineArguments, root: Path) -> Path: # If UsrOnly= is turned on the /root/ directory (i.e. the root # user's home directory) is not persistent (after all everything # outside of /usr/ is not around). In that case let's mount it in # from an external place, so that we can have persistency. It is # after all where we place our build sources and suchlike. if args.usr_only: return workspace(root) / "home-root" return root / "root" def prepare_tree(args: CommandLineArguments, root: Path, do_run_build_script: bool, cached: bool) -> None: if cached: return with complete_step("Setting up basic OS tree…"): if args.output_format in (OutputFormat.subvolume, OutputFormat.gpt_btrfs) and not is_generated_root(args): btrfs_subvol_create(root / "home") btrfs_subvol_create(root / "srv") btrfs_subvol_create(root / "var") btrfs_subvol_create(root / "var/tmp", 0o1777) root.joinpath("var/lib").mkdir() btrfs_subvol_create(root / "var/lib/machines", 0o700) # We need an initialized machine ID for the build & boot logic to work root.joinpath("etc").mkdir(mode=0o755, exist_ok=True) root.joinpath("etc/machine-id").write_text(f"{args.machine_id}\n") if not do_run_build_script and args.bootable: if args.get_partition(PartitionIdentifier.xbootldr): # Create directories for kernels and entries if this is enabled root.joinpath("boot/EFI").mkdir(mode=0o700) root.joinpath("boot/EFI/Linux").mkdir(mode=0o700) root.joinpath("boot/loader").mkdir(mode=0o700) root.joinpath("boot/loader/entries").mkdir(mode=0o700) root.joinpath("boot", args.machine_id).mkdir(mode=0o700) else: # If this is not enabled, let's create an empty directory on /boot root.joinpath("boot").mkdir(mode=0o700) if args.get_partition(PartitionIdentifier.esp): root.joinpath("efi/EFI").mkdir(mode=0o700) root.joinpath("efi/EFI/BOOT").mkdir(mode=0o700) root.joinpath("efi/EFI/systemd").mkdir(mode=0o700) root.joinpath("efi/loader").mkdir(mode=0o700) if not args.get_partition(PartitionIdentifier.xbootldr): # Create directories for kernels and entries, unless the XBOOTLDR partition is turned on root.joinpath("efi/EFI/Linux").mkdir(mode=0o700) root.joinpath("efi/loader/entries").mkdir(mode=0o700) root.joinpath("efi", args.machine_id).mkdir(mode=0o700) # Create some compatibility symlinks in /boot in case that is not set up otherwise root.joinpath("boot/efi").symlink_to("../efi") root.joinpath("boot/loader").symlink_to("../efi/loader") root.joinpath("boot", args.machine_id).symlink_to(f"../efi/{args.machine_id}") root.joinpath("etc/kernel").mkdir(mode=0o755) root.joinpath("etc/kernel/cmdline").write_text(" ".join(args.kernel_command_line) + "\n") if do_run_build_script or args.ssh: root_home(args, root).mkdir(mode=0o750) if args.ssh and not do_run_build_script: root_home(args, root).joinpath(".ssh").mkdir(mode=0o700) if do_run_build_script: root_home(args, root).joinpath("dest").mkdir(mode=0o755) if args.include_dir is not None: root.joinpath("usr").mkdir(mode=0o755) root.joinpath("usr/include").mkdir(mode=0o755) if args.build_dir is not None: root_home(args, root).joinpath("build").mkdir(0o755) if args.network_veth and not do_run_build_script: root.joinpath("etc/systemd").mkdir(mode=0o755) root.joinpath("etc/systemd/network").mkdir(mode=0o755) def disable_pam_securetty(root: Path) -> None: def _rm_securetty(line: str) -> str: if "pam_securetty.so" in line: return "" return line patch_file(root / "etc/pam.d/login", _rm_securetty) def url_exists(url: str) -> bool: req = urllib.request.Request(url, method="HEAD") try: if urllib.request.urlopen(req): return True except Exception: pass return False def make_executable(path: Path) -> None: st = path.stat() os.chmod(path, st.st_mode | stat.S_IEXEC) def disable_kernel_install(args: CommandLineArguments, root: Path) -> None: # Let's disable the automatic kernel installation done by the kernel RPMs. After all, we want to built # our own unified kernels that include the root hash in the kernel command line and can be signed as a # single EFI executable. Since the root hash is only known when the root file system is finalized we turn # off any kernel installation beforehand. # # For BIOS mode, we don't have that option, so do not mask the units. if not args.bootable or args.get_partition(PartitionIdentifier.bios) or not args.with_unified_kernel_images: return for subdir in ("etc", "etc/kernel", "etc/kernel/install.d"): root.joinpath(subdir).mkdir(mode=0o755, exist_ok=True) for f in ("50-dracut.install", "51-dracut-rescue.install", "90-loaderentry.install"): root.joinpath("etc/kernel/install.d", f).symlink_to("/dev/null") def reenable_kernel_install(args: CommandLineArguments, root: Path) -> None: if not args.bootable or args.get_partition(PartitionIdentifier.bios) or not args.with_unified_kernel_images: return write_resource( root / "etc/kernel/install.d/50-mkosi-dracut-unified-kernel.install", "mkosi.resources", "dracut_unified_kernel_install.sh", executable=True, ) def add_packages( args: CommandLineArguments, packages: Set[str], *names: str, conditional: Optional[str] = None ) -> None: """Add packages in @names to @packages, if enabled by --base-packages. If @conditional is specified, rpm-specific syntax for boolean dependencies will be used to include @names if @conditional is satisfied. """ assert args.base_packages is True or args.base_packages is False or args.base_packages == "conditional" if args.base_packages is True or (args.base_packages == "conditional" and conditional): for name in names: packages.add(f"({name} if {conditional})" if conditional else name) def sort_packages(packages: Iterable[str]) -> List[str]: """Sorts packages: normal first, paths second, conditional third""" m = {"(": 2, "/": 1} sort = lambda name: (m.get(name[0], 0), name) return sorted(packages, key=sort) def make_rpm_list(args: CommandLineArguments, packages: Set[str], do_run_build_script: bool) -> Set[str]: packages = packages.copy() if args.bootable: # Temporary hack: dracut only adds crypto support to the initrd, if the cryptsetup binary is installed if args.encrypt or args.verity: add_packages(args, packages, "cryptsetup", conditional="dracut") if args.output_format == OutputFormat.gpt_ext4: add_packages(args, packages, "e2fsprogs") if args.output_format == OutputFormat.gpt_xfs: add_packages(args, packages, "xfsprogs") if args.output_format == OutputFormat.gpt_btrfs: add_packages(args, packages, "btrfs-progs") if args.get_partition(PartitionIdentifier.bios): if args.distribution in (Distribution.mageia, Distribution.openmandriva): add_packages(args, packages, "grub2") else: add_packages(args, packages, "grub2-pc") if not do_run_build_script and args.ssh: add_packages(args, packages, "openssh-server") return packages def clean_dnf_metadata(root: Path, always: bool) -> None: """Remove dnf metadata if /bin/dnf is not present in the image If dnf is not installed, there doesn't seem to be much use in keeping the dnf metadata, since it's not usable from within the image anyway. """ paths = [ root / "var/lib/dnf", *root.glob("var/log/dnf.*"), *root.glob("var/log/hawkey.*"), root / "var/cache/dnf", ] cond = always or not os.access(root / "bin/dnf", os.F_OK, follow_symlinks=False) if not cond or not any(path.exists() for path in paths): return with complete_step("Cleaning dnf metadata…"): for path in paths: unlink_try_hard(path) def clean_yum_metadata(root: Path, always: bool) -> None: """Remove yum metadata if /bin/yum is not present in the image""" paths = [ root / "var/lib/yum", *root.glob("var/log/yum.*"), root / "var/cache/yum", ] cond = always or not os.access(root / "bin/yum", os.F_OK, follow_symlinks=False) if not cond or not any(path.exists() for path in paths): return with complete_step("Cleaning yum metadata…"): for path in paths: unlink_try_hard(path) def clean_rpm_metadata(root: Path, always: bool) -> None: """Remove rpm metadata if /bin/rpm is not present in the image""" path = root / "var/lib/rpm" cond = always or not os.access(root / "bin/rpm", os.F_OK, follow_symlinks=False) if not cond or not path.exists(): return with complete_step("Cleaning rpm metadata…"): unlink_try_hard(path) def clean_tdnf_metadata(root: Path, always: bool) -> None: """Remove tdnf metadata if /bin/tdnf is not present in the image""" paths = [ *root.glob("var/log/tdnf.*"), root / "var/cache/tdnf", ] cond = always or not os.access(root / "usr/bin/tdnf", os.F_OK, follow_symlinks=False) if not cond or not any(path.exists() for path in paths): return with complete_step("Cleaning tdnf metadata…"): for path in paths: unlink_try_hard(path) def clean_apt_metadata(root: Path, always: bool) -> None: """Remove apt metadata if /usr/bin/apt is not present in the image""" paths = [ root / "var/lib/apt", root / "var/log/apt", root / "var/cache/apt", ] cond = always or not os.access(root / "usr/bin/apt", os.F_OK, follow_symlinks=False) if not cond or not any(path.exists() for path in paths): return with complete_step("Cleaning apt metadata…"): for path in paths: unlink_try_hard(path) def clean_dpkg_metadata(root: Path, always: bool) -> None: """Remove dpkg metadata if /usr/bin/dpkg is not present in the image""" paths = [ root / "var/lib/dpkg", root / "var/log/dpkg.log", ] cond = always or not os.access(root / "usr/bin/dpkg", os.F_OK, follow_symlinks=False) if not cond or not any(path.exists() for path in paths): return with complete_step("Cleaning dpkg metadata…"): for path in paths: unlink_try_hard(path) def clean_package_manager_metadata(args: CommandLineArguments, root: Path) -> None: """Remove package manager metadata Try them all regardless of the distro: metadata is only removed if the package manager is present in the image. """ assert args.clean_package_metadata in (False, True, 'auto') if args.clean_package_metadata is False: return # we try then all: metadata will only be touched if any of them are in the # final image clean_dnf_metadata(root, always=args.clean_package_metadata is True) clean_yum_metadata(root, always=args.clean_package_metadata is True) clean_rpm_metadata(root, always=args.clean_package_metadata is True) clean_tdnf_metadata(root, always=args.clean_package_metadata is True) clean_apt_metadata(root, always=args.clean_package_metadata is True) clean_dpkg_metadata(root, always=args.clean_package_metadata is True) # FIXME: implement cleanup for other package managers: swupd, pacman def remove_files(args: CommandLineArguments, root: Path) -> None: """Remove files based on user-specified patterns""" if not args.remove_files: return with complete_step("Removing files…"): # Note: Path('/foo') / '/bar' == '/bar'. We need to strip the slash. # https://bugs.python.org/issue44452 paths = [root / str(p).lstrip("/") for p in args.remove_files] remove_glob(*paths) def invoke_dnf( args: CommandLineArguments, root: Path, command: str, packages: Iterable[str], ) -> None: config_file = workspace(root) / "dnf.conf" cmd = 'dnf' if shutil.which('dnf') else 'yum' cmdline = [ cmd, "-y", f"--config={config_file}", "--best", "--allowerasing", f"--releasever={args.release}", f"--installroot={root}", "--setopt=keepcache=1", "--setopt=install_weak_deps=0", ] if args.repositories: cmdline += ["--disablerepo=*"] + [f"--enablerepo={repo}" for repo in args.repositories] if args.with_network == "never": cmdline += ["-C"] if args.architecture is not None: cmdline += [f"--forcearch={args.architecture}"] if not args.with_docs: cmdline += ["--nodocs"] cmdline += [command, *sort_packages(packages)] with mount_api_vfs(args, root): run(cmdline) def install_packages_dnf( args: CommandLineArguments, root: Path, packages: Set[str], do_run_build_script: bool, ) -> None: packages = make_rpm_list(args, packages, do_run_build_script) invoke_dnf(args, root, 'install', packages) def invoke_tdnf( args: CommandLineArguments, root: Path, command: str, packages: Set[str], gpgcheck: bool, do_run_build_script: bool, ) -> None: config_file = workspace(root) / "dnf.conf" packages = make_rpm_list(args, packages, do_run_build_script) cmdline = [ "tdnf", "-y", f"--config={config_file}", f"--releasever={args.release}", f"--installroot={root}", ] if args.repositories: cmdline += ["--disablerepo=*"] + [f"--enablerepo={repo}" for repo in args.repositories] if not gpgcheck: cmdline += ["--nogpgcheck"] cmdline += [command, *sort_packages(packages)] with mount_api_vfs(args, root): run(cmdline) def install_packages_tdnf( args: CommandLineArguments, root: Path, packages: Set[str], gpgcheck: bool, do_run_build_script: bool, ) -> None: packages = make_rpm_list(args, packages, do_run_build_script) invoke_tdnf(args, root, 'install', packages, gpgcheck, do_run_build_script) class Repo(NamedTuple): id: str name: str url: str gpgpath: Path gpgurl: Optional[str] = None def setup_dnf(args: CommandLineArguments, root: Path, repos: Sequence[Repo] = ()) -> None: gpgcheck = True repo_file = workspace(root) / "mkosi.repo" with repo_file.open("w") as f: for repo in repos: gpgkey: Optional[str] = None if repo.gpgpath.exists(): gpgkey = f"file://{repo.gpgpath}" elif repo.gpgurl: gpgkey = repo.gpgurl else: warn(f"GPG key not found at {repo.gpgpath}. Not checking GPG signatures.") gpgcheck = False f.write( dedent( f"""\ [{repo.id}] name={repo.name} {repo.url} gpgkey={gpgkey or ''} enabled=1 """ ) ) if args.use_host_repositories: default_repos = "" else: default_repos = f"{'repodir' if args.distribution == Distribution.photon else 'reposdir'}={workspace(root)}" config_file = workspace(root) / "dnf.conf" config_file.write_text( dedent( f"""\ [main] gpgcheck={'1' if gpgcheck else '0'} {default_repos } """ ) ) @complete_step("Installing Photon…") def install_photon(args: CommandLineArguments, root: Path, do_run_build_script: bool) -> None: release_url = "baseurl=https://packages.vmware.com/photon/$releasever/photon_release_$releasever_$basearch" updates_url = "baseurl=https://packages.vmware.com/photon/$releasever/photon_updates_$releasever_$basearch" gpgpath = Path("/etc/pki/rpm-gpg/VMWARE-RPM-GPG-KEY") repos = [Repo("photon", f"VMware Photon OS {args.release} Release", release_url, gpgpath), Repo("photon-updates", f"VMware Photon OS {args.release} Updates", updates_url, gpgpath)] setup_dnf(args, root, repos) packages = {*args.packages} add_packages(args, packages, "minimal") if not do_run_build_script and args.bootable: add_packages(args, packages, "linux", "initramfs") install_packages_tdnf(args, root, packages, gpgpath.exists(), do_run_build_script) @complete_step("Installing Clear Linux…") def install_clear(args: CommandLineArguments, root: Path, do_run_build_script: bool) -> None: if args.release == "latest": release = "clear" else: release = "clear/" + args.release packages = {*args.packages} add_packages(args, packages, "os-core-plus") if do_run_build_script: packages.update(args.build_packages) if not do_run_build_script and args.bootable: add_packages(args, packages, "kernel-native") if not do_run_build_script and args.ssh: add_packages(args, packages, "openssh-server") swupd_extract = shutil.which("swupd-extract") if swupd_extract is None: die( dedent( """ Couldn't find swupd-extract program, download (or update it) it using: go get -u github.com/clearlinux/mixer-tools/swupd-extract and it will be installed by default in ~/go/bin/swupd-extract. Also ensure that you have openssl program in your system. """ ) ) cmdline: List[PathString] = [swupd_extract, "-output", root] if args.cache_path: cmdline += ["-state", args.cache_path] cmdline += [release, *sort_packages(packages)] run(cmdline) root.joinpath("etc/resolv.conf").symlink_to("../run/systemd/resolve/resolv.conf") # Clear Linux doesn't have a /etc/shadow at install time, it gets created # when the root first logs in. To set the password via mkosi, create one. if not do_run_build_script and args.password is not None: shadow_file = root / "etc/shadow" shadow_file.write_text("root::::::::\n") shadow_file.chmod(0o400) # Password is already empty for root, so no need to reset it later. if args.password == "": args.password = None @complete_step("Installing Fedora Linux…") def install_fedora(args: CommandLineArguments, root: Path, do_run_build_script: bool) -> None: if args.release == "rawhide": last = list(FEDORA_KEYS_MAP)[-1] warn(f"Assuming rawhide is version {last} — " + "You may specify otherwise with --release=rawhide-") args.releasever = last elif args.release.startswith("rawhide-"): args.release, args.releasever = args.release.split("-") MkosiPrinter.info(f"Fedora rawhide — release version: {args.releasever}") else: args.releasever = args.release arch = args.architecture or platform.machine() if args.mirror: baseurl = urllib.parse.urljoin(args.mirror, f"releases/{args.release}/Everything/$basearch/os/") media = urllib.parse.urljoin(baseurl.replace("$basearch", arch), "media.repo") if not url_exists(media): baseurl = urllib.parse.urljoin(args.mirror, f"development/{args.release}/Everything/$basearch/os/") release_url = f"baseurl={baseurl}" updates_url = f"baseurl={args.mirror}/updates/{args.release}/Everything/$basearch/" else: release_url = f"metalink=https://mirrors.fedoraproject.org/metalink?repo=fedora-{args.release}&arch=$basearch" updates_url = ( "metalink=https://mirrors.fedoraproject.org/metalink?" f"repo=updates-released-f{args.release}&arch=$basearch" ) if args.releasever in FEDORA_KEYS_MAP: # The website uses short identifiers: https://pagure.io/fedora-web/websites/issue/196 shortid = FEDORA_KEYS_MAP[args.releasever][-8:] gpgid = f"keys/{shortid}.txt" else: gpgid = "fedora.gpg" gpgpath = Path(f"/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-{args.releasever}-{arch}") gpgurl = urllib.parse.urljoin("https://getfedora.org/static/", gpgid) repos = [Repo("fedora", f"Fedora {args.release.capitalize()} - base", release_url, gpgpath, gpgurl)] if args.release != 'rawhide': # On rawhide, the "updates" repo is the same as the "fedora" repo. # In other versions, the "fedora" repo is frozen at release, and "updates" provides any new packages. repos += [Repo("updates", f"Fedora {args.release.capitalize()} - updates", updates_url, gpgpath, gpgurl)] setup_dnf(args, root, repos) packages = {*args.packages} add_packages(args, packages, "fedora-release", "systemd") if fedora_release_cmp(args.release, "34") < 0: add_packages(args, packages, "glibc-minimal-langpack", conditional="glibc") if not do_run_build_script and args.bootable: add_packages(args, packages, "kernel-core", "kernel-modules", "binutils", "dracut") add_packages(args, packages, "systemd-udev", conditional="systemd") configure_dracut(args, packages, root) if do_run_build_script: packages.update(args.build_packages) if not do_run_build_script and args.network_veth: add_packages(args, packages, "systemd-networkd", conditional="systemd") install_packages_dnf(args, root, packages, do_run_build_script) root.joinpath("etc/locale.conf").write_text("LANG=C.UTF-8\n") # FIXME: should this be conditionalized on args.with_docs like in install_debian_or_ubuntu()? # But we set LANG=C.UTF-8 anyway. shutil.rmtree(root / "usr/share/locale", ignore_errors=True) @complete_step("Installing Mageia…") def install_mageia(args: CommandLineArguments, root: Path, do_run_build_script: bool) -> None: if args.mirror: baseurl = f"{args.mirror}/distrib/{args.release}/x86_64/media/core/" release_url = f"baseurl={baseurl}/release/" updates_url = f"baseurl={baseurl}/updates/" else: baseurl = f"https://www.mageia.org/mirrorlist/?release={args.release}&arch=x86_64§ion=core" release_url = f"mirrorlist={baseurl}&repo=release" updates_url = f"mirrorlist={baseurl}&repo=updates" gpgpath = Path("/etc/pki/rpm-gpg/RPM-GPG-KEY-Mageia") repos = [Repo("mageia", f"Mageia {args.release} Core Release", release_url, gpgpath), Repo("updates", f"Mageia {args.release} Core Updates", updates_url, gpgpath)] setup_dnf(args, root, repos) packages = {*args.packages} add_packages(args, packages, "basesystem-minimal") if not do_run_build_script and args.bootable: add_packages(args, packages, "kernel-server-latest", "binutils", "dracut") configure_dracut(args, packages, root) # Mageia ships /etc/50-mageia.conf that omits systemd from the initramfs and disables hostonly. # We override that again so our defaults get applied correctly on Mageia as well. root.joinpath("etc/dracut.conf.d/51-mkosi-override-mageia.conf").write_text( 'hostonly=no\n' 'omit_dracutmodules=""\n' ) if do_run_build_script: packages.update(args.build_packages) install_packages_dnf(args, root, packages, do_run_build_script) disable_pam_securetty(root) @complete_step("Installing OpenMandriva…") def install_openmandriva(args: CommandLineArguments, root: Path, do_run_build_script: bool) -> None: release = args.release.strip("'") arch = args.architecture or platform.machine() if release[0].isdigit(): release_model = "rock" elif release == "cooker": release_model = "cooker" else: release_model = release if args.mirror: baseurl = f"{args.mirror}/{release_model}/repository/{arch}/main" release_url = f"baseurl={baseurl}/release/" updates_url = f"baseurl={baseurl}/updates/" else: baseurl = f"http://mirrors.openmandriva.org/mirrors.php?platform={release_model}&arch={arch}&repo=main" release_url = f"mirrorlist={baseurl}&release=release" updates_url = f"mirrorlist={baseurl}&release=updates" gpgpath = Path("/etc/pki/rpm-gpg/RPM-GPG-KEY-OpenMandriva") repos = [Repo("openmandriva", f"OpenMandriva {release_model} Main", release_url, gpgpath), Repo("updates", f"OpenMandriva {release_model} Main Updates", updates_url, gpgpath)] setup_dnf(args, root, repos) packages = {*args.packages} # well we may use basesystem here, but that pulls lot of stuff add_packages(args, packages, "basesystem-minimal", "systemd") if not do_run_build_script and args.bootable: add_packages(args, packages, "systemd-boot", "systemd-cryptsetup", conditional="systemd") add_packages(args, packages, "kernel-release-server", "binutils", "dracut", "timezone") configure_dracut(args, packages, root) if args.network_veth: add_packages(args, packages, "systemd-networkd", conditional="systemd") if do_run_build_script: packages.update(args.build_packages) install_packages_dnf(args, root, packages, do_run_build_script) disable_pam_securetty(root) def install_centos_repos_old(args: CommandLineArguments, root: Path, epel_release: int) -> None: # Repos for CentOS 7 and earlier gpgpath = Path(f"/etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-{args.release}") gpgurl = f"https://www.centos.org/keys/RPM-GPG-KEY-CentOS-{args.release}" epel_gpgpath = Path(f"/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{epel_release}") epel_gpgurl = f"https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{epel_release}" if args.mirror: release_url = f"baseurl={args.mirror}/centos/{args.release}/os/x86_64" updates_url = f"baseurl={args.mirror}/centos/{args.release}/updates/x86_64/" extras_url = f"baseurl={args.mirror}/centos/{args.release}/extras/x86_64/" centosplus_url = f"baseurl={args.mirror}/centos/{args.release}/centosplus/x86_64/" epel_url = f"baseurl={args.mirror}/epel/{epel_release}/x86_64/" else: release_url = f"mirrorlist=http://mirrorlist.centos.org/?release={args.release}&arch=x86_64&repo=os" updates_url = f"mirrorlist=http://mirrorlist.centos.org/?release={args.release}&arch=x86_64&repo=updates" extras_url = f"mirrorlist=http://mirrorlist.centos.org/?release={args.release}&arch=x86_64&repo=extras" centosplus_url = f"mirrorlist=http://mirrorlist.centos.org/?release={args.release}&arch=x86_64&repo=centosplus" epel_url = f"mirrorlist=https://mirrors.fedoraproject.org/mirrorlist?repo=epel-{epel_release}&arch=x86_64" repos = [Repo("base", f"CentOS-{args.release} - Base", release_url, gpgpath, gpgurl), Repo("updates", f"CentOS-{args.release} - Updates", updates_url, gpgpath, gpgurl), Repo("extras", f"CentOS-{args.release} - Extras", extras_url, gpgpath, gpgurl), Repo("centosplus", f"CentOS-{args.release} - Plus", centosplus_url, gpgpath, gpgurl)] if 'epel' in args.distribution.name: repos += [Repo("epel", f"name=Extra Packages for Enterprise Linux {epel_release} - $basearch", epel_url, epel_gpgpath, epel_gpgurl)] setup_dnf(args, root, repos) def install_centos_repos_new(args: CommandLineArguments, root: Path, epel_release: int) -> None: # Repos for CentOS 8 and later gpgpath = Path("/etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial") gpgurl = "https://www.centos.org/keys/RPM-GPG-KEY-CentOS-Official" epel_gpgpath = Path(f"/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{epel_release}") epel_gpgurl = f"https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{epel_release}" if args.mirror: appstream_url = f"baseurl={args.mirror}/centos/{args.release}/AppStream/x86_64/os" baseos_url = f"baseurl={args.mirror}/centos/{args.release}/BaseOS/x86_64/os" extras_url = f"baseurl={args.mirror}/centos/{args.release}/extras/x86_64/os" centosplus_url = f"baseurl={args.mirror}/centos/{args.release}/centosplus/x86_64/os" epel_url = f"baseurl={args.mirror}/epel/{epel_release}/Everything/x86_64" else: appstream_url = f"mirrorlist=http://mirrorlist.centos.org/?release={args.release}&arch=x86_64&repo=AppStream" baseos_url = f"mirrorlist=http://mirrorlist.centos.org/?release={args.release}&arch=x86_64&repo=BaseOS" extras_url = f"mirrorlist=http://mirrorlist.centos.org/?release={args.release}&arch=x86_64&repo=extras" centosplus_url = f"mirrorlist=http://mirrorlist.centos.org/?release={args.release}&arch=x86_64&repo=centosplus" epel_url = f"mirrorlist=https://mirrors.fedoraproject.org/mirrorlist?repo=epel-{epel_release}&arch=x86_64" repos = [Repo("AppStream", f"CentOS-{args.release} - AppStream", appstream_url, gpgpath, gpgurl), Repo("BaseOS", f"CentOS-{args.release} - Base", baseos_url, gpgpath, gpgurl), Repo("extras", f"CentOS-{args.release} - Extras", extras_url, gpgpath, gpgurl), Repo("centosplus", f"CentOS-{args.release} - Plus", centosplus_url, gpgpath, gpgurl)] if 'epel' in args.distribution.name: repos += [Repo("epel", f"name=Extra Packages for Enterprise Linux {epel_release} - $basearch", epel_url, epel_gpgpath, epel_gpgurl)] setup_dnf(args, root, repos) def install_rocky_repos(args: CommandLineArguments, root: Path, epel_release: int) -> None: # Repos for Rocky Linux 8 and later gpgpath = Path("/etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial") gpgurl = "https://download.rockylinux.org/pub/rocky/RPM-GPG-KEY-rockyofficial" epel_gpgpath = Path(f"/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{epel_release}") epel_gpgurl = f"https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{epel_release}" if args.mirror: appstream_url = f"baseurl={args.mirror}/rocky/{args.release}/AppStream/x86_64/os" baseos_url = f"baseurl={args.mirror}/rocky/{args.release}/BaseOS/x86_64/os" extras_url = f"baseurl={args.mirror}/rocky/{args.release}/extras/x86_64/os" plus_url = f"baseurl={args.mirror}/rocky/{args.release}/plus/x86_64/os" epel_url = f"baseurl={args.mirror}/epel/{epel_release}/Everything/x86_64" else: appstream_url = ( f"mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=x86_64&repo=AppStream-{args.release}" ) baseos_url = f"mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=x86_64&repo=BaseOS-{args.release}" extras_url = f"mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=x86_64&repo=extras-{args.release}" plus_url = f"mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=x86_64&repo=rockyplus-{args.release}" epel_url = f"mirrorlist=https://mirrors.fedoraproject.org/mirrorlist?repo=epel-{epel_release}&arch=x86_64" repos = [Repo("AppStream", f"Rocky-{args.release} - AppStream", appstream_url, gpgpath, gpgurl), Repo("BaseOS", f"Rocky-{args.release} - Base", baseos_url, gpgpath, gpgurl), Repo("extras", f"Rocky-{args.release} - Extras", extras_url, gpgpath, gpgurl), Repo("plus", f"Rocky-{args.release} - Plus", plus_url, gpgpath, gpgurl)] if 'epel' in args.distribution.name: repos += [Repo("epel", f"name=Extra Packages for Enterprise Linux {epel_release} - $basearch", epel_url, epel_gpgpath, epel_gpgurl)] setup_dnf(args, root, repos) def install_alma_repos(args: CommandLineArguments, root: Path, epel_release: int) -> None: # Repos for Alma Linux 8 and later gpgpath = Path("/etc/pki/rpm-gpg/RPM-GPG-KEY-AlmaLinux") gpgurl = "https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux" epel_gpgpath = Path(f"/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{epel_release}") epel_gpgurl = f"https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{epel_release}" if args.mirror: appstream_url = f"baseurl={args.mirror}/almalinux/{args.release}/AppStream/x86_64/os" baseos_url = f"baseurl={args.mirror}/almalinux/{args.release}/BaseOS/x86_64/os" extras_url = f"baseurl={args.mirror}/almalinux/{args.release}/extras/x86_64/os" powertools_url = f"baseurl={args.mirror}/almalinux/{args.release}/PowerTools/x86_64/os" ha_url = f"baseurl={args.mirror}/almalinux/{args.release}/HighAvailability/x86_64/os" epel_url = f"baseurl={args.mirror}/epel/{epel_release}/Everything/x86_64" else: appstream_url = f"mirrorlist=https://mirrors.almalinux.org/mirrorlist/{args.release}/appstream" baseos_url = f"mirrorlist=https://mirrors.almalinux.org/mirrorlist/{args.release}/baseos" extras_url = f"mirrorlist=https://mirrors.almalinux.org/mirrorlist/{args.release}/extras" powertools_url = f"mirrorlist=https://mirrors.almalinux.org/mirrorlist/{args.release}/powertools" ha_url = f"mirrorlist=https://mirrors.almalinux.org/mirrorlist/{args.release}/ha" epel_url = f"mirrorlist=https://mirrors.fedoraproject.org/mirrorlist?repo=epel-{epel_release}&arch=x86_64" repos = [Repo("AppStream", f"AlmaLinux-{args.release} - AppStream", appstream_url, gpgpath, gpgurl), Repo("BaseOS", f"AlmaLinux-{args.release} - Base", baseos_url, gpgpath, gpgurl), Repo("extras", f"AlmaLinux-{args.release} - Extras", extras_url, gpgpath, gpgurl), Repo("Powertools", f"AlmaLinux-{args.release} - Powertools", powertools_url, gpgpath, gpgurl), Repo("HighAvailability", f"AlmaLinux-{args.release} - HighAvailability", ha_url, gpgpath, gpgurl)] if 'epel' in args.distribution.name: repos += [Repo("epel", f"name=Extra Packages for Enterprise Linux {epel_release} - $basearch", epel_url, epel_gpgpath, epel_gpgurl)] setup_dnf(args, root, repos) def is_older_than_centos8(release: str) -> bool: # CentOS 7 contains some very old versions of certain libraries # which require workarounds in different places. # Additionally the repositories have been changed between 7 and 8 epel_release = release.split(".")[0] try: return int(epel_release) <= 7 except ValueError: return False @complete_step("Installing CentOS…") def install_centos(args: CommandLineArguments, root: Path, do_run_build_script: bool) -> None: old = is_older_than_centos8(args.release) epel_release = int(args.release.split(".")[0]) if old: install_centos_repos_old(args, root, epel_release) else: install_centos_repos_new(args, root, epel_release) packages = {*args.packages} add_packages(args, packages, "centos-release", "systemd") if not do_run_build_script and args.bootable: add_packages(args, packages, "kernel", "dracut", "binutils") configure_dracut(args, packages, root) if old: add_packages( args, packages, "grub2-efi", "grub2-tools", "grub2-efi-x64-modules", "shim-x64", "efibootmgr", "efivar-libs", ) else: # this does not exist on CentOS 7 add_packages(args, packages, "systemd-udev", conditional="systemd") if do_run_build_script: packages.update(args.build_packages) if args.distribution == Distribution.centos_epel: add_packages(args, packages, "epel-release") if do_run_build_script: packages.update(args.build_packages) if not do_run_build_script and args.distribution == Distribution.centos_epel and args.network_veth: add_packages(args, packages, "systemd-networkd", conditional="systemd") install_packages_dnf(args, root, packages, do_run_build_script) @complete_step("Installing Rocky Linux…") def install_rocky(args: CommandLineArguments, root: Path, do_run_build_script: bool) -> None: epel_release = int(args.release.split(".")[0]) install_rocky_repos(args, root, epel_release) packages = {*args.packages} add_packages(args, packages, "rocky-release", "systemd") if not do_run_build_script and args.bootable: add_packages(args, packages, "kernel", "dracut", "binutils") configure_dracut(args, packages, root) add_packages(args, packages, "systemd-udev", conditional="systemd") if do_run_build_script: packages.update(args.build_packages) if args.distribution == Distribution.rocky_epel: add_packages(args, packages, "epel-release") if do_run_build_script: packages.update(args.build_packages) if not do_run_build_script and args.distribution == Distribution.rocky_epel and args.network_veth: add_packages(args, packages, "systemd-networkd", conditional="systemd") install_packages_dnf(args, root, packages, do_run_build_script) @complete_step("Installing Alma Linux…") def install_alma(args: CommandLineArguments, root: Path, do_run_build_script: bool) -> None: epel_release = int(args.release.split(".")[0]) install_alma_repos(args, root, epel_release) packages = {*args.packages} add_packages(args, packages, "almalinux-release", "systemd") if not do_run_build_script and args.bootable: add_packages(args, packages, "kernel", "dracut", "binutils") configure_dracut(args, packages, root) add_packages(args, packages, "systemd-udev", conditional="systemd") if do_run_build_script: packages.update(args.build_packages) if args.distribution == Distribution.alma_epel: add_packages(args, packages, "epel-release") if do_run_build_script: packages.update(args.build_packages) if not do_run_build_script and args.distribution == Distribution.alma_epel and args.network_veth: add_packages(args, packages, "systemd-networkd", conditional="systemd") install_packages_dnf(args, root, packages, do_run_build_script) def debootstrap_knows_arg(arg: str) -> bool: return bytes("invalid option", "UTF-8") not in run(["debootstrap", arg], stdout=PIPE, check=False).stdout def invoke_apt( args: CommandLineArguments, do_run_build_script: bool, root: Path, command: str, packages: Iterable[str], ) -> None: cmdline = ["/usr/bin/apt-get", "--assume-yes", "--no-install-recommends", "--auto-remove", command, *packages] env = { "DEBIAN_FRONTEND": "noninteractive", "DEBCONF_NONINTERACTIVE_SEEN": "true", } if not do_run_build_script and args.bootable and args.with_unified_kernel_images: # Disable dracut postinstall script for this apt-get run. env["INITRD"] = "No" run_workspace_command(args, root, cmdline, network=True, env=env) def install_debian_or_ubuntu(args: CommandLineArguments, root: Path, *, do_run_build_script: bool) -> None: # Either the image builds or it fails and we restart, we don't need safety fsyncs when bootstrapping # Add it before debootstrap, as the second stage already uses dpkg from the chroot dpkg_io_conf = root / "etc/dpkg/dpkg.cfg.d/unsafe_io" os.makedirs(dpkg_io_conf.parent, mode=0o755, exist_ok=True) dpkg_io_conf.write_text("force-unsafe-io\n") # debootstrap fails if a base image is used with an already populated root, so skip it. if args.base_image is None: repos = set(args.repositories) or {"main"} # Ubuntu needs the 'universe' repo to install 'dracut' if args.distribution == Distribution.ubuntu and args.bootable: repos.add("universe") cmdline: List[PathString] = [ "debootstrap", "--variant=minbase", "--merged-usr", f"--components={','.join(repos)}", ] if args.architecture is not None: debarch = DEBIAN_ARCHITECTURES.get(args.architecture) cmdline += [f"--arch={debarch}"] # Let's use --no-check-valid-until only if debootstrap knows it if debootstrap_knows_arg("--no-check-valid-until"): cmdline += ["--no-check-valid-until"] assert args.mirror is not None cmdline += [args.release, root, args.mirror] run(cmdline) # Install extra packages via the secondary APT run, because it is smarter and can deal better with any # conflicts. dbus and libpam-systemd are optional dependencies for systemd in debian so we include them # explicitly. extra_packages: Set[str] = set() add_packages(args, extra_packages, "systemd", "systemd-sysv", "dbus", "libpam-systemd") extra_packages.update(args.packages) if do_run_build_script: extra_packages.update(args.build_packages) if not do_run_build_script and args.bootable: add_packages(args, extra_packages, "dracut", "binutils") configure_dracut(args, extra_packages, root) if args.distribution == Distribution.ubuntu: add_packages(args, extra_packages, "linux-generic") else: add_packages(args, extra_packages, "linux-image-amd64") if args.get_partition(PartitionIdentifier.bios): add_packages(args, extra_packages, "grub-pc") if args.output_format == OutputFormat.gpt_btrfs: add_packages(args, extra_packages, "btrfs-progs") if not do_run_build_script and args.ssh: add_packages(args, extra_packages, "openssh-server") # Debian policy is to start daemons by default. The policy-rc.d script can be used choose which ones to # start. Let's install one that denies all daemon startups. # See https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt for more information. # Note: despite writing in /usr/sbin, this file is not shipped by the OS and instead should be managed by # the admin. policyrcd = root / "usr/sbin/policy-rc.d" policyrcd.write_text("#!/bin/sh\nexit 101\n") policyrcd.chmod(0o755) doc_paths = [ "/usr/share/locale", "/usr/share/doc", "/usr/share/man", "/usr/share/groff", "/usr/share/info", "/usr/share/lintian", "/usr/share/linda", ] if not args.with_docs: # Remove documentation installed by debootstrap cmdline = ["/bin/rm", "-rf", *doc_paths] run_workspace_command(args, root, cmdline) # Create dpkg.cfg to ignore documentation on new packages dpkg_nodoc_conf = root / "etc/dpkg/dpkg.cfg.d/01_nodoc" with dpkg_nodoc_conf.open("w") as f: f.writelines(f"path-exclude {d}/*\n" for d in doc_paths) if (not do_run_build_script and args.bootable and args.with_unified_kernel_images and args.distribution == Distribution.debian and args.release == "unstable" and args.base_image is None): # systemd-boot won't boot unified kernel images generated without a BUILD_ID or VERSION_ID in # /etc/os-release. with root.joinpath("etc/os-release").open("a") as f: f.write("BUILD_ID=unstable\n") invoke_apt(args, do_run_build_script, root, "install", extra_packages) policyrcd.unlink() dpkg_io_conf.unlink() if not args.with_docs and args.base_image is not None: # Don't ship dpkg config files in extensions, they belong with dpkg in the base image. dpkg_nodoc_conf.unlink() # type: ignore if args.base_image is None: # Debian still has pam_securetty module enabled, disable it in the base image. disable_pam_securetty(root) if args.distribution == Distribution.debian and "systemd" in extra_packages: # The default resolv.conf points to 127.0.0.1, and resolved is disabled, fix it in # the base image. root.joinpath("etc/resolv.conf").unlink() root.joinpath("etc/resolv.conf").symlink_to("../run/systemd/resolve/resolv.conf") run(["systemctl", "--root", root, "enable", "systemd-resolved"]) @complete_step("Installing Debian…") def install_debian(args: CommandLineArguments, root: Path, do_run_build_script: bool) -> None: install_debian_or_ubuntu(args, root, do_run_build_script=do_run_build_script) @complete_step("Installing Ubuntu…") def install_ubuntu(args: CommandLineArguments, root: Path, do_run_build_script: bool) -> None: install_debian_or_ubuntu(args, root, do_run_build_script=do_run_build_script) def run_pacman(root: Path, pacman_conf: Path, packages: Set[str]) -> None: try: run(["pacman-key", "--config", pacman_conf, "--init"]) run(["pacman-key", "--config", pacman_conf, "--populate"]) run(["pacman", "--config", pacman_conf, "--noconfirm", "-Sy", *sort_packages(packages)]) finally: # Kill the gpg-agent started by pacman and pacman-key. run(["gpgconf", "--homedir", root / "etc/pacman.d/gnupg", "--kill", "all"]) def patch_locale_gen(args: CommandLineArguments, root: Path) -> None: # If /etc/locale.gen exists, uncomment the desired locale and leave the rest of the file untouched. # If it doesn’t exist, just write the desired locale in it. try: def _patch_line(line: str) -> str: if line.startswith("#en_US.UTF-8"): return line[1:] return line patch_file(root / "etc/locale.gen", _patch_line) except FileNotFoundError: root.joinpath("etc/locale.gen").write_text("en_US.UTF-8 UTF-8\n") @complete_step("Installing Arch Linux…") def install_arch(args: CommandLineArguments, root: Path, do_run_build_script: bool) -> None: if args.release is not None: MkosiPrinter.info("Distribution release specification is not supported for Arch Linux, ignoring.") if args.mirror: if platform.machine() == "aarch64": server = f"Server = {args.mirror}/$arch/$repo" else: server = f"Server = {args.mirror}/$repo/os/$arch" else: # Instead of harcoding a single mirror, we retrieve a list of mirrors from Arch's mirrorlist # generator ordered by mirror score. This usually results in a solid mirror and also ensures that we # have fallback mirrors available if necessary. Finally, the mirrors will be more likely to be up to # date and we won't end up with a stable release that hardcodes a broken mirror. mirrorlist = workspace(root) / "mirrorlist" with urllib.request.urlopen( "https://www.archlinux.org/mirrorlist/?country=all&protocol=https&ip_version=4&use_mirror_status=on" ) as r: mirrors = r.readlines() uncommented = [line.decode("utf-8")[1:] for line in mirrors] with mirrorlist.open("w") as f: f.writelines(uncommented) server = f"Include = {mirrorlist}" # Create base layout for pacman and pacman-key os.makedirs(root / "var/lib/pacman", 0o755, exist_ok=True) os.makedirs(root / "etc/pacman.d/gnupg", 0o755, exist_ok=True) # Permissions on these directories are all 0o777 because of 'mount --bind' # limitations but pacman expects them to be 0o755 so we fix them before # calling pacstrap (except /var/tmp which is 0o1777). fix_permissions_dirs = { "boot": 0o755, "etc": 0o755, "etc/pacman.d": 0o755, "var": 0o755, "var/lib": 0o755, "var/cache": 0o755, "var/cache/pacman": 0o755, "var/tmp": 0o1777, "run": 0o755, } for dir, permissions in fix_permissions_dirs.items(): path = root / dir if path.exists(): path.chmod(permissions) pacman_conf = workspace(root) / "pacman.conf" with pacman_conf.open("w") as f: f.write( dedent( f"""\ [options] RootDir = {root} LogFile = /dev/null CacheDir = {root}/var/cache/pacman/pkg/ GPGDir = {root}/etc/pacman.d/gnupg/ HookDir = {root}/etc/pacman.d/hooks/ HoldPkg = pacman glibc Architecture = auto Color CheckSpace SigLevel = Required DatabaseOptional TrustAll [core] {server} [extra] {server} [community] {server} """ ) ) if args.repositories: for repository in args.repositories: # repositories must be passed in the form :: repository_name, repository_server = repository.split("::", 1) # note: for additional repositories, signature checking options are set to pacman's default values f.write( dedent( f"""\ [{repository_name}] SigLevel = Optional TrustedOnly Server = {repository_server} """ ) ) if not do_run_build_script and args.bootable: hooks_dir = root / "etc/pacman.d/hooks" scripts_dir = root / "etc/pacman.d/scripts" os.makedirs(hooks_dir, 0o755, exist_ok=True) os.makedirs(scripts_dir, 0o755, exist_ok=True) # Disable depmod pacman hook as depmod is handled by kernel-install as well. hooks_dir.joinpath("60-depmod.hook").symlink_to("/dev/null") write_resource(hooks_dir / "90-mkosi-kernel-add.hook", "mkosi.resources.arch", "90_kernel_add.hook") write_resource(scripts_dir / "mkosi-kernel-add", "mkosi.resources.arch", "kernel_add.sh", executable=True) write_resource(hooks_dir / "60-mkosi-kernel-remove.hook", "mkosi.resources.arch", "60_kernel_remove.hook") write_resource(scripts_dir / "mkosi-kernel-remove", "mkosi.resources.arch", "kernel_remove.sh", executable=True) if args.get_partition(PartitionIdentifier.esp): write_resource(hooks_dir / "91-mkosi-bootctl-update.hook", "mkosi.resources.arch", "91_bootctl_update.hook") if args.get_partition(PartitionIdentifier.bios): write_resource(hooks_dir / "90-mkosi-vmlinuz-add.hook", "mkosi.resources.arch", "90_vmlinuz_add.hook") write_resource(hooks_dir / "60-mkosi-vmlinuz-remove.hook", "mkosi.resources.arch", "60_vmlinuz_remove.hook") keyring = "archlinux" if platform.machine() == "aarch64": keyring += "arm" packages: Set[str] = set() add_packages(args, packages, "base") if not do_run_build_script and args.bootable: if args.output_format == OutputFormat.gpt_btrfs: add_packages(args, packages, "btrfs-progs") elif args.output_format == OutputFormat.gpt_xfs: add_packages(args, packages, "xfsprogs") if args.encrypt: add_packages(args, packages, "cryptsetup", "device-mapper") if args.get_partition(PartitionIdentifier.bios): add_packages(args, packages, "grub") add_packages(args, packages, "dracut", "binutils") configure_dracut(args, packages, root) packages.update(args.packages) official_kernel_packages = { "linux", "linux-lts", "linux-hardened", "linux-zen", } has_kernel_package = official_kernel_packages.intersection(args.packages) if not do_run_build_script and args.bootable and not has_kernel_package: # No user-specified kernel add_packages(args, packages, "linux") if do_run_build_script: packages.update(args.build_packages) if not do_run_build_script and args.ssh: add_packages(args, packages, "openssh") with mount_api_vfs(args, root): run_pacman(root, pacman_conf, packages) patch_locale_gen(args, root) run_workspace_command(args, root, ["/usr/bin/locale-gen"]) root.joinpath("etc/locale.conf").write_text("LANG=en_US.UTF-8\n") # Arch still uses pam_securetty which prevents root login into # systemd-nspawn containers. See https://bugs.archlinux.org/task/45903. disable_pam_securetty(root) @complete_step("Installing openSUSE…") def install_opensuse(args: CommandLineArguments, root: Path, do_run_build_script: bool) -> None: release = args.release.strip('"') # If the release looks like a timestamp, it's Tumbleweed. 13.x is legacy (14.x won't ever appear). For # anything else, let's default to Leap. if release.isdigit() or release == "tumbleweed": release_url = f"{args.mirror}/tumbleweed/repo/oss/" updates_url = f"{args.mirror}/update/tumbleweed/" elif release == "leap": release_url = f"{args.mirror}/distribution/leap/15.1/repo/oss/" updates_url = f"{args.mirror}/update/leap/15.1/oss/" elif release == "current": release_url = f"{args.mirror}/distribution/openSUSE-stable/repo/oss/" updates_url = f"{args.mirror}/update/openSUSE-current/" elif release == "stable": release_url = f"{args.mirror}/distribution/openSUSE-stable/repo/oss/" updates_url = f"{args.mirror}/update/openSUSE-stable/" else: release_url = f"{args.mirror}/distribution/leap/{release}/repo/oss/" updates_url = f"{args.mirror}/update/leap/{release}/oss/" # Configure the repositories: we need to enable packages caching here to make sure that the package cache # stays populated after "zypper install". run(["zypper", "--root", root, "addrepo", "-ck", release_url, "repo-oss"]) run(["zypper", "--root", root, "addrepo", "-ck", updates_url, "repo-update"]) if not args.with_docs: root.joinpath("etc/zypp/zypp.conf").write_text("rpm.install.excludedocs = yes\n") packages = {*args.packages} add_packages(args, packages, "systemd") if release.startswith("42."): add_packages(args, packages, "patterns-openSUSE-minimal_base") else: add_packages(args, packages, "patterns-base-minimal_base") if not do_run_build_script and args.bootable: add_packages(args, packages, "kernel-default", "dracut", "binutils") configure_dracut(args, packages, root) if args.get_partition(PartitionIdentifier.bios): add_packages(args, packages, "grub2") if not do_run_build_script and args.encrypt: add_packages(args, packages, "device-mapper") if args.output_format in (OutputFormat.subvolume, OutputFormat.gpt_btrfs): add_packages(args, packages, "btrfsprogs") if do_run_build_script: packages.update(args.build_packages) if not do_run_build_script and args.ssh: add_packages(args, packages, "openssh-server") cmdline: List[PathString] = [ "zypper", "--root", root, "--gpg-auto-import-keys", "install", "-y", "--no-recommends", "--download-in-advance", *sort_packages(packages), ] with mount_api_vfs(args, root): run(cmdline) # Disable package caching in the image that was enabled previously to populate the package cache. run(["zypper", "--root", root, "modifyrepo", "-K", "repo-oss"]) run(["zypper", "--root", root, "modifyrepo", "-K", "repo-update"]) if args.password == "": shutil.copy2(root / "usr/etc/pam.d/common-auth", root / "etc/pam.d/common-auth") def jj(line: str) -> str: if "pam_unix.so" in line: return f"{line.strip()} nullok" return line patch_file(root / "etc/pam.d/common-auth", jj) if args.autologin: # copy now, patch later (in set_autologin()) shutil.copy2(root / "usr/etc/pam.d/login", root / "etc/pam.d/login") @complete_step("Installing Gentoo…") def install_gentoo( args: CommandLineArguments, root: Path, do_run_build_script: bool ) -> None: from .gentoo import Gentoo # this will fetch/fix stage3 tree and portage confgired for mkosi gentoo = Gentoo(args, root, do_run_build_script) if gentoo.pkgs_fs: gentoo.invoke_emerge(args, root, pkgs=gentoo.pkgs_fs) if not do_run_build_script and args.bootable: # Please don't move, needs to be called before installing dracut # dracut is part of gentoo_pkgs_boot configure_dracut(args, packages={"dracut"}, root=root) gentoo.invoke_emerge(args, root, pkgs=gentoo.pkgs_boot) if args.packages: gentoo.invoke_emerge(args, root, pkgs=args.packages) if do_run_build_script: gentoo.invoke_emerge(args, root, pkgs=args.build_packages) def install_distribution(args: CommandLineArguments, root: Path, do_run_build_script: bool, cached: bool) -> None: if cached: return install: Dict[Distribution, Callable[[CommandLineArguments, Path, bool], None]] = { Distribution.fedora: install_fedora, Distribution.centos: install_centos, Distribution.centos_epel: install_centos, Distribution.mageia: install_mageia, Distribution.debian: install_debian, Distribution.ubuntu: install_ubuntu, Distribution.arch: install_arch, Distribution.opensuse: install_opensuse, Distribution.clear: install_clear, Distribution.photon: install_photon, Distribution.openmandriva: install_openmandriva, Distribution.rocky: install_rocky, Distribution.rocky_epel: install_rocky, Distribution.alma: install_alma, Distribution.alma_epel: install_alma, Distribution.gentoo: install_gentoo, } disable_kernel_install(args, root) with mount_cache(args, root): install[args.distribution](args, root, do_run_build_script) reenable_kernel_install(args, root) def remove_packages(args: CommandLineArguments, root: Path) -> None: """Remove packages listed in args.remove_packages""" remove: Callable[[List[str]], None] if (args.distribution.package_type == PackageType.rpm and args.distribution != Distribution.photon): remove = lambda p: invoke_dnf(args, root, 'remove', p) elif args.distribution.package_type == PackageType.deb: remove = lambda p: invoke_apt(args, False, root, "purge", p) else: # FIXME: implement removal for other package managers: tdnf, swupd, pacman return if args.remove_packages: with complete_step(f"Removing {len(args.packages)} packages…"): remove(args.remove_packages) def reset_machine_id(args: CommandLineArguments, root: Path, do_run_build_script: bool, for_cache: bool) -> None: """Make /etc/machine-id an empty file. This way, on the next boot is either initialized and committed (if /etc is writable) or the image runs with a transient machine ID, that changes on each boot (if the image is read-only). """ if do_run_build_script: return if for_cache: return with complete_step("Resetting machine ID"): machine_id = root / "etc/machine-id" try: machine_id.unlink() except FileNotFoundError: pass machine_id.touch() dbus_machine_id = root / "var/lib/dbus/machine-id" try: dbus_machine_id.unlink() except FileNotFoundError: pass else: dbus_machine_id.symlink_to("../../../etc/machine-id") def reset_random_seed(args: CommandLineArguments, root: Path) -> None: """Remove random seed file, so that it is initialized on first boot""" random_seed = root / "var/lib/systemd/random-seed" if not random_seed.exists(): return with complete_step("Removing random seed"): random_seed.unlink() def set_root_password(args: CommandLineArguments, root: Path, do_run_build_script: bool, cached: bool) -> None: "Set the root account password, or just delete it so it's easy to log in" if do_run_build_script: return if cached: return if args.password == "": with complete_step("Deleting root password"): def delete_root_pw(line: str) -> str: if line.startswith("root:"): return ":".join(["root", ""] + line.split(":")[2:]) return line patch_file(root / "etc/passwd", delete_root_pw) elif args.password: with complete_step("Setting root password"): if args.password_is_hashed: password = args.password else: password = crypt.crypt(args.password, crypt.mksalt(crypt.METHOD_SHA512)) def set_root_pw(line: str) -> str: if line.startswith("root:"): return ":".join(["root", password] + line.split(":")[2:]) return line patch_file(root / "etc/shadow", set_root_pw) def invoke_fstrim(args: CommandLineArguments, root: Path, do_run_build_script: bool, for_cache: bool) -> None: if do_run_build_script: return if is_generated_root(args): return if not args.output_format.is_disk(): return if for_cache: return with complete_step("Trimming File System"): run(["fstrim", "-v", root], check=False) def pam_add_autologin(root: Path, ttys: List[str]) -> None: with open(root / "etc/pam.d/login", "r+") as f: original = f.read() f.seek(0) for tty in ttys: # Some PAM versions require the /dev/ prefix, others don't. Just add both variants. f.write(f"auth sufficient pam_succeed_if.so tty = {tty}\n") f.write(f"auth sufficient pam_succeed_if.so tty = /dev/{tty}\n") f.write(original) def set_autologin(args: CommandLineArguments, root: Path, do_run_build_script: bool, cached: bool) -> None: if do_run_build_script or cached or not args.autologin: return with complete_step("Setting up autologin…"): add_dropin_config_from_resource(root, "console-getty.service", "autologin", "mkosi.resources", "console_getty_autologin.conf") ttys = [] ttys += ["pts/0"] add_dropin_config_from_resource(root, "serial-getty@ttyS0.service", "autologin", "mkosi.resources", "serial_getty_autologin.conf") ttys += ["ttyS0"] add_dropin_config_from_resource(root, "getty@tty1.service", "autologin", "mkosi.resources", "getty_autologin.conf") ttys += ["tty1"] ttys += ["console"] pam_add_autologin(root, ttys) def set_serial_terminal(args: CommandLineArguments, root: Path, do_run_build_script: bool, cached: bool) -> None: """Override TERM for the serial console with the terminal type from the host.""" if do_run_build_script or cached or not args.qemu_headless: return with complete_step("Configuring serial tty (/dev/ttyS0)…"): columns, lines = shutil.get_terminal_size(fallback=(80, 24)) add_dropin_config(root, "serial-getty@ttyS0.service", "term", f"""\ [Service] Environment=TERM={os.getenv('TERM', 'vt220')} Environment=COLUMNS={columns} Environment=LINES={lines} TTYColumns={columns} TTYRows={lines} """) def nspawn_params_for_build_sources(args: CommandLineArguments, sft: SourceFileTransfer) -> List[str]: params = [] if args.build_sources is not None: params += ["--setenv=SRCDIR=/root/src", "--chdir=/root/src"] if sft == SourceFileTransfer.mount: params += [f"--bind={args.build_sources}:/root/src"] if args.read_only: params += ["--overlay=+/root/src::/root/src"] else: params += ["--chdir=/root"] params += [f"--setenv={env}" for env in args.environment] return params def run_prepare_script(args: CommandLineArguments, root: Path, do_run_build_script: bool, cached: bool) -> None: if args.prepare_script is None: return if cached: return verb = "build" if do_run_build_script else "final" with mount_cache(args, root), complete_step("Running prepare script…"): # We copy the prepare script into the build tree. We'd prefer # mounting it into the tree, but for that we'd need a good # place to mount it to. But if we create that we might as well # just copy the file anyway. shutil.copy2(args.prepare_script, root_home(args, root) / "prepare") nspawn_params = nspawn_params_for_build_sources(args, SourceFileTransfer.mount) run_workspace_command(args, root, ["/root/prepare", verb], network=True, nspawn_params=nspawn_params) srcdir = root_home(args, root) / "src" if srcdir.exists(): os.rmdir(srcdir) os.unlink(root_home(args, root) / "prepare") def run_postinst_script( args: CommandLineArguments, root: Path, loopdev: Optional[Path], do_run_build_script: bool, for_cache: bool ) -> None: if args.postinst_script is None: return if for_cache: return verb = "build" if do_run_build_script else "final" with mount_cache(args, root), complete_step("Running postinstall script…"): # We copy the postinst script into the build tree. We'd prefer # mounting it into the tree, but for that we'd need a good # place to mount it to. But if we create that we might as well # just copy the file anyway. shutil.copy2(args.postinst_script, root_home(args, root) / "postinst") nspawn_params = [] # in order to have full blockdev access, i.e. for making grub2 bootloader changes # we need to have these bind mounts for a proper chroot setup if args.bootable: if loopdev is None: raise ValueError("Parameter 'loopdev' required for bootable images.") nspawn_params += nspawn_params_for_blockdev_access(args, loopdev) run_workspace_command( args, root, ["/root/postinst", verb], network=(args.with_network is True), nspawn_params=nspawn_params ) root_home(args, root).joinpath("postinst").unlink() def output_dir(args: CommandLineArguments) -> Path: return args.output_dir or Path(os.getcwd()) def run_finalize_script(args: CommandLineArguments, root: Path, do_run_build_script: bool, for_cache: bool) -> None: if args.finalize_script is None: return if for_cache: return verb = "build" if do_run_build_script else "final" with complete_step("Running finalize script…"): env = dict(cast(Tuple[str, str], v.split("=", maxsplit=1)) for v in args.environment) env = collections.ChainMap(dict(BUILDROOT=str(root), OUTPUTDIR=str(output_dir(args))), env, os.environ) run([args.finalize_script, verb], env=env) def install_boot_loader_clear(args: CommandLineArguments, root: Path, loopdev: Path) -> None: # clr-boot-manager uses blkid in the device backing "/" to # figure out uuid and related parameters. nspawn_params = nspawn_params_for_blockdev_access(args, loopdev) cmdline = ["/usr/bin/clr-boot-manager", "update", "-i"] run_workspace_command(args, root, cmdline, nspawn_params=nspawn_params) def install_boot_loader_centos_old_efi(args: CommandLineArguments, root: Path, loopdev: Path) -> None: nspawn_params = nspawn_params_for_blockdev_access(args, loopdev) # prepare EFI directory on ESP os.makedirs(root / "efi/EFI/centos", exist_ok=True) # patch existing or create minimal GRUB_CMDLINE config write_grub_config(args, root) # generate grub2 efi boot config cmdline = ["/sbin/grub2-mkconfig", "-o", "/efi/EFI/centos/grub.cfg"] run_workspace_command(args, root, cmdline, nspawn_params=nspawn_params) # if /sys/firmware/efi is not present within systemd-nspawn the grub2-mkconfig makes false assumptions, let's fix this def _fix_grub(line: str) -> str: if "linux16" in line: return line.replace("linux16", "linuxefi") elif "initrd16" in line: return line.replace("initrd16", "initrdefi") return line patch_file(root / "efi/EFI/centos/grub.cfg", _fix_grub) def install_boot_loader( args: CommandLineArguments, root: Path, loopdev: Optional[Path], do_run_build_script: bool, cached: bool ) -> None: if not args.bootable or do_run_build_script: return assert loopdev is not None if cached: return with complete_step("Installing boot loader…"): if args.get_partition(PartitionIdentifier.esp): if args.distribution == Distribution.clear: pass elif (args.distribution in (Distribution.centos, Distribution.centos_epel) and is_older_than_centos8(args.release)): install_boot_loader_centos_old_efi(args, root, loopdev) else: run_workspace_command(args, root, ["bootctl", "install"]) if args.get_partition(PartitionIdentifier.bios) and args.distribution != Distribution.clear: grub = ( "grub" if args.distribution in (Distribution.ubuntu, Distribution.debian, Distribution.arch, Distribution.gentoo) else "grub2" ) # TODO: Just use "grub" once https://github.com/systemd/systemd/pull/16645 is widely available. if args.distribution in (Distribution.ubuntu, Distribution.debian, Distribution.opensuse): grub = f"/usr/sbin/{grub}" install_grub(args, root, loopdev, grub) if args.distribution == Distribution.clear: install_boot_loader_clear(args, root, loopdev) def install_extra_trees(args: CommandLineArguments, root: Path, for_cache: bool) -> None: if not args.extra_trees: return if for_cache: return with complete_step("Copying in extra file trees…"): for tree in args.extra_trees: if tree.is_dir(): copy_path(tree, root) else: # unpack_archive() groks Paths, but mypy doesn't know this. # Pretend that tree is a str. shutil.unpack_archive(cast(str, tree), root) def install_skeleton_trees(args: CommandLineArguments, root: Path, cached: bool) -> None: if not args.skeleton_trees: return if cached: return with complete_step("Copying in skeleton file trees…"): for tree in args.skeleton_trees: if tree.is_dir(): copy_path(tree, root) else: # unpack_archive() groks Paths, but mypy doesn't know this. # Pretend that tree is a str. shutil.unpack_archive(cast(str, tree), root) def copy_git_files(src: Path, dest: Path, *, source_file_transfer: SourceFileTransfer) -> None: what_files = ["--exclude-standard", "--cached"] if source_file_transfer == SourceFileTransfer.copy_git_others: what_files += ["--others", "--exclude=.mkosi-*"] c = run(["git", "-C", src, "ls-files", "-z", *what_files], stdout=PIPE, text=False) files = {x.decode("utf-8") for x in c.stdout.rstrip(b"\0").split(b"\0")} # Add the .git/ directory in as well. if source_file_transfer == SourceFileTransfer.copy_git_more: top = os.path.join(src, ".git/") for path, _, filenames in os.walk(top): for filename in filenames: fp = os.path.join(path, filename) # full path fr = os.path.join(".git/", fp[len(top) :]) # relative to top files.add(fr) # Get submodule files c = run(["git", "-C", src, "submodule", "status", "--recursive"], stdout=PIPE, text=True) submodules = {x.split()[1] for x in c.stdout.splitlines()} # workaround for git-ls-files returning the path of submodules that we will # still parse files -= submodules for sm in submodules: c = run( ["git", "-C", os.path.join(src, sm), "ls-files", "-z"] + what_files, stdout=PIPE, text=False, ) files |= {os.path.join(sm, x.decode("utf-8")) for x in c.stdout.rstrip(b"\0").split(b"\0")} files -= submodules del c for path in files: src_path = os.path.join(src, path) dest_path = os.path.join(dest, path) directory = os.path.dirname(dest_path) os.makedirs(directory, exist_ok=True) copy_file(src_path, dest_path) def install_build_src(args: CommandLineArguments, root: Path, do_run_build_script: bool, for_cache: bool) -> None: if for_cache: return if args.build_script is None: return if do_run_build_script: with complete_step("Copying in build script…"): copy_file(args.build_script, root_home(args, root) / args.build_script.name) sft: Optional[SourceFileTransfer] = None resolve_symlinks: bool = False if do_run_build_script: sft = args.source_file_transfer resolve_symlinks = args.source_resolve_symlinks else: sft = args.source_file_transfer_final resolve_symlinks = args.source_resolve_symlinks_final if args.build_sources is None or sft is None: return with complete_step("Copying in sources…"): target = root_home(args, root) / "src" if sft in ( SourceFileTransfer.copy_git_others, SourceFileTransfer.copy_git_cached, SourceFileTransfer.copy_git_more, ): copy_git_files(args.build_sources, target, source_file_transfer=sft) elif sft == SourceFileTransfer.copy_all: ignore = shutil.ignore_patterns( ".git", ".mkosi-*", "*.cache-pre-dev", "*.cache-pre-inst", f"{args.output_dir.name}/" if args.output_dir else "mkosi.output/", f"{args.workspace_dir.name}/" if args.workspace_dir else "mkosi.workspace/", f"{args.cache_path.name}/" if args.cache_path else "mkosi.cache/", f"{args.build_dir.name}/" if args.build_dir else "mkosi.builddir/", f"{args.include_dir.name}/" if args.include_dir else "mkosi.includedir/", f"{args.install_dir.name}/" if args.install_dir else "mkosi.installdir/", ) shutil.copytree(args.build_sources, target, symlinks=not resolve_symlinks, ignore=ignore) def install_build_dest(args: CommandLineArguments, root: Path, do_run_build_script: bool, for_cache: bool) -> None: if do_run_build_script: return if for_cache: return if args.build_script is None: return with complete_step("Copying in build tree…"): copy_path(install_dir(args, root), root) def make_read_only(args: CommandLineArguments, root: Path, for_cache: bool, b: bool = True) -> None: if not args.read_only: return if for_cache: return if args.output_format not in (OutputFormat.gpt_btrfs, OutputFormat.subvolume): return if is_generated_root(args): return with complete_step("Marking root subvolume read-only"): btrfs_subvol_make_ro(root, b) def xz_binary() -> str: return "pxz" if shutil.which("pxz") else "xz" def compressor_command(option: Union[str, bool]) -> List[str]: """Returns a command suitable for compressing archives.""" if option == "xz": return [xz_binary(), "--check=crc32", "--lzma2=dict=1MiB", "-T0"] elif option == "zstd": return ["zstd", "-15", "-q", "-T0"] elif option is False: return ["cat"] else: die(f"Unknown compression {option}") def tar_binary() -> str: # Some distros (Mandriva) install BSD tar as "tar", hence prefer # "gtar" if it exists, which should be GNU tar wherever it exists. # We are interested in exposing same behaviour everywhere hence # it's preferable to use the same implementation of tar # everywhere. In particular given the limited/different SELinux # support in BSD tar and the different command line syntax # compared to GNU tar. return "gtar" if shutil.which("gtar") else "tar" def make_tar(args: CommandLineArguments, root: Path, do_run_build_script: bool, for_cache: bool) -> Optional[BinaryIO]: if do_run_build_script: return None if args.output_format != OutputFormat.tar: return None if for_cache: return None root_dir = root / "usr" if args.usr_only else root cmd: List[PathString] = [tar_binary(), "-C", root_dir, "-c", "--xattrs", "--xattrs-include=*"] if args.tar_strip_selinux_context: cmd += ["--xattrs-exclude=security.selinux"] compress = should_compress_output(args) if compress: cmd += ["--use-compress-program=" + " ".join(compressor_command(compress))] cmd += ["."] with complete_step("Creating archive…"): f: BinaryIO = cast(BinaryIO, tempfile.NamedTemporaryFile(dir=os.path.dirname(args.output), prefix=".mkosi-")) run(cmd, stdout=f) return f def scandir_recursive( root: Path, filter: Optional[Callable[[os.DirEntry[str]], T]] = None, ) -> Iterator[T]: """Recursively walk the tree starting at @root, optionally apply filter, yield non-none values""" queue: Deque[Union[str, Path]] = collections.deque([root]) while queue: for entry in os.scandir(queue.pop()): pred = filter(entry) if filter is not None else entry if pred is not None: yield cast(T, pred) if entry.is_dir(follow_symlinks=False): queue.append(entry.path) def find_files(root: Path) -> Iterator[Path]: """Generate a list of all filepaths relative to @root""" yield from scandir_recursive(root, lambda entry: Path(entry.path).relative_to(root)) def make_cpio( args: CommandLineArguments, root: Path, do_run_build_script: bool, for_cache: bool ) -> Optional[BinaryIO]: if do_run_build_script: return None if args.output_format != OutputFormat.cpio: return None if for_cache: return None root_dir = root / "usr" if args.usr_only else root with complete_step("Creating archive…"): f: BinaryIO = cast(BinaryIO, tempfile.NamedTemporaryFile(dir=os.path.dirname(args.output), prefix=".mkosi-")) compressor = compressor_command(should_compress_output(args)) files = find_files(root_dir) cmd: List[PathString] = [ "cpio", "-o", "--reproducible", "--null", "-H", "newc", "--quiet", "-D", root_dir ] with spawn(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as cpio: # https://github.com/python/mypy/issues/10583 assert cpio.stdin is not None with spawn(compressor, stdin=cpio.stdout, stdout=f, delay_interrupt=False): for file in files: cpio.stdin.write(os.fspath(file).encode("utf8") + b"\0") cpio.stdin.close() if cpio.wait() != 0: die("Failed to create archive") return f def generate_squashfs(args: CommandLineArguments, root: Path, for_cache: bool) -> Optional[BinaryIO]: if not args.output_format.is_squashfs(): return None if for_cache: return None command = args.mksquashfs_tool[0] if args.mksquashfs_tool else "mksquashfs" comp_args = args.mksquashfs_tool[1:] if args.mksquashfs_tool and args.mksquashfs_tool[1:] else ["-noappend"] compress = should_compress_fs(args) # mksquashfs default is true, so no need to specify anything to have the default compression. if isinstance(compress, str): comp_args += ["-comp", compress] elif compress is False: comp_args += ["-noI", "-noD", "-noF", "-noX"] with complete_step("Creating squashfs file system…"): f: BinaryIO = cast( BinaryIO, tempfile.NamedTemporaryFile(prefix=".mkosi-squashfs", dir=os.path.dirname(args.output)) ) run([command, root, f.name, *comp_args]) return f def generate_ext4(args: CommandLineArguments, root: Path, label: str, for_cache: bool) -> Optional[BinaryIO]: if args.output_format != OutputFormat.gpt_ext4: return None if for_cache: return None with complete_step("Creating ext4 root file system…"): f: BinaryIO = cast( BinaryIO, tempfile.NamedTemporaryFile(prefix=".mkosi-mkfs-ext4", dir=os.path.dirname(args.output)) ) f.truncate(args.root_size) run(["mkfs.ext4", "-I", "256", "-L", label, "-M", "/", "-d", root, f.name]) if args.minimize: with complete_step("Minimizing ext4 root file system…"): run(["resize2fs", "-M", f.name]) return f def generate_btrfs(args: CommandLineArguments, root: Path, label: str, for_cache: bool) -> Optional[BinaryIO]: if args.output_format != OutputFormat.gpt_btrfs: return None if for_cache: return None with complete_step("Creating minimal btrfs root file system…"): f: BinaryIO = cast(BinaryIO, tempfile.NamedTemporaryFile(prefix=".mkosi-mkfs-btrfs", dir=args.output.parent)) f.truncate(args.root_size) cmdline: Sequence[PathString] = [ "mkfs.btrfs", "-L", label, "-d", "single", "-m", "single", "--rootdir", root, f.name ] if args.minimize: try: run([*cmdline, "--shrink"]) except subprocess.CalledProcessError: # The --shrink option was added in btrfs-tools 4.14.1, before that it was the default behaviour. # If the above fails, let's see if things work if we drop it run(cmdline) else: run(cmdline) return f def make_generated_root(args: CommandLineArguments, root: Path, for_cache: bool) -> Optional[BinaryIO]: if not is_generated_root(args): return None label = "usr" if args.usr_only else "root" patched_root = root / "usr" if args.usr_only else root if args.output_format == OutputFormat.gpt_ext4: return generate_ext4(args, patched_root, label, for_cache) if args.output_format == OutputFormat.gpt_btrfs: return generate_btrfs(args, patched_root, label, for_cache) if args.output_format.is_squashfs(): return generate_squashfs(args, patched_root, for_cache) return None def insert_partition( args: CommandLineArguments, raw: BinaryIO, loopdev: Path, blob: BinaryIO, ident: PartitionIdentifier, description: str, type_uuid: uuid.UUID, read_only: bool, part_uuid: Optional[uuid.UUID] = None, ) -> Partition: assert args.partition_table is not None blob.seek(0) luks_extra = 16 * 1024 * 1024 if args.encrypt == "all" else 0 blob_size = os.stat(blob.name).st_size part = args.partition_table.add(ident, blob_size + luks_extra, type_uuid, description, part_uuid) disk_size = args.partition_table.disk_size() ss = f" ({disk_size // args.partition_table.sector_size} sectors)" if 'disk' in ARG_DEBUG else "" with complete_step(f"Resizing disk image to {format_bytes(disk_size)}{ss}"): os.truncate(raw.name, disk_size) run(["losetup", "--set-capacity", loopdev]) part_size = part.n_sectors * args.partition_table.sector_size ss = f" ({part.n_sectors} sectors)" if 'disk' in ARG_DEBUG else "" with complete_step(f"Inserting partition of {format_bytes(part_size)}{ss}..."): args.partition_table.run_sfdisk(loopdev) with complete_step("Writing partition..."): if ident == PartitionIdentifier.root: luks_format_root(args, loopdev, False, False, True) cm = luks_setup_root(args, loopdev, False, True) else: cm = contextlib.nullcontext() with cm as dev: path = dev if dev is not None else part.blockdev(loopdev) # Let's discard the partition block device first, to ensure the GPT partition table footer that # likely is stored in it is flushed out. After all we want to write with dd's sparse option. run(["blkdiscard", path]) path.write_bytes(blob.read()) return part def insert_generated_root( args: CommandLineArguments, raw: Optional[BinaryIO], loopdev: Optional[Path], image: Optional[BinaryIO], for_cache: bool, ) -> Optional[Partition]: if not is_generated_root(args): return None if not args.output_format.is_disk(): return None if for_cache: return None assert raw is not None assert loopdev is not None assert image is not None assert args.partition_table is not None with complete_step("Inserting generated root partition…"): return insert_partition( args, raw, loopdev, image, PartitionIdentifier.root, root_partition_description(args), type_uuid=gpt_root_native(args.architecture, args.usr_only).root, read_only=args.read_only) def make_verity( args: CommandLineArguments, dev: Optional[Path], do_run_build_script: bool, for_cache: bool ) -> Tuple[Optional[BinaryIO], Optional[str]]: if do_run_build_script or args.verity is False: return None, None if for_cache: return None, None assert dev is not None with complete_step("Generating verity hashes…"): f: BinaryIO = cast(BinaryIO, tempfile.NamedTemporaryFile(dir=args.output.parent, prefix=".mkosi-")) c = run(["veritysetup", "format", dev, f.name], stdout=PIPE) for line in c.stdout.decode("utf-8").split("\n"): if line.startswith("Root hash:"): root_hash = line[10:].strip() return f, root_hash raise ValueError("Root hash not found") def insert_verity( args: CommandLineArguments, raw: Optional[BinaryIO], loopdev: Optional[Path], verity: Optional[BinaryIO], root_hash: Optional[str], for_cache: bool, ) -> Optional[Partition]: if verity is None: return None if for_cache: return None assert loopdev is not None assert raw is not None assert root_hash is not None assert args.partition_table is not None # Use the final 128 bit of the root hash as partition UUID of the verity partition u = uuid.UUID(root_hash[-32:]) with complete_step("Inserting verity partition…"): return insert_partition( args, raw, loopdev, verity, PartitionIdentifier.verity, root_partition_description(args, "Verity"), gpt_root_native(args.architecture, args.usr_only).verity, read_only=True, part_uuid=u) def make_verity_sig( args: CommandLineArguments, root_hash: Optional[str], do_run_build_script: bool, for_cache: bool ) -> Tuple[Optional[BinaryIO], Optional[bytes], Optional[str]]: if do_run_build_script or args.verity != "signed": return None, None, None if for_cache: return None, None, None assert root_hash is not None from cryptography import x509 from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.serialization import pkcs7 with complete_step("Signing verity root hash…"): key = serialization.load_pem_private_key(args.secure_boot_key.read_bytes(), password=None) certificate = x509.load_pem_x509_certificate(args.secure_boot_certificate.read_bytes()) fingerprint = certificate.fingerprint(hashes.SHA256()).hex() sigbytes = pkcs7.PKCS7SignatureBuilder().add_signer( certificate, key, hashes.SHA256() ).set_data( root_hash.encode("utf-8") ).sign( options=[ pkcs7.PKCS7Options.DetachedSignature, pkcs7.PKCS7Options.NoCerts, pkcs7.PKCS7Options.NoAttributes, pkcs7.PKCS7Options.Binary ], encoding=serialization.Encoding.DER ) # We base64 the DER result, because we want to include it in JSON. This is not PEM # (i.e. no header/footer line, no line breaks), but just base64 encapsulated DER). b64encoded = base64.b64encode(sigbytes).decode("ascii") print(b64encoded) # This is supposed to be extensible, but care should be taken not to include unprotected # data here. j = json.dumps({ "rootHash": root_hash, "certificateFingerprint": fingerprint, "signature": b64encoded }).encode("utf-8") f: BinaryIO = cast(BinaryIO, tempfile.NamedTemporaryFile(mode="w+b", dir=args.output.parent, prefix=".mkosi-")) f.write(j) f.flush() # Returns a file with JSON data to insert as signature partition as the first element, and # the DER PKCS7 signature bytes as second argument (to store as a detached PKCS7 file), and # finally the SHA256 fingerprint of the certificate used (which is used to # deterministically generate the partition UUID for the signature partition). return f, sigbytes, fingerprint def insert_verity_sig( args: CommandLineArguments, raw: Optional[BinaryIO], loopdev: Optional[Path], verity_sig: Optional[BinaryIO], root_hash: Optional[str], fingerprint: Optional[str], for_cache: bool, ) -> Optional[Partition]: if verity_sig is None: return None if for_cache: return None assert loopdev is not None assert raw is not None assert root_hash is not None assert fingerprint is not None assert args.partition_table is not None # Hash the concatenation of verity roothash and the X509 certificate # fingerprint to generate a UUID for the signature partition. u = uuid.UUID(hashlib.sha256(bytes.fromhex(root_hash) + bytes.fromhex(fingerprint)).hexdigest()[:32]) with complete_step("Inserting verity signature partition…"): return insert_partition( args, raw, loopdev, verity_sig, PartitionIdentifier.verity_sig, root_partition_description(args, "Signature"), gpt_root_native(args.architecture, args.usr_only).verity_sig, read_only=True, part_uuid=u) def patch_root_uuid( args: CommandLineArguments, loopdev: Optional[Path], root_hash: Optional[str], for_cache: bool ) -> None: if root_hash is None: return assert loopdev is not None if for_cache: return # Use the first 128bit of the root hash as partition UUID of the root partition u = uuid.UUID(root_hash[:32]) with complete_step("Patching root partition UUID…"): part = args.get_partition(PartitionIdentifier.root) assert part is not None run(["sfdisk", "--part-uuid", loopdev, str(part.number), str(u)]) def extract_partition( args: CommandLineArguments, dev: Optional[Path], do_run_build_script: bool, for_cache: bool ) -> Optional[BinaryIO]: if do_run_build_script or for_cache or not args.split_artifacts: return None assert dev is not None with complete_step("Extracting partition…"): f: BinaryIO = cast(BinaryIO, tempfile.NamedTemporaryFile(dir=os.path.dirname(args.output), prefix=".mkosi-")) run(["dd", f"if={dev}", f"of={f.name}", "conv=nocreat,sparse"]) return f def install_unified_kernel( args: CommandLineArguments, root: Path, root_hash: Optional[str], do_run_build_script: bool, for_cache: bool, cached: bool, mount: Callable[[], ContextManager[None]], ) -> None: # Iterates through all kernel versions included in the image and generates a combined # kernel+initrd+cmdline+osrelease EFI file from it and places it in the /EFI/Linux directory of the ESP. # sd-boot iterates through them and shows them in the menu. These "unified" single-file images have the # benefit that they can be signed like normal EFI binaries, and can encode everything necessary to boot a # specific root device, including the root hash. if not (args.bootable and args.get_partition(PartitionIdentifier.esp) and args.with_unified_kernel_images): return # Don't run dracut if this is for the cache. The unified kernel # typically includes the image ID, roothash and other data that # differs between cached version and final result. Moreover, we # want that the initrd for the image actually takes the changes we # make to the image into account (e.g. when we build a systemd # test image with this we want that the systemd we just built is # in the initrd, and not one from the cache. Hence even though # dracut is slow we invoke it only during the last final build, # never for the cached builds. if for_cache: return # Don't bother running dracut if this is a development build. Strictly speaking it would probably be a # good idea to run it, so that the development environment differs as little as possible from the final # build, but then again the initrd should not be relevant for building, and dracut is simply very slow, # hence let's avoid it invoking it needlessly, given that we never actually invoke the boot loader on the # development image. if do_run_build_script: return with mount(), complete_step("Generating combined kernel + initrd boot file…"): # Apparently openmandriva hasn't yet completed its usrmerge so we use lib here instead of usr/lib. with os.scandir(root / "lib/modules") as d: for kver in d: if not (kver.is_dir() and os.path.isfile(os.path.join(kver, "modules.dep"))): # type: ignore continue prefix = "/boot" if args.get_partition(PartitionIdentifier.xbootldr) else "/efi" # While the kernel version can generally be found as a directory under /usr/lib/modules, the # kernel image files can be found either in /usr/lib/modules//vmlinuz or in # /boot depending on the distro. By invoking the kernel-install script directly, we can pass # the empty string as the kernel image which causes the script to not pass the --kernel-image # option to dracut so it searches the image for us. cmdline = [ "/etc/kernel/install.d/50-mkosi-dracut-unified-kernel.install", "add", kver.name, f"{prefix}/{args.machine_id}/{kver.name}", "", ] if args.distribution == Distribution.gentoo: from .gentoo import ARCHITECTURES _, kimg_path = ARCHITECTURES[args.architecture or "x86_64"] cmdline[4] = f"/usr/src/linux-{kver.name}/{kimg_path}" # Pass some extra meta-info to the script via # environment variables. The script uses this to name # the unified kernel image file env = {} if args.image_id is not None: env["IMAGE_ID"] = args.image_id if args.image_version is not None: env["IMAGE_VERSION"] = args.image_version if root_hash is not None: env["USRHASH" if args.usr_only else "ROOTHASH"] = root_hash run_workspace_command(args, root, cmdline, env=env) def secure_boot_sign( args: CommandLineArguments, root: Path, do_run_build_script: bool, for_cache: bool, cached: bool, mount: Callable[[], ContextManager[None]], ) -> None: if do_run_build_script: return if not args.bootable: return if not args.secure_boot: return if for_cache and args.verity: return if cached and args.verity is False: return with mount(): for path, _, filenames in os.walk(root / "efi"): for i in filenames: if not i.endswith(".efi") and not i.endswith(".EFI"): continue with complete_step(f"Signing EFI binary {i} in ESP…"): p = os.path.join(path, i) run( [ "sbsign", "--key", args.secure_boot_key, "--cert", args.secure_boot_certificate, "--output", p + ".signed", p, ], ) os.rename(p + ".signed", p) def extract_unified_kernel( args: CommandLineArguments, root: Path, do_run_build_script: bool, for_cache: bool, mount: Callable[[], ContextManager[None]], ) -> Optional[BinaryIO]: if do_run_build_script or for_cache or not args.split_artifacts or not args.bootable: return None with mount(): kernel = None for path, _, filenames in os.walk(root / "efi/EFI/Linux"): for i in filenames: if not i.endswith(".efi") and not i.endswith(".EFI"): continue if kernel is not None: raise ValueError( f"Multiple kernels found, don't know which one to extract. ({kernel} vs. {path}/{i})" ) kernel = os.path.join(path, i) if kernel is None: raise ValueError("No kernel found in image, can't extract") assert args.output_split_kernel is not None f = copy_file_temporary(kernel, args.output_split_kernel.parent) return f def compress_output( args: CommandLineArguments, data: Optional[BinaryIO], suffix: Optional[str] = None ) -> Optional[BinaryIO]: if data is None: return None compress = should_compress_output(args) if not compress: # If we shan't compress, then at least make the output file sparse with complete_step(f"Digging holes into output file {data.name}…"): run(["fallocate", "--dig-holes", data.name]) return data with complete_step(f"Compressing output file {data.name}…"): f: BinaryIO = cast( BinaryIO, tempfile.NamedTemporaryFile(prefix=".mkosi-", suffix=suffix, dir=os.path.dirname(args.output)) ) run([*compressor_command(compress), "--stdout", data.name], stdout=f) return f def qcow2_output(args: CommandLineArguments, raw: Optional[BinaryIO]) -> Optional[BinaryIO]: if not args.output_format.is_disk(): return raw assert raw is not None if not args.qcow2: return raw with complete_step("Converting image file to qcow2…"): f: BinaryIO = cast(BinaryIO, tempfile.NamedTemporaryFile(prefix=".mkosi-", dir=os.path.dirname(args.output))) run(["qemu-img", "convert", "-onocow=on", "-fraw", "-Oqcow2", raw.name, f.name]) return f def write_root_hash_file(args: CommandLineArguments, root_hash: Optional[str]) -> Optional[BinaryIO]: if root_hash is None: return None assert args.output_root_hash_file is not None suffix = roothash_suffix(args.usr_only) with complete_step(f"Writing {suffix} file…"): f: BinaryIO = cast( BinaryIO, tempfile.NamedTemporaryFile(mode="w+b", prefix=".mkosi", dir=os.path.dirname(args.output_root_hash_file)), ) f.write((root_hash + "\n").encode()) f.flush() return f def write_root_hash_p7s_file(args: CommandLineArguments, root_hash_p7s: Optional[bytes]) -> Optional[BinaryIO]: if root_hash_p7s is None: return None assert args.output_root_hash_p7s_file is not None suffix = roothash_p7s_suffix(args.usr_only) with complete_step(f"Writing {suffix} file…"): f: BinaryIO = cast( BinaryIO, tempfile.NamedTemporaryFile( mode="w+b", prefix=".mkosi", dir=args.output_root_hash_p7s_file.parent ), ) f.write(root_hash_p7s) f.flush() return f def copy_nspawn_settings(args: CommandLineArguments) -> Optional[BinaryIO]: if args.nspawn_settings is None: return None assert args.output_nspawn_settings is not None with complete_step("Copying nspawn settings file…"): f: BinaryIO = cast( BinaryIO, tempfile.NamedTemporaryFile( mode="w+b", prefix=".mkosi-", dir=os.path.dirname(args.output_nspawn_settings) ), ) with open(args.nspawn_settings, "rb") as c: f.write(c.read()) f.flush() return f def hash_file(of: TextIO, sf: BinaryIO, fname: str) -> None: bs = 16 * 1024 ** 2 h = hashlib.sha256() sf.seek(0) buf = sf.read(bs) while len(buf) > 0: h.update(buf) buf = sf.read(bs) of.write(h.hexdigest() + " *" + fname + "\n") def calculate_sha256sum( args: CommandLineArguments, raw: Optional[BinaryIO], archive: Optional[BinaryIO], root_hash_file: Optional[BinaryIO], root_hash_p7s_file: Optional[BinaryIO], split_root: Optional[BinaryIO], split_verity: Optional[BinaryIO], split_verity_sig: Optional[BinaryIO], split_kernel: Optional[BinaryIO], nspawn_settings: Optional[BinaryIO], ) -> Optional[TextIO]: if args.output_format in (OutputFormat.directory, OutputFormat.subvolume): return None if not args.checksum: return None assert args.output_checksum is not None with complete_step("Calculating SHA256SUMS…"): f: TextIO = cast( TextIO, tempfile.NamedTemporaryFile( mode="w+", prefix=".mkosi-", encoding="utf-8", dir=os.path.dirname(args.output_checksum) ), ) if raw is not None: hash_file(f, raw, os.path.basename(args.output)) if archive is not None: hash_file(f, archive, os.path.basename(args.output)) if root_hash_file is not None: assert args.output_root_hash_file is not None hash_file(f, root_hash_file, os.path.basename(args.output_root_hash_file)) if root_hash_p7s_file is not None: assert args.output_root_hash_p7s_file is not None hash_file(f, root_hash_p7s_file, args.output_root_hash_p7s_file.name) if split_root is not None: assert args.output_split_root is not None hash_file(f, split_root, os.path.basename(args.output_split_root)) if split_verity is not None: assert args.output_split_verity is not None hash_file(f, split_verity, os.path.basename(args.output_split_verity)) if split_verity_sig is not None: assert args.output_split_verity_sig is not None hash_file(f, split_verity_sig, args.output_split_verity_sig.name) if split_kernel is not None: assert args.output_split_kernel is not None hash_file(f, split_kernel, os.path.basename(args.output_split_kernel)) if nspawn_settings is not None: assert args.output_nspawn_settings is not None hash_file(f, nspawn_settings, os.path.basename(args.output_nspawn_settings)) f.flush() return f def calculate_signature(args: CommandLineArguments, checksum: Optional[IO[Any]]) -> Optional[BinaryIO]: if not args.sign: return None if checksum is None: return None assert args.output_signature is not None with complete_step("Signing SHA256SUMS…"): f: BinaryIO = cast( BinaryIO, tempfile.NamedTemporaryFile(mode="wb", prefix=".mkosi-", dir=os.path.dirname(args.output_signature)), ) cmdline = ["gpg", "--detach-sign"] if args.key is not None: cmdline += ["--default-key", args.key] checksum.seek(0) run(cmdline, stdin=checksum, stdout=f) return f def calculate_bmap(args: CommandLineArguments, raw: Optional[BinaryIO]) -> Optional[TextIO]: if not args.bmap: return None if not args.output_format.is_disk_rw(): return None assert raw is not None assert args.output_bmap is not None with complete_step("Creating BMAP file…"): f: TextIO = cast( TextIO, tempfile.NamedTemporaryFile( mode="w+", prefix=".mkosi-", encoding="utf-8", dir=os.path.dirname(args.output_bmap) ), ) cmdline = ["bmaptool", "create", raw.name] run(cmdline, stdout=f) return f def save_cache(args: CommandLineArguments, root: Path, raw: Optional[str], cache_path: Optional[Path]) -> None: disk_rw = args.output_format.is_disk_rw() if disk_rw: if raw is None or cache_path is None: return else: if cache_path is None: return with complete_step("Installing cache copy…", f"Installed cache copy {path_relative_to_cwd(cache_path)}"): if disk_rw: assert raw is not None os.chmod(raw, 0o666 & ~args.original_umask) shutil.move(raw, cache_path) else: unlink_try_hard(cache_path) shutil.move(cast(str, root), cache_path) # typing bug, .move() accepts Path def _link_output( args: CommandLineArguments, oldpath: PathString, newpath: PathString, mode: int = 0o666, ) -> None: assert oldpath is not None assert newpath is not None # Temporary files created by tempfile have mode trimmed to the user. # After we are done writing files, adjust the mode to the default specified by umask. os.chmod(oldpath, mode & ~args.original_umask) os.link(oldpath, newpath) if args.no_chown: return sudo_uid = os.getenv("SUDO_UID") sudo_gid = os.getenv("SUDO_GID") if not (sudo_uid and sudo_gid): return relpath = path_relative_to_cwd(newpath) sudo_user = os.getenv("SUDO_USER", default=sudo_uid) with complete_step( f"Changing ownership of output file {relpath} to user {sudo_user} (acquired from sudo)…", f"Changed ownership of {relpath}", ): os.chown(newpath, int(sudo_uid), int(sudo_gid)) def link_output(args: CommandLineArguments, root: Path, artifact: Optional[BinaryIO]) -> None: with complete_step("Linking image file…", f"Linked {path_relative_to_cwd(args.output)}"): if args.output_format in (OutputFormat.directory, OutputFormat.subvolume): assert artifact is None make_read_only(args, root, for_cache=False, b=False) os.rename(root, args.output) make_read_only(args, args.output, for_cache=False, b=True) elif args.output_format.is_disk() or args.output_format in ( OutputFormat.plain_squashfs, OutputFormat.tar, OutputFormat.cpio, ): assert artifact is not None _link_output(args, artifact.name, args.output) def link_output_nspawn_settings(args: CommandLineArguments, path: Optional[SomeIO]) -> None: if path: assert args.output_nspawn_settings with complete_step( "Linking nspawn settings file…", f"Linked {path_relative_to_cwd(args.output_nspawn_settings)}" ): _link_output(args, path.name, args.output_nspawn_settings) def link_output_checksum(args: CommandLineArguments, checksum: Optional[SomeIO]) -> None: if checksum: assert args.output_checksum with complete_step("Linking SHA256SUMS file…", f"Linked {path_relative_to_cwd(args.output_checksum)}"): _link_output(args, checksum.name, args.output_checksum) def link_output_root_hash_file(args: CommandLineArguments, root_hash_file: Optional[SomeIO]) -> None: if root_hash_file: assert args.output_root_hash_file suffix = roothash_suffix(args.usr_only) with complete_step(f"Linking {suffix} file…", f"Linked {path_relative_to_cwd(args.output_root_hash_file)}"): _link_output(args, root_hash_file.name, args.output_root_hash_file) def link_output_root_hash_p7s_file(args: CommandLineArguments, root_hash_p7s_file: Optional[SomeIO]) -> None: if root_hash_p7s_file: assert args.output_root_hash_p7s_file suffix = roothash_p7s_suffix(args.usr_only) with complete_step( f"Linking {suffix} file…", f"Linked {path_relative_to_cwd(args.output_root_hash_p7s_file)}" ): _link_output(args, root_hash_p7s_file.name, args.output_root_hash_p7s_file) def link_output_signature(args: CommandLineArguments, signature: Optional[SomeIO]) -> None: if signature: assert args.output_signature is not None with complete_step("Linking SHA256SUMS.gpg file…", f"Linked {path_relative_to_cwd(args.output_signature)}"): _link_output(args, signature.name, args.output_signature) def link_output_bmap(args: CommandLineArguments, bmap: Optional[SomeIO]) -> None: if bmap: assert args.output_bmap with complete_step("Linking .bmap file…", f"Linked {path_relative_to_cwd(args.output_bmap)}"): _link_output(args, bmap.name, args.output_bmap) def link_output_sshkey(args: CommandLineArguments, sshkey: Optional[SomeIO]) -> None: if sshkey: assert args.output_sshkey with complete_step("Linking private ssh key file…", f"Linked {path_relative_to_cwd(args.output_sshkey)}"): _link_output(args, sshkey.name, args.output_sshkey, mode=0o600) def link_output_split_root(args: CommandLineArguments, split_root: Optional[SomeIO]) -> None: if split_root: assert args.output_split_root with complete_step( "Linking split root file system…", f"Linked {path_relative_to_cwd(args.output_split_root)}" ): _link_output(args, split_root.name, args.output_split_root) def link_output_split_verity(args: CommandLineArguments, split_verity: Optional[SomeIO]) -> None: if split_verity: assert args.output_split_verity with complete_step("Linking split Verity data…", f"Linked {path_relative_to_cwd(args.output_split_verity)}"): _link_output(args, split_verity.name, args.output_split_verity) def link_output_split_verity_sig(args: CommandLineArguments, split_verity_sig: Optional[SomeIO]) -> None: if split_verity_sig: assert args.output_split_verity_sig with complete_step( "Linking split Verity Signature data…", f"Linked {path_relative_to_cwd(args.output_split_verity_sig)}" ): _link_output(args, split_verity_sig.name, args.output_split_verity_sig) def link_output_split_kernel(args: CommandLineArguments, split_kernel: Optional[SomeIO]) -> None: if split_kernel: assert args.output_split_kernel with complete_step("Linking split kernel image…", f"Linked {path_relative_to_cwd(args.output_split_kernel)}"): _link_output(args, split_kernel.name, args.output_split_kernel) def dir_size(path: PathString) -> int: dir_sum = 0 for entry in os.scandir(path): if entry.is_symlink(): # We can ignore symlinks because they either point into our tree, # in which case we'll include the size of target directory anyway, # or outside, in which case we don't need to. continue elif entry.is_file(): dir_sum += entry.stat().st_blocks * 512 elif entry.is_dir(): dir_sum += dir_size(entry.path) return dir_sum def save_manifest(args: CommandLineArguments, manifest: Manifest) -> None: if manifest.has_data(): relpath = path_relative_to_cwd(args.output) if ManifestFormat.json in args.manifest_format: with complete_step(f"Saving manifest {relpath}.manifest"): f: TextIO = cast( TextIO, tempfile.NamedTemporaryFile( mode="w+", encoding="utf-8", prefix=".mkosi-", dir=os.path.dirname(args.output), ), ) with f: manifest.write_json(f) _link_output(args, f.name, f"{args.output}.manifest") if ManifestFormat.changelog in args.manifest_format: with complete_step(f"Saving report {relpath}.changelog"): g: TextIO = cast( TextIO, tempfile.NamedTemporaryFile( mode="w+", encoding="utf-8", prefix=".mkosi-", dir=os.path.dirname(args.output), ), ) with g: manifest.write_package_report(g) _link_output(args, g.name, f"{relpath}.changelog") def print_output_size(args: CommandLineArguments) -> None: if args.output_format in (OutputFormat.directory, OutputFormat.subvolume): MkosiPrinter.print_step("Resulting image size is " + format_bytes(dir_size(args.output)) + ".") else: st = os.stat(args.output) size = format_bytes(st.st_size) space = format_bytes(st.st_blocks * 512) MkosiPrinter.print_step(f"Resulting image size is {size}, consumes {space}.") def setup_package_cache(args: CommandLineArguments) -> Optional[TempDir]: if args.cache_path and args.cache_path.exists(): return None d = None with complete_step("Setting up package cache…", "Setting up package cache {} complete") as output: if args.cache_path is None: d = tempfile.TemporaryDirectory(dir=os.path.dirname(args.output), prefix=".mkosi-") args.cache_path = Path(d.name) else: os.makedirs(args.cache_path, 0o755, exist_ok=True) output.append(args.cache_path) return d def remove_duplicates(items: List[T]) -> List[T]: "Return list with any repetitions removed" # We use a dictionary to simulate an ordered set return list({x: None for x in items}) class ListAction(argparse.Action): delimiter: str def __init__(self, *args: Any, choices: Optional[Iterable[Any]] = None, **kwargs: Any) -> None: self.list_choices = choices # mypy doesn't like the following call due to https://github.com/python/mypy/issues/6799, # so let's, temporarily, ignore the error super().__init__(choices=choices, *args, **kwargs) # type: ignore[misc] def __call__( self, # These type-hints are copied from argparse.pyi parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], option_string: Optional[str] = None, ) -> None: ary = getattr(namespace, self.dest) if ary is None: ary = [] if isinstance(values, str): # Support list syntax for comma separated lists as well if self.delimiter == "," and values.startswith("[") and values.endswith("]"): values = values[1:-1] # Make sure delimiters between quotes are ignored. # Inspired by https://stackoverflow.com/a/2787979. values = [x.strip() for x in re.split(f"""{self.delimiter}(?=(?:[^'"]|'[^']*'|"[^"]*")*$)""", values) if x] if isinstance(values, list): for x in values: if self.list_choices is not None and x not in self.list_choices: raise ValueError(f"Unknown value {x!r}") # Remove ! prefixed list entries from list. !* removes all entries. This works for strings only now. if x == "!*": ary = [] elif isinstance(x, str) and x.startswith("!"): if x[1:] in ary: ary.remove(x[1:]) else: ary.append(x) else: ary.append(values) ary = remove_duplicates(ary) setattr(namespace, self.dest, ary) class CommaDelimitedListAction(ListAction): delimiter = "," class ColonDelimitedListAction(ListAction): delimiter = ":" class SpaceDelimitedListAction(ListAction): delimiter = " " class BooleanAction(argparse.Action): """Parse boolean command line arguments The argument may be added more than once. The argument may be set explicitly (--foo yes) or implicitly --foo. If the parameter name starts with "not-" or "without-" the value gets inverted. """ def __init__( self, # These type-hints are copied from argparse.pyi option_strings: Sequence[str], dest: str, nargs: Optional[Union[int, str]] = None, const: Any = True, default: Any = False, **kwargs: Any, ) -> None: if nargs is not None: raise ValueError("nargs not allowed") super().__init__(option_strings, dest, nargs="?", const=const, default=default, **kwargs) def __call__( self, # These type-hints are copied from argparse.pyi parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None, bool], option_string: Optional[str] = None, ) -> None: new_value = self.default if isinstance(values, str): try: new_value = parse_boolean(values) except ValueError as exp: raise argparse.ArgumentError(self, str(exp)) elif isinstance(values, bool): # Assign const new_value = values else: raise argparse.ArgumentError(self, f"Invalid argument for {option_string}: {values}") # invert the value if the argument name starts with "not" or "without" for option in self.option_strings: if option[2:].startswith("not-") or option[2:].startswith("without-"): new_value = not new_value break setattr(namespace, self.dest, new_value) class CleanPackageMetadataAction(BooleanAction): def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None, bool], option_string: Optional[str] = None, ) -> None: if isinstance(values, str) and values == "auto": setattr(namespace, self.dest, "auto") else: super().__call__(parser, namespace, values, option_string) class WithNetworkAction(BooleanAction): def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None, bool], option_string: Optional[str] = None, ) -> None: if isinstance(values, str) and values == "never": setattr(namespace, self.dest, "never") else: super().__call__(parser, namespace, values, option_string) class VerityAction(BooleanAction): def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None, bool], option_string: Optional[str] = None, ) -> None: if isinstance(values, str): if values == "signed": setattr(namespace, self.dest, "signed") return super().__call__(parser, namespace, values, option_string) class CustomHelpFormatter(argparse.HelpFormatter): def _format_action_invocation(self, action: argparse.Action) -> str: if not action.option_strings or action.nargs == 0: return super()._format_action_invocation(action) default = self._get_default_metavar_for_optional(action) args_string = self._format_args(action, default) return ", ".join(action.option_strings) + " " + args_string class ArgumentParserMkosi(argparse.ArgumentParser): """ArgumentParser with support for mkosi.defaults file(s) This derived class adds a simple ini file parser to python's ArgumentParser features. Each line of the ini file is converted to a command line argument. Example: "FooBar=Hello_World" in the ini file appends "--foo-bar Hello_World" to sys.argv. Command line arguments starting with - or --are considered as regular arguments. Arguments starting with @ are considered as files which are fed to the ini file parser implemented in this class. """ # Mapping of parameters supported in config files but not as command line arguments. SPECIAL_MKOSI_DEFAULT_PARAMS = { "QCow2": "--qcow2", "OutputDirectory": "--output-dir", "WorkspaceDirectory": "--workspace-dir", "XZ": "--compress-output=xz", "NSpawnSettings": "--settings", "ESPSize": "--esp-size", "CheckSum": "--checksum", "BMap": "--bmap", "Packages": "--package", "RemovePackages": "--remove-package", "ExtraTrees": "--extra-tree", "SkeletonTrees": "--skeleton-tree", "BuildPackages": "--build-package", "PostInstallationScript": "--postinst-script", "GPTFirstLBA": "--gpt-first-lba", "TarStripSELinuxContext": "--tar-strip-selinux-context", } fromfile_prefix_chars: str = "@" def __init__(self, *kargs: Any, **kwargs: Any) -> None: self._ini_file_section = "" self._ini_file_key = "" # multi line list processing self._ini_file_list_mode = False # Add config files to be parsed kwargs["fromfile_prefix_chars"] = ArgumentParserMkosi.fromfile_prefix_chars kwargs["formatter_class"] = CustomHelpFormatter super().__init__(*kargs, **kwargs) @staticmethod def _camel_to_arg(camel: str) -> str: s1 = re.sub("(.)([A-Z][a-z]+)", r"\1-\2", camel) return re.sub("([a-z0-9])([A-Z])", r"\1-\2", s1).lower() @classmethod def _ini_key_to_cli_arg(cls, key: str) -> str: return cls.SPECIAL_MKOSI_DEFAULT_PARAMS.get(key) or ("--" + cls._camel_to_arg(key)) def _read_args_from_files(self, arg_strings: List[str]) -> List[str]: """Convert @ prefixed command line arguments with corresponding file content Regular arguments are just returned. Arguments prefixed with @ are considered as configuration file paths. The settings of each file are parsed and returned as command line arguments. Example: The following mkosi.default is loaded. [Distribution] Distribution=fedora mkosi is called like: mkosi -p httpd arg_strings: ['@mkosi.default', '-p', 'httpd'] return value: ['--distribution', 'fedora', '-p', 'httpd'] """ # expand arguments referencing files new_arg_strings = [] for arg_string in arg_strings: # for regular arguments, just add them back into the list if not arg_string or arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) continue # replace arguments referencing files with the file content try: # This used to use configparser.ConfigParser before, but # ConfigParser's interpolation clashes with systemd style # specifier, e.g. %u for user, since both use % as a sigil. config = configparser.RawConfigParser(delimiters="=", inline_comment_prefixes=("#",)) config.optionxform = str # type: ignore with open(arg_string[1:]) as args_file: config.read_file(args_file) # Rename old [Packages] section to [Content] if config.has_section("Packages") and not config.has_section("Content"): config.read_dict({"Content": dict(config.items("Packages"))}) config.remove_section("Packages") for section in config.sections(): for key, value in config.items(section): cli_arg = self._ini_key_to_cli_arg(key) # \n in value strings is forwarded. Depending on the action type, \n is considered as a delimiter or needs to be replaced by a ' ' for action in self._actions: if cli_arg in action.option_strings: if isinstance(action, ListAction): value = value.replace(os.linesep, action.delimiter) new_arg_strings.extend([cli_arg, value]) except OSError as e: self.error(str(e)) # return the modified argument list return new_arg_strings COMPRESSION_ALGORITHMS = "zlib", "lzo", "zstd", "lz4", "xz" def parse_compression(value: str) -> Union[str, bool]: if value in COMPRESSION_ALGORITHMS: return value return parse_boolean(value) def parse_source_file_transfer(value: str) -> Optional[SourceFileTransfer]: if value == "": return None try: return SourceFileTransfer(value) except Exception as exp: raise argparse.ArgumentTypeError(str(exp)) def parse_base_packages(value: str) -> Union[str, bool]: if value == "conditional": return value return parse_boolean(value) def parse_remove_files(value: str) -> List[str]: """Normalize paths as relative to / to ensure we don't go outside of our root.""" # os.path.normpath() leaves leading '//' untouched, even though it normalizes '///'. # This follows POSIX specification, see # https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_13. # Let's use lstrip() to handle zero or more leading slashes correctly. return ["/" + os.path.normpath(p).lstrip("/") for p in value.split(",") if p] def parse_ssh_agent(value: Optional[str]) -> Optional[Path]: """Will return None or a path to a socket.""" if value is None: return None try: if not parse_boolean(value): return None except ValueError: pass else: value = os.getenv("SSH_AUTH_SOCK") if not value: die("--ssh-agent=true but $SSH_AUTH_SOCK is not set (consider running 'sudo' with '-E')") sock = Path(value) if not sock.is_socket(): die(f"SSH agent socket {sock} is not an AF_UNIX socket") return sock def create_parser() -> ArgumentParserMkosi: parser = ArgumentParserMkosi(prog="mkosi", description="Build Bespoke OS Images", add_help=False) group = parser.add_argument_group("Commands") group.add_argument("verb", choices=MKOSI_COMMANDS, default="build", help="Operation to execute") group.add_argument( "cmdline", nargs=argparse.REMAINDER, help="The command line to use for " + str(MKOSI_COMMANDS_CMDLINE)[1:-1] ) group.add_argument("-h", "--help", action="help", help="Show this help") group.add_argument("--version", action="version", version="%(prog)s " + __version__) group = parser.add_argument_group("Distribution") group.add_argument("-d", "--distribution", choices=Distribution.__members__, help="Distribution to install") group.add_argument("-r", "--release", help="Distribution release to install") group.add_argument("-m", "--mirror", help="Distribution mirror to use") group.add_argument( "--repositories", action=CommaDelimitedListAction, default=[], help="Repositories to use", metavar="REPOS" ) group.add_argument( "--use-host-repositories", action=BooleanAction, help="Use host's existing software repositories (only for dnf-based distributions)", ) group.add_argument("--architecture", help="Override the architecture of installation") group = parser.add_argument_group("Output") group.add_argument( "-t", "--format", dest="output_format", choices=OutputFormat, type=OutputFormat.from_string, help="Output Format", ) group.add_argument( "--manifest-format", action=CommaDelimitedListAction, type=cast(Callable[[str], ManifestFormat], ManifestFormat.parse_list), help="Manifest Format", ) group.add_argument( "-o", "--output", help="Output image path", type=Path, metavar="PATH", ) group.add_argument( "--output-split-root", help="Output root or /usr/ partition image path (if --split-artifacts is used)", type=Path, metavar="PATH", ) group.add_argument( "--output-split-verity", help="Output Verity partition image path (if --split-artifacts is used)", type=Path, metavar="PATH", ) group.add_argument( "--output-split-verity-sig", help="Output Verity Signature partition image path (if --split-artifacts is used)", type=Path, metavar="PATH", ) group.add_argument( "--output-split-kernel", help="Output kernel path (if --split-artifacts is used)", type=Path, metavar="PATH", ) group.add_argument( "-O", "--output-dir", help="Output root directory", type=Path, metavar="DIR", ) group.add_argument( "--workspace-dir", help="Workspace directory", type=Path, metavar="DIR", ) group.add_argument( "-f", "--force", action="count", dest="force_count", default=0, help="Remove existing image file before operation", ) group.add_argument( "-b", "--bootable", action=BooleanAction, help="Make image bootable on EFI (only gpt_ext4, gpt_xfs, gpt_btrfs, gpt_squashfs)", ) group.add_argument( "--boot-protocols", action=CommaDelimitedListAction, help="Boot protocols to use on a bootable image", metavar="PROTOCOLS", default=[], ) group.add_argument( "--kernel-command-line", action=SpaceDelimitedListAction, default=["rhgb", "selinux=0", "audit=0"], help="Set the kernel command line (only bootable images)", ) group.add_argument( "--kernel-commandline", action=SpaceDelimitedListAction, dest="kernel_command_line", help=argparse.SUPPRESS ) # Compatibility option group.add_argument( "--secure-boot", action=BooleanAction, help="Sign the resulting kernel/initrd image for UEFI SecureBoot" ) group.add_argument( "--secure-boot-key", help="UEFI SecureBoot private key in PEM format", type=Path, metavar="PATH", ) group.add_argument( "--secure-boot-certificate", help="UEFI SecureBoot certificate in X509 format", type=Path, metavar="PATH", ) group.add_argument( "--secure-boot-valid-days", help="Number of days UEFI SecureBoot keys should be valid when generating keys", metavar="DAYS", default="730", ) group.add_argument( "--secure-boot-common-name", help="Template for the UEFI SecureBoot CN when generating keys", metavar="CN", default="mkosi of %u", ) group.add_argument( "--read-only", action=BooleanAction, help="Make root volume read-only (only gpt_ext4, gpt_xfs, gpt_btrfs, subvolume, implied with gpt_squashfs and plain_squashfs)", ) group.add_argument( "--encrypt", choices=("all", "data"), help='Encrypt everything except: ESP ("all") or ESP and root ("data")' ) group.add_argument( "--verity", action=VerityAction, help="Add integrity partition, and optionally sign it (implies --read-only)", ) group.add_argument( "--compress", type=parse_compression, nargs="?", metavar="ALG", help="Enable compression (in-fs if supported, whole-output otherwise)", ) group.add_argument( "--compress-fs", type=parse_compression, nargs="?", metavar="ALG", help="Enable in-filesystem compression (gpt_btrfs, subvolume, gpt_squashfs, plain_squashfs)", ) group.add_argument( "--compress-output", type=parse_compression, nargs="?", metavar="ALG", help="Enable whole-output compression (with images or archives)", ) group.add_argument( "--mksquashfs", dest="mksquashfs_tool", type=str.split, default=[], help="Script to call instead of mksquashfs" ) group.add_argument( "--xz", action="store_const", dest="compress_output", const="xz", help=argparse.SUPPRESS, ) group.add_argument( "--qcow2", action=BooleanAction, help="Convert resulting image to qcow2 (only gpt_ext4, gpt_xfs, gpt_btrfs, gpt_squashfs)", ) group.add_argument("--hostname", help="Set hostname") group.add_argument("--image-version", help="Set version for image") group.add_argument("--image-id", help="Set ID for image") group.add_argument( "--no-chown", action=BooleanAction, help="When running with sudo, disable reassignment of ownership of the generated files to the original user", ) # NOQA: E501 group.add_argument( "--tar-strip-selinux-context", action=BooleanAction, help="Do not include SELinux file context information in tar. Not compatible with bsdtar.", ) group.add_argument( "-i", "--incremental", action=BooleanAction, help="Make use of and generate intermediary cache images" ) group.add_argument("-M", "--minimize", action=BooleanAction, help="Minimize root file system size") group.add_argument( "--without-unified-kernel-images", action=BooleanAction, dest="with_unified_kernel_images", default=True, help="Do not install unified kernel images", ) group.add_argument("--with-unified-kernel-images", action=BooleanAction, default=True, help=argparse.SUPPRESS) group.add_argument("--gpt-first-lba", type=int, help="Set the first LBA within GPT Header", metavar="FIRSTLBA") group.add_argument("--hostonly-initrd", action=BooleanAction, help="Enable dracut hostonly option") group.add_argument( "--split-artifacts", action=BooleanAction, help="Generate split out root/verity/kernel images, too" ) group = parser.add_argument_group("Content") group.add_argument( "--base-packages", type=parse_base_packages, default=True, help="Automatically inject basic packages in the system (systemd, kernel, …)", metavar="OPTION", ) group.add_argument( "-p", "--package", action=CommaDelimitedListAction, dest="packages", default=[], help="Add an additional package to the OS image", metavar="PACKAGE", ) group.add_argument( "--remove-package", action=CommaDelimitedListAction, dest="remove_packages", default=[], help="Remove package from the image OS image after installation", metavar="PACKAGE", ) group.add_argument("--with-docs", action=BooleanAction, help="Install documentation") group.add_argument( "-T", "--without-tests", action=BooleanAction, dest="with_tests", default=True, help="Do not run tests as part of build script, if supported", ) group.add_argument( "--with-tests", action=BooleanAction, default=True, help=argparse.SUPPRESS ) # Compatibility option group.add_argument("--password", help="Set the root password") group.add_argument( "--password-is-hashed", action=BooleanAction, help="Indicate that the root password has already been hashed" ) group.add_argument("--autologin", action=BooleanAction, help="Enable root autologin") group.add_argument( "--cache", dest="cache_path", help="Package cache path", type=Path, metavar="PATH", ) group.add_argument( "--extra-tree", action=CommaDelimitedListAction, dest="extra_trees", default=[], help="Copy an extra tree on top of image", type=Path, metavar="PATH", ) group.add_argument( "--skeleton-tree", action="append", dest="skeleton_trees", default=[], help="Use a skeleton tree to bootstrap the image before installing anything", type=Path, metavar="PATH", ) group.add_argument( "--clean-package-metadata", action=CleanPackageMetadataAction, help="Remove package manager database and other files", default='auto', ) group.add_argument( "--remove-files", action=CommaDelimitedListAction, default=[], help="Remove files from built image", type=parse_remove_files, metavar="GLOB", ) group.add_argument( "--environment", "-E", action=SpaceDelimitedListAction, default=[], help="Set an environment variable when running scripts", metavar="NAME[=VALUE]", ) group.add_argument( "--build-environment", # Compatibility option action=SpaceDelimitedListAction, default=[], dest="environment", help=argparse.SUPPRESS, ) group.add_argument( "--build-sources", help="Path for sources to build", metavar="PATH", type=Path, ) group.add_argument( "--build-dir", # Compatibility option help=argparse.SUPPRESS, type=Path, metavar="PATH", ) group.add_argument( "--build-directory", dest="build_dir", help="Path to use as persistent build directory", type=Path, metavar="PATH", ) group.add_argument( "--include-directory", dest="include_dir", help="Path to use as persistent include directory", type=Path, metavar="PATH", ) group.add_argument( "--install-directory", dest="install_dir", help="Path to use as persistent install directory", type=Path, metavar="PATH", ) group.add_argument( "--build-package", action=CommaDelimitedListAction, dest="build_packages", default=[], help="Additional packages needed for build script", metavar="PACKAGE", ) group.add_argument( "--skip-final-phase", action=BooleanAction, help="Skip the (second) final image building phase.", default=False ) group.add_argument( "--build-script", help="Build script to run inside image", type=script_path, metavar="PATH", ) group.add_argument( "--prepare-script", help="Prepare script to run inside the image before it is cached", type=script_path, metavar="PATH", ) group.add_argument( "--postinst-script", help="Postinstall script to run inside image", type=script_path, metavar="PATH", ) group.add_argument( "--finalize-script", help="Postinstall script to run outside image", type=script_path, metavar="PATH", ) group.add_argument( "--source-file-transfer", type=parse_source_file_transfer, choices=[*list(SourceFileTransfer), None], default=None, help="Method used to copy build sources to the build image." + "; ".join([f"'{k}': {v}" for k, v in SourceFileTransfer.doc().items()]) + " (default: copy-git-others if in a git repository, otherwise copy-all)", ) group.add_argument( "--source-file-transfer-final", type=parse_source_file_transfer, choices=[*list(SourceFileTransfer), None], default=None, help="Method used to copy build sources to the final image." + "; ".join([f"'{k}': {v}" for k, v in SourceFileTransfer.doc().items() if k != SourceFileTransfer.mount]) + " (default: None)", ) group.add_argument( "--source-resolve-symlinks", action=BooleanAction, help="If given, any symbolic links in the build sources are resolved and the file contents copied to the" + " build image. If not given, they are left as symbolic links in the build image." + " Only applies if --source-file-transfer is set to 'copy-all'. (default: keep as symbolic links)", ) group.add_argument( "--source-resolve-symlinks-final", action=BooleanAction, help="If given, any symbolic links in the build sources are resolved and the file contents copied to the" + " final image. If not given, they are left as symbolic links in the final image." + " Only applies if --source-file-transfer-final is set to 'copy-all'. (default: keep as symbolic links)", ) group.add_argument( "--with-network", action=WithNetworkAction, help="Run build and postinst scripts with network access (instead of private network)", ) group.add_argument( "--settings", dest="nspawn_settings", help="Add in .nspawn settings file", type=Path, metavar="PATH", ) group = parser.add_argument_group("Partitions") group.add_argument('--base-image', help='Use the given image as base (e.g. lower sysext layer)', type=Path, metavar='IMAGE') group.add_argument( "--root-size", help="Set size of root partition (only gpt_ext4, gpt_xfs, gpt_btrfs)", metavar="BYTES" ) group.add_argument( "--esp-size", help="Set size of EFI system partition (only gpt_ext4, gpt_xfs, gpt_btrfs, gpt_squashfs)", metavar="BYTES", ) group.add_argument( "--xbootldr-size", help="Set size of the XBOOTLDR partition (only gpt_ext4, gpt_xfs, gpt_btrfs, gpt_squashfs)", metavar="BYTES", ) group.add_argument( "--swap-size", help="Set size of swap partition (only gpt_ext4, gpt_xfs, gpt_btrfs, gpt_squashfs)", metavar="BYTES", ) group.add_argument( "--home-size", help="Set size of /home partition (only gpt_ext4, gpt_xfs, gpt_squashfs)", metavar="BYTES" ) group.add_argument( "--srv-size", help="Set size of /srv partition (only gpt_ext4, gpt_xfs, gpt_squashfs)", metavar="BYTES" ) group.add_argument( "--var-size", help="Set size of /var partition (only gpt_ext4, gpt_xfs, gpt_squashfs)", metavar="BYTES" ) group.add_argument( "--tmp-size", help="Set size of /var/tmp partition (only gpt_ext4, gpt_xfs, gpt_squashfs)", metavar="BYTES" ) group.add_argument( "--usr-only", action=BooleanAction, help="Generate a /usr/ partition instead of a root partition" ) group = parser.add_argument_group("Validation (only gpt_ext4, gpt_xfs, gpt_btrfs, gpt_squashfs, tar, cpio)") group.add_argument("--checksum", action=BooleanAction, help="Write SHA256SUMS file") group.add_argument("--sign", action=BooleanAction, help="Write and sign SHA256SUMS file") group.add_argument("--key", help="GPG key to use for signing") group.add_argument( "--bmap", action=BooleanAction, help="Write block map file (.bmap) for bmaptool usage (only gpt_ext4, gpt_btrfs)", ) group = parser.add_argument_group("Host configuration") group.add_argument( "--extra-search-path", dest="extra_search_paths", action=ColonDelimitedListAction, default=[], help="List of colon-separated paths to look for programs before looking in PATH", ) group.add_argument( "--extra-search-paths", dest="extra_search_paths", action=ColonDelimitedListAction, help=argparse.SUPPRESS ) # Compatibility option group.add_argument("--qemu-headless", action=BooleanAction, help="Configure image for qemu's -nographic mode") group.add_argument("--qemu-smp", help="Configure guest's SMP settings", metavar="SMP", default="2") group.add_argument("--qemu-mem", help="Configure guest's RAM size", metavar="MEM", default="1G") group.add_argument( "--network-veth", action=BooleanAction, help="Create a virtual Ethernet link between the host and the container/VM", ) group.add_argument( "--ephemeral", action=BooleanAction, help="If specified, the container/VM is run with a temporary snapshot of the output image that is " "removed immediately when the container/VM terminates", ) group.add_argument( "--ssh", action=BooleanAction, help="Set up SSH access from the host to the final image via 'mkosi ssh'" ) group.add_argument( "--ssh-key", type=Path, metavar="PATH", help="Use the specified private key when using 'mkosi ssh' (requires a corresponding public key)", ) group.add_argument( "--ssh-timeout", metavar="SECONDS", type=int, default=0, help="Wait up to SECONDS seconds for the SSH connection to be available when using 'mkosi ssh'", ) group.add_argument( "--ssh-agent", type=parse_ssh_agent, default=None, metavar="PATH", help="Path to the ssh agent socket, or true to use $SSH_AUTH_SOCK.", ) group.add_argument( "--ssh-port", type=int, default=22, metavar="PORT", help="If specified, 'mkosi ssh' will use this port to connect", ) group = parser.add_argument_group("Additional Configuration") group.add_argument( "-C", "--directory", help="Change to specified directory before doing anything", type=Path, metavar="PATH", ) group.add_argument( "--default", dest="default_path", help="Read configuration data from file", type=Path, metavar="PATH", ) group.add_argument( "-a", "--all", action="store_true", dest="all", default=False, help="Build all settings files in mkosi.files/" ) group.add_argument( "--all-directory", dest="all_directory", help="Specify path to directory to read settings files from", type=Path, metavar="PATH", ) group.add_argument( "-B", "--auto-bump", action=BooleanAction, help="Automatically bump image version after building", ) group.add_argument( "--debug", action=CommaDelimitedListAction, default=[], help="Turn on debugging output", choices=("run", "build-script", "workspace-command", "disk"), ) try: import argcomplete argcomplete.autocomplete(parser) except ImportError: pass return parser def load_distribution(args: argparse.Namespace) -> argparse.Namespace: if args.distribution is not None: args.distribution = Distribution[args.distribution] if args.distribution is None or args.release is None: d, r = detect_distribution() if args.distribution is None: args.distribution = d if args.distribution == d and d != Distribution.clear and args.release is None: args.release = r if args.distribution is None: die("Couldn't detect distribution.") return args def parse_args(argv: Optional[List[str]] = None) -> Dict[str, argparse.Namespace]: """Load default values from files and parse command line arguments Do all about default files and command line arguments parsing. If --all argument is passed more than one job needs to be processed. The returned tuple contains CommandLineArguments valid for all jobs as well as a dict containing the arguments per job. """ parser = create_parser() if argv is None: argv = sys.argv[1:] argv = list(argv) # make a copy 'cause we'll be modifying the list later on # If ArgumentParserMkosi loads settings from mkosi.default files, the settings from files # are converted to command line arguments. This breaks ArgumentParser's support for default # values of positional arguments. Make sure the verb command gets explicitly passed. # Insert a -- before the positional verb argument otherwise it might be considered as an argument of # a parameter with nargs='?'. For example mkosi -i summary would be treated as -i=summary. for verb in MKOSI_COMMANDS: try: v_i = argv.index(verb) except ValueError: continue if v_i > 0 and argv[v_i - 1] != "--": argv.insert(v_i, "--") break else: argv += ["--", "build"] # First run of command line arguments parsing to get the directory of mkosi.default file and the verb argument. args_pre_parsed, _ = parser.parse_known_args(argv) if args_pre_parsed.verb == "help": parser.print_help() sys.exit(0) # Make sure all paths are absolute and valid. # Relative paths are not valid yet since we are not in the final working directory yet. if args_pre_parsed.directory is not None: directory = args_pre_parsed.directory = args_pre_parsed.directory.absolute() else: directory = Path.cwd() # Note that directory will be ignored if .all_directory or .default_path are absolute all_directory = directory / (args_pre_parsed.all_directory or "mkosi.files") default_path = directory / (args_pre_parsed.default_path or "mkosi.default") if args_pre_parsed.default_path and not default_path.exists(): die(f"No config file found at {default_path}") if args_pre_parsed.all and args_pre_parsed.default_path: die("--all and --default= may not be combined.") # Parse everything in --all mode args_all = {} if args_pre_parsed.all: if not os.path.isdir(all_directory): die(f"all-directory {all_directory} does not exist") for f in os.scandir(all_directory): if not f.name.startswith("mkosi."): continue args = parse_args_file(argv, Path(f.path)) args_all[f.name] = args # Parse everything in normal mode else: args = parse_args_file_group(argv, os.fspath(default_path)) args = load_distribution(args) if args.distribution: # Parse again with any extra distribution files included. args = parse_args_file_group(argv, os.fspath(default_path), args.distribution) if args.distribution == "gentoo": from .gentoo import Gentoo Gentoo.try_import_portage() args_all["default"] = args return args_all def parse_args_file(argv: List[str], default_path: Path) -> argparse.Namespace: """Parse just one mkosi.* file (--all mode).""" # Parse all parameters handled by mkosi. # Parameters forwarded to subprocesses such as nspawn or qemu end up in cmdline_argv. argv = argv[:1] + [f"{ArgumentParserMkosi.fromfile_prefix_chars}{default_path}"] + argv[1:] return create_parser().parse_args(argv) def parse_args_file_group( argv: List[str], default_path: str, distribution: Optional[Distribution] = None ) -> argparse.Namespace: """Parse a set of mkosi.default and mkosi.default.d/* files.""" # Add the @ prefixed filenames to current argument list in inverse priority order. defaults_files = [] if os.path.isfile(default_path): defaults_files += [f"{ArgumentParserMkosi.fromfile_prefix_chars}{default_path}"] defaults_dir = "mkosi.default.d" if os.path.isdir(defaults_dir): for file in sorted(os.listdir(defaults_dir)): path = os.path.join(defaults_dir, file) if os.path.isfile(path): defaults_files += [f"{ArgumentParserMkosi.fromfile_prefix_chars}{path}"] if distribution is not None: distribution_dir = f"mkosi.default.d/{distribution}" if os.path.isdir(distribution_dir): for subdir in sorted(os.listdir(distribution_dir)): path = os.path.join(distribution_dir, subdir) if os.path.isfile(path): defaults_files += [f"{ArgumentParserMkosi.fromfile_prefix_chars}{path}"] # Parse all parameters handled by mkosi. # Parameters forwarded to subprocesses such as nspawn or qemu end up in cmdline_argv. return create_parser().parse_args(defaults_files + argv) def parse_bytes(num_bytes: Optional[str]) -> Optional[int]: if num_bytes is None: return num_bytes if num_bytes.endswith("G"): factor = 1024 ** 3 elif num_bytes.endswith("M"): factor = 1024 ** 2 elif num_bytes.endswith("K"): factor = 1024 else: factor = 1 if factor > 1: num_bytes = num_bytes[:-1] result = int(num_bytes) * factor if result <= 0: raise ValueError("Size out of range") if result % 512 != 0: raise ValueError("Size not a multiple of 512") return result def detect_distribution() -> Tuple[Optional[Distribution], Optional[str]]: try: os_release = read_os_release() except FileNotFoundError: return None, None dist_id = os_release.get("ID", "linux") dist_id_like = os_release.get("ID_LIKE", "").split() version = os_release.get("VERSION", None) version_id = os_release.get("VERSION_ID", None) version_codename = os_release.get("VERSION_CODENAME", None) extracted_codename = None if version: # extract Debian release codename m = re.search(r"\((.*?)\)", version) if m: extracted_codename = m.group(1) if dist_id == "clear-linux-os": dist_id = "clear" d: Optional[Distribution] = None for the_id in [dist_id, *dist_id_like]: d = Distribution.__members__.get(the_id, None) if d is not None: break if d in {Distribution.debian, Distribution.ubuntu} and (version_codename or extracted_codename): # debootstrap needs release codenames, not version numbers version_id = version_codename or extracted_codename return d, version_id def unlink_try_hard(path: Optional[PathString]) -> None: if path is None: return path = Path(path) try: return path.unlink() except FileNotFoundError: return except Exception: pass if shutil.which("btrfs"): try: btrfs_subvol_delete(path) return except Exception: pass shutil.rmtree(path) def remove_glob(*patterns: PathString) -> None: pathgen = (glob.glob(str(pattern)) for pattern in patterns) paths: Set[str] = set(sum(pathgen, [])) # uniquify for path in paths: unlink_try_hard(Path(path)) def empty_directory(path: Path) -> None: try: for f in os.listdir(path): unlink_try_hard(path / f) except FileNotFoundError: pass def unlink_output(args: CommandLineArguments) -> None: if not args.force and args.verb != "clean": return if not args.skip_final_phase: with complete_step("Removing output files…"): unlink_try_hard(args.output) unlink_try_hard(f"{args.output}.manifest") unlink_try_hard(f"{args.output}.changelog") if args.checksum: unlink_try_hard(args.output_checksum) if args.verity: unlink_try_hard(args.output_root_hash_file) if args.verity == "signed": unlink_try_hard(args.output_root_hash_p7s_file) if args.sign: unlink_try_hard(args.output_signature) if args.bmap: unlink_try_hard(args.output_bmap) if args.split_artifacts: unlink_try_hard(args.output_split_root) unlink_try_hard(args.output_split_verity) unlink_try_hard(args.output_split_verity_sig) unlink_try_hard(args.output_split_kernel) if args.nspawn_settings is not None: unlink_try_hard(args.output_nspawn_settings) if args.ssh and args.output_sshkey is not None: unlink_try_hard(args.output_sshkey) # We remove any cached images if either the user used --force # twice, or he/she called "clean" with it passed once. Let's also # remove the downloaded package cache if the user specified one # additional "--force". if args.verb == "clean": remove_build_cache = args.force_count > 0 remove_package_cache = args.force_count > 1 else: remove_build_cache = args.force_count > 1 remove_package_cache = args.force_count > 2 if remove_build_cache: if args.cache_pre_dev is not None or args.cache_pre_inst is not None: with complete_step("Removing incremental cache files…"): if args.cache_pre_dev is not None: unlink_try_hard(args.cache_pre_dev) if args.cache_pre_inst is not None: unlink_try_hard(args.cache_pre_inst) if args.build_dir is not None: with complete_step("Clearing out build directory…"): empty_directory(args.build_dir) if args.include_dir is not None: with complete_step("Clearing out include directory…"): empty_directory(args.include_dir) if args.install_dir is not None: with complete_step("Clearing out install directory…"): empty_directory(args.install_dir) if remove_package_cache: if args.cache_path is not None: with complete_step("Clearing out package cache…"): empty_directory(args.cache_path) def parse_boolean(s: str) -> bool: "Parse 1/true/yes as true and 0/false/no as false" s_l = s.lower() if s_l in {"1", "true", "yes"}: return True if s_l in {"0", "false", "no"}: return False raise ValueError(f"Invalid literal for bool(): {s!r}") def find_nspawn_settings(args: argparse.Namespace) -> None: if args.nspawn_settings is not None: return if os.path.exists("mkosi.nspawn"): args.nspawn_settings = "mkosi.nspawn" def find_extra(args: argparse.Namespace) -> None: if len(args.extra_trees) > 0: return if os.path.isdir("mkosi.extra"): args.extra_trees.append(Path("mkosi.extra")) if os.path.isfile("mkosi.extra.tar"): args.extra_trees.append(Path("mkosi.extra.tar")) def find_skeleton(args: argparse.Namespace) -> None: if len(args.skeleton_trees) > 0: return if os.path.isdir("mkosi.skeleton"): args.skeleton_trees.append(Path("mkosi.skeleton")) if os.path.isfile("mkosi.skeleton.tar"): args.skeleton_trees.append(Path("mkosi.skeleton.tar")) def args_find_path(args: argparse.Namespace, name: str, path: str, *, as_list: bool = False) -> None: if getattr(args, name) is not None: return abspath = Path(path).absolute() if abspath.exists(): setattr(args, name, [abspath] if as_list else abspath) def find_cache(args: argparse.Namespace) -> None: if args.cache_path is not None: return if os.path.exists("mkosi.cache/"): dirname = args.distribution.name # Clear has a release number that can be used, however the # cache is valid (and more efficient) across releases. if args.distribution != Distribution.clear and args.release is not None: dirname += "~" + args.release args.cache_path = Path("mkosi.cache", dirname) def require_private_file(name: str, description: str) -> None: mode = os.stat(name).st_mode & 0o777 if mode & 0o007: warn(dedent(f"""\ Permissions of '{name}' of '{mode:04o}' are too open. When creating {description} files use an access mode that restricts access to the owner only. """)) def find_passphrase(args: argparse.Namespace) -> None: if args.encrypt is None: args.passphrase = None return try: require_private_file("mkosi.passphrase", "passphrase") args.passphrase = {"type": "file", "content": "mkosi.passphrase"} except FileNotFoundError: while True: passphrase = getpass.getpass("Please enter passphrase: ") passphrase_confirmation = getpass.getpass("Passphrase confirmation: ") if passphrase == passphrase_confirmation: args.passphrase = {"type": "stdin", "content": passphrase} break MkosiPrinter.info("Passphrase doesn't match confirmation. Please try again.") def find_password(args: argparse.Namespace) -> None: if args.password is not None: return try: require_private_file("mkosi.rootpw", "root password") with open("mkosi.rootpw") as f: args.password = f.read().strip() except FileNotFoundError: pass def find_secure_boot(args: argparse.Namespace) -> None: if not args.secure_boot and args.verity != "signed": return if args.secure_boot_key is None: if os.path.exists("mkosi.secure-boot.key"): args.secure_boot_key = Path("mkosi.secure-boot.key") if args.secure_boot_certificate is None: if os.path.exists("mkosi.secure-boot.crt"): args.secure_boot_certificate = Path("mkosi.secure-boot.crt") def find_image_version(args: argparse.Namespace) -> None: if args.image_version is not None: return try: with open("mkosi.version") as f: args.image_version = f.read().strip() except FileNotFoundError: pass KNOWN_SUFFIXES = { ".xz", ".zstd", ".raw", ".tar", ".cpio", ".qcow2", } def strip_suffixes(path: Path) -> Path: while path.suffix in KNOWN_SUFFIXES: path = path.with_suffix("") return path def xescape(s: str) -> str: "Escape a string udev-style, for inclusion in /dev/disk/by-*/* symlinks" ret = "" for c in s: if ord(c) <= 32 or ord(c) >= 127 or c == "/": ret = ret + "\\x%02x" % ord(c) else: ret = ret + str(c) return ret def build_auxiliary_output_path(args: argparse.Namespace, suffix: str, can_compress: bool = False) -> Path: output = strip_suffixes(args.output) compression = should_compress_output(args) if can_compress else False return output.with_name(f"{output.name}{suffix}{compression or ''}") DISABLED = Path('DISABLED') # A placeholder value to suppress autodetection. # This is used as a singleton, i.e. should be compared with # 'is' in other parts of the code. def script_path(value: Optional[str]) -> Optional[Path]: if value is None: return None if value == '': return DISABLED return Path(value) def normalize_script(path: Optional[Path]) -> Optional[Path]: if not path or path is DISABLED: return None path = Path(path).absolute() if not path.exists(): die(f"{path} does not exist") if not path.is_file(): die(f"{path} is not a file") if not os.access(path, os.X_OK): die(f"{path} is not executable") return path def load_args(args: argparse.Namespace) -> CommandLineArguments: global ARG_DEBUG ARG_DEBUG.update(args.debug) args_find_path(args, "nspawn_settings", "mkosi.nspawn") args_find_path(args, "build_script", "mkosi.build") args_find_path(args, "build_sources", ".") args_find_path(args, "build_dir", "mkosi.builddir/") args_find_path(args, "include_dir", "mkosi.includedir/") args_find_path(args, "install_dir", "mkosi.installdir/") args_find_path(args, "postinst_script", "mkosi.postinst") args_find_path(args, "prepare_script", "mkosi.prepare") args_find_path(args, "finalize_script", "mkosi.finalize") args_find_path(args, "output_dir", "mkosi.output/") args_find_path(args, "workspace_dir", "mkosi.workspace/") args_find_path(args, "mksquashfs_tool", "mkosi.mksquashfs-tool", as_list=True) find_extra(args) find_skeleton(args) find_password(args) find_passphrase(args) find_secure_boot(args) find_image_version(args) args.extra_search_paths = expand_paths(args.extra_search_paths) if args.cmdline and args.verb not in MKOSI_COMMANDS_CMDLINE: die("Additional parameters only accepted for " + str(MKOSI_COMMANDS_CMDLINE)[1:-1] + " invocations.") args.force = args.force_count > 0 if args.output_format is None: args.output_format = OutputFormat.gpt_ext4 args = load_distribution(args) if args.release is None: if args.distribution == Distribution.fedora: args.release = "34" elif args.distribution in (Distribution.centos, Distribution.centos_epel): args.release = "8" elif args.distribution in (Distribution.rocky, Distribution.rocky_epel): args.release = "8" elif args.distribution in (Distribution.alma, Distribution.alma_epel): args.release = "8" elif args.distribution == Distribution.mageia: args.release = "7" elif args.distribution == Distribution.debian: args.release = "unstable" elif args.distribution == Distribution.ubuntu: args.release = "focal" elif args.distribution == Distribution.opensuse: args.release = "tumbleweed" elif args.distribution == Distribution.clear: args.release = "latest" elif args.distribution == Distribution.photon: args.release = "3.0" elif args.distribution == Distribution.openmandriva: args.release = "cooker" elif args.distribution == Distribution.gentoo: args.release = "17.1" else: args.release = "rolling" if args.bootable: if args.output_format in ( OutputFormat.directory, OutputFormat.subvolume, OutputFormat.tar, OutputFormat.cpio, OutputFormat.plain_squashfs, ): die("Directory, subvolume, tar, cpio, and plain squashfs images cannot be booted.") if not args.boot_protocols: args.boot_protocols = ["uefi"] if args.distribution == Distribution.photon: args.boot_protocols = ["bios"] if not {"uefi", "bios"}.issuperset(args.boot_protocols): die("Not a valid boot protocol") if "uefi" in args.boot_protocols and args.distribution == Distribution.photon: die(f"uefi boot not supported for {args.distribution}") if args.distribution in (Distribution.centos, Distribution.centos_epel): epel_release = int(args.release.split(".")[0]) if epel_release <= 8 and args.output_format == OutputFormat.gpt_btrfs: die(f"Sorry, CentOS {epel_release} does not support btrfs") if epel_release <= 7 and args.bootable and "uefi" in args.boot_protocols and args.with_unified_kernel_images: die( f"Sorry, CentOS {epel_release} does not support unified kernel images. " "You must use --without-unified-kernel-images." ) if args.distribution in (Distribution.rocky, Distribution.rocky_epel): epel_release = int(args.release.split(".")[0]) if epel_release == 8 and args.output_format == OutputFormat.gpt_btrfs: die(f"Sorry, Rocky {epel_release} does not support btrfs") if args.distribution in (Distribution.alma, Distribution.alma_epel): epel_release = int(args.release.split(".")[0]) if epel_release == 8 and args.output_format == OutputFormat.gpt_btrfs: die(f"Sorry, Alma {epel_release} does not support btrfs") # Remove once https://github.com/clearlinux/clr-boot-manager/pull/238 is merged and available. if args.distribution == Distribution.clear and args.output_format == OutputFormat.gpt_btrfs: die("Sorry, Clear Linux does not support btrfs") if args.distribution == Distribution.clear and "," in args.boot_protocols: die("Sorry, Clear Linux does not support hybrid BIOS/UEFI images") if shutil.which("bsdtar") and args.distribution == Distribution.openmandriva and args.tar_strip_selinux_context: die("Sorry, bsdtar on OpenMandriva is incompatible with --tar-strip-selinux-context") find_cache(args) if args.mirror is None: if args.distribution in (Distribution.fedora, Distribution.centos): args.mirror = None elif args.distribution == Distribution.debian: args.mirror = "http://deb.debian.org/debian" elif args.distribution == Distribution.ubuntu: args.mirror = "http://archive.ubuntu.com/ubuntu" if platform.machine() == "aarch64": args.mirror = "http://ports.ubuntu.com/" elif args.distribution == Distribution.arch and platform.machine() == "aarch64": args.mirror = "http://mirror.archlinuxarm.org" elif args.distribution == Distribution.opensuse: args.mirror = "http://download.opensuse.org" elif args.distribution in (Distribution.rocky, Distribution.rocky_epel): args.mirror = None elif args.distribution in (Distribution.alma, Distribution.alma_epel): args.mirror = None if args.minimize and not args.output_format.can_minimize(): die("Minimal file systems only supported for ext4 and btrfs.") if is_generated_root(args) and args.incremental: die("Sorry, incremental mode is currently not supported for squashfs or minimized file systems.") if args.encrypt is not None: if not args.output_format.is_disk(): die("Encryption is only supported for disk images.") if args.encrypt == "data" and args.output_format == OutputFormat.gpt_btrfs: die("'data' encryption mode not supported on btrfs, use 'all' instead.") if args.encrypt == "all" and args.verity: die("'all' encryption mode may not be combined with Verity.") if args.sign: args.checksum = True if args.output is None: iid = args.image_id if args.image_id is not None else "image" prefix = f"{iid}_{args.image_version}" if args.image_version is not None else iid if args.output_format.is_disk(): compress = should_compress_output(args) output = prefix + (".qcow2" if args.qcow2 else ".raw") + (f".{compress}" if compress else "") elif args.output_format == OutputFormat.tar: output = f"{prefix}.tar.xz" elif args.output_format == OutputFormat.cpio: output = f"{prefix}.cpio" + (f".{args.compress}" if args.compress else "") else: output = prefix args.output = Path(output) if args.manifest_format is None: args.manifest_format = [ManifestFormat.json] if args.output_dir is not None: args.output_dir = args.output_dir.absolute() if "/" not in str(args.output): args.output = args.output_dir / args.output else: warn("Ignoring configured output directory as output file is a qualified path.") if args.incremental or args.verb == "clean": if args.image_id is not None: # If the image ID is specified, use cache file names that are independent of the image versions, so that # rebuilding and bumping versions is cheap and reuses previous versions if cached. if args.output_dir: args.cache_pre_dev = args.output_dir / f"{args.image_id}.cache-pre-dev" args.cache_pre_inst = args.output_dir / f"{args.image_id}.cache-pre-inst" else: args.cache_pre_dev = Path(f"{args.image_id}.cache-pre-dev") args.cache_pre_inst = Path(f"{args.image_id}.cache-pre-inst") else: # Otherwise, derive the cache file names directly from the output file names. args.cache_pre_dev = Path(f"{args.output}.cache-pre-dev") args.cache_pre_inst = Path(f"{args.output}.cache-pre-inst") else: args.cache_pre_dev = None args.cache_pre_inst = None args.output = args.output.absolute() if args.output_format == OutputFormat.tar: args.compress_output = "xz" if not args.output_format.is_disk(): args.split_artifacts = False if args.output_format.is_squashfs(): args.read_only = True args.root_size = None if args.compress is False: die("Cannot disable compression with squashfs") if args.compress is None: args.compress = True if args.verity: args.read_only = True args.output_root_hash_file = build_auxiliary_output_path(args, roothash_suffix(args.usr_only)) if args.verity == "signed": args.output_root_hash_p7s_file = build_auxiliary_output_path(args, roothash_p7s_suffix(args.usr_only)) if args.checksum: args.output_checksum = args.output.with_name("SHA256SUMS") if args.sign: args.output_signature = args.output.with_name("SHA256SUMS.gpg") if args.bmap: args.output_bmap = build_auxiliary_output_path(args, ".bmap") if args.nspawn_settings is not None: args.nspawn_settings = args.nspawn_settings.absolute() args.output_nspawn_settings = build_auxiliary_output_path(args, ".nspawn") # We want this set even if --ssh is not specified so we can find the SSH key when verb == "ssh". if args.ssh_key is None and args.ssh_agent is None: args.output_sshkey = args.output.with_name("id_rsa") if args.split_artifacts: args.output_split_root = build_auxiliary_output_path(args, ".usr" if args.usr_only else ".root", True) if args.verity: args.output_split_verity = build_auxiliary_output_path(args, ".verity", True) if args.verity == "signed": args.output_split_verity_sig = build_auxiliary_output_path(args, ".verity-sig", True) if args.bootable: args.output_split_kernel = build_auxiliary_output_path(args, ".efi", True) if args.build_sources is not None: args.build_sources = args.build_sources.absolute() if args.build_dir is not None: args.build_dir = args.build_dir.absolute() if args.include_dir is not None: args.include_dir = args.include_dir.absolute() if args.install_dir is not None: args.install_dir = args.install_dir.absolute() args.build_script = normalize_script(args.build_script) args.prepare_script = normalize_script(args.prepare_script) args.postinst_script = normalize_script(args.postinst_script) args.finalize_script = normalize_script(args.finalize_script) for i in range(len(args.environment)): if "=" not in args.environment[i]: value = os.getenv(args.environment[i], "") args.environment[i] += f"={value}" if args.cache_path is not None: args.cache_path = args.cache_path.absolute() if args.extra_trees: for i in range(len(args.extra_trees)): args.extra_trees[i] = args.extra_trees[i].absolute() if args.skeleton_trees is not None: for i in range(len(args.skeleton_trees)): args.skeleton_trees[i] = args.skeleton_trees[i].absolute() args.root_size = parse_bytes(args.root_size) args.home_size = parse_bytes(args.home_size) args.srv_size = parse_bytes(args.srv_size) args.var_size = parse_bytes(args.var_size) args.tmp_size = parse_bytes(args.tmp_size) args.esp_size = parse_bytes(args.esp_size) args.xbootldr_size = parse_bytes(args.xbootldr_size) args.swap_size = parse_bytes(args.swap_size) if args.root_size is None: args.root_size = 3 * 1024 * 1024 * 1024 if args.bootable and args.esp_size is None: args.esp_size = 256 * 1024 * 1024 args.verity_size = None args.verity_sig_size = None if args.secure_boot_key is not None: args.secure_boot_key = args.secure_boot_key.absolute() if args.secure_boot_certificate is not None: args.secure_boot_certificate = args.secure_boot_certificate.absolute() if args.secure_boot or args.verity == "signed": if args.secure_boot_key is None: die( "UEFI SecureBoot or signed Verity enabled, but couldn't find private key. (Consider placing it in mkosi.secure-boot.key?)" ) # NOQA: E501 if args.secure_boot_certificate is None: die( "UEFI SecureBoot or signed Verity enabled, but couldn't find certificate. (Consider placing it in mkosi.secure-boot.crt?)" ) # NOQA: E501 if args.verb in ("shell", "boot"): opname = "acquire shell" if args.verb == "shell" else "boot" if args.output_format in (OutputFormat.tar, OutputFormat.cpio): die(f"Sorry, can't {opname} with a {args.output_format} archive.") if should_compress_output(args): die("Sorry, can't {opname} with a compressed image.") if args.qcow2: die("Sorry, can't {opname} using a qcow2 image.") if args.verb == "qemu": if not args.output_format.is_disk(): die("Sorry, can't boot non-disk images with qemu.") if needs_build(args) and args.qemu_headless and not args.bootable: die("--qemu-headless requires --bootable") if args.qemu_headless and "console=ttyS0" not in args.kernel_command_line: args.kernel_command_line.append("console=ttyS0") # By default, the serial console gets spammed with kernel log messages. # Let's up the log level to only show warning and error messages when # --qemu-headless is enabled to avoid this spam. if args.qemu_headless and not any("loglevel" in x for x in args.kernel_command_line): args.kernel_command_line.append("loglevel=4") if args.bootable and args.usr_only and not args.verity: # GPT auto-discovery on empty kernel command lines only looks for root partitions # (in order to avoid ambiguities), if we shall operate without one (and only have # a /usr partition) we thus need to explicitly say which partition to mount. name = root_partition_description(args=None, image_id=args.image_id, image_version=args.image_version, usr_only=args.usr_only) args.kernel_command_line.append(f"mount.usr=/dev/disk/by-partlabel/{xescape(name)}") if not args.read_only: args.kernel_command_line.append("rw") if is_generated_root(args) and "bios" in args.boot_protocols: die("Sorry, BIOS cannot be combined with --minimize or squashfs filesystems") if args.bootable and args.distribution in (Distribution.clear, Distribution.photon): die("Sorry, --bootable is not supported on this distro") if not args.with_unified_kernel_images and "uefi" in args.boot_protocols: if args.distribution in (Distribution.debian, Distribution.ubuntu, Distribution.mageia, Distribution.opensuse): die("Sorry, --without-unified-kernel-images is not supported in UEFI mode on this distro.") if args.verity and not args.with_unified_kernel_images: die("Sorry, --verity can only be used with unified kernel images") if args.source_file_transfer is None: if os.path.exists(".git") or args.build_sources.joinpath(".git").exists(): args.source_file_transfer = SourceFileTransfer.copy_git_others else: args.source_file_transfer = SourceFileTransfer.copy_all if args.source_file_transfer_final == SourceFileTransfer.mount: die("Sorry, --source-file-transfer-final=mount is not supported") if args.skip_final_phase and args.verb != "build": die("--skip-final-phase can only be used when building an image using 'mkosi build'") if args.ssh_timeout < 0: die("--ssh-timeout must be >= 0") if args.ssh_port <= 0: die("--ssh-port must be > 0") # We set a reasonable umask so that files that are created in the image # will have reasonable permissions. We don't want those permissions to be # influenced by the caller's umask which will be used only for output files. args.original_umask = os.umask(0o022) # Let's define a fixed machine ID for all our build-time # runs. We'll strip it off the final image, but some build-time # tools (dracut...) want a fixed one, hence provide one, and # always the same args.machine_id = uuid.uuid4().hex # If we are building a sysext we don't want to add base packages to the # extension image, as they will already be in the base image. if args.base_image is not None: args.base_packages = False return CommandLineArguments(**vars(args)) def check_output(args: CommandLineArguments) -> None: if args.skip_final_phase: return for f in ( args.output, args.output_checksum if args.checksum else None, args.output_signature if args.sign else None, args.output_bmap if args.bmap else None, args.output_nspawn_settings if args.nspawn_settings is not None else None, args.output_root_hash_file if args.verity else None, args.output_sshkey if args.ssh else None, args.output_split_root if args.split_artifacts else None, args.output_split_verity if args.split_artifacts else None, args.output_split_verity_sig if args.split_artifacts else None, args.output_split_kernel if args.split_artifacts else None, ): if f and f.exists(): die(f"Output path {f} exists already. (Consider invocation with --force.)") def yes_no(b: Optional[bool]) -> str: return "yes" if b else "no" def yes_no_or(b: Union[bool, str]) -> str: return b if isinstance(b, str) else yes_no(b) def format_bytes_or_disabled(sz: Optional[int]) -> str: if sz is None: return "(disabled)" return format_bytes(sz) def format_bytes_or_auto(sz: Optional[int]) -> str: if sz is None: return "(automatic)" return format_bytes(sz) def none_to_na(s: Optional[T]) -> Union[T, str]: return "n/a" if s is None else s def none_to_no(s: Optional[T]) -> Union[T, str]: return "no" if s is None else s def none_to_none(o: Optional[object]) -> str: return "none" if o is None else str(o) def line_join_list(array: Sequence[PathString]) -> str: if not array: return "none" return "\n ".join(str(item) for item in array) def print_summary(args: CommandLineArguments) -> None: # FIXME: normal print MkosiPrinter.info("COMMANDS:") MkosiPrinter.info(" verb: " + args.verb) MkosiPrinter.info(" cmdline: " + " ".join(args.cmdline)) MkosiPrinter.info("\nDISTRIBUTION:") MkosiPrinter.info(" Distribution: " + args.distribution.name) MkosiPrinter.info(" Release: " + none_to_na(args.release)) if args.architecture: MkosiPrinter.info(" Architecture: " + args.architecture) if args.mirror is not None: MkosiPrinter.info(" Mirror: " + args.mirror) if args.repositories is not None and len(args.repositories) > 0: MkosiPrinter.info(" Repositories: " + ",".join(args.repositories)) MkosiPrinter.info(" Use Host Repositories: " + yes_no(args.use_host_repositories)) MkosiPrinter.info("\nOUTPUT:") if args.hostname: MkosiPrinter.info(" Hostname: " + args.hostname) if args.image_id is not None: MkosiPrinter.info(" Image ID: " + args.image_id) if args.image_version is not None: MkosiPrinter.info(" Image Version: " + args.image_version) MkosiPrinter.info(" Output Format: " + args.output_format.name) maniformats = (" ".join(str(i) for i in args.manifest_format)) or "(none)" MkosiPrinter.info(" Manifest Formats: " + maniformats) if args.output_format.can_minimize(): MkosiPrinter.info(" Minimize: " + yes_no(args.minimize)) if args.output_dir: MkosiPrinter.info(f" Output Directory: {args.output_dir}") if args.workspace_dir: MkosiPrinter.info(f" Workspace Directory: {args.workspace_dir}") MkosiPrinter.info(f" Output: {args.output}") MkosiPrinter.info(f" Output Checksum: {none_to_na(args.output_checksum if args.checksum else None)}") MkosiPrinter.info(f" Output Signature: {none_to_na(args.output_signature if args.sign else None)}") MkosiPrinter.info(f" Output Bmap: {none_to_na(args.output_bmap if args.bmap else None)}") MkosiPrinter.info(f" Generate split artifacts: {yes_no(args.split_artifacts)}") MkosiPrinter.info( f" Output Split Root FS: {none_to_na(args.output_split_root if args.split_artifacts else None)}" ) MkosiPrinter.info( f" Output Split Verity: {none_to_na(args.output_split_verity if args.split_artifacts else None)}" ) MkosiPrinter.info( f" Output Split Verity Sig.: {none_to_na(args.output_split_verity_sig if args.split_artifacts else None)}" ) MkosiPrinter.info( f" Output Split Kernel: {none_to_na(args.output_split_kernel if args.split_artifacts else None)}" ) MkosiPrinter.info( f" Output nspawn Settings: {none_to_na(args.output_nspawn_settings if args.nspawn_settings is not None else None)}" ) MkosiPrinter.info( f" SSH key: {none_to_na((args.ssh_key or args.output_sshkey or args.ssh_agent) if args.ssh else None)}" ) if args.ssh_port != 22: MkosiPrinter.info(f" SSH port: {args.ssh_port}") MkosiPrinter.info(" Incremental: " + yes_no(args.incremental)) MkosiPrinter.info(" Read-only: " + yes_no(args.read_only)) MkosiPrinter.info(" Internal (FS) Compression: " + yes_no_or(should_compress_fs(args))) MkosiPrinter.info("Outer (output) Compression: " + yes_no_or(should_compress_output(args))) if args.mksquashfs_tool: MkosiPrinter.info(" Mksquashfs tool: " + " ".join(map(str, args.mksquashfs_tool))) if args.output_format.is_disk(): MkosiPrinter.info(" QCow2: " + yes_no(args.qcow2)) MkosiPrinter.info(" Encryption: " + none_to_no(args.encrypt)) MkosiPrinter.info(" Verity: " + yes_no_or(args.verity)) if args.output_format.is_disk(): MkosiPrinter.info(" Bootable: " + yes_no(args.bootable)) if args.bootable: MkosiPrinter.info(" Kernel Command Line: " + " ".join(args.kernel_command_line)) MkosiPrinter.info(" UEFI SecureBoot: " + yes_no(args.secure_boot)) MkosiPrinter.info(" Boot Protocols: " + line_join_list(args.boot_protocols)) MkosiPrinter.info(" Unified Kernel Images: " + yes_no(args.with_unified_kernel_images)) MkosiPrinter.info(" GPT First LBA: " + str(args.gpt_first_lba)) MkosiPrinter.info(" Hostonly Initrd: " + yes_no(args.hostonly_initrd)) if args.secure_boot or args.verity == "sign": MkosiPrinter.info(f"SecureBoot/Verity Sign Key: {args.secure_boot_key}") MkosiPrinter.info(f" SecureBoot/verity Cert.: {args.secure_boot_certificate}") MkosiPrinter.info("\nCONTENT:") MkosiPrinter.info(" Packages: " + line_join_list(args.packages)) if args.distribution in ( Distribution.fedora, Distribution.centos, Distribution.centos_epel, Distribution.mageia, Distribution.rocky, Distribution.rocky_epel, Distribution.alma, Distribution.alma_epel, ): MkosiPrinter.info(" With Documentation: " + yes_no(args.with_docs)) MkosiPrinter.info(" Package Cache: " + none_to_none(args.cache_path)) MkosiPrinter.info(" Extra Trees: " + line_join_list(args.extra_trees)) MkosiPrinter.info(" Skeleton Trees: " + line_join_list(args.skeleton_trees)) MkosiPrinter.info(" CleanPackageMetadata: " + yes_no_or(args.clean_package_metadata)) if args.remove_files: MkosiPrinter.info(" Remove Files: " + line_join_list(args.remove_files)) if args.remove_packages: MkosiPrinter.info(" Remove Packages: " + line_join_list(args.remove_packages)) MkosiPrinter.info(" Build Script: " + none_to_none(args.build_script)) MkosiPrinter.info(" Script Environment: " + line_join_list(args.environment)) if args.build_script: MkosiPrinter.info(" Run tests: " + yes_no(args.with_tests)) MkosiPrinter.info(" Password: " + ("default" if args.password is None else "set")) MkosiPrinter.info(" Autologin: " + yes_no(args.autologin)) MkosiPrinter.info(" Build Sources: " + none_to_none(args.build_sources)) MkosiPrinter.info(" Source File Transfer: " + none_to_none(args.source_file_transfer)) MkosiPrinter.info("Source File Transfer Final: " + none_to_none(args.source_file_transfer_final)) MkosiPrinter.info(" Build Directory: " + none_to_none(args.build_dir)) MkosiPrinter.info(" Include Directory: " + none_to_none(args.include_dir)) MkosiPrinter.info(" Install Directory: " + none_to_none(args.install_dir)) MkosiPrinter.info(" Build Packages: " + line_join_list(args.build_packages)) MkosiPrinter.info(" Skip final phase: " + yes_no(args.skip_final_phase)) MkosiPrinter.info(" Postinstall Script: " + none_to_none(args.postinst_script)) MkosiPrinter.info(" Prepare Script: " + none_to_none(args.prepare_script)) MkosiPrinter.info(" Finalize Script: " + none_to_none(args.finalize_script)) MkosiPrinter.info(" Scripts with network: " + yes_no_or(args.with_network)) MkosiPrinter.info(" nspawn Settings: " + none_to_none(args.nspawn_settings)) if args.output_format.is_disk(): MkosiPrinter.info("\nPARTITIONS:") MkosiPrinter.info(" Root Partition: " + format_bytes_or_auto(args.root_size)) MkosiPrinter.info(" Swap Partition: " + format_bytes_or_disabled(args.swap_size)) if "uefi" in args.boot_protocols: MkosiPrinter.info(" ESP: " + format_bytes_or_disabled(args.esp_size)) if "bios" in args.boot_protocols: MkosiPrinter.info(" BIOS: " + format_bytes_or_disabled(BIOS_PARTITION_SIZE)) MkosiPrinter.info(" XBOOTLDR Partition: " + format_bytes_or_disabled(args.xbootldr_size)) MkosiPrinter.info(" /home Partition: " + format_bytes_or_disabled(args.home_size)) MkosiPrinter.info(" /srv Partition: " + format_bytes_or_disabled(args.srv_size)) MkosiPrinter.info(" /var Partition: " + format_bytes_or_disabled(args.var_size)) MkosiPrinter.info(" /var/tmp Partition: " + format_bytes_or_disabled(args.tmp_size)) MkosiPrinter.info(" /usr only: " + yes_no(args.usr_only)) MkosiPrinter.info("\nVALIDATION:") MkosiPrinter.info(" Checksum: " + yes_no(args.checksum)) MkosiPrinter.info(" Sign: " + yes_no(args.sign)) MkosiPrinter.info(" GPG Key: " + ("default" if args.key is None else args.key)) MkosiPrinter.info("\nHOST CONFIGURATION:") MkosiPrinter.info(" Extra search paths: " + line_join_list(args.extra_search_paths)) MkosiPrinter.info(" QEMU Headless: " + yes_no(args.qemu_headless)) MkosiPrinter.info(" Network Veth: " + yes_no(args.network_veth)) def reuse_cache_tree( args: CommandLineArguments, root: Path, do_run_build_script: bool, for_cache: bool, cached: bool ) -> bool: """If there's a cached version of this tree around, use it and initialize our new root directly from it. Returns a boolean indicating whether we are now operating on a cached version or not.""" if cached: return True if not args.incremental: return False if for_cache: return False if args.output_format.is_disk_rw(): return False fname = args.cache_pre_dev if do_run_build_script else args.cache_pre_inst if fname is None: return False if fname.exists(): with complete_step(f"Copying in cached tree {fname}…"): copy_path(fname, root) return True def make_output_dir(args: CommandLineArguments) -> None: """Create the output directory if set and not existing yet""" if args.output_dir is None: return args.output_dir.mkdir(mode=0o755, exist_ok=True) def make_build_dir(args: CommandLineArguments) -> None: """Create the build directory if set and not existing yet""" if args.build_dir is None: return args.build_dir.mkdir(mode=0o755, exist_ok=True) def setup_ssh( args: CommandLineArguments, root: Path, do_run_build_script: bool, for_cache: bool, cached: bool ) -> Optional[TextIO]: if do_run_build_script or not args.ssh: return None if args.distribution in (Distribution.debian, Distribution.ubuntu): unit = "ssh.socket" if args.ssh_port != 22: add_dropin_config(root, unit, "port", f"""\ [Socket] ListenStream= ListenStream={args.ssh_port} """) else: unit = "sshd" # We cache the enable sshd step but not the keygen step because it creates a separate file on the host # which introduces non-trivial issue when trying to cache it. if not cached: run(["systemctl", "--root", root, "enable", unit]) if for_cache: return None authorized_keys = root_home(args, root) / ".ssh/authorized_keys" f: Optional[TextIO] if args.ssh_key: f = open(args.ssh_key, mode="r", encoding="utf-8") copy_file(f"{args.ssh_key}.pub", authorized_keys) elif args.ssh_agent is not None: env = {"SSH_AUTH_SOCK": args.ssh_agent} result = run(["ssh-add", "-L"], env=env, text=True, stdout=subprocess.PIPE) authorized_keys.write_text(result.stdout) f = None else: assert args.output_sshkey is not None f = cast( TextIO, tempfile.NamedTemporaryFile(mode="w+", prefix=".mkosi-", encoding="utf-8", dir=args.output_sshkey.parent), ) with complete_step("Generating SSH key pair…"): # Write a 'y' to confirm to overwrite the file. run( ["ssh-keygen", "-f", f.name, "-N", args.password or "", "-C", "mkosi", "-t", "ed25519"], input="y\n", text=True, stdout=DEVNULL, ) copy_file(f"{f.name}.pub", authorized_keys) os.remove(f"{f.name}.pub") authorized_keys.chmod(0o600) return f def setup_network_veth(args: CommandLineArguments, root: Path, do_run_build_script: bool, cached: bool) -> None: if do_run_build_script or cached or not args.network_veth: return with complete_step("Setting up network veth…"): network_file = root / "etc/systemd/network/80-mkosi-network-veth.network" with open(network_file, "w") as f: # Adapted from https://github.com/systemd/systemd/blob/v247/network/80-container-host0.network f.write( dedent( """\ [Match] Virtualization=!container Type=ether [Network] DHCP=yes LinkLocalAddressing=yes LLDP=yes EmitLLDP=customer-bridge [DHCP] UseTimezone=yes """ ) ) os.chmod(network_file, 0o644) run(["systemctl", "--root", root, "enable", "systemd-networkd"]) @dataclasses.dataclass class BuildOutput: raw: Optional[BinaryIO] archive: Optional[BinaryIO] root_hash: Optional[str] root_hash_p7s: Optional[bytes] sshkey: Optional[TextIO] # Partition contents split_root: Optional[BinaryIO] split_verity: Optional[BinaryIO] split_verity_sig: Optional[BinaryIO] split_kernel: Optional[BinaryIO] def raw_name(self) -> Optional[str]: return self.raw.name if self.raw is not None else None @classmethod def empty(cls) -> BuildOutput: return cls(None, None, None, None, None, None, None, None, None) def build_image( args: CommandLineArguments, root: Path, *, manifest: Optional[Manifest] = None, do_run_build_script: bool, for_cache: bool = False, cleanup: bool = False, ) -> BuildOutput: # If there's no build script set, there's no point in executing # the build script iteration. Let's quit early. if args.build_script is None and do_run_build_script: return BuildOutput.empty() make_build_dir(args) raw, cached = reuse_cache_image(args, do_run_build_script, for_cache) if for_cache and cached: # Found existing cache image, exiting build_image return BuildOutput.empty() if cached: assert raw is not None refresh_partition_table(args, raw) else: raw = create_image(args, for_cache) with attach_base_image(args.base_image) as base_image, \ attach_image_loopback(raw) as loopdev: prepare_swap(args, loopdev, cached) prepare_esp(args, loopdev, cached) prepare_xbootldr(args, loopdev, cached) if loopdev is not None: luks_format_root(args, loopdev, do_run_build_script, cached) luks_format_home(args, loopdev, do_run_build_script, cached) luks_format_srv(args, loopdev, do_run_build_script, cached) luks_format_var(args, loopdev, do_run_build_script, cached) luks_format_tmp(args, loopdev, do_run_build_script, cached) with luks_setup_all(args, loopdev, do_run_build_script) as encrypted: prepare_root(args, encrypted.root, cached) prepare_home(args, encrypted.home, cached) prepare_srv(args, encrypted.srv, cached) prepare_var(args, encrypted.var, cached) prepare_tmp(args, encrypted.tmp, cached) for dev in encrypted: refresh_file_system(args, dev, cached) # Mount everything together, but let's not mount the root # dir if we still have to generate the root image here prepare_tree_root(args, root) with mount_image(args, root, base_image, loopdev, encrypted.without_generated_root(args)): prepare_tree(args, root, do_run_build_script, cached) if do_run_build_script and args.include_dir and not cached: empty_directory(args.include_dir) # We do a recursive unmount of root so we don't need to explicitly unmount this mount # later. mount_bind(args.include_dir, root / "usr/include") cached_tree = reuse_cache_tree(args, root, do_run_build_script, for_cache, cached) install_skeleton_trees(args, root, cached_tree) install_distribution(args, root, do_run_build_script, cached_tree) install_etc_hostname(args, root, cached_tree) install_boot_loader(args, root, loopdev, do_run_build_script, cached_tree) run_prepare_script(args, root, do_run_build_script, cached_tree) install_build_src(args, root, do_run_build_script, for_cache) install_build_dest(args, root, do_run_build_script, for_cache) install_extra_trees(args, root, for_cache) set_root_password(args, root, do_run_build_script, cached_tree) set_serial_terminal(args, root, do_run_build_script, cached_tree) set_autologin(args, root, do_run_build_script, cached_tree) sshkey = setup_ssh(args, root, do_run_build_script, for_cache, cached_tree) setup_network_veth(args, root, do_run_build_script, cached_tree) run_postinst_script(args, root, loopdev, do_run_build_script, for_cache) if cleanup: remove_packages(args, root) if manifest: with complete_step("Recording packages in manifest…"): manifest.record_packages(root) if cleanup: clean_package_manager_metadata(args, root) remove_files(args, root) reset_machine_id(args, root, do_run_build_script, for_cache) reset_random_seed(args, root) run_finalize_script(args, root, do_run_build_script, for_cache) invoke_fstrim(args, root, do_run_build_script, for_cache) make_read_only(args, root, for_cache) generated_root = make_generated_root(args, root, for_cache) generated_root_part = insert_generated_root(args, raw, loopdev, generated_root, for_cache) split_root = ( (generated_root or extract_partition(args, encrypted.root, do_run_build_script, for_cache)) if args.split_artifacts else None ) if args.verity: root_for_verity = encrypted.root if root_for_verity is None and generated_root_part is not None: assert loopdev is not None root_for_verity = generated_root_part.blockdev(loopdev) else: root_for_verity = None verity, root_hash = make_verity(args, root_for_verity, do_run_build_script, for_cache) patch_root_uuid(args, loopdev, root_hash, for_cache) insert_verity(args, raw, loopdev, verity, root_hash, for_cache) split_verity = verity if args.split_artifacts else None verity_sig, root_hash_p7s, fingerprint = make_verity_sig(args, root_hash, do_run_build_script, for_cache) insert_verity_sig(args, raw, loopdev, verity_sig, root_hash, fingerprint, for_cache) split_verity_sig = verity_sig if args.split_artifacts else None # This time we mount read-only, as we already generated # the verity data, and hence really shouldn't modify the # image anymore. mount = lambda: mount_image(args, root, base_image, loopdev, encrypted.without_generated_root(args), root_read_only=True) install_unified_kernel(args, root, root_hash, do_run_build_script, for_cache, cached, mount) secure_boot_sign(args, root, do_run_build_script, for_cache, cached, mount) split_kernel = ( extract_unified_kernel(args, root, do_run_build_script, for_cache, mount) if args.split_artifacts else None ) archive = make_tar(args, root, do_run_build_script, for_cache) or \ make_cpio(args, root, do_run_build_script, for_cache) return BuildOutput( raw or generated_root, archive, root_hash, root_hash_p7s, sshkey, split_root, split_verity, split_verity_sig, split_kernel, ) def one_zero(b: bool) -> str: return "1" if b else "0" def install_dir(args: CommandLineArguments, root: Path) -> Path: return args.install_dir or workspace(root).joinpath("dest") def nspawn_knows_arg(arg: str) -> bool: return bytes("unrecognized option", "UTF-8") not in run(["systemd-nspawn", arg], stderr=PIPE, check=False).stderr def run_build_script(args: CommandLineArguments, root: Path, raw: Optional[BinaryIO]) -> None: if args.build_script is None: return with complete_step("Running build script…"): os.makedirs(install_dir(args, root), mode=0o755, exist_ok=True) target = f"--directory={root}" if raw is None else f"--image={raw.name}" with_network = 1 if args.with_network is True else 0 cmdline = [ "systemd-nspawn", "--quiet", target, f"--uuid={args.machine_id}", f"--machine=mkosi-{uuid.uuid4().hex}", "--as-pid2", "--register=no", f"--bind={install_dir(args, root)}:/root/dest", f"--bind={var_tmp(root)}:/var/tmp", f"--setenv=WITH_DOCS={one_zero(args.with_docs)}", f"--setenv=WITH_TESTS={one_zero(args.with_tests)}", f"--setenv=WITH_NETWORK={with_network}", "--setenv=DESTDIR=/root/dest", *nspawn_rlimit_params(), ] cmdline.extend(f"--setenv={env}" for env in args.environment) # TODO: Use --autopipe once systemd v247 is widely available. console_arg = f"--console={'interactive' if sys.stdout.isatty() else 'pipe'}" if nspawn_knows_arg(console_arg): cmdline += [console_arg] if args.default_path is not None: cmdline += [f"--setenv=MKOSI_DEFAULT={args.default_path}"] if args.image_version is not None: cmdline += [f"--setenv=IMAGE_VERSION={args.image_version}"] if args.image_id is not None: cmdline += [f"--setenv=IMAGE_ID={args.image_id}"] cmdline += nspawn_params_for_build_sources(args, args.source_file_transfer) if args.build_dir is not None: cmdline += ["--setenv=BUILDDIR=/root/build", f"--bind={args.build_dir}:/root/build"] if args.include_dir is not None: cmdline += [f"--bind={args.include_dir}:/usr/include"] if args.with_network is True: # If we're using the host network namespace, use the same resolver cmdline += ["--bind-ro=/etc/resolv.conf"] else: cmdline += ["--private-network"] if args.usr_only: cmdline += [f"--bind={root_home(args, root)}:/root"] cmdline += [f"/root/{args.build_script.name}"] cmdline += args.cmdline # build-script output goes to stdout so we can run language servers from within mkosi build-scripts. # See https://github.com/systemd/mkosi/pull/566 for more information. result = run(cmdline, stdout=sys.stdout, check=False) if result.returncode != 0: if "build-script" in ARG_DEBUG: run(cmdline[:-1], check=False) die(f"Build script returned non-zero exit code {result.returncode}.") def need_cache_images(args: CommandLineArguments) -> bool: if not args.incremental: return False if args.force_count > 1: return True assert args.cache_pre_dev assert args.cache_pre_inst return not args.cache_pre_dev.exists() or not args.cache_pre_inst.exists() def remove_artifacts( args: CommandLineArguments, root: Path, raw: Optional[BinaryIO], archive: Optional[BinaryIO], do_run_build_script: bool, for_cache: bool = False, ) -> None: if for_cache: what = "cache build" elif do_run_build_script: what = "development build" else: return if raw is not None: with complete_step(f"Removing disk image from {what}…"): del raw if archive is not None: with complete_step(f"Removing archive image from {what}…"): del archive with complete_step(f"Removing artifacts from {what}…"): unlink_try_hard(root) unlink_try_hard(var_tmp(root)) if args.usr_only: unlink_try_hard(root_home(args, root)) def build_stuff(args: CommandLineArguments) -> Manifest: make_output_dir(args) setup_package_cache(args) workspace = setup_workspace(args) image = BuildOutput.empty() manifest = Manifest(args) # Make sure tmpfiles' aging doesn't interfere with our workspace # while we are working on it. with open_close(workspace.name, os.O_RDONLY | os.O_DIRECTORY | os.O_CLOEXEC) as dir_fd, \ btrfs_forget_stale_devices(args): fcntl.flock(dir_fd, fcntl.LOCK_EX) root = Path(workspace.name, "root") # If caching is requested, then make sure we have cache images around we can make use of if need_cache_images(args): # There is no point generating a pre-dev cache image if no build script is provided if args.build_script: with complete_step("Running first (development) stage to generate cached copy…"): # Generate the cache version of the build image, and store it as "cache-pre-dev" image = build_image(args, root, do_run_build_script=True, for_cache=True) save_cache(args, root, image.raw_name(), args.cache_pre_dev) remove_artifacts(args, root, image.raw, image.archive, do_run_build_script=True) with complete_step("Running second (final) stage to generate cached copy…"): # Generate the cache version of the build image, and store it as "cache-pre-inst" image = build_image(args, root, do_run_build_script=False, for_cache=True) save_cache(args, root, image.raw_name(), args.cache_pre_inst) remove_artifacts(args, root, image.raw, image.archive, do_run_build_script=False) if args.build_script: with complete_step("Running first (development) stage…"): # Run the image builder for the first (development) stage in preparation for the build script image = build_image(args, root, do_run_build_script=True) run_build_script(args, root, image.raw) remove_artifacts(args, root, image.raw, image.archive, do_run_build_script=True) # Run the image builder for the second (final) stage if not args.skip_final_phase: with complete_step("Running second (final) stage…"): image = build_image(args, root, manifest=manifest, do_run_build_script=False, cleanup=True) else: MkosiPrinter.print_step("Skipping (second) final image build phase.") raw = qcow2_output(args, image.raw) raw = compress_output(args, raw) split_root = compress_output(args, image.split_root, ".usr" if args.usr_only else ".root") split_verity = compress_output(args, image.split_verity, ".verity") split_verity_sig = compress_output(args, image.split_verity_sig, ".verity-sig") split_kernel = compress_output(args, image.split_kernel, ".efi") root_hash_file = write_root_hash_file(args, image.root_hash) root_hash_p7s_file = write_root_hash_p7s_file(args, image.root_hash_p7s) settings = copy_nspawn_settings(args) checksum = calculate_sha256sum( args, raw, image.archive, root_hash_file, root_hash_p7s_file, split_root, split_verity, split_verity_sig, split_kernel, settings, ) signature = calculate_signature(args, checksum) bmap = calculate_bmap(args, raw) link_output(args, root, raw or image.archive) link_output_root_hash_file(args, root_hash_file) link_output_root_hash_p7s_file(args, root_hash_p7s_file) link_output_checksum(args, checksum) link_output_signature(args, signature) link_output_bmap(args, bmap) link_output_nspawn_settings(args, settings) if args.output_sshkey is not None: link_output_sshkey(args, image.sshkey) link_output_split_root(args, split_root) link_output_split_verity(args, split_verity) link_output_split_verity_sig(args, split_verity_sig) link_output_split_kernel(args, split_kernel) if image.root_hash is not None: MkosiPrinter.print_step(f"Root hash is {image.root_hash}.") return manifest def check_root() -> None: if os.getuid() != 0: die("Must be invoked as root.") def check_native(args: CommandLineArguments) -> None: if args.architecture is not None and args.architecture != platform.machine() and args.build_script: die("Cannot (currently) override the architecture and run build commands") @contextlib.contextmanager def suppress_stacktrace() -> Iterator[None]: try: yield except subprocess.CalledProcessError as e: # MkosiException is silenced in main() so it doesn't print a stacktrace. raise MkosiException(e) def virt_name(args: CommandLineArguments) -> str: name = args.hostname or args.image_id or args.output.with_suffix("").name.partition("_")[0] # Shorten to 13 characters so we can prefix with ve- or vt- for the network veth ifname which is limited # to 16 characters. return name[:13] def has_networkd_vm_vt() -> bool: return any( Path(path, "80-vm-vt.network").exists() for path in ("/usr/lib/systemd/network", "/lib/systemd/network", "/etc/systemd/network") ) def ensure_networkd(args: CommandLineArguments) -> bool: networkd_is_running = run(["systemctl", "is-active", "--quiet", "systemd-networkd"], check=False).returncode == 0 if not networkd_is_running: if args.verb != "ssh": # Some programs will use 'mkosi ssh' with pexpect, so don't print warnings that will break # them. warn("--network-veth requires systemd-networkd to be running to initialize the host interface " "of the veth link ('systemctl enable --now systemd-networkd')") return False if args.verb == "qemu" and not has_networkd_vm_vt(): warn(dedent(r"""\ mkosi didn't find 80-vm-vt.network. This is one of systemd's built-in systemd-networkd config files which configures vt-* interfaces. mkosi needs this file in order for --network-veth to work properly for QEMU virtual machines. The file likely cannot be found because the systemd version on the host is too old (< 246) and it isn't included yet. As a workaround until the file is shipped by the systemd package of your distro, add a network file /etc/systemd/network/80-vm-vt.network with the following contents: [Match] Name=vt-* Driver=tun [Network] # Default to using a /28 prefix, giving up to 13 addresses per VM. Address=0.0.0.0/28 LinkLocalAddressing=yes DHCPServer=yes IPMasquerade=yes LLDP=yes EmitLLDP=customer-bridge IPv6PrefixDelegation=yes """ )) return False return True def run_shell(args: CommandLineArguments) -> None: if args.output_format in (OutputFormat.directory, OutputFormat.subvolume): target = f"--directory={args.output}" else: target = f"--image={args.output}" cmdline = ["systemd-nspawn", target] if args.read_only: cmdline += ["--read-only"] # If we copied in a .nspawn file, make sure it's actually honoured if args.nspawn_settings is not None: cmdline += ["--settings=trusted"] if args.verb == "boot": cmdline += ["--boot"] else: cmdline += nspawn_rlimit_params() if is_generated_root(args) or args.verity: cmdline += ["--volatile=overlay"] if args.network_veth: if ensure_networkd(args): cmdline += ["--network-veth"] if args.ephemeral: cmdline += ["--ephemeral"] cmdline += ["--machine", virt_name(args)] if args.cmdline: # If the verb is 'shell', args.cmdline contains the command to run. # Otherwise, the verb is 'boot', and we assume args.cmdline contains nspawn arguments. if args.verb == "shell": cmdline += ["--"] cmdline += args.cmdline with suppress_stacktrace(): run(cmdline, stdout=sys.stdout, stderr=sys.stderr) def find_qemu_binary() -> str: ARCH_BINARIES = {"x86_64": "qemu-system-x86_64", "i386": "qemu-system-i386"} arch_binary = ARCH_BINARIES.get(platform.machine()) binaries: List[str] = [] if arch_binary is not None: binaries += [arch_binary] binaries += ["qemu", "qemu-kvm"] for binary in binaries: if shutil.which(binary) is not None: return binary die("Couldn't find QEMU/KVM binary") def find_qemu_firmware() -> Tuple[Path, bool]: FIRMWARE_LOCATIONS = [ # UEFI firmware blobs are found in a variety of locations, # depending on distribution and package. *{ "x86_64": ["/usr/share/ovmf/x64/OVMF_CODE.secboot.fd"], "i386": ["/usr/share/edk2/ovmf-ia32/OVMF_CODE.secboot.fd"], }.get(platform.machine(), []), "/usr/share/edk2/ovmf/OVMF_CODE.secboot.fd", "/usr/share/edk2-ovmf/OVMF_CODE.secboot.fd", # GENTOO: "/usr/share/qemu/OVMF_CODE.secboot.fd", "/usr/share/ovmf/OVMF.secboot.fd", ] for firmware in FIRMWARE_LOCATIONS: if os.path.exists(firmware): return Path(firmware), True warn("Couldn't find OVMF firmware blob with secure boot support, " "falling back to OVMF firmware blobs without secure boot support.") FIRMWARE_LOCATIONS = [ # First, we look in paths that contain the architecture – # if they exist, they’re almost certainly correct. *{ "x86_64": [ "/usr/share/ovmf/ovmf_code_x64.bin", "/usr/share/ovmf/x64/OVMF_CODE.fd", "/usr/share/qemu/ovmf-x86_64.bin", ], "i386": ["/usr/share/ovmf/ovmf_code_ia32.bin", "/usr/share/edk2/ovmf-ia32/OVMF_CODE.fd"], }.get(platform.machine(), []), # After that, we try some generic paths and hope that if they exist, # they’ll correspond to the current architecture, thanks to the package manager. "/usr/share/edk2/ovmf/OVMF_CODE.fd", "/usr/share/edk2-ovmf/OVMF_CODE.fd", # GENTOO: "/usr/share/qemu/OVMF_CODE.fd", "/usr/share/ovmf/OVMF.fd", ] for firmware in FIRMWARE_LOCATIONS: if os.path.exists(firmware): return Path(firmware), False die("Couldn't find OVMF UEFI firmware blob.") def find_ovmf_vars() -> Path: OVMF_VARS_LOCATIONS = [] if platform.machine() == "x86_64": OVMF_VARS_LOCATIONS += ["/usr/share/ovmf/x64/OVMF_VARS.fd"] elif platform.machine() == "i386": OVMF_VARS_LOCATIONS += ["/usr/share/edk2/ovmf-ia32/OVMF_VARS.fd"] OVMF_VARS_LOCATIONS += ["/usr/share/edk2/ovmf/OVMF_VARS.fd", "/usr/share/edk2-ovmf/OVMF_VARS.fd", # GENTOO: "/usr/share/qemu/OVMF_VARS.fd", "/usr/share/ovmf/OVMF_VARS.fd"] for location in OVMF_VARS_LOCATIONS: if os.path.exists(location): return Path(location) die("Couldn't find OVMF UEFI variables file.") def run_qemu(args: CommandLineArguments) -> None: has_kvm = os.path.exists("/dev/kvm") accel = "kvm" if has_kvm else "tcg" firmware, fw_supports_sb = find_qemu_firmware() cmdline = [ find_qemu_binary(), "-machine", f"type=q35,accel={accel},smm={'on' if fw_supports_sb else 'off'}", "-smp", args.qemu_smp, "-m", args.qemu_mem, "-object", "rng-random,filename=/dev/urandom,id=rng0", "-device", "virtio-rng-pci,rng=rng0,id=rng-device0", ] if has_kvm: cmdline += ["-cpu", "host"] if args.qemu_headless: # -nodefaults removes the default CDROM device which avoids an error message during boot # -serial mon:stdio adds back the serial device removed by -nodefaults. cmdline += ["-nographic", "-nodefaults", "-serial", "mon:stdio"] # Fix for https://github.com/systemd/mkosi/issues/559. QEMU gets stuck in a boot loop when using BIOS # if there's no vga device. if not args.qemu_headless or (args.qemu_headless and "bios" in args.boot_protocols): cmdline += ["-vga", "virtio"] if args.network_veth: if not ensure_networkd(args): # Fall back to usermode networking if the host doesn't have networkd (eg: Debian) fwd = f",hostfwd=tcp::{args.ssh_port}-:{args.ssh_port}" if args.ssh_port != 22 else "" cmdline += ["-nic", f"user,model=virtio-net-pci{fwd}"] else: # Use vt- prefix so we can take advantage of systemd-networkd's builtin network file for VMs. ifname = f"vt-{virt_name(args)}" # vt- is the ifname on the host and is automatically picked up by systemd-networkd which # starts a DHCP server on that interface. This gives IP connectivity to the VM. By default, QEMU # itself tries to bring up the vt network interface which conflicts with systemd-networkd which is # trying to do the same. By specifiying script=no and downscript=no, We tell QEMU to not touch vt # after it is created. cmdline += ["-nic", f"tap,script=no,downscript=no,ifname={ifname},model=virtio-net-pci"] if "uefi" in args.boot_protocols: cmdline += ["-drive", f"if=pflash,format=raw,readonly=on,file={firmware}"] with contextlib.ExitStack() as stack: if fw_supports_sb: ovmf_vars = stack.enter_context(copy_file_temporary(src=find_ovmf_vars(), dir=tmp_dir())) cmdline += [ "-global", "ICH9-LPC.disable_s3=1", "-global", "driver=cfi.pflash01,property=secure,value=on", "-drive", f"file={ovmf_vars.name},if=pflash,format=raw", ] if args.ephemeral: f = stack.enter_context(copy_image_temporary(src=args.output, dir=args.output.parent)) fname = Path(f.name) else: fname = args.output # Debian images fail to boot with virtio-scsi, see: https://github.com/systemd/mkosi/issues/725 if args.distribution == Distribution.debian: cmdline += [ "-drive", f"if=virtio,id=hd,file={fname},format={'qcow2' if args.qcow2 else 'raw'}", ] else: cmdline += [ "-drive", f"if=none,id=hd,file={fname},format={'qcow2' if args.qcow2 else 'raw'}", "-device", "virtio-scsi-pci,id=scsi", "-device", "scsi-hd,drive=hd,bootindex=1", ] cmdline += args.cmdline print_running_cmd(cmdline) with suppress_stacktrace(): run(cmdline, stdout=sys.stdout, stderr=sys.stderr) def interface_exists(dev: str) -> bool: return run(["ip", "link", "show", dev], stdout=DEVNULL, stderr=DEVNULL, check=False).returncode == 0 def find_address(args: CommandLineArguments) -> Tuple[str, str]: if not ensure_networkd(args) and args.ssh_port != 22: return "", "127.0.0.1" name = virt_name(args) timeout = float(args.ssh_timeout) while timeout >= 0: stime = time.time() try: if interface_exists(f"ve-{name}"): dev = f"ve-{name}" elif interface_exists(f"vt-{name}"): dev = f"vt-{name}" else: raise MkosiException("Container/VM interface not found") link = json.loads(run(["ip", "-j", "link", "show", "dev", dev], stdout=PIPE, text=True).stdout)[0] if link["operstate"] == "DOWN": raise MkosiException( f"{dev} is not enabled. Make sure systemd-networkd is running so it can manage the interface." ) # Trigger IPv6 neighbor discovery of which we can access the results via 'ip neighbor'. This allows us to # find out the link-local IPv6 address of the container/VM via which we can connect to it. run(["ping", "-c", "1", "-w", "15", f"ff02::1%{dev}"], stdout=DEVNULL) for _ in range(50): neighbors = json.loads( run(["ip", "-j", "neighbor", "show", "dev", dev], stdout=PIPE, text=True).stdout ) for neighbor in neighbors: dst = cast(str, neighbor["dst"]) if dst.startswith("fe80"): return f"%{dev}", dst time.sleep(0.4) except MkosiException as e: if time.time() - stime > timeout: die(str(e)) time.sleep(1) timeout -= time.time() - stime die("Container/VM address not found") def run_ssh(args: CommandLineArguments) -> None: cmd = [ "ssh", # Silence known hosts file errors/warnings. "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", "-o", "LogLevel=ERROR", ] if args.ssh_agent is None: ssh_key = args.ssh_key or args.output_sshkey assert ssh_key is not None if not ssh_key.exists(): die( f"SSH key not found at {ssh_key}. Are you running from the project's root directory " "and did you build with the --ssh option?" ) cmd += ["-i", cast(str, ssh_key)] else: cmd += ["-o", f"IdentityAgent={args.ssh_agent}"] if args.ssh_port != 22: cmd += ["-p", f"{args.ssh_port}"] dev, address = find_address(args) cmd += [f"root@{address}{dev}", *args.cmdline] with suppress_stacktrace(): run(cmd, stdout=sys.stdout, stderr=sys.stderr) def run_serve(args: CommandLineArguments) -> None: """Serve the output directory via a tiny embedded HTTP server""" port = 8081 image = args.output.parent if args.output_dir is not None: os.chdir(args.output_dir) with http.server.HTTPServer(("", port), http.server.SimpleHTTPRequestHandler) as httpd: print(f"Serving HTTP on port {port}: http://localhost:{port}/{image}") httpd.serve_forever() def generate_secure_boot_key(args: CommandLineArguments) -> NoReturn: """Generate secure boot keys using openssl""" args.secure_boot_key = args.secure_boot_key or Path("./mkosi.secure-boot.key") args.secure_boot_certificate = args.secure_boot_certificate or Path("./mkosi.secure-boot.crt") keylength = 2048 expiration_date = datetime.date.today() + datetime.timedelta(int(args.secure_boot_valid_days)) cn = expand_specifier(args.secure_boot_common_name) for f in (args.secure_boot_key, args.secure_boot_certificate): if f.exists() and not args.force: die( dedent( f"""\ {f} already exists. If you are sure you want to generate new secure boot keys remove {args.secure_boot_key} and {args.secure_boot_certificate} first. """ ) ) MkosiPrinter.print_step(f"Generating secure boot keys rsa:{keylength} for CN {cn!r}.") MkosiPrinter.info( dedent( f""" The keys will expire in {args.secure_boot_valid_days} days ({expiration_date:%A %d. %B %Y}). Remember to roll them over to new ones before then. """ ) ) cmd: List[str] = [ "openssl", "req", "-new", "-x509", "-newkey", f"rsa:{keylength}", "-keyout", os.fspath(args.secure_boot_key), "-out", os.fspath(args.secure_boot_certificate), "-days", str(args.secure_boot_valid_days), "-subj", f"/CN={cn}/", "-nodes", ] os.execvp(cmd[0], cmd) def bump_image_version(args: CommandLineArguments) -> None: """Write current image version plus one to mkosi.version""" if args.image_version is None or args.image_version == "": print("No version configured so far, starting with version 1.") new_version = "1" else: v = args.image_version.split(".") try: m = int(v[-1]) except ValueError: new_version = args.image_version + ".2" print( f"Last component of current version is not a decimal integer, appending '.2', bumping '{args.image_version}' → '{new_version}'." ) else: new_version = ".".join(v[:-1] + [str(m + 1)]) print(f"Increasing last component of version by one, bumping '{args.image_version}' → '{new_version}'.") open("mkosi.version", "w").write(new_version + "\n") def expand_paths(paths: List[str]) -> List[str]: if not paths: return [] environ = os.environ.copy() # Add a fake SUDO_HOME variable to allow non-root users specify # paths in their home when using mkosi via sudo. sudo_user = os.getenv("SUDO_USER") if sudo_user and "SUDO_HOME" not in environ: environ["SUDO_HOME"] = os.path.expanduser(f"~{sudo_user}") # No os.path.expandvars because it treats unset variables as empty. expanded = [] for path in paths: try: expanded += [string.Template(path).substitute(environ)] except KeyError: # Skip path if it uses a variable not defined. pass return expanded def prepend_to_environ_path(paths: List[Path]) -> None: if not paths: return news = [os.fspath(path) for path in paths] olds = os.getenv("PATH", "").split(":") os.environ["PATH"] = ":".join(news + olds) def expand_specifier(s: str) -> str: user = os.getenv("SUDO_USER") or os.getenv("USER") assert user is not None return s.replace("%u", user) def needs_build(args: Union[argparse.Namespace, CommandLineArguments]) -> bool: return args.verb == "build" or (not args.output.exists() and args.verb in MKOSI_COMMANDS_NEED_BUILD) def run_verb(raw: argparse.Namespace) -> None: args = load_args(raw) prepend_to_environ_path(args.extra_search_paths) if args.verb == "genkey": generate_secure_boot_key(args) if args.verb == "bump": bump_image_version(args) if args.verb in MKOSI_COMMANDS_SUDO: check_root() unlink_output(args) if args.verb == "build": check_output(args) if args.verb == "summary": print_summary(args) if needs_build(args): check_root() check_native(args) init_namespace(args) manifest = build_stuff(args) if args.auto_bump: bump_image_version(args) save_manifest(args, manifest) print_output_size(args) if args.verb in ("shell", "boot"): run_shell(args) if args.verb == "qemu": run_qemu(args) if args.verb == "ssh": run_ssh(args) if args.verb == "serve": run_serve(args) mkosi-12/mkosi/__main__.py000066400000000000000000000015021415136147600156420ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ # PYTHON_ARGCOMPLETE_OK import os import sys from . import complete_step, parse_args, run_verb from .backend import MkosiException, die def main() -> None: try: args = parse_args() for job_name, a in args.items(): # Change working directory if --directory is passed if a.directory: work_dir = a.directory if os.path.isdir(work_dir): os.chdir(work_dir) else: die(f"Error: {work_dir} is not a directory!") if len(args) > 1: with complete_step(f"Processing {job_name}"): run_verb(a) else: run_verb(a) except MkosiException: sys.exit(1) if __name__ == "__main__": main() mkosi-12/mkosi/backend.py000066400000000000000000000630001415136147600155120ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ from __future__ import annotations import argparse import contextlib import dataclasses import enum import math import os import resource import shlex import shutil import signal import subprocess import sys import time import uuid from pathlib import Path from types import FrameType from typing import ( IO, TYPE_CHECKING, Any, Callable, Dict, Iterator, List, NoReturn, Optional, Sequence, Set, Union, cast, ) PathString = Union[Path, str] def shell_join(cmd: Sequence[PathString]) -> str: return " ".join(shlex.quote(str(x)) for x in cmd) @contextlib.contextmanager def set_umask(mask: int) -> Iterator[int]: old = os.umask(mask) try: yield old finally: os.umask(old) def print_between_lines(s: str) -> None: size = os.get_terminal_size() print('-' * size.columns) print(s.rstrip('\n')) print('-' * size.columns) def roundup(x: int, step: int) -> int: return ((x + step - 1) // step) * step # These types are only generic during type checking and not at runtime, leading # to a TypeError during compilation. # Let's be as strict as we can with the description for the usage we have. if TYPE_CHECKING: CompletedProcess = subprocess.CompletedProcess[Any] Popen = subprocess.Popen[Any] else: CompletedProcess = subprocess.CompletedProcess Popen = subprocess.Popen class MkosiException(Exception): """Leads to sys.exit""" # This global should be initialized after parsing arguments ARG_DEBUG: Set[str] = set() class Parseable: "A mix-in to provide conversions for argparse" def __repr__(self) -> str: """Return the member name without the class name""" return cast(str, getattr(self, "name")) def __str__(self) -> str: return self.__repr__() @classmethod def from_string(cls: Any, name: str) -> Any: """A convenience method to be used with argparse""" try: return cls[name] except KeyError: raise argparse.ArgumentTypeError(f"unknown Format: {name!r}") @classmethod def parse_list(cls: Any, string: str) -> List[Any]: return [cls.from_string(p) for p in string.split(",") if p] class PackageType(enum.Enum): rpm = 1 deb = 2 pkg = 3 bundle = 4 ebuild = 5 class Distribution(enum.Enum): package_type: PackageType fedora = 0, PackageType.rpm debian = 1, PackageType.deb ubuntu = 2, PackageType.deb arch = 3, PackageType.pkg opensuse = 4, PackageType.rpm mageia = 5, PackageType.rpm centos = 6, PackageType.rpm centos_epel = 7, PackageType.rpm clear = 8, PackageType.bundle photon = 9, PackageType.rpm openmandriva = 10, PackageType.rpm rocky = 11, PackageType.rpm rocky_epel = 12, PackageType.rpm alma = 13, PackageType.rpm alma_epel = 14, PackageType.rpm gentoo = 15, PackageType.ebuild def __new__(cls, number: int, package_type: PackageType) -> Distribution: # This turns the list above into enum entries with .package_type attributes. # See https://docs.python.org/3.9/library/enum.html#when-to-use-new-vs-init # for an explanation. entry = object.__new__(cls) entry._value_ = number entry.package_type = package_type return cast("Distribution", entry) def __str__(self) -> str: return self.name class SourceFileTransfer(enum.Enum): copy_all = "copy-all" copy_git_cached = "copy-git-cached" copy_git_others = "copy-git-others" copy_git_more = "copy-git-more" mount = "mount" def __str__(self) -> str: return self.value @classmethod def doc(cls) -> Dict[SourceFileTransfer, str]: return { cls.copy_all: "normal file copy", cls.copy_git_cached: "use git-ls-files --cached, ignoring any file that git itself ignores", cls.copy_git_others: "use git-ls-files --others, ignoring any file that git itself ignores", cls.copy_git_more: "use git-ls-files --cached, ignoring any file that git itself ignores, but include the .git/ directory", cls.mount: "bind mount source files into the build image", } class OutputFormat(Parseable, enum.Enum): directory = enum.auto() subvolume = enum.auto() tar = enum.auto() cpio = enum.auto() gpt_ext4 = enum.auto() gpt_xfs = enum.auto() gpt_btrfs = enum.auto() gpt_squashfs = enum.auto() plain_squashfs = enum.auto() # Kept for backwards compatibility raw_ext4 = raw_gpt = gpt_ext4 raw_xfs = gpt_xfs raw_btrfs = gpt_btrfs raw_squashfs = gpt_squashfs def is_disk_rw(self) -> bool: "Output format is a disk image with a parition table and a writable filesystem" return self in (OutputFormat.gpt_ext4, OutputFormat.gpt_xfs, OutputFormat.gpt_btrfs) def is_disk(self) -> bool: "Output format is a disk image with a partition table" return self.is_disk_rw() or self == OutputFormat.gpt_squashfs def is_squashfs(self) -> bool: "The output format contains a squashfs partition" return self in {OutputFormat.gpt_squashfs, OutputFormat.plain_squashfs} def is_btrfs(self) -> bool: "The output format contains a btrfs partition" return self in {OutputFormat.gpt_btrfs, OutputFormat.subvolume} def can_minimize(self) -> bool: "The output format can be 'minimized'" return self in (OutputFormat.gpt_ext4, OutputFormat.gpt_btrfs) def needed_kernel_module(self) -> str: if self == OutputFormat.gpt_btrfs: return "btrfs" elif self in (OutputFormat.gpt_squashfs, OutputFormat.plain_squashfs): return "squashfs" elif self == OutputFormat.gpt_xfs: return "xfs" else: return "ext4" def has_fs_compression(self) -> bool: return self.is_squashfs() or self.is_btrfs() class ManifestFormat(Parseable, enum.Enum): json = "json" # the standard manifest in json format changelog = "changelog" # human-readable text file with package changelogs class PartitionIdentifier(enum.Enum): esp = 'esp' bios = 'bios' xbootldr = 'xbootldr' root = 'root' swap = 'swap' home = 'home' srv = 'srv' var = 'var' tmp = 'tmp' verity = 'verity' verity_sig = 'verity-sig' @dataclasses.dataclass class Partition: number: int n_sectors: int type_uuid: uuid.UUID part_uuid: Optional[uuid.UUID] read_only: Optional[bool] description: str def blockdev(self, loopdev: Path) -> Path: return Path(f"{loopdev}p{self.number}") def sfdisk_spec(self) -> str: desc = [f'size={self.n_sectors}', f'type={self.type_uuid}', f'attrs={"GUID:60" if self.read_only else ""}', f'name="{self.description}"', f'uuid={self.part_uuid}' if self.part_uuid is not None else None] return ', '.join(filter(None, desc)) @dataclasses.dataclass class PartitionTable: partitions: Dict[PartitionIdentifier, Partition] = dataclasses.field(default_factory=dict) last_partition_sector: Optional[int] = None sector_size: int = 512 first_lba: Optional[int] = None grain: int = 4096 def first_partition_offset(self, max_partitions: int = 128) -> int: if self.first_lba is not None: # No rounding here, we honour the specified value exactly. return self.first_lba * self.sector_size else: # The header is like the footer, but we have a one-sector "protective MBR" at offset 0 return roundup(self.sector_size + self.footer_size(), self.grain) def last_partition_offset(self, max_partitions: int = 128) -> int: if self.last_partition_sector: return roundup(self.last_partition_sector * self.sector_size, self.grain) else: return self.first_partition_offset(max_partitions) def footer_size(self, max_partitions: int = 128) -> int: # The footer must have enough space for the GPT header (one sector), # and the GPT parition entry area. PEA size of 16384 (128 partitions) # is recommended. pea_sectors = math.ceil(max_partitions * 128 / self.sector_size) return (1 + pea_sectors) * self.sector_size def disk_size(self) -> int: return roundup(self.last_partition_offset() + self.footer_size(), self.grain) def add(self, ident: PartitionIdentifier, size: int, type_uuid: uuid.UUID, description: str, part_uuid: Optional[uuid.UUID] = None, read_only: Optional[bool] = False) -> Partition: assert '"' not in description size = roundup(size, self.grain) n_sectors = size // self.sector_size part = Partition(len(self.partitions) + 1, n_sectors, type_uuid, part_uuid, read_only, description) self.partitions[ident] = part self.last_partition_sector = self.last_partition_offset() // self.sector_size + n_sectors return part def partition_path(self, ident: PartitionIdentifier, loopdev: Path) -> Optional[Path]: part = self.partitions.get(ident) if part is None: return None return part.blockdev(loopdev) def sfdisk_spec(self) -> str: table = ["label: gpt", f"grain: {self.grain}", f"first-lba: {self.first_partition_offset() // self.sector_size}", *(p.sfdisk_spec() for p in self.partitions.values())] return '\n'.join(table) def run_sfdisk(self, device: PathString, *, quiet: bool = False) -> None: spec = self.sfdisk_spec() device = Path(device) if 'disk' in ARG_DEBUG: print_between_lines(spec) cmd: List[PathString] = ["sfdisk", "--color=never", "--no-reread", "--no-tell-kernel", device] if quiet: cmd += ["--quiet"] run(cmd, input=spec.encode("utf-8")) if device.is_block_device(): run(["sync"]) run_with_backoff(["blockdev", "--rereadpt", device], attempts=10) @dataclasses.dataclass class CommandLineArguments: """Type-hinted storage for command line arguments.""" verb: str cmdline: List[str] distribution: Distribution release: str mirror: Optional[str] repositories: List[str] use_host_repositories: bool architecture: Optional[str] output_format: OutputFormat manifest_format: List[ManifestFormat] output: Path output_dir: Optional[Path] force_count: int bootable: bool boot_protocols: List[str] kernel_command_line: List[str] secure_boot: bool secure_boot_key: Path secure_boot_certificate: Path secure_boot_valid_days: str secure_boot_common_name: str read_only: bool encrypt: Optional[str] verity: Union[bool, str] compress: Union[None, str, bool] compress_fs: Union[None, str, bool] compress_output: Union[None, str, bool] mksquashfs_tool: List[PathString] qcow2: bool image_version: Optional[str] image_id: Optional[str] hostname: Optional[str] no_chown: bool tar_strip_selinux_context: bool incremental: bool minimize: bool with_unified_kernel_images: bool gpt_first_lba: Optional[int] hostonly_initrd: bool base_packages: Union[str, bool] packages: List[str] remove_packages: List[str] with_docs: bool with_tests: bool cache_path: Optional[Path] extra_trees: List[Path] skeleton_trees: List[Path] clean_package_metadata: Union[bool, str] remove_files: List[Path] environment: List[str] build_sources: Optional[Path] build_dir: Optional[Path] include_dir: Optional[Path] install_dir: Optional[Path] build_packages: List[str] skip_final_phase: bool build_script: Optional[Path] prepare_script: Optional[Path] postinst_script: Optional[Path] finalize_script: Optional[Path] source_file_transfer: SourceFileTransfer source_file_transfer_final: Optional[SourceFileTransfer] source_resolve_symlinks: bool source_resolve_symlinks_final: bool with_network: Union[bool, str] nspawn_settings: Optional[Path] base_image: Optional[Path] root_size: int esp_size: Optional[int] xbootldr_size: Optional[int] swap_size: Optional[int] home_size: Optional[int] srv_size: Optional[int] var_size: Optional[int] tmp_size: Optional[int] usr_only: bool split_artifacts: bool checksum: bool sign: bool key: Optional[str] bmap: bool password: Optional[str] password_is_hashed: bool autologin: bool extra_search_paths: List[Path] network_veth: bool ephemeral: bool ssh: bool ssh_key: Optional[Path] ssh_agent: Optional[Path] ssh_timeout: int ssh_port: int directory: Optional[Path] default_path: Optional[Path] all: bool all_directory: Optional[Path] debug: List[str] auto_bump: bool workspace_dir: Optional[Path] # QEMU-specific options qemu_headless: bool qemu_smp: str qemu_mem: str # Some extra stuff that's stored in CommandLineArguments for convenience but isn't populated by arguments verity_size: Optional[int] verity_sig_size: Optional[int] machine_id: str force: bool original_umask: int passphrase: Optional[Dict[str, str]] output_checksum: Optional[Path] = None output_nspawn_settings: Optional[Path] = None output_sshkey: Optional[Path] = None output_root_hash_file: Optional[Path] = None output_root_hash_p7s_file: Optional[Path] = None output_bmap: Optional[Path] = None output_split_root: Optional[Path] = None output_split_verity: Optional[Path] = None output_split_verity_sig: Optional[Path] = None output_split_kernel: Optional[Path] = None cache_pre_inst: Optional[Path] = None cache_pre_dev: Optional[Path] = None output_signature: Optional[Path] = None partition_table: Optional[PartitionTable] = None releasever: Optional[str] = None ran_sfdisk: bool = False def get_partition(self, ident: PartitionIdentifier) -> Optional[Partition]: "A shortcut to check that we have a partition table and extract the partition object" if self.partition_table is None: return None return self.partition_table.partitions.get(ident) def should_compress_fs(args: Union[argparse.Namespace, CommandLineArguments]) -> Union[bool, str]: """True for the default compression, a string, or False. When explicitly configured with --compress-fs=, just return whatever was specified. When --compress= was used, try to be smart, so that either this function or should_compress_output() returns True as appropriate. """ c = args.compress_fs if c is None and args.output_format.has_fs_compression(): c = args.compress return False if c is None else c def should_compress_output(args: Union[argparse.Namespace, CommandLineArguments]) -> Union[bool, str]: """A string or False. When explicitly configured with --compress-output=, use that. Since we have complete freedom with selecting the outer compression algorithm, pick some default when True. When --compress= was used, try to be smart, so that either this function or should_compress_fs() returns True as appropriate. """ c = args.compress_output if c is None and not args.output_format.has_fs_compression(): c = args.compress if c is True: return "xz" # default compression return False if c is None else c def workspace(root: Path) -> Path: return root.parent def var_tmp(root: Path) -> Path: p = workspace(root) / "var-tmp" p.mkdir(exist_ok=True) return p def nspawn_params_for_blockdev_access(args: CommandLineArguments, loopdev: Path) -> List[str]: assert args.partition_table is not None params = [ f"--bind-ro={loopdev}", f"--property=DeviceAllow={loopdev}", "--bind-ro=/dev/block", "--bind-ro=/dev/disk", ] for ident in (PartitionIdentifier.esp, PartitionIdentifier.bios, PartitionIdentifier.root, PartitionIdentifier.xbootldr): path = args.partition_table.partition_path(ident, loopdev) if path and path.exists(): params += [f"--bind-ro={path}", f"--property=DeviceAllow={path}"] params += [f"--setenv={env}" for env in args.environment] return params def format_rlimit(rlimit: int) -> str: limits = resource.getrlimit(rlimit) soft = "infinity" if limits[0] == resource.RLIM_INFINITY else str(limits[0]) hard = "infinity" if limits[1] == resource.RLIM_INFINITY else str(limits[1]) return f"{soft}:{hard}" def nspawn_rlimit_params() -> Sequence[str]: return [ f"--rlimit=RLIMIT_CORE={format_rlimit(resource.RLIMIT_CORE)}", ] def run_workspace_command( args: CommandLineArguments, root: Path, cmd: Sequence[PathString], network: bool = False, env: Optional[Dict[str, str]] = None, nspawn_params: Optional[List[str]] = None, capture_stdout: bool = False, ) -> Optional[str]: nspawn = [ "systemd-nspawn", "--quiet", f"--directory={root}", "--uuid=" + args.machine_id, "--machine=mkosi-" + uuid.uuid4().hex, "--as-pid2", "--register=no", f"--bind={var_tmp(root)}:/var/tmp", "--setenv=SYSTEMD_OFFLINE=1", *nspawn_rlimit_params(), ] stdout = None if network: # If we're using the host network namespace, use the same resolver nspawn += ["--bind-ro=/etc/resolv.conf"] else: nspawn += ["--private-network"] if env: nspawn += [f"--setenv={k}={v}" for k, v in env.items()] if nspawn_params: nspawn += nspawn_params if capture_stdout: stdout = subprocess.PIPE nspawn += ["--console=pipe"] result = run([*nspawn, "--", *cmd], check=False, stdout=stdout, text=capture_stdout) if result.returncode != 0: if "workspace-command" in ARG_DEBUG: run(nspawn, check=False) die(f"Workspace command {shell_join(cmd)} returned non-zero exit code {result.returncode}.") return result.stdout.strip() if capture_stdout else None @contextlib.contextmanager def do_delay_interrupt() -> Iterator[None]: # CTRL+C is sent to the entire process group. We delay its handling in mkosi itself so the subprocess can # exit cleanly before doing mkosi's cleanup. If we don't do this, we get device or resource is busy # errors when unmounting stuff later on during cleanup. We only delay a single CTRL+C interrupt so that a # user can always exit mkosi even if a subprocess hangs by pressing CTRL+C twice. interrupted = False def handler(signal: int, frame: Optional[FrameType]) -> None: nonlocal interrupted if interrupted: raise KeyboardInterrupt() else: interrupted = True s = signal.signal(signal.SIGINT, handler) try: yield finally: signal.signal(signal.SIGINT, s) if interrupted: die("Interrupted") @contextlib.contextmanager def do_noop() -> Iterator[None]: yield # Borrowed from https://github.com/python/typeshed/blob/3d14016085aed8bcf0cf67e9e5a70790ce1ad8ea/stdlib/3/subprocess.pyi#L24 _FILE = Union[None, int, IO[Any]] def spawn( cmdline: Sequence[PathString], delay_interrupt: bool = True, stdout: _FILE = None, stderr: _FILE = None, **kwargs: Any, ) -> Popen: if "run" in ARG_DEBUG: MkosiPrinter.info(f"+ {shell_join(cmdline)}") if not stdout and not stderr: # Unless explicit redirection is done, print all subprocess # output on stderr, since we do so as well for mkosi's own # output. stdout = sys.stderr cm = do_delay_interrupt if delay_interrupt else do_noop try: with cm(): return subprocess.Popen(cmdline, stdout=stdout, stderr=stderr, **kwargs) except FileNotFoundError: die(f"{cmdline[0]} not found in PATH.") def run( cmdline: Sequence[PathString], check: bool = True, delay_interrupt: bool = True, stdout: _FILE = None, stderr: _FILE = None, **kwargs: Any, ) -> CompletedProcess: cmdline = [str(x) for x in cmdline] if "run" in ARG_DEBUG: MkosiPrinter.info(f"+ {shell_join(cmdline)}") if not stdout and not stderr: # Unless explicit redirection is done, print all subprocess # output on stderr, since we do so as well for mkosi's own # output. stdout = sys.stderr cm = do_delay_interrupt if delay_interrupt else do_noop try: with cm(): return subprocess.run(cmdline, check=check, stdout=stdout, stderr=stderr, **kwargs) except FileNotFoundError: die(f"{cmdline[0]} not found in PATH.") def run_with_backoff( cmdline: Sequence[PathString], check: bool = True, delay_interrupt: bool = True, stdout: _FILE = None, stderr: _FILE = None, *, attempts: int, **kwargs: Any, ) -> CompletedProcess: delay = 0.0 for attempt in range(attempts): try: return run(cmdline, check, delay_interrupt, stdout, stderr, **kwargs) except subprocess.CalledProcessError: if attempt == attempts - 1: raise time.sleep(delay) delay = min(delay * 2 + 0.01, 1) assert False # make mypy happy def tmp_dir() -> Path: path = os.environ.get("TMPDIR") or "/var/tmp" return Path(path) def patch_file(filepath: Path, line_rewriter: Callable[[str], str]) -> None: temp_new_filepath = filepath.with_suffix(filepath.suffix + ".tmp.new") with filepath.open("r") as old, temp_new_filepath.open("w") as new: for line in old: new.write(line_rewriter(line)) shutil.copystat(filepath, temp_new_filepath) os.remove(filepath) shutil.move(str(temp_new_filepath), filepath) def path_relative_to_cwd(path: PathString) -> Path: "Return path as relative to $PWD if underneath, absolute path otherwise" path = Path(path) try: return path.relative_to(os.getcwd()) except ValueError: return path def write_grub_config(args: CommandLineArguments, root: Path) -> None: kernel_cmd_line = " ".join(args.kernel_command_line) grub_cmdline = f'GRUB_CMDLINE_LINUX="{kernel_cmd_line}"\n' os.makedirs(root / "etc/default", exist_ok=True, mode=0o755) grub_config = root / "etc/default/grub" if not os.path.exists(grub_config): grub_config.write_text(grub_cmdline) else: def jj(line: str) -> str: if line.startswith(("GRUB_CMDLINE_LINUX=", "#GRUB_CMDLINE_LINUX=")): # GENTOO: return grub_cmdline if args.qemu_headless: if "GRUB_TERMINAL" in line: return line.strip('#').split('=')[0] + '="console serial"' return line patch_file(grub_config, jj) if args.qemu_headless: with open(grub_config, "a") as f: f.write('GRUB_SERIAL_COMMAND="serial --unit=0 --speed 115200"\n') def install_grub(args: CommandLineArguments, root: Path, loopdev: Path, grub: str) -> None: assert args.partition_table is not None part = args.get_partition(PartitionIdentifier.bios) if not part: return write_grub_config(args, root) nspawn_params = nspawn_params_for_blockdev_access(args, loopdev) cmdline: Sequence[PathString] = [f"{grub}-install", "--modules=ext2 part_gpt", "--target=i386-pc", loopdev] run_workspace_command(args, root, cmdline, nspawn_params=nspawn_params) # TODO: Remove os.path.basename once https://github.com/systemd/systemd/pull/16645 is widely available. cmdline = [f"{grub}-mkconfig", f"--output=/boot/{os.path.basename(grub)}/grub.cfg"] run_workspace_command(args, root, cmdline, nspawn_params=nspawn_params) def die(message: str) -> NoReturn: MkosiPrinter.warn(f"Error: {message}") raise MkosiException(message) def warn(message: str) -> None: MkosiPrinter.warn(f"Warning: {message}") class MkosiPrinter: out_file = sys.stderr isatty = out_file.isatty() bold = "\033[0;1;39m" if isatty else "" red = "\033[31;1m" if isatty else "" reset = "\033[0m" if isatty else "" prefix = "‣ " level = 0 @classmethod def _print(cls, text: str) -> None: cls.out_file.write(text) @classmethod def print_step(cls, text: str) -> None: prefix = cls.prefix + " " * cls.level if sys.exc_info()[0]: # We are falling through exception handling blocks. # De-emphasize this step here, so the user can tell more # easily which step generated the exception. The exception # or error will only be printed after we finish cleanup. cls._print(f"{prefix}({text})\n") else: cls._print(f"{prefix}{cls.bold}{text}{cls.reset}\n") @classmethod def info(cls, text: str) -> None: cls._print(text + "\n") @classmethod def warn(cls, text: str) -> None: cls._print(f"{cls.prefix}{cls.red}{text}{cls.reset}\n") @classmethod @contextlib.contextmanager def complete_step(cls, text: str, text2: Optional[str] = None) -> Iterator[List[Any]]: cls.print_step(text) cls.level += 1 try: args: List[Any] = [] yield args finally: cls.level -= 1 assert cls.level >= 0 if text2 is not None: cls.print_step(text2.format(*args)) mkosi-12/mkosi/gentoo.py000066400000000000000000000513431415136147600154250ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import contextlib import fcntl import os import re import tarfile import urllib.parse import urllib.request from pathlib import Path from textwrap import dedent from typing import Dict, Generator, List, Sequence from . import copy_path, open_close, unlink_try_hard from .backend import ( ARG_DEBUG, CommandLineArguments, MkosiException, MkosiPrinter, OutputFormat, PartitionIdentifier, die, run_workspace_command, ) ARCHITECTURES = { "x86_64": ("amd64", "arch/x86/boot/bzImage"), # TODO: "aarch64": ("arm64", "arch/arm64/boot/Image.gz"), # TODO: "armv7l": ("arm", "arch/arm/boot/zImage"), } @contextlib.contextmanager def flock_path(path: Path) -> Generator[int, None, None]: with open_close(path, os.O_RDONLY | os.O_DIRECTORY | os.O_CLOEXEC) as fd: fcntl.flock(fd, fcntl.LOCK_EX) yield fd class Gentoo: arch_profile: Path baselayout_use: Path DEFAULT_NSPAWN_PARAMS: List[str] emerge_default_opts: List[str] arch: str emerge_vars: Dict[str, str] # sys-boot and sys-kernel mainly for boot pkgs_boot: List[str] # @system set (https://wiki.gentoo.org/wiki/System_set_(Portage)) pkgs_sys: List[str] # filesystem packages (dosfstools, btrfs, squashfs, etc) pkgs_fs: List[str] grub_platforms: List[str] UNINSTALL_IGNORE: List[str] root: Path portage_cfg_dir: Path profile_path: Path custom_profile_path: Path ebuild_sh_env_dir: Path dracut_atom = "sys-kernel/dracut" EMERGE_UPDATE_OPTS = [ "--update", "--tree", "--changed-use", "--newuse", "--deep", "--with-bdeps=y", "--complete-graph-if-new-use=y", ] UNINSTALL_IGNORE = ["/bin", "/sbin", "/lib", "/lib64"] portage_use_flags = [ "systemd", # 'systemd' is a dependancy "initramfs", "git", # 'git' for sync-type=git "symlink", # 'symlink' for kernel "sdl", "-filecaps", "-savedconfig", "-split-bin", "-split-sbin", "-split-usr", ] # TODO: portage_features.add("ccache"), this shall expedite the builds portage_features = [ # -user* are required for access to USER_CONFIG_PATH "-userfetch", "-userpriv", "-usersync", "-usersandbox", "-sandbox", "-pid-sandbox", # -pid-sandbox is required for cross-compile scenarios "-network-sandbox", "parallel-install", "buildpkg", "binpkg-multi-instance", "-binpkg-docompress", "getbinpkg", "-candy", ] @staticmethod def try_import_portage() -> Dict[str, str]: NEED_PORTAGE_MSG = "You need portage(5) for Gentoo" PORTAGE_INSTALL_INSTRUCTIONS = """\ # Following is known to work on most systemd-based systems: sudo tee /usr/lib/sysusers.d/acct-user-portage.conf > /dev/null <<- EOF # /usr/lib/sysusers.d/portage.conf u portage - "Portage system user" /var/lib/portage/home - EOF sudo systemd-sysusers --no-pager sudo install --owner=portage --group=portage --mode=0755 --directory /var/db/repos sudo install --owner=portage --group=portage --mode=0755 --directory /etc/portage/repos.conf sudo install --owner=portage --group=portage --mode=0755 --directory /var/cache/binpkgs sudo tee /etc/portage/repos.conf/eselect-repo.conf > /dev/null <<- EOF [gentoo] location = /var/db/repos/gentoo sync-type = git sync-uri = https://anongit.gentoo.org/git/repo/gentoo.git EOF git clone https://anongit.gentoo.org/git/proj/portage.git --depth=1 cd portage tee setup.cfg > /dev/null <<- EOF [build_ext] portage-ext-modules=true EOF python setup.py build_ext --inplace --portage-ext-modules sudo python setup.py install sudo ln -s --relative /var/db/repos/gentoo/profiles/default/linux/amd64/17.1/no-multilib/systemd /etc/portage/make.profile """ try: from portage.const import ( # type: ignore CUSTOM_PROFILE_PATH, EBUILD_SH_ENV_DIR, PROFILE_PATH, USER_CONFIG_PATH, ) except ImportError as e: MkosiPrinter.warn(NEED_PORTAGE_MSG) MkosiPrinter.info(PORTAGE_INSTALL_INSTRUCTIONS) raise MkosiException(e) return dict(profile_path=PROFILE_PATH, custom_profile_path=CUSTOM_PROFILE_PATH, ebuild_sh_env_dir=EBUILD_SH_ENV_DIR, portage_cfg_dir=USER_CONFIG_PATH) def __init__( self, args: CommandLineArguments, root: Path, do_run_build_script: bool, ) -> None: ret = self.try_import_portage() from portage.package.ebuild.config import config # type: ignore self.portage_cfg = config(config_root=str(root), target_root=str(root), sysroot=str(root), eprefix=None) PORTAGE_MISCONFIGURED_MSG = "You have portage(5) installed but it's probably missing defaults, bailing out" # we check for PORTDIR, but we could check for any other one if self.portage_cfg['PORTDIR'] is None: die(PORTAGE_MISCONFIGURED_MSG) self.profile_path = root / ret["profile_path"] self.custom_profile_path = root / ret["custom_profile_path"] self.ebuild_sh_env_dir = root / ret["ebuild_sh_env_dir"] self.portage_cfg_dir = root / ret["portage_cfg_dir"] self.portage_cfg_dir.mkdir(parents=True, exist_ok=True) self.DEFAULT_NSPAWN_PARAMS = [ "--capability=CAP_SYS_ADMIN,CAP_MKNOD", f"--bind={self.portage_cfg['PORTDIR']}", f"--bind={self.portage_cfg['DISTDIR']}", f"--bind={self.portage_cfg['PKGDIR']}", ] jobs = os.cpu_count() or 1 self.emerge_default_opts = [ "--buildpkg=y", "--usepkg=y", "--keep-going=y", f"--jobs={jobs}", f"--load-average={jobs-1}", "--nospinner", ] if "build-script" in ARG_DEBUG: self.emerge_default_opts += ["--verbose", "--quiet=n", "--quiet-fail=n"] else: self.emerge_default_opts += ["--quiet-build", "--quiet"] self.arch, _ = ARCHITECTURES[args.architecture or "x86_64"] ####################################################################### # GENTOO_UPSTREAM : we only support systemd profiles! and only the # no-multilib flavour of those, for now; # GENTOO_UPSTREAM : wait for fix upstream: # https://bugs.gentoo.org/792081 ####################################################################### # GENTOO_DONTMOVE : could be done inside set_profile, however # stage3_fetch() will be needing this if we want to allow users to pick # profile ####################################################################### self.arch_profile = Path(f"profiles/default/linux/{self.arch}/{args.release}/systemd") self.pkgs_sys = ["@world"] self.pkgs_fs = ["sys-fs/dosfstools"] if args.output_format in (OutputFormat.subvolume, OutputFormat.gpt_btrfs): self.pkgs_fs += ["sys-fs/btrfs-progs"] elif args.output_format == OutputFormat.gpt_xfs: self.pkgs_fs += ["sys-fs/xfsprogs"] elif args.output_format == OutputFormat.gpt_squashfs: self.pkgs_fs += ["sys-fs/squashfs-tools"] if args.encrypt: self.pkgs_fs += ["cryptsetup", "device-mapper"] self.grub_platforms = [] if not do_run_build_script and args.bootable: if args.get_partition(PartitionIdentifier.esp): self.pkgs_boot = ["sys-kernel/installkernel-systemd-boot"] elif args.get_partition(PartitionIdentifier.bios): self.pkgs_boot = ["sys-boot/grub"] self.grub_platforms = ["coreboot", "qemu", "pc"] self.pkgs_boot += ["sys-kernel/gentoo-kernel-bin", "sys-firmware/edk2-ovmf"] # GENTOO_DONTMOVE: self.grub_platforms, for instance, must be set self.emerge_vars = { "BOOTSTRAP_USE": " ".join(self.portage_use_flags), "FEATURES": " ".join(self.portage_features), "GRUB_PLATFORMS": " ".join(self.grub_platforms), "UNINSTALL_IGNORE": " ".join(self.UNINSTALL_IGNORE), "USE": " ".join(self.portage_use_flags), } self.sync_portage_tree(args, root) self.set_profile(args) self.set_default_repo() self.unmask_arch() self.provide_patches() self.set_useflags() self.mkosi_conf() self.baselayout(args, root) self.fetch_fix_stage3(args, root) self.update_stage3(args, root) self.depclean(args, root) def sync_portage_tree(self, args: CommandLineArguments, root: Path) -> None: self.invoke_emerge(args, root, inside_stage3=False, actions=["--sync"]) def fetch_fix_stage3(self, args: CommandLineArguments, root: Path) -> None: """usrmerge tracker bug: https://bugs.gentoo.org/690294""" # e.g.: # http://distfiles.gentoo.org/releases/amd64/autobuilds/latest-stage3.txt stage3tsf_path_url = urllib.parse.urljoin( self.portage_cfg["GENTOO_MIRRORS"].partition(" ")[0], f"releases/{self.arch}/autobuilds/latest-stage3.txt", ) ########################################################### # GENTOO_UPSTREAM: wait for fix upstream: # https://bugs.gentoo.org/690294 # and more... so we can gladly escape all this hideousness! ########################################################### with urllib.request.urlopen(stage3tsf_path_url) as r: args_profile = "nomultilib" # 20210711T170538Z/stage3-amd64-nomultilib-systemd-20210711T170538Z.tar.xz 214470580 regexp = f"^[0-9TZ]+/stage3-{self.arch}-{args_profile}-systemd-[0-9TZ]+[.]tar[.]xz" all_lines = r.readlines() for line in all_lines: m = re.match(regexp, line.decode("utf-8")) if m: stage3_tar = Path(m.group(0)) break else: die("profile names changed upstream?") stage3_url_path = urllib.parse.urljoin( self.portage_cfg["GENTOO_MIRRORS"], f"releases/{self.arch}/autobuilds/{stage3_tar}", ) stage3_tar_path = self.portage_cfg["DISTDIR"] / stage3_tar stage3_tmp_extract = stage3_tar_path.with_name( stage3_tar.name + ".tmp") if not stage3_tar_path.is_file(): MkosiPrinter.print_step(f"Fetching {stage3_url_path}") stage3_tar_path.parent.mkdir(parents=True, exist_ok=True) urllib.request.urlretrieve(stage3_url_path, stage3_tar_path) stage3_tmp_extract.mkdir(parents=True, exist_ok=True) with flock_path(stage3_tmp_extract): if not stage3_tmp_extract.joinpath(".cache_isclean").exists(): with tarfile.open(stage3_tar_path) as tfd: MkosiPrinter.print_step(f"Extracting {stage3_tar.name} to " f"{stage3_tmp_extract}") tfd.extractall(stage3_tmp_extract, numeric_owner=True) # REMOVEME : pathetic attempt have this merged :) # remove once upstream ships the current *baselayout-999* # version alternative would be to mount /sys as tmpfs when # invoking emerge inside stage3; we don't want that. self.invoke_emerge(args, stage3_tmp_extract, inside_stage3=True, opts=["--unmerge"], pkgs=["sys-apps/baselayout"]) unlink_try_hard(stage3_tmp_extract.joinpath("dev")) unlink_try_hard(stage3_tmp_extract.joinpath("proc")) unlink_try_hard(stage3_tmp_extract.joinpath("sys")) stage3_tmp_extract.joinpath("bin/awk").unlink() root.joinpath("usr/bin/awk").symlink_to("gawk") stage3_tmp_extract.joinpath(".cache_isclean").touch() MkosiPrinter.print_step(f"Copying {stage3_tmp_extract} to {root}") copy_path(stage3_tmp_extract.joinpath("usr"), root.joinpath("usr")) dirs = ["bin", "lib", "lib64"] for d in dirs: copy_path(stage3_tmp_extract.joinpath(d), root.joinpath(f"usr/{d}")) dirs = ["etc", "var/db", "var/lib", "var/cache"] for d in dirs: copy_path(stage3_tmp_extract.joinpath(d), root.joinpath(d)) copy_path(stage3_tmp_extract.joinpath("sbin"), root.joinpath("usr/bin")) def set_profile(self, args: CommandLineArguments) -> None: if not self.profile_path.is_symlink(): MkosiPrinter.print_step(f"{args.distribution} setting Profile") self.profile_path.symlink_to( self.portage_cfg["PORTDIR"] / self.arch_profile) def set_default_repo(self) -> None: eselect_repo_conf = self.portage_cfg_dir / "repos.conf" eselect_repo_conf.mkdir(exist_ok=True) eselect_repo_conf.joinpath("eselect-repo.conf").write_text( dedent( f"""\ [gentoo] location = {self.portage_cfg["PORTDIR"]} sync-uri = https://anongit.gentoo.org/git/repo/gentoo.git sync-type = git sync-dept = 1 """ ) ) def unmask_arch(self) -> None: package_accept_keywords = self.portage_cfg_dir / "package.accept_keywords" package_accept_keywords.mkdir(exist_ok=True) package_accept_keywords.joinpath("mkosi").write_text( dedent( # USE=homed is still in ~ARCH, # ~ARCH (for a given ARCH) is the unstable version of the # package, `Beta` if you like. more here: # https://wiki.gentoo.org/wiki//etc/portage/package.accept_keywords f"""\ sys-auth/pambase ~{self.arch} # sys-kernel/gentoo-kernel-bin ~{self.arch} # virtual/dist-kernel ~{self.arch} """ ) ) # -999 means install from git package_accept_keywords.joinpath("baselayout").write_text( dedent(""" # REMOVE: once upstream has moved this to stable # releases of baselayout # https://gitweb.gentoo.org/proj/baselayout.git/commit/?id=57c250e24c70f8f9581860654cdec0d049345292 =sys-apps/baselayout-9999 ** """) ) package_accept_keywords.joinpath("bug765208").write_text( f"<{self.dracut_atom}-56 ~{self.arch}\n") def provide_patches(self) -> None: patches_dir = self.portage_cfg_dir / "patches" patches_dir.mkdir(exist_ok=True) def set_useflags(self) -> None: self.custom_profile_path.mkdir(exist_ok=True) self.custom_profile_path.joinpath("use.force").write_text( dedent( """\ -split-bin -split-sbin -split-usr """) ) package_use = self.portage_cfg_dir / "package.use" package_use.mkdir(exist_ok=True) self.baselayout_use = package_use.joinpath("baselayout") self.baselayout_use.write_text("sys-apps/baselayout build\n") package_use.joinpath("grub").write_text("sys-boot/grub device-mapper truetype\n") package_use.joinpath("systemd").write_text( # repart for usronly dedent( """\ # sys-apps/systemd http # sys-apps/systemd cgroup-hybrid # MKOSI: Failed to open "/usr/lib/systemd/boot/efi": No such file or directory sys-apps/systemd gnuefi # sys-apps/systemd -pkcs11 # sys-apps/systemd importd lzma sys-apps/systemd homed cryptsetup -pkcs11 # MKOSI: usronly sys-apps/systemd repart # sys-apps/systemd -cgroup-hybrid # sys-apps/systemd vanilla # sys-apps/systemd policykit # MKOSI: make sure we're init (no openrc) sys-apps/systemd sysv-utils """ ) ) def mkosi_conf(self) -> None: package_env = self.portage_cfg_dir / "package.env" package_env.mkdir(exist_ok=True) self.ebuild_sh_env_dir.mkdir(exist_ok=True) # apply whatever we put in mkosi_conf to runs invokation of emerge package_env.joinpath("mkosi.conf").write_text("*/* mkosi.conf\n") # we use this so we don't need to touch upstream files. # we also use this for documenting build environment. emerge_vars_str = "" emerge_vars_str += "\n".join(f'{k}="${{{k}}} {v}"' for k, v in self.emerge_vars.items()) self.ebuild_sh_env_dir.joinpath("mkosi.conf").write_text( dedent( f"""\ # MKOSI: these were used during image creation... # and some more! see under package.*/ # # usrmerge (see all under profile/) {emerge_vars_str} """ ) ) def invoke_emerge( self, args: CommandLineArguments, root: Path, inside_stage3: bool = True, pkgs: Sequence[str] = (), actions: Sequence[str] = (), opts: Sequence[str] = (), ) -> None: if not inside_stage3: from _emerge.main import emerge_main # type: ignore PREFIX_OPTS: List[str] = [] if "--sync" not in actions: PREFIX_OPTS = [ f"--config-root={root.resolve()}", f"--root={root.resolve()}", f"--sysroot={root.resolve()}", ] MkosiPrinter.print_step(f"Invoking emerge(1) pkgs={pkgs} " f"actions={actions} outside stage3") emerge_main([*pkgs, *opts, *actions] + PREFIX_OPTS + self.emerge_default_opts) else: cmd = ["/usr/bin/emerge", *pkgs, *self.emerge_default_opts, *opts, *actions] MkosiPrinter.print_step("Invoking emerge(1) inside stage3") run_workspace_command( args, root, cmd, network=True, env=self.emerge_vars, nspawn_params=self.DEFAULT_NSPAWN_PARAMS, ) def baselayout(self, args: CommandLineArguments, root: Path) -> None: # TOTHINK: sticky bizness when when image profile != host profile # REMOVE: once upstream has moved this to stable releases of baselaouy # https://gitweb.gentoo.org/proj/baselayout.git/commit/?id=57c250e24c70f8f9581860654cdec0d049345292 self.invoke_emerge(args, root, inside_stage3=False, opts=["--nodeps"], pkgs=["=sys-apps/baselayout-9999"]) def update_stage3(self, args: CommandLineArguments, root: Path) -> None: # exclude baselayout, it expects /sys/.keep but nspawn mounts host's # /sys for us without the .keep file. opts = self.EMERGE_UPDATE_OPTS + ["--exclude", "sys-apps/baselayout"] self.invoke_emerge(args, root, pkgs=self.pkgs_sys, opts=opts) # FIXME?: without this we get the following # Synchronizing state of sshd.service with SysV service script with /lib/systemd/systemd-sysv-install. # Executing: /lib/systemd/systemd-sysv-install --root=/var/tmp/mkosi-2b6snh_u/root enable sshd # chroot: failed to run command ‘/usr/sbin/update-rc.d’: No such file or directory root.joinpath("etc/init.d/sshd").unlink() # "build" USE flag can go now, next time users do an update they will # safely merge baselayout without that flag and it should be fine at # that point. self.baselayout_use.unlink() def depclean(self, args: CommandLineArguments, root: Path) -> None: self.invoke_emerge(args, root, actions=["--depclean"]) def _dbg(self, args: CommandLineArguments, root: Path) -> None: """this is for dropping into shell to see what's wrong""" cmdline = ["/bin/sh"] run_workspace_command( args, root, cmdline, network=True, nspawn_params=self.DEFAULT_NSPAWN_PARAMS, ) mkosi-12/mkosi/manifest.py000066400000000000000000000211061415136147600157320ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import dataclasses import json from datetime import datetime from pathlib import Path from subprocess import DEVNULL, PIPE from textwrap import dedent from typing import IO, Any, Dict, List, Optional, cast from .backend import ( CommandLineArguments, Distribution, ManifestFormat, PackageType, run, run_workspace_command, ) @dataclasses.dataclass class PackageManifest: """A description of a package The fields used here must match https://systemd.io/COREDUMP_PACKAGE_METADATA/#well-known-keys. """ type: str name: str version: str architecture: str size: int def as_dict(self) -> Dict[str, str]: return { "type": self.type, "name": self.name, "version": self.version, "architecture": self.architecture, } @dataclasses.dataclass class SourcePackageManifest: name: str changelog: Optional[str] packages: List[PackageManifest] = dataclasses.field(default_factory=list) def add(self, package: PackageManifest) -> None: self.packages.append(package) def report(self) -> str: size = sum(p.size for p in self.packages) t = dedent( f"""\ SourcePackage: {self.name} Packages: {" ".join(p.name for p in self.packages)} Size: {size} """ ) if self.changelog: t += f"""\nChangelog:\n{self.changelog}\n""" return t @dataclasses.dataclass class Manifest: args: CommandLineArguments packages: List[PackageManifest] = dataclasses.field(default_factory=list) source_packages: Dict[str, SourcePackageManifest] = dataclasses.field(default_factory=dict) _init_timestamp: datetime = dataclasses.field(init=False, default_factory=datetime.now) def need_source_info(self) -> bool: return ManifestFormat.changelog in self.args.manifest_format def record_packages(self, root: Path) -> None: if cast(Any, self.args.distribution).package_type == PackageType.rpm: self.record_rpm_packages(root) if cast(Any, self.args.distribution).package_type == PackageType.deb: self.record_deb_packages(root) # TODO: add implementations for other package managers def record_rpm_packages(self, root: Path) -> None: c = run( ["rpm", f"--root={root}", "-qa", "--qf", r"%{NEVRA}\t%{SOURCERPM}\t%{NAME}\t%{ARCH}\t%{SIZE}\t%{INSTALLTIME}\n"], stdout=PIPE, stderr=DEVNULL, text=True, ) packages = sorted(c.stdout.splitlines()) for package in packages: nevra, srpm, name, arch, size, installtime = package.split("\t") assert nevra.startswith(f"{name}-") evra = nevra[len(name) + 1 :] # Some packages have architecture '(none)', and it's not part of NEVRA, e.g.: # gpg-pubkey-45719a39-5f2c0192 gpg-pubkey (none) 0 1635985199 if arch != "(none)": assert nevra.endswith(f".{arch}") evr = evra[: len(arch) + 1] else: evr = evra arch = "" size = int(size) installtime = datetime.fromtimestamp(int(installtime)) # If we are creating a layer based on a BaseImage=, e.g. a sysext, filter by # packages that were installed in this execution of mkosi. We assume that the # upper layer is put together in one go, which currently is always true. if self.args.base_image and installtime < self._init_timestamp: continue package = PackageManifest("rpm", name, evr, arch, size) self.packages.append(package) if not self.need_source_info(): continue source = self.source_packages.get(srpm) if source is None: c = run(["rpm", f"--root={root}", "-q", "--changelog", nevra], stdout=PIPE, stderr=DEVNULL, text=True) changelog = c.stdout.strip() source = SourcePackageManifest(srpm, changelog) self.source_packages[srpm] = source source.add(package) def record_deb_packages(self, root: Path) -> None: c = run( ["dpkg-query", f"--admindir={root}/var/lib/dpkg", "--show", "--showformat", r'${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n'], stdout=PIPE, stderr=DEVNULL, text=True, ) packages = sorted(c.stdout.splitlines()) for package in packages: name, source, version, arch, size, installtime = package.split("\t") # dpkg records the size in KBs size = int(size) * 1024 installtime = datetime.fromtimestamp(int(installtime)) # If we are creating a layer based on a BaseImage=, e.g. a sysext, filter by # packages that were installed in this execution of mkosi. We assume that the # upper layer is put together in one go, which currently is always true. if self.args.base_image and installtime < self._init_timestamp: continue package = PackageManifest("deb", name, version, arch, size) self.packages.append(package) if not self.need_source_info(): continue source_package = self.source_packages.get(source) if source_package is None: # Yes, --quiet is specified twice, to avoid output about download stats. # Note that the argument of the 'changelog' verb is the binary package name, # not the source package name. cmd = ["apt-get", "--quiet", "--quiet", "changelog", name] # If we are building with docs then it's easy, as the changelogs are saved # in the image, just fetch them. Otherwise they will be downloaded from the network. if self.args.with_docs: # By default apt drops privileges and runs as the 'apt' user, but that means it # loses access to the build directory, which is 700. cmd += ["--option", "Acquire::Changelogs::AlwaysOnline=false", "--option", "Debug::NoDropPrivs=true"] else: # Override the URL to avoid HTTPS, so that we don't need to install # ca-certificates to make it work. if self.args.distribution == Distribution.ubuntu: cmd += ["--option", "Acquire::Changelogs::URI::Override::Origin::Ubuntu=http://changelogs.ubuntu.com/changelogs/pool/@CHANGEPATH@/changelog"] else: cmd += ["--option", "Acquire::Changelogs::URI::Override::Origin::Debian=http://metadata.ftp-master.debian.org/changelogs/@CHANGEPATH@_changelog"] # We have to run from the root, because if we use the RootDir option to make # apt from the host look at the repositories in the image, it will also pick # the 'methods' executables from there, but the ABI might not be compatible. changelog = run_workspace_command(self.args, root, cmd, network=not self.args.with_docs, capture_stdout=True) source_package = SourcePackageManifest(source, changelog) self.source_packages[source] = source_package source_package.add(package) def has_data(self) -> bool: # We might add more data in the future return len(self.packages) > 0 def as_dict(self) -> Dict[str, Any]: return { "packages": [package.as_dict() for package in self.packages], } def write_json(self, out: IO[str]) -> None: json.dump(self.as_dict(), out, indent=2) def write_package_report(self, out: IO[str]) -> None: """Create a human-readable report about packages This is modelled after "Fedora compose reports" that are sent to fedora-devel. The format describes added and removed packages, and includes the changelogs. A diff between two such reports shows what changed *in* the packages quite nicely. """ print(f"Packages: {len(self.packages)}", file=out) print(f"Size: {sum(p.size for p in self.packages)}", file=out) for package in self.source_packages.values(): print(f"\n{80*'-'}\n", file=out) out.write(package.report()) mkosi-12/mkosi/resources/000077500000000000000000000000001415136147600155645ustar00rootroot00000000000000mkosi-12/mkosi/resources/__init__.py000066400000000000000000000000001415136147600176630ustar00rootroot00000000000000mkosi-12/mkosi/resources/arch/000077500000000000000000000000001415136147600165015ustar00rootroot00000000000000mkosi-12/mkosi/resources/arch/60_kernel_remove.hook000066400000000000000000000003671415136147600225330ustar00rootroot00000000000000[Trigger] Operation = Upgrade Operation = Remove Type = Path Target = usr/lib/modules/*/vmlinuz [Action] Description = Removing kernel and initramfs images from /boot... When = PreTransaction Exec = /etc/pacman.d/mkosi-kernel-remove NeedsTargets mkosi-12/mkosi/resources/arch/60_vmlinuz_remove.hook000066400000000000000000000004361415136147600227540ustar00rootroot00000000000000[Trigger] Operation = Upgrade Operation = Remove Type = Path Target = usr/lib/modules/*/vmlinuz [Action] Description = Removing vmlinuz from /boot... When = PreTransaction Exec = /bin/bash -c 'while read -r f; do rm -f "/boot/vmlinuz-$(basename "$(dirname "$f")")"; done' NeedsTargets mkosi-12/mkosi/resources/arch/90_kernel_add.hook000066400000000000000000000006131415136147600217630ustar00rootroot00000000000000[Trigger] Operation = Install Operation = Upgrade Type = Path Target = usr/lib/modules/*/vmlinuz Target = usr/lib/kernel/install.d/* Target = boot/*-ucode.img [Trigger] Operation = Install Operation = Upgrade Type = Package Target = systemd [Action] Description = Adding kernel and initramfs images to /boot... When = PostTransaction Exec = /etc/pacman.d/scripts/mkosi-kernel-add NeedsTargets mkosi-12/mkosi/resources/arch/90_vmlinuz_add.hook000066400000000000000000000004521415136147600222100ustar00rootroot00000000000000[Trigger] Operation = Install Operation = Upgrade Type = Path Target = usr/lib/modules/*/vmlinuz [Action] Description = Adding vmlinuz to /boot... When = PostTransaction Exec = /bin/bash -c 'while read -r f; do install -Dm644 "$f" "/boot/vmlinuz-$(basename "$(dirname "$f")")"; done' NeedsTargets mkosi-12/mkosi/resources/arch/91_bootctl_update.hook000066400000000000000000000002451415136147600227050ustar00rootroot00000000000000[Trigger] Operation = Upgrade Type = Package Target = systemd [Action] Description = Updating systemd-boot... When = PostTransaction Exec = /usr/bin/bootctl update mkosi-12/mkosi/resources/arch/__init__.py000066400000000000000000000000001415136147600206000ustar00rootroot00000000000000mkosi-12/mkosi/resources/arch/kernel_add.sh000077500000000000000000000012021415136147600211230ustar00rootroot00000000000000#!/bin/bash -e shopt -s nullglob declare -a kernel_version # Check the targets passed by the pacman hook. while read -r line do if [[ "$line" =~ usr/lib/modules/([^/]+)/vmlinuz ]] then kernel_version+=( "${BASH_REMATCH[1]}" ) else # If a non-matching line is passed, just rebuild all kernels. kernel_version=() for f in /usr/lib/modules/*/vmlinuz do kernel_version+=( "$(basename "$(dirname "$f")")" ) done break fi done # (re)build the kernel images. for kv in "${kernel_version[@]}" do kernel-install add "$kv" "/usr/lib/modules/${kv}/vmlinuz" done mkosi-12/mkosi/resources/arch/kernel_remove.sh000077500000000000000000000001431415136147600216730ustar00rootroot00000000000000#!/bin/bash -e while read -r f; do kernel-install remove "$(basename "$(dirname "$f")")" done mkosi-12/mkosi/resources/console_getty_autologin.conf000066400000000000000000000002031415136147600233650ustar00rootroot00000000000000[Service] ExecStart= ExecStart=-/sbin/agetty -o '-p -- \\u' --noclear --autologin root --keep-baud console 115200,38400,9600 $TERM mkosi-12/mkosi/resources/dracut_unified_kernel_install.sh000077500000000000000000000043321415136147600242000ustar00rootroot00000000000000#!/bin/bash -e COMMAND="$1" KERNEL_VERSION="$2" BOOT_DIR_ABS="$3" KERNEL_IMAGE="$4" # If KERNEL_INSTALL_MACHINE_ID is defined but empty, BOOT_DIR_ABS is a fake directory so let's skip creating # the unified kernel image. if [[ -z "${KERNEL_INSTALL_MACHINE_ID-unset}" ]]; then exit 0 fi # Strip machine ID and kernel version to get the boot directory. PREFIX=$(dirname "$(dirname "$BOOT_DIR_ABS")") # Pick a default prefix name for the unified kernel binary if [[ -n "$IMAGE_ID" ]] ; then if [[ -n "$IMAGE_VERSION" ]]; then PARTLABEL="${IMAGE_ID}_${IMAGE_VERSION}" else PARTLABEL="${IMAGE_ID}" fi else IMAGE_ID=linux fi if [[ -n "$IMAGE_VERSION" ]] ; then BOOT_BINARY="${PREFIX}/EFI/Linux/${IMAGE_ID}_${IMAGE_VERSION}.efi" elif [[ -n "$ROOTHASH" ]] ; then BOOT_BINARY="${PREFIX}/EFI/Linux/${IMAGE_ID}-${KERNEL_VERSION}-${ROOTHASH}.efi" elif [[ -n "$USRHASH" ]] ; then BOOT_BINARY="${PREFIX}/EFI/Linux/${IMAGE_ID}-${KERNEL_VERSION}-${USRHASH}.efi" else BOOT_BINARY="${PREFIX}/EFI/Linux/${IMAGE_ID}-${KERNEL_VERSION}.efi" fi case "$COMMAND" in add) if [[ -f /etc/kernel/cmdline ]]; then read -r -d '' BOOT_OPTIONS < /etc/kernel/cmdline || true elif [[ -f /usr/lib/kernel/cmdline ]]; then read -r -d '' BOOT_OPTIONS < /usr/lib/kernel/cmdline || true else read -r -d '' BOOT_OPTIONS < /proc/cmdline || true fi if [[ -n "$ROOTHASH" ]]; then BOOT_OPTIONS="${BOOT_OPTIONS} roothash=${ROOTHASH}" elif [[ -n "$USRHASH" ]]; then BOOT_OPTIONS="${BOOT_OPTIONS} usrhash=${USRHASH}" elif [[ -n "$PARTLABEL" ]]; then BOOT_OPTIONS="${BOOT_OPTIONS} root=PARTLABEL=${PARTLABEL}" fi if [[ -n "$KERNEL_IMAGE" ]]; then DRACUT_KERNEL_IMAGE_OPTION="--kernel-image ${KERNEL_IMAGE}" else DRACUT_KERNEL_IMAGE_OPTION="" fi # shellcheck disable=SC2086 dracut \ --uefi \ --kver "$KERNEL_VERSION" \ $DRACUT_KERNEL_IMAGE_OPTION \ --kernel-cmdline "$BOOT_OPTIONS" \ --force \ "$BOOT_BINARY" ;; remove) rm -f -- "$BOOT_BINARY" ;; esac mkosi-12/mkosi/resources/getty_autologin.conf000066400000000000000000000002041415136147600216440ustar00rootroot00000000000000[Service] ExecStart= ExecStart=-/sbin/agetty -o '-p -- \\u' --autologin root --noclear - $TERM StandardInput=tty StandardOutput=tty mkosi-12/mkosi/resources/serial_getty_autologin.conf000066400000000000000000000002361415136147600232100ustar00rootroot00000000000000[Service] ExecStart= ExecStart=-/sbin/agetty -o '-p -- \\u' --autologin root --keep-baud 115200,57600,38400,9600 - $TERM StandardInput=tty StandardOutput=tty mkosi-12/pyproject.toml000066400000000000000000000002471415136147600153470ustar00rootroot00000000000000[tool.black] line-length = 119 target-version = ['py37'] [tool.isort] profile = "black" multi_line_output = 3 py_version = "37" [tool.pyright] pythonVersion = "3.7" mkosi-12/setup.cfg000066400000000000000000000013411415136147600142500ustar00rootroot00000000000000[flake8] max-line-length = 119 [isort] multi_line_output = 3 include_trailing_comma = True [mypy] python_version = 3.7 # belonging to --strict warn_unused_configs = True disallow_any_generics = True disallow_subclassing_any = True disallow_untyped_calls = True disallow_untyped_defs = True disallow_untyped_decorators = True disallow_incomplete_defs = True check_untyped_defs = True no_implicit_optional = True warn_redundant_casts = True warn_unused_ignores = False warn_return_any = True no_implicit_reexport = True # extra options not in --strict pretty = True show_error_codes = True show_column_numbers = True warn_unreachable = True allow_redefinition = True strict_equality = True [mypy-argcomplete] ignore_missing_imports = True mkosi-12/setup.py000077500000000000000000000016651415136147600141550ustar00rootroot00000000000000#!/usr/bin/python3 # SPDX-License-Identifier: LGPL-2.1+ from setuptools import setup, Command, find_packages class BuildManpage(Command): description = ('builds the manpage') user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'man/mkosi.1', 'mkosi.md']) setup( name="mkosi", version="12", description="Build Bespoke OS Images", url="https://github.com/systemd/mkosi", maintainer="mkosi contributors", maintainer_email="systemd-devel@lists.freedesktop.org", license="LGPLv2+", python_requires=">=3.7", packages = find_packages(".", exclude=["tests"]), package_data = {"": ["*.sh", "*.hook", "*.conf"]}, include_package_data = True, scripts = ["bin/mkosi"], cmdclass = { "man": BuildManpage }, data_files = [('share/man/man1', ["man/mkosi.1"])], ) mkosi-12/tests/000077500000000000000000000000001415136147600135725ustar00rootroot00000000000000mkosi-12/tests/.gitignore000066400000000000000000000000071415136147600155570ustar00rootroot00000000000000/*.pyc mkosi-12/tests/__init__.py000066400000000000000000000000001415136147600156710ustar00rootroot00000000000000mkosi-12/tests/conftest.py000066400000000000000000000045121415136147600157730ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ from tests.test_config_parser import MkosiConfig class DictDiffer: def __init__(self, expected_dict, current_dict): self.current_dict = current_dict self.expected_dict = expected_dict self.set_current, self.set_past = set(current_dict.keys()), set(expected_dict.keys()) self.intersect = self.set_current.intersection(self.set_past) @property def unexpected(self): return [f"{k}={self.current_dict[k]}" for k in self.set_current - self.intersect] @property def missing(self): return [str(k) for k in self.set_past - self.intersect] @property def invalid(self): inva = {o for o in self.intersect if self.expected_dict[o] != self.current_dict[o]} return [f"{k}={self.current_dict[k]} (exp: {self.expected_dict[k]})" for k in inva] @property def valid(self): return {o for o in self.intersect if self.expected_dict[o] == self.current_dict[o]} def pytest_assertrepr_compare(op, left, right): if not isinstance(left, MkosiConfig): return if not isinstance(right, dict): return for r in right.values(): if not isinstance(vars(r), dict): return ["Invalid datatype"] if op == "==": def compare_job_args(job, l_a, r_a): ddiff = DictDiffer(l_a, r_a) ret.append(f'Comparing parsed configuration {job} against expected configuration:') ret.append("unexpected:") ret.extend([f'- {i}' for i in ddiff.unexpected]) ret.append("missing:") ret.extend([f'- {i}' for i in ddiff.missing]) ret.append("invalid:") ret.extend([f'- {i}' for i in ddiff.invalid]) verified_keys = [] ret = ["MkosiConfig is not equal to parsed args"] for right_job, right_args in right.items(): try: left_args = left.reference_config[right_job] except KeyError: ret.append(f'Unexpected job: {right_job}') continue r_v = vars(right_args) compare_job_args(right_job, left_args, r_v) verified_keys.append(right_job) for left_job in left.reference_config: if not left_job in verified_keys: ret.append(f'Missing job: {left_job}') return ret mkosi-12/tests/pexpect/000077500000000000000000000000001415136147600152425ustar00rootroot00000000000000mkosi-12/tests/pexpect/boot.py000077500000000000000000000010241415136147600165570ustar00rootroot00000000000000#!/usr/bin/env python3 import pexpect import sys import time def run() -> None: p = pexpect.spawnu(" ".join(sys.argv[1:]), logfile=sys.stdout, timeout=240) p.expect("login:") p.sendline("root") time.sleep(15) s = pexpect.spawnu("mkosi ssh", logfile=sys.stdout) s.expect("#") p.expect("#") p.sendline("systemctl poweroff") p.expect(pexpect.EOF) try: run() except pexpect.EOF: print("UNEXPECTED EOF") sys.exit(1) except pexpect.TIMEOUT: print("TIMED OUT") sys.exit(1) mkosi-12/tests/test_backend.py000066400000000000000000000011511415136147600165700ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import os from mkosi.backend import PackageType, Distribution, set_umask def test_distribution(): assert Distribution.fedora.package_type == PackageType.rpm assert Distribution.fedora is Distribution.fedora assert Distribution.fedora is not Distribution.debian assert str(Distribution.photon) == "photon" def test_set_umask(): with set_umask(0o767): tmp1 = os.umask(0o777) with set_umask(0o757): tmp2 = os.umask(0o727) tmp3 = os.umask(0o727) assert tmp1 == 0o767 assert tmp2 == 0o757 assert tmp3 == 0o777 mkosi-12/tests/test_config_parser.py000066400000000000000000001306501415136147600200310ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ import configparser import contextlib import copy import os from pathlib import Path from typing import Any, Generator, Mapping import pytest import mkosi @contextlib.contextmanager def change_cwd(path: Path) -> Generator[None, None, None]: """Change working directory temporarily""" old = Path.cwd() os.chdir(path) try: yield finally: os.chdir(old) DEFAULT_JOB_NAME = "default" class MkosiConfig: """Base class for mkosi test and reference configuration generators""" def __init__(self): self.cli_arguments = [] self.reference_config = {} def add_reference_config(self, job_name=DEFAULT_JOB_NAME): """create one initial reference configuration This default reference configuration is equal to the configuration returned by parse_args function without default files and without any command line arguments. """ self.reference_config[job_name] = { "all": False, "all_directory": None, "architecture": None, "bmap": False, "boot_protocols": [], "bootable": False, "build_dir": None, "build_packages": [], "clean_package_metadata": "auto", "remove_files": [], "remove_packages": [], "build_script": None, "environment": [], "build_sources": None, "cache_path": None, "checksum": False, "cmdline": [], "compress": None, "compress_fs": None, "compress_output": None, "debug": [], "default_path": None, "directory": None, "distribution": None, "encrypt": None, "esp_size": None, "extra_search_paths": [], "extra_trees": [], "finalize_script": None, "force_count": 0, "gpt_first_lba": None, "home_size": None, "hostname": None, "include_dir": None, "incremental": False, "install_dir": None, "kernel_command_line": ["rhgb", "selinux=0", "audit=0"], "key": None, "manifest_format": None, "mirror": None, "mksquashfs_tool": [], "no_chown": False, "nspawn_settings": None, "output": None, "output_dir": None, "output_format": None, "base_packages": True, "packages": [], "password": None, "password_is_hashed": False, "autologin": False, "skip_final_phase": False, "tar_strip_selinux_context": False, "prepare_script": None, "postinst_script": None, "qcow2": False, "read_only": False, "release": None, "repositories": [], "use_host_repositories": False, "base_image": None, "root_size": None, "secure_boot": False, "secure_boot_certificate": None, "secure_boot_key": None, "secure_boot_common_name": "mkosi of %u", "secure_boot_valid_days": "730", "sign": False, "skeleton_trees": [], "source_resolve_symlinks": False, "source_resolve_symlinks_final": False, "source_file_transfer": None, "source_file_transfer_final": None, "srv_size": None, "swap_size": None, "tmp_size": None, "usr_only": False, "var_size": None, "verb": "build", "verity": False, "with_docs": False, "with_network": False, "with_tests": True, "xbootldr_size": None, "qemu_headless": False, "qemu_smp": "2", "qemu_mem": "1G", "network_veth": False, "ephemeral": False, "with_unified_kernel_images": True, "hostonly_initrd": False, "ssh": False, "ssh_key": None, "ssh_timeout": 0, "ssh_agent": None, "ssh_port": 22, "minimize": False, "split_artifacts": False, "output_split_root": None, "output_split_kernel": None, "output_split_verity": None, "output_split_verity_sig": None, "image_id": None, "image_version": None, "auto_bump": False, "workspace_dir": None, } def __eq__(self, other: Mapping[str, Any]) -> bool: """Compare the configuration returned by parse_args against self.reference_config""" if len(self.reference_config) != len(other): return False is_eq = True for other_job, other_args in other.items(): try: this_args = self.reference_config[other_job] except KeyError: return False other_args_v = vars(other_args) if this_args != other_args_v: is_eq = False return is_eq def _append_list(self, ref_entry, new_args, job_name=DEFAULT_JOB_NAME, separator=","): """Helper function handling comma separated list as supported by mkosi""" args_list = [] if isinstance(new_args, str): args_list = new_args.split(separator) else: for arg in new_args: if isinstance(arg, str): args_list.extend(arg.split(separator)) else: args_list.append(arg) for arg in args_list: if isinstance(arg, str) and arg.startswith("!"): if arg[1:] in self.reference_config[job_name][ref_entry]: self.reference_config[job_name][ref_entry].remove(arg[1:]) elif arg not in self.reference_config[job_name][ref_entry]: self.reference_config[job_name][ref_entry].append(arg) @staticmethod def write_ini(dname: str, fname: str, config: dict, prio=1000) -> None: """Write mkosi.default(.d/*) files""" if not os.path.exists(dname): os.makedirs(dname) if prio < 1000: fname = f"{prio:03d}_{fname}" config_parser = configparser.RawConfigParser() config_parser.optionxform = lambda optionstr: str(optionstr) # Replace lists in dict before calling config_parser write file config_all_normalized = copy.deepcopy(config) for section, key_val in config_all_normalized.items(): for key, val in key_val.items(): if isinstance(val, list): config_all_normalized[section][key] = os.linesep.join(str(item) for item in val) config_parser.read_dict(config_all_normalized) with open(os.path.join(dname, fname), "w") as f_ini: config_parser.write(f_ini) def _update_ref_from_file(self, mk_config: dict, job_name=DEFAULT_JOB_NAME) -> None: """Update reference_config from a dict as needed to write an ini file using configparser This is basically a conversion from snake case to - separated format. """ if "Distribution" in mk_config: mk_config_distro = mk_config["Distribution"] if "Distribution" in mk_config_distro: self.reference_config[job_name]["distribution"] = mk_config_distro["Distribution"] if "Release" in mk_config_distro: self.reference_config[job_name]["release"] = mk_config_distro["Release"] if "Repositories" in mk_config_distro: self._append_list("repositories", mk_config_distro["Repositories"], job_name) if "UseHostRepositories" in mk_config_distro: self.reference_config[job_name]["use_host_repositories"] = mk_config_distro["UseHostRepositories"] if "Mirror" in mk_config_distro: self.reference_config[job_name]["mirror"] = mk_config_distro["Mirror"] if "Architecture" in mk_config_distro: self.reference_config[job_name]["architecture"] = mk_config_distro["Architecture"] if "Output" in mk_config: mk_config_output = mk_config["Output"] if "Format" in mk_config_output: self.reference_config[job_name]["output_format"] = mkosi.OutputFormat.from_string( mk_config_output["Format"] ) if "ManifestFormat" in mk_config_output: self.reference_config[job_name]["manifest_format"] = mk_config_output["ManifestFormat"] if "Output" in mk_config_output: self.reference_config[job_name]["output"] = Path(mk_config_output["Output"]) if "Force" in mk_config_output: self.reference_config[job_name]["force_count"] += 1 if "Bootable" in mk_config_output: self.reference_config[job_name]["bootable"] = mk_config_output["Bootable"] if "BootProtocols" in mk_config_output: self._append_list("boot_protocols", mk_config_output["BootProtocols"], job_name) if "KernelCommandLine" in mk_config_output: self._append_list("kernel_command_line", mk_config_output["KernelCommandLine"], job_name, " ") if "SecureBoot" in mk_config_output: self.reference_config[job_name]["secure_boot"] = mk_config_output["SecureBoot"] if "SecureBootKey" in mk_config_output: self.reference_config[job_name]["secure_boot_key"] = Path(mk_config_output["SecureBootKey"]) if "SecureBootCertificate" in mk_config_output: self.reference_config[job_name]["secure_boot_certificate"] = Path(mk_config_output["SecureBootCertificate"]) if "SecureBootCommonName" in mk_config_output: self.reference_config[job_name]["secure_boot_common_name"] = mk_config_output["SecureBootCommonName"] if "SecureBootValidDays" in mk_config_output: self.reference_config[job_name]["secure_boot_valid_days"] = mk_config_output["SecureBootValidDays"] if "ReadOnly" in mk_config_output: self.reference_config[job_name]["read_only"] = mk_config_output["ReadOnly"] if "Encrypt" in mk_config_output: self.reference_config[job_name]["encrypt"] = mk_config_output["Encrypt"] if "Verity" in mk_config_output: self.reference_config[job_name]["verity"] = mk_config_output["Verity"] if "Compress" in mk_config_output: self.reference_config[job_name]["compress"] = mk_config_output["Compress"] if "CompressFs" in mk_config_output: self.reference_config[job_name]["compress_fs"] = mk_config_output["CompressFs"] if "CompressOutput" in mk_config_output: self.reference_config[job_name]["compress_output"] = mk_config_output["CompressOutput"] if "Mksquashfs" in mk_config_output: self.reference_config[job_name]["mksquashfs_tool"] = mk_config_output["Mksquashfs"].split() if "QCow2" in mk_config_output: self.reference_config[job_name]["qcow2"] = mk_config_output["QCow2"] if "TarStripSELinuxContext" in mk_config_output: self.reference_config[job_name]["tar_strip_selinux_context"] = mk_config_output[ "TarStripSELinuxContext" ] if "Hostname" in mk_config_output: self.reference_config[job_name]["hostname"] = mk_config_output["Hostname"] if "WithUnifiedKernelImages" in mk_config_output: self.reference_config[job_name]["with_unified_kernel_images"] = mk_config_output[ "WithUnifiedKernelImages" ] if "HostonlyInitrd" in mk_config_output: self.reference_config[job_name]["hostonly_initrd"] = mk_config_output["HostonlyInitrd"] if "Packages" in mk_config: mk_config_packages = mk_config["Packages"] if "Packages" in mk_config_packages: self._append_list("packages", mk_config_packages["Packages"], job_name) if "WithDocs" in mk_config_packages: self.reference_config[job_name]["with_docs"] = mk_config_packages["WithDocs"] if "WithTests" in mk_config_packages: self.reference_config[job_name]["with_tests"] = mk_config_packages["WithTests"] if "Cache" in mk_config_packages: self.reference_config[job_name]["cache_path"] = Path(mk_config_packages["Cache"]) if "ExtraTrees" in mk_config_packages: self._append_list("extra_trees", [Path(mk_config_packages["ExtraTrees"])], job_name) if "SkeletonTrees" in mk_config_packages: self._append_list("skeleton_trees", [Path(mk_config_packages["SkeletonTrees"])], job_name) if "CleanupPackageMetadata" in mk_config_packages: self.reference_config[job_name]["clean_package_metadata"] = mk_config_packages["CleanPackageMetadata"] if "RemoveFiles" in mk_config_packages: self.reference_config[job_name]["remove_files"] = mk_config_packages["RemoveFiles"] if "RemovePackages" in mk_config_packages: self.reference_config[job_name]["remove_packages"] = mk_config_packages["RemovePackages"] if "BuildScript" in mk_config_packages: self.reference_config[job_name]["build_script"] = Path(mk_config_packages["BuildScript"]) if "BuildEnvironment" in mk_config_packages: self.reference_config["environment"] = mk_config_packages["Environment"] if "BuildSources" in mk_config_packages: self.reference_config[job_name]["build_sources"] = Path(mk_config_packages["BuildSources"]) if "SourceFileTransfer" in mk_config_packages: self.reference_config[job_name]["source_file_transfer"] = mk_config_packages["SourceFileTransfer"] if "SourceFileTransferFinal" in mk_config_packages: self.reference_config[job_name]["source_file_transfer_final"] = mk_config_packages[ "SourceFileTransferFinal" ] if "BuildDirectory" in mk_config_packages: self.reference_config[job_name]["build_dir"] = Path(mk_config_packages["BuildDirectory"]) if "IncludeDirectory" in mk_config_packages: self.reference_config[job_name]["include_dir"] = Path(mk_config_packages["IncludeDirectory"]) if "InstallDirectory" in mk_config_packages: self.reference_config[job_name]["install_dir"] = Path(mk_config_packages["InstallDirectory"]) if "BuildPackages" in mk_config_packages: self._append_list("build_packages", mk_config_packages["BuildPackages"], job_name) if "PostInstallationScript" in mk_config_packages: self.reference_config[job_name]["postinst_script"] = Path(mk_config_packages["PostInstallationScript"]) if "FinalizeScript" in mk_config_packages: self.reference_config[job_name]["finalize_script"] = Path(mk_config_packages["FinalizeScript"]) if "WithNetwork" in mk_config_packages: self.reference_config[job_name]["with_network"] = mk_config_packages["WithNetwork"] if "NSpawnSettings" in mk_config_packages: self.reference_config[job_name]["nspawn_settings"] = Path(mk_config_packages["NSpawnSettings"]) if "Partitions" in mk_config: mk_config_partitions = mk_config["Partitions"] if "BaseImage" in mk_config_partitions: self.reference_config[job_name]["base_image"] = mk_config_partitions["BaseImage"] if "RootSize" in mk_config_partitions: self.reference_config[job_name]["root_size"] = mk_config_partitions["RootSize"] if "ESPSize" in mk_config_partitions: self.reference_config[job_name]["esp_size"] = mk_config_partitions["ESPSize"] if "SwapSize" in mk_config_partitions: self.reference_config[job_name]["swap_size"] = mk_config_partitions["SwapSize"] if "HomeSize" in mk_config_partitions: self.reference_config[job_name]["home_size"] = mk_config_partitions["HomeSize"] if "SrvSize" in mk_config_partitions: self.reference_config[job_name]["srv_size"] = mk_config_partitions["SrvSize"] if "Validation" in mk_config: mk_config_validation = mk_config["Validation"] if "CheckSum" in mk_config_validation: self.reference_config[job_name]["checksum"] = mk_config_validation["CheckSum"] if "Sign" in mk_config_validation: self.reference_config[job_name]["sign"] = mk_config_validation["Sign"] if "Key" in mk_config_validation: self.reference_config[job_name]["key"] = mk_config_validation["Key"] if "BMap" in mk_config_validation: self.reference_config[job_name]["bmap"] = mk_config_validation["BMap"] if "Password" in mk_config_validation: self.reference_config[job_name]["password"] = mk_config_validation["Password"] if "PasswordIsHashed" in mk_config_validation: self.reference_config[job_name]["password_is_hashed"] = mk_config_validation["PasswordIsHashed"] if "Autologin" in mk_config_validation: self.reference_config[job_name]["autologin"] = mk_config_validation["Autologin"] if "Host" in mk_config: mk_config_host = mk_config["Host"] if "ExtraSearchPaths" in mk_config_host: self._append_list("extra_search_paths", mk_config_host["ExtraSearchPaths"], job_name, ":") if "QemuHeadless" in mk_config_host: self.reference_config[job_name]["qemu_headless"] = mk_config_host["QemuHeadless"] if "NetworkVeth" in mk_config_host: self.reference_config[job_name]["network_veth"] = mk_config_host["NetworkVeth"] if "Ephemeral" in mk_config_host: self.reference_config[job_name]["ephemeral"] = mk_config_host["Ephemeral"] if "Ssh" in mk_config_host: self.reference_config[job_name]["ssh"] = mk_config_host["Ssh"] class MkosiConfigOne(MkosiConfig): """Classes derived from this class are magically instantiated by pytest Each test_ function with a parameter named "tested_config" gets called by pytest for each class derived from this class. These test cases verify the parse_args function in single image (not --all) mode. This class implements four functions: - prepare_mkosi_default - prepare_mkosi_default_d_1 - prepare_mkosi_default_d_2 - prepare_args or prepare_args_short The purpose of these function is to generate configuration files and sets of command line arguments processed by the parse_args function of mkosi. Additionally each of these four functions alters the reference_config to be consistent with the expected values returned by the parse_args function under test. This allows to write test cases with four steps. The first step generates a reference configuration consisting of mkosi.default file only. Therefore prepare_mkosi_default function is is called to generate the test configuration. Finally parse_args is called and the configuration returned by parse_args is compared against the reference_config. The second test step generates a test configuration by calling prepare_mkosi_default and prepare_mkosi_default_d_1. This verifies the behavior of parse_args is fine for mkosi.default plus one override file. The third test case verifies that mkosi.default with two files in mkosi.default.d folder works as expected. The fourth test case additionally overrides some default values with command line arguments. Classes derived from this base class should override the mentioned functions to implement specific test cases. """ def __init__(self): """Add the default mkosi.default config""" super().__init__() self.add_reference_config() def _prepare_mkosi_default(self, directory: str, config: dict) -> None: __class__.write_ini(directory, "mkosi.default", config) def _prepare_mkosi_default_d(self, directory: str, config: dict, prio=1000, fname="mkosi.conf") -> None: __class__.write_ini(os.path.join(directory, "mkosi.default.d"), fname, config, prio) def prepare_mkosi_default(self, directory: str) -> None: """Generate a mkosi.default defaults file in the working directory""" pass def prepare_mkosi_default_d_1(self, directory: str) -> None: """Generate a prio 1 config file in mkosi.default.d The file name should be prefixed with 001_. """ pass def prepare_mkosi_default_d_2(self, directory: str) -> None: """Generate a prio 2 config file in mkosi.default.d The file name should be prefixed with 002_. """ pass def prepare_args(self) -> None: """Add some command line arguments to this test run""" pass def prepare_args_short(self) -> None: """Add some command line arguments to this test run, in short form""" pass class MkosiConfigSummary(MkosiConfigOne): """Test configuration for mkosi summary This test checks if the default parameter set of these tests is in sync with the default parameters implemented in mkosi. No config files or command line arguments are in place. """ def __init__(self): super().__init__() for ref_c in self.reference_config.values(): ref_c["verb"] = "summary" self.cli_arguments = ["summary"] class MkosiConfigDistro(MkosiConfigOne): """Minimal test configuration for the distribution parameter This tests defines the distribution parameter on several configuration priorities: - mkosi.default - mkosi.default.d/001_mkosi.conf - mkosi.default.d/002_mkosi.conf - --distribution """ def __init__(self, subdir_name=None, alldir_name=None): super().__init__() self.subdir_name = subdir_name if subdir_name: for ref_c in self.reference_config.values(): ref_c["directory"] = self.subdir_name self.cli_arguments = ["--directory", self.subdir_name, "summary"] def prepare_mkosi_default(self, directory: str) -> None: mk_config = {"Distribution": {"Distribution": "fedora"}} self._prepare_mkosi_default(directory, mk_config) for ref_c in self.reference_config.values(): ref_c["distribution"] = "fedora" if self.subdir_name: ref_c["directory"] = self.subdir_name if self.subdir_name: self.cli_arguments = ["--directory", self.subdir_name, "summary"] def prepare_mkosi_default_d_1(self, directory: str) -> None: mk_config = {"Distribution": {"Distribution": "ubuntu"}} self._prepare_mkosi_default_d(directory, mk_config, 1) for ref_c in self.reference_config.values(): ref_c["distribution"] = "ubuntu" def prepare_mkosi_default_d_2(self, directory: str) -> None: mk_config = {"Distribution": {"Distribution": "debian"}} self._prepare_mkosi_default_d(directory, mk_config, 2) for ref_c in self.reference_config.values(): ref_c["distribution"] = "debian" def prepare_args(self) -> None: if not self.cli_arguments: self.cli_arguments = ["build"] self.cli_arguments[0:0] = ["--distribution", "arch"] for ref_c in self.reference_config.values(): ref_c["distribution"] = "arch" def prepare_args_short(self) -> None: if not self.cli_arguments: self.cli_arguments = ["build"] self.cli_arguments[0:0] = ["-d", "arch"] for ref_c in self.reference_config.values(): ref_c["distribution"] = "arch" class MkosiConfigDistroDir(MkosiConfigDistro): """Same as Distro, but gets --directory passed and sets verb to summary""" def __init__(self): super().__init__("a_sub_dir") for ref_c in self.reference_config.values(): ref_c["verb"] = "summary" class MkosiConfigManyParams(MkosiConfigOne): """Test configuration for most parameters""" def prepare_mkosi_default(self, directory: str) -> None: mk_config = { "Distribution": { "Distribution": "fedora", "Release": "28", "Repositories": "http://fedora/repos", "UseHostRepositories": False, "Mirror": "http://fedora/mirror", "Architecture": "i386", }, "Output": { "Format": "gpt_ext4", "Output": "test_image.raw", "ManifestFormat": [mkosi.backend.ManifestFormat.json], # # 'OutputDirectory': '', "Bootable": False, "BootProtocols": "uefi", "KernelCommandLine": ["console=ttyS0"], "SecureBoot": False, "SecureBootKey": "/foo.pem", "SecureBootCertificate": "bar.crt", "SecureBootCommonName": "mkosi for %u", "SecureBootValidDays": "730", "ReadOnly": False, "Encrypt": "all", "Verity": False, "Compress": "lz4", "Mksquashfs": "my/fo/sq-tool", "QCow2": False, "Hostname": "myhost1", "UsrOnly": False, "SplitArtifacts": False, }, "Packages": { "Packages": ["pkg-foo", "pkg-bar", "pkg-foo1,pkg-bar1"], "WithDocs": False, "WithTests": True, "Cache": "the/cache/dir", "ExtraTrees": "another/tree", "SkeletonTrees": "a/skeleton", "BuildScript": "fancy_build.sh", "BuildSources": "src", "SourceFileTransfer": mkosi.SourceFileTransfer.copy_all, "BuildDirectory": "here/we/build", "BuildPackages": ["build-me", "build-me2"], "PostInstallationScript": "post-script.sh", "FinalizeScript": "final.sh", "WithNetwork": False, "NSpawnSettings": "foo.nspawn", }, "Partitions": {"RootSize": "2G", "ESPSize": "128M", "SwapSize": "1024M", "HomeSize": "3G"}, "Validation": { "CheckSum": True, "Sign": False, "Key": "mykey.gpg", "BMap": False, "Password": "secret1234", "Autologin": True, }, "Host": { "ExtraSearchPaths": "search/here:search/there", "QemuHeadless": True, "NetworkVeth": True, }, } self._prepare_mkosi_default(directory, mk_config) for j in self.reference_config: self._update_ref_from_file(mk_config, j) def prepare_mkosi_default_d_1(self, directory: str) -> None: mk_config = { "Distribution": { "Distribution": "ubuntu", "Release": "18.04", "Repositories": "http://ubuntu/repos", "UseHostRepositories": False, "Mirror": "http://ubuntu/mirror", "Architecture": "x86_64", }, "Output": { "Format": "gpt_btrfs", "Output": "test_image.raw.xz", # # 'OutputDirectory': '', "Bootable": True, "BootProtocols": "bios", "KernelCommandLine": ["console=ttyS1"], "SecureBoot": True, "SecureBootKey": "/foo-ubu.pem", "SecureBootCertificate": "bar-bub.crt", "ReadOnly": True, "Encrypt": "data", "Verity": True, "Compress": "zstd", "Mksquashfs": "my/fo/sq-tool-ubu", "QCow2": True, "Hostname": "myubuhost1", "UsrOnly": False, "SplitArtifacts": False, }, "Packages": { "Packages": ["add-ubu-1", "add-ubu-2"], "WithDocs": True, "WithTests": False, "Cache": "the/cache/dir/ubu", "ExtraTrees": "another/tree/ubu", "SkeletonTrees": "a/skeleton/ubu", "BuildScript": "ubu_build.sh", "BuildSources": "src/ubu", "SourceFileTransfer": mkosi.SourceFileTransfer.copy_git_cached, "BuildDirectory": "here/we/build/ubu", "BuildPackages": ["build-me", "build-me2-ubu"], "PostInstallationScript": "post-ubu-script.sh", "FinalizeScript": "final-ubu.sh", "WithNetwork": True, "NSpawnSettings": "foo-ubu.nspawn", }, "Partitions": {"RootSize": "4G", "ESPSize": "148M", "SwapSize": "1536M", "HomeSize": "5G"}, "Validation": { "CheckSum": False, "Sign": True, "Key": "mykey-ubu.gpg", "BMap": True, "Password": "secret12345", "Autologin": True, }, "Host": { "ExtraSearchPaths": "search/ubu", "QemuHeadless": True, "NetworkVeth": True, }, } self._prepare_mkosi_default_d(directory, mk_config, 1) for j in self.reference_config: self._update_ref_from_file(mk_config, j) def prepare_mkosi_default_d_2(self, directory: str) -> None: mk_config = { "Distribution": { "Distribution": "debian", "Release": "unstable", "Repositories": "http://debian/repos", "UseHostRepositories": False, "Mirror": "http://ubuntu/mirror", "Architecture": "x86_64", }, "Output": { "Format": "gpt_btrfs", "Output": "test_image.raw.xz", # # 'OutputDirectory': '', "Bootable": True, "BootProtocols": "bios", "KernelCommandLine": ["console=ttyS1"], "SecureBoot": True, "SecureBootKey": "/foo-debi.pem", "SecureBootCertificate": "bar-bub.crt", "ReadOnly": True, "Encrypt": "data", "Verity": True, "Compress": "zstd", "Mksquashfs": "my/fo/sq-tool-debi", "QCow2": True, "Hostname": "mydebihost1", "UsrOnly": False, "SplitArtifacts": False, }, "Packages": { "Packages": ["!add-ubu-1", "!add-ubu-2", "add-debi-1", "add-debi-2"], "WithDocs": True, "WithTests": False, "Cache": "the/cache/dir/debi", "ExtraTrees": "another/tree/debi", "SkeletonTrees": "a/skeleton/debi", "BuildScript": "debi_build.sh", "BuildSources": "src/debi", "SourceFileTransfer": mkosi.SourceFileTransfer.copy_git_cached, "BuildDirectory": "here/we/build/debi", "BuildPackages": ["build-me", "build-me2-debi"], "PostInstallationScript": "post-debi-script.sh", "FinalizeScript": "final-debi.sh", "WithNetwork": True, "NSpawnSettings": "foo-debi.nspawn", }, "Partitions": {"RootSize": "4G", "ESPSize": "148M", "SwapSize": "1536M", "HomeSize": "5G"}, "Validation": { "CheckSum": False, "Sign": True, "Key": "mykey-debi.gpg", "BMap": True, "Password": "secret12345", "Autologin": True, }, "Host": { "ExtraSearchPaths": "search/debi", "QemuHeadless": True, "NetworkVeth": True, }, } self._prepare_mkosi_default_d(directory, mk_config, 2) for j in self.reference_config: self._update_ref_from_file(mk_config, j) def prepare_args(self) -> None: if not self.cli_arguments: self.cli_arguments = ["build"] self.cli_arguments[0:0] = ["--distribution", "arch"] self.cli_arguments[0:0] = ["--release", "7"] self.cli_arguments[0:0] = ["--repositories", "centos/repos"] self.cli_arguments[0:0] = ["--force"] self.cli_arguments[0:0] = ["--read-only", "no"] self.cli_arguments[0:0] = ["--incremental"] for j, ref_c in self.reference_config.items(): ref_c["distribution"] = "arch" ref_c["release"] = "7" self._append_list("repositories", "centos/repos", j) ref_c["force_count"] += 1 ref_c["read_only"] = False ref_c["incremental"] = True def prepare_args_short(self) -> None: if not self.cli_arguments: self.cli_arguments = ["build"] self.cli_arguments[0:0] = ["-d", "centos"] for ref_c in self.reference_config.values(): ref_c["distribution"] = "centos" class MkosiConfigIniLists1(MkosiConfigOne): """Manually written ini files with advanced list syntax.""" def prepare_mkosi_default(self, directory: str) -> None: ini_lines = [ "[Distribution]", "Distribution=fedora", "", "[Content]", "Packages=openssh-clients", " httpd", " tar", ] with open(os.path.join(directory, "mkosi.default"), "w") as f_ini: f_ini.write(os.linesep.join(ini_lines)) self.reference_config[DEFAULT_JOB_NAME]["distribution"] = "fedora" self.reference_config[DEFAULT_JOB_NAME]["packages"] = ["openssh-clients", "httpd", "tar"] def prepare_mkosi_default_d_1(self, directory: str) -> None: ini_lines = [ "[Distribution]", "Distribution=ubuntu", "", "[Content]", "Packages= ", " !httpd", " apache2", "", "[Output]", "KernelCommandLine=console=ttyS0", ] dname = os.path.join(directory, "mkosi.default.d") if not os.path.exists(dname): os.makedirs(dname) with open(os.path.join(dname, "1_ubuntu.conf"), "w") as f_ini: f_ini.write(os.linesep.join(ini_lines)) self.reference_config[DEFAULT_JOB_NAME]["distribution"] = "ubuntu" if "httpd" in self.reference_config[DEFAULT_JOB_NAME]["packages"]: self.reference_config[DEFAULT_JOB_NAME]["packages"].remove("httpd") self.reference_config[DEFAULT_JOB_NAME]["packages"].append("apache2") self.reference_config[DEFAULT_JOB_NAME]["kernel_command_line"].extend(["console=ttyS0"]) def prepare_mkosi_default_d_2(self, directory: str) -> None: ini_lines = [ "[Content]", "Packages=[ vim,!vi", " ca-certificates, bzip ]" "", "[Output]", "KernelCommandLine=console=ttyS1", " driver.feature=1", ] dname = os.path.join(directory, "mkosi.default.d") if not os.path.exists(dname): os.makedirs(dname) with open(os.path.join(dname, "2_additional_stuff.conf"), "w") as f_ini: f_ini.write(os.linesep.join(ini_lines)) if "vi" in self.reference_config[DEFAULT_JOB_NAME]["packages"]: self.reference_config[DEFAULT_JOB_NAME]["packages"].remove("vi") self.reference_config[DEFAULT_JOB_NAME]["packages"].extend(["vim", "ca-certificates", "bzip"]) self.reference_config[DEFAULT_JOB_NAME]["kernel_command_line"].extend(["console=ttyS1", "driver.feature=1"]) class MkosiConfigIniLists2(MkosiConfigIniLists1): """Same as MkosiConfigIniLists2 but with clean KernelCommandLine""" def prepare_mkosi_default(self, directory: str) -> None: ini_lines = ["[Output]", "KernelCommandLine=!*"] with open(os.path.join(directory, "mkosi.default"), "w") as f_ini: f_ini.write(os.linesep.join(ini_lines)) self.reference_config[DEFAULT_JOB_NAME]["kernel_command_line"] = [] # pytest magic: run each test function with each class derived from MkosiConfigOne @pytest.fixture(params=MkosiConfigOne.__subclasses__()) def tested_config(request): return request.param() def test_verb_none(tmpdir): with change_cwd(tmpdir.strpath): args = mkosi.parse_args([]) assert args["default"].verb == "build" def test_verb_build(tmpdir): with change_cwd(tmpdir.strpath): args = mkosi.parse_args(["build"]) assert args["default"].verb == "build" def test_verb_boot_no_cli_args1(tmpdir): with change_cwd(tmpdir.strpath): cmdline_ref = ["boot", "--par-for-sub", "--pom", "--for_sub", "1234"] args = mkosi.parse_args(cmdline_ref) assert args["default"].verb == "boot" assert args["default"].cmdline == cmdline_ref[1:] def test_verb_boot_no_cli_args2(tmpdir): with change_cwd(tmpdir.strpath): cmdline_ref = ["-pa-package", "boot", "--par-for-sub", "--popenssl", "--for_sub", "1234"] args = mkosi.parse_args(cmdline_ref) assert args["default"].verb == "boot" assert "a-package" in args["default"].packages assert args["default"].cmdline == cmdline_ref[2:] def test_verb_boot_no_cli_args3(tmpdir): with change_cwd(tmpdir.strpath): cmdline_ref = ["-pa-package", "-p", "another-package", "build"] args = mkosi.parse_args(cmdline_ref) assert args["default"].verb == "build" assert args["default"].packages == ["a-package", "another-package"] def test_verb_summary_no_cli_args4(tmpdir): with change_cwd(tmpdir.strpath): cmdline_ref = ["-pa-package", "-p", "another-package", "summary"] args = mkosi.parse_args(cmdline_ref) assert args["default"].verb == "summary" assert args["default"].packages == ["a-package", "another-package"] def test_verb_shell_cli_args5(tmpdir): with change_cwd(tmpdir.strpath): cmdline_ref = ["-pa-package", "-p", "another-package", "shell", "python3 -foo -bar;", "ls --inode"] args = mkosi.parse_args(cmdline_ref) assert args["default"].verb == "shell" assert args["default"].packages == ["a-package", "another-package"] assert args["default"].cmdline == cmdline_ref[4:] def test_verb_shell_cli_args6(tmpdir): with change_cwd(tmpdir.strpath): cmdline_ref = ["-i", "yes", "summary"] args = mkosi.parse_args(cmdline_ref) assert args["default"].verb == "summary" assert args["default"].incremental == True def test_verb_shell_cli_args7(tmpdir): with change_cwd(tmpdir.strpath): cmdline_ref = ["-i", "summary"] args = mkosi.parse_args(cmdline_ref) assert args["default"].verb == "summary" assert args["default"].incremental == True def test_builtin(tested_config, tmpdir): """Test if builtin config and reference config match""" with change_cwd(tmpdir.strpath): if "--all" in tested_config.cli_arguments: with pytest.raises(mkosi.MkosiException): args = mkosi.parse_args(tested_config.cli_arguments) else: args = mkosi.parse_args(tested_config.cli_arguments) assert tested_config == args def test_def(tested_config, tmpdir): """Generate the mkosi.default file only""" with change_cwd(tmpdir.strpath): tested_config.prepare_mkosi_default(tmpdir.strpath) args = mkosi.parse_args(tested_config.cli_arguments) assert tested_config == args def test_def_1(tested_config, tmpdir): """Generate the mkosi.default file plus one config file""" with change_cwd(tmpdir.strpath): tested_config.prepare_mkosi_default(tmpdir.strpath) tested_config.prepare_mkosi_default_d_1(tmpdir.strpath) args = mkosi.parse_args(tested_config.cli_arguments) assert tested_config == args def test_def_2(tested_config, tmpdir): """Generate the mkosi.default file plus another config file""" with change_cwd(tmpdir.strpath): tested_config.prepare_mkosi_default(tmpdir.strpath) tested_config.prepare_mkosi_default_d_2(tmpdir.strpath) args = mkosi.parse_args(tested_config.cli_arguments) assert tested_config == args def test_def_1_2(tested_config, tmpdir): """Generate the mkosi.default file plus two config files""" with change_cwd(tmpdir.strpath): tested_config.prepare_mkosi_default(tmpdir.strpath) tested_config.prepare_mkosi_default_d_1(tmpdir.strpath) tested_config.prepare_mkosi_default_d_2(tmpdir.strpath) args = mkosi.parse_args(tested_config.cli_arguments) assert tested_config == args def test_def_args(tested_config, tmpdir): """Generate the mkosi.default plus command line arguments""" with change_cwd(tmpdir.strpath): tested_config.prepare_args() args = mkosi.parse_args(tested_config.cli_arguments) assert tested_config == args def test_def_1_args(tested_config, tmpdir): """Generate the mkosi.default plus a config file plus command line arguments""" with change_cwd(tmpdir.strpath): tested_config.prepare_mkosi_default(tmpdir.strpath) tested_config.prepare_mkosi_default_d_1(tmpdir.strpath) tested_config.prepare_args() args = mkosi.parse_args(tested_config.cli_arguments) assert tested_config == args def test_def_1_2_args(tested_config, tmpdir): """Generate the mkosi.default plus two config files plus command line arguments""" with change_cwd(tmpdir.strpath): tested_config.prepare_mkosi_default(tmpdir.strpath) tested_config.prepare_mkosi_default_d_1(tmpdir.strpath) tested_config.prepare_mkosi_default_d_2(tmpdir.strpath) tested_config.prepare_args() args = mkosi.parse_args(tested_config.cli_arguments) assert tested_config == args def test_def_1_2_argssh(tested_config, tmpdir): """Generate the mkosi.default plus two config files plus short command line arguments""" with change_cwd(tmpdir.strpath): tested_config.prepare_mkosi_default(tmpdir.strpath) tested_config.prepare_mkosi_default_d_1(tmpdir.strpath) tested_config.prepare_mkosi_default_d_2(tmpdir.strpath) tested_config.prepare_args_short() args = mkosi.parse_args(tested_config.cli_arguments) assert tested_config == args class MkosiConfigAll(MkosiConfig): """Classes derived from this class are magically instantiated by pytest Each test_ function with a parameter named "tested_config_all" gets called by pytest for each class derived from this class. """ class MkosiConfigAllHost(MkosiConfigAll): """Test --all option with two simple configs""" def __init__(self): """Add two default mkosi.default configs""" super().__init__() for hostname in ["test1.example.org", "test2.example.org"]: job_name = "mkosi." + hostname self.add_reference_config(job_name) self.reference_config[job_name]["all"] = True self.reference_config[job_name]["hostname"] = hostname self.cli_arguments = ["--all", "build"] def prepare_mkosi_files(self, directory: str, all_directory=None) -> None: if all_directory is None: all_dir = os.path.abspath("mkosi.files") else: all_dir = os.path.join(directory, all_directory) for job_name, config in self.reference_config.items(): mk_config = {"Output": {"Hostname": config["hostname"]}} __class__.write_ini(all_dir, job_name, mk_config) if all_directory: self.cli_arguments[0:0] = ["--all-directory", "all_dir"] # pytest magic: run each test function with each class derived from MkosiConfigAll @pytest.fixture(params=MkosiConfigAll.__subclasses__()) def tested_config_all(request): return request.param() def test_all_1(tested_config_all, tmpdir): """Generate the mkosi.default plus two config files plus short command line arguments""" with change_cwd(tmpdir.strpath): tested_config_all.prepare_mkosi_files(tmpdir.strpath) args = mkosi.parse_args(tested_config_all.cli_arguments) assert tested_config_all == args mkosi-12/tools/000077500000000000000000000000001415136147600135705ustar00rootroot00000000000000mkosi-12/tools/do-a-release.sh000077500000000000000000000011451415136147600163660ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1+ if [ -z "$1" ] ; then echo "Version number not specified." exit 1 fi if ! git diff-index --quiet HEAD; then echo "Repo has modified files." exit 1 fi pandoc -t man -s -o man/mkosi.1 mkosi.md if ! git diff-index --quiet HEAD; then git add man/mkosi.1 git commit -m "man: rebuild the man page" fi sed -i 's/version=".*",/version="'"$1"'",/' setup.py sed -i "s/__version__ = \".*\"/__version__ = \"$1\"/" mkosi/__init__.py git add -p setup.py mkosi action.yaml git commit -m "Bump version numbers for v$1" git tag -s "v$1" -m "mkosi $1" mkosi-12/tools/generate-zipapp.sh000077500000000000000000000004261415136147600172240ustar00rootroot00000000000000#!/bin/bash BUILDDIR=$(mktemp -d -q) cleanup() { rm -rf "$BUILDDIR" } trap cleanup EXIT mkdir -p builddir cp -r mkosi "${BUILDDIR}/" python3 -m zipapp \ -p "/usr/bin/env python3" \ -o builddir/mkosi \ -m mkosi.__main__:main \ "$BUILDDIR"