pax_global_header00006660000000000000000000000064145652025310014515gustar00rootroot0000000000000052 comment=b555f17ecefccc8e3dec4baaf7e1566aa1634008 debspawn-0.6.4/000077500000000000000000000000001456520253100133275ustar00rootroot00000000000000debspawn-0.6.4/.github/000077500000000000000000000000001456520253100146675ustar00rootroot00000000000000debspawn-0.6.4/.github/workflows/000077500000000000000000000000001456520253100167245ustar00rootroot00000000000000debspawn-0.6.4/.github/workflows/python.yml000066400000000000000000000037521456520253100207770ustar00rootroot00000000000000name: Build & Test on: push: branches: [ master ] pull_request: branches: [ master ] jobs: build: runs-on: ubuntu-latest strategy: matrix: python-version: [ '3.9', '3.11', '3.12' ] name: Python ${{ matrix.python-version }} steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} - name: Update cache run: sudo apt-get update -qq - name: Install system prerequisites run: sudo apt-get install -yq xsltproc docbook-xsl docbook-xml zstd systemd systemd-container debootstrap - name: Upgrading pip run: python -m pip install --upgrade pip - name: Install dependencies run: python -m pip install setuptools tomlkit pkgconfig flake8 pytest pylint mypy isort black - name: Build & Install run: | ./setup.py build ./setup.py install --root=/tmp rm -rf build/ - name: Test run: | sudo $(which python3) -m pytest rm -rf build/ - name: Lint (flake8) run: | python -m flake8 ./ --statistics python -m flake8 debspawn/dsrun --statistics - name: Lint (pylint) run: | python -m pylint -f colorized ./debspawn python -m pylint -f colorized ./debspawn/dsrun python -m pylint -f colorized ./tests ./data python -m pylint -f colorized setup.py install-sysdata.py - name: Lint (mypy) run: | python -m mypy --install-types --non-interactive . python -m mypy ./debspawn/dsrun - name: Lint (isort) run: isort --diff . - name: Lint (black) run: black --diff . debspawn-0.6.4/.gitignore000066400000000000000000000022631456520253100153220ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ debspawn-0.6.4/.mypy.ini000066400000000000000000000002761456520253100151110ustar00rootroot00000000000000[mypy] show_column_numbers = True pretty = True strict_optional = False ignore_missing_imports = True warn_redundant_casts = True warn_unused_ignores = True debspawn-0.6.4/AUTHORS000066400000000000000000000000501456520253100143720ustar00rootroot00000000000000Matthias Klumpp debspawn-0.6.4/LICENSE000066400000000000000000000167431456520253100143470ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. 
If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. 
You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. 
debspawn-0.6.4/MANIFEST.in000066400000000000000000000001631456520253100150650ustar00rootroot00000000000000include debspawn/dsrun include install-sysdata.py include docs/* include data/services/* include data/tmpfiles.d/* debspawn-0.6.4/NEWS000066400000000000000000000205111456520253100140250ustar00rootroot00000000000000Version 0.6.4 ~~~~~~~~~~~~~ Released: 2024-02-20 Bugfixes: * Ensure nspawn never tries to bindmount /etc/localtime * Fix Lintian check permission error Version 0.6.3 ~~~~~~~~~~~~~ Released: 2023-12-22 Bugfixes: * Ensure containers have a dummy /etc/hosts file (Matthias Klumpp) * Work around nspawn wanting to ensure its user-home exists (Matthias Klumpp) * dsrun: Use eatmydata a bit more sparingly (Matthias Klumpp) * Ensure passwd is available in the build environment (Matthias Klumpp) * Ensure locally injected packages are always preferred (Matthias Klumpp) * Fix debsign error on source only builds (Maykel Moya) Version 0.6.2 ~~~~~~~~~~~~~ Released: 2023-05-28 Features: * Support Python 3.11 Bugfixes: * Allow APT more choices when installing build-deps from partial suites * Set BaseSuite for image recreation if image has a custom name * Make APT consider all suites equally for dependency resolution Version 0.6.1 ~~~~~~~~~~~~~ Released: 2023-01-02 Features: * Make container APT configuration more sbuild-like Bugfixes: * docs: Update positional argument info in manual pages * Use useradd instead of adduser * Don't force a suite when packages are injected Version 0.6.0 ~~~~~~~~~~~~~ Released: 2022-10-02 Features: * Allow containers that run a custom command to be booted as well * Add configuration option to disable package caching * Add configuration option to set a different bootstrap tool * Make artifact storage in interactive mode an explicit action Bugfixes: * Fix pyproject.toml license/author fields * Don't use deprecated Lintian argument * Use tomlkit instead of deprecated toml Version 0.5.2 ~~~~~~~~~~~~~ Released: 2022-02-22 Features: * Format source code with Black * Allow to boot a container for interactive logins Bugfixes: * Set suite explicitly when resolving build-deps * Do not include APT package caches in tarballs Version 0.5.1 ~~~~~~~~~~~~~ Released: 2021-11-08 Notes: * This release changes the default bootstrap variant to `buildd`, which may result in users needing to pass `--variant=none` to build with existing images, or change the default via Debspawn's global settings. * The image name and suite name have been decoupled, so users can now give images arbitrary names and create multiple ones for different purposes. Features: * Allow custom container image names, decoupling them from being suite-based * Propagate proxy settings through to APT, debootstrap and nspawn * Default to the 'buildd' bootstrap variant * Make update-all command work with custom image names * Add global config option for default bootstrap variant Bugfixes: * Give access to /boot as well if read-kmods is passed * run: Copy build directory by default, instead of bindmounting it * run: Retrieve artifacts the same way as regular build artifacts * Unmount any bindmounds when cleaning up temporary directories * man: Document the SyscallFilter config option * man: Clarify new image name / suite relations in ds-create manual page Version 0.5.0 ~~~~~~~~~~~~~ Released: 2021-06-04 Features: * First release also available on PyPI! 
* maintain: Add new flag to print status information * maintain: status: Include debootstrap version in reports * docs: Document the `maintain` subcommand * Install systemd timer to clear all caches monthly * Unconditionally save buildlog Bugfixes: * Rework how external system files are installed * Include extra data in manifest as well * Fix image creation if resolv.conf is a symlink Version 0.4.2 ~~~~~~~~~~~~~ Released: 2021-05-24 Features: * Add "maintain" subcommand to migrate or reset settings & state * Configure APT to not install recommends by default (deb: #987312) * Retry apt updates a few times to protect against bad mirrors * Add tmpfiles.d snippet to manage debspawn's temporary directory * Allow defining custom environment variables for package builds (deb: #986967) * Add maintenance action to update all images Bugfixes: * Interpret EOF as "No" in interactive override question * Implement privileged device access properly * Move images to the right default location * Don't try to bindmound KVM if it doesn't exist * Use dpkg --print-architecture to determine arch (deb: #987547) * run: Mount builddir in initialization step * Don't register any of our nspawn containers by default * Check system encoding properly (deb: #982793) * Atomically and safely copy files into unsafe environments * Run builds as user with a random free UID (deb: #989049) Contributors: Helmut Grohne, Matthias Klumpp Version 0.4.1 ~~~~~~~~~~~~~ Released: 2020-12-22 Features: * README, debspawn.1: document config file (Gordon Ball) * Install lintian after build (Harlan Lieberman-Berg) * Allow custom scripts to cache their prepared images for faster builds (Matthias Klumpp) * Allow running fully privileged containers (Matthias Klumpp) * Make global config file use TOML, update documentation (Matthias Klumpp) Bugfixes: * Pass --console nspawn flag only if our systemd version is high enough (Matthias Klumpp) * Enforce the suite name of the env we built in for changes files (Matthias Klumpp) * Add extra suites to sources even if base suite is equal to image suite (Matthias Klumpp) * Have nspawn recreate container machine-id each time (Matthias Klumpp) * cli: Safeguard against cases where we have flags but no subcommands (Matthias Klumpp) * Disable syscall filter for some groups by default (Matthias Klumpp) Version 0.4.0 ~~~~~~~~~~~~~ Released: 2020-01-20 Features: * Implement an interactive build mode * Store a copy of the build log by default * Allow copying back changes in interactive mode * Use a bit of color in errors and warnings, if possible * Update manual pages * Permit recreation of images, instead of just updating them Bugfixes: * Move dsrun helper into the package itself * Drop some unwanted files from /dev before creating OS tarballs * Remove d/files file if it's created by Debspawn pre-build * Interactive mode and build logs are mutually exclusive for now * Add MANIFEST file Version 0.3.0 ~~~~~~~~~~~~~ Released: 2020-01-06 Features: * Allow to override temporary directory path explicitly in config * Allow full sources.list customization at image creation time * Add initial test infrastructure * Allow 'b' shorthand for the 'build' subparser (Mo Zhou) * Allow turning on d/rules clean on the host, disable it by default * Allow selected environment variables to survive auto-sudo * Implement way to run Lintian as part of the build * Print pretty error message if configuration JSON is broken * Prefer hardlinks over copies when creating the APT package cache * Implement support for injecting packages * docs: 
Add a note about how to inject packages
 * Only install minimal Python in containers
 * Harmonize project name (= Debspawn spelling everywhere)
 * Add command to list installed container image details
 * Update sbuild replacement note

Bugfixes:
 * Ensure we have absolute paths for debspawn run
 * Don't fail running command without build/artifacts directory
 * Build packages with epochs correctly when building from source-dir
 * Sign packages with an epoch correctly
 * Change HOME when dropping privileges
 * Don't install arch-indep build-deps on arch-only builds
 * Shorten nspawn machine name when hostname is exceptionally long
 * tests: Test container updates
 * Ensure all data lands in its intended directories when installing

Version 0.2.1
~~~~~~~~~~~~~
Released: 2019-01-10

Features:
 * Allow giving the container extra capabilities easily for custom commands
 * Allow giving the container permission to access the host's /dev
 * Allow creating an image with a suite and base-suite

Version 0.2.0
~~~~~~~~~~~~~
Released: 2018-08-28

Features:
 * Allow specifying enabled archive components at image creation time
 * Support printing the program version to stdout
 * Allow diverting the maintainer address
 * Prepare container for arbitrary run action similarly to package build
 * Support more build-only choices
 * Print some basic system info to the log
 * Log some basic disk space stats before/after build

Bugfixes:
 * random.choices is only available since Python 3.6, replace it
 * Enforce dsrun to be installed in a location were we can find it
 * Ensure we don't try to link journals
 * Force new configuration by default, not old one
 * Set environment shell

Version 0.1.0
~~~~~~~~~~~~~
Released: 2018-08-20

Notes:
 * Initial release

debspawn-0.6.4/README.md

# Debspawn

[![Build & Test](https://github.com/lkhq/debspawn/actions/workflows/python.yml/badge.svg)](https://github.com/lkhq/debspawn/actions/workflows/python.yml)

Debspawn is a tool to build Debian packages in an isolated environment. Unlike similar tools like `sbuild` or `pbuilder`, `debspawn` uses `systemd-nspawn` instead of plain chroots to manage the isolated environment. This allows Debspawn to isolate builds from the host system much more via containers. It also allows for more advanced features to manage builds, for example setting resource limits for individual builds.

Please keep in mind that Debspawn is *not* a security feature! While it provides a lot of isolation from the host system, you should not run arbitrary untrusted code with it. The usual warnings for all container technology apply here.

Debspawn also allows one to run arbitrary custom commands in its environment. This is used by the Laniakea[1] Spark workers to execute a variety of non-package builds and QA actions in the same environment in which we usually build packages.

Debspawn was created to be simple to use in automation as well as by humans. It should be easily usable both on large build farms with good integration with a job runner, and on a personal workstation by a human user (to reproduce builds done elsewhere, or to develop a Debian package). Due to that, the most common operations are as easily accessible as possible and should require zero configuration by default. Additionally, `debspawn` will always try to do the right thing automatically before resorting to a flag that the user has to set.
Options which change the build environment are - with one exception - not made available intentionally, so achieving reproducible builds is easier. See the FAQ below for more details.

[1]: https://github.com/lkhq/laniakea

## Usage

### Installing Debspawn

#### Via the Debian package

On Debian/Ubuntu, simply run
```bash
sudo apt install debspawn
```
to start using Debspawn.

#### Via PyPI

> **⚠ WARNING: Careful when installing via PyPI!**
> While we do ship `debspawn` on PyPI, installing it via `pip` will not install certain system services
> to automate cache cleanup and temp data cleanup. In addition to that, all manual pages will be missing.
> This is due to intentional limitations of Python packages installed via pip.

If you want to install Debspawn via PyPI anyway, you can use `pip install debspawn`. You can decide to install the system data files manually later by running the `install-sysdata.py` script from the Git repository and adjusting the `debspawn` binary path in the installed systemd units, if you want to.

#### Via the Git repository

Clone the Git repository, then install the (build and runtime) dependencies of `debspawn`:
```bash
sudo apt install xsltproc docbook-xsl python3-setuptools zstd systemd-container debootstrap
```

You can then run `debspawn.py` directly from the Git repository, or choose to install it:
```bash
sudo python setup.py install --root=/
```

Debspawn requires at least Python 3.9 on the host system, and Python 3.5 in the container. We try to keep the dependency footprint of this tool as small as possible, so it is not planned to raise that requirement or add any more dependencies anytime soon (especially not for the minimum Python version used in a container).

### On superuser permission

If `sudo` is available on the system, `debspawn` will automatically request root permission when it needs it; there is no need to run it as root explicitly. If it can not obtain privileges, `debspawn` will exit with an appropriate error message.

### Creating a new image

You can easily create images for any suite that has a script in `debootstrap`. For Debian Unstable, for example:
```bash
$ debspawn create sid
```
This will create a Debian Sid (unstable) image for the current system architecture.

To create an image for testing Ubuntu builds:
```bash
$ debspawn create --arch=i386 cosmic
```
This creates an `i386` image for Ubuntu 18.10. If you want to use a different mirror than the default, pass it with the `--mirror` option.

### Refreshing an image

Just run `debspawn update` and give the details of the base image that should be updated:
```bash
$ debspawn update sid
$ debspawn update --arch=i386 cosmic
```
This will update the base image contents and perform other maintenance actions.

### Building a package

You can build a package from its source directory, or just by passing a plain `.dsc` file to `debspawn`. If the result should be automatically signed, the `--sign` flag needs to be passed too:
```bash
$ cd ~/packages/hello
$ debspawn build sid --sign

$ debspawn build --arch=i386 cosmic ./hello_2.10-1.dsc
```

Build results are by default returned in `/var/lib/debspawn/results/`.

If you need to inject other local packages as build dependencies, place `deb` files in `/var/lib/debspawn/injected-pkgs` (or another location set in the config file).
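As a concrete (hypothetical) sketch of that workflow, assuming the default directory locations from above and a made-up `libfoo-dev` package that the source package `bar` build-depends on:

```bash
# Hypothetical example: libfoo-dev is a locally built package that the
# source package "bar" build-depends on. The target directory is debspawn's
# default InjectedPkgsDir; adjust it if your configuration overrides it.
sudo cp ./libfoo-dev_1.0-1_amd64.deb /var/lib/debspawn/injected-pkgs/
debspawn build sid ./bar_0.1-1.dsc

# The finished packages end up in the default results directory:
ls /var/lib/debspawn/results/
```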
### Building a package - with git-buildpackage

You can use a command like this to build your project with gbp and Debspawn:
```bash
$ gbp buildpackage --git-builder='debspawn build sid --sign'
```

You might also want to add `--results-dir=..` to the debspawn arguments to get the resulting artifacts in the directory to which the package repository was originally exported.

### Manual interactive-shell action

If you want to, you can log into the container environment and either play around in ephemeral mode with no persistent changes, or pass `--persistent` to `debspawn` so all changes are permanently saved:
```bash
$ debspawn login sid

# Attention! This may alter the build environment!
$ debspawn login --persistent sid
```

### Deleting a container image

At some point, you may want to permanently remove a container image again, for example because the release it was built for went end of life. This is easily done as well:
```bash
$ debspawn delete sid
$ debspawn delete --arch=i386 cosmic
```

### Running arbitrary commands

This is achieved with the `debspawn run` command and is a bit more involved. Refer to its manual page and help output for more information on how to use it: `man debspawn run`.

### Global configuration

Debspawn will read a global configuration file from `/etc/debspawn/global.toml`, or a configuration file in a location specified by the `--config` flag. If a configuration file is specified on the command line, the global file is ignored completely rather than merged.

The config is a TOML file containing any of the following (all optional) keys:

* `OSRootsDir`: directory for os images (`/var/lib/debspawn/images/`)
* `ResultsDir`: directory for build artifacts (`/var/lib/debspawn/results/`)
* `APTCacheDir`: directory for debspawn's own package cache (`/var/lib/debspawn/aptcache/`)
* `InjectedPkgsDir`: packages placed in this directory will be available as dependencies for builds (`/var/lib/debspawn/injected-pkgs/`)
* `TempDir`: temporary directory used for running containers (`/var/tmp/debspawn/`)
* `AllowUnsafePermissions`: allow usage of riskier container permissions, such as binding the host `/dev` and `/proc` into the container (`false`)
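For illustration, here is a minimal sketch of such a file. It only uses the keys documented above; the directory values are arbitrary examples, not recommendations:

```toml
# /etc/debspawn/global.toml - minimal illustrative sketch.
# Every key is optional; anything left out falls back to the defaults listed above.
OSRootsDir = "/srv/debspawn/images"
ResultsDir = "/srv/debspawn/results"
APTCacheDir = "/var/lib/debspawn/aptcache"
InjectedPkgsDir = "/var/lib/debspawn/injected-pkgs"
TempDir = "/var/tmp/debspawn"
AllowUnsafePermissions = false
```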
## FAQ

#### Why use systemd-nspawn? Why not $other_container_manager?

Systemd-nspawn is a very lightweight container solution readily available without much (or any) setup on all Linux systems that are using systemd. It does not need any background daemon and, while it does not provide a lot of features, it fits the relatively simple use case of building in an isolated environment perfectly.

#### Do I need to set up apt-cacher-ng to use this efficiently?

No - while `apt-cacher-ng` is generally a useful tool, it is not required for efficient use of `debspawn`. `debspawn` will cache downloaded packages between runs fully automatically, so packages only get downloaded when they have not been retrieved before.

#### Is the build environment the same as sbuild?

No, unfortunately. Due to the different technology used, there are subtle differences between sbuild chroots and `debspawn` containers. The differences should - for the most part - not have any impact on package builds, and any such occurrence is very likely a bug in the package's build process. If you think it is not, please file a bug against Debspawn. We try to be as close to sbuild's default environment as possible.

One way the build environment intentionally differs from Debian's default sbuild setup is in its consistent use of unicode. By default, `debspawn` will ensure that unicode is always available and used. If you do not want this behavior, you can pass the `--no-unicode` flag to `debspawn` to disable unicode in the tool itself and in the build environment.

#### Will this replace sbuild?

Not in the foreseeable future on Debian itself. Sbuild is a proven tool that works well for Debian and supports OSes other than Linux, while `debspawn` is Linux-only, a thing that will not change due to its use of systemd. However, Laniakea-using derivatives such as PureOS use the tool for building all packages and for constructing other build environments, e.g. to build disk images.

#### What is the relation of this project with Laniakea?

The Laniakea job runner uses `debspawn` for a bunch of tasks, and the integration with the Laniakea system is generally quite tight. Of course you can use `debspawn` without Laniakea and integrate it with any tool you want. Debspawn will always be usable without Laniakea automation.

#### This tool is really fast! What is the secret?

Surprisingly, building packages with `debspawn` is often a bit faster than using `pbuilder` and `sbuild` with their default settings. The speed gain comes in large part from the internal use of the Zstandard compression algorithm for container base images. Zstd allows for fast decompression of the tarballs, which is exactly why it was chosen (LZ4 would be even faster, but Zstd actually is a good compromise between compression ratio and speed). This shaves off the few seconds per build that would otherwise be spent on base image decompression.

Additionally, Debspawn uses `eatmydata` to disable fsync & co. by default in a few places, improving the time it takes to set up the build environment by quite a bit as well. If you want, you can configure other tools to make use of the same methods and see if they run faster. There's nothing new or unusually clever here at all!
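For the curious, here is a rough, generic illustration of those two techniques. These are not debspawn commands, and the image path and package set are made up; the sketch only shows why zstd-compressed base tarballs and `eatmydata` save time:

```bash
# Not debspawn commands: a generic sketch of the two tricks described above.
# Unpacking a zstd-compressed base image tarball is quick and cheap on CPU:
zstd -dc /var/lib/debspawn/images/example-base.tar.zst | tar -x -C /tmp/build-root

# eatmydata turns fsync()/fdatasync() into no-ops, which speeds up installing
# build dependencies into a throw-away environment considerably:
eatmydata apt-get install --no-install-recommends -y build-essential devscripts
```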
debspawn-0.6.4/RELEASE

Debspawn Release Notes

1. Write NEWS entries for Debspawn in the same format as usual.

   git shortlog v0.6.3.. | grep -i -v trivial | grep -v Merge > NEWS.new

--------------------------------------------------------------------------------
Version 0.6.4
~~~~~~~~~~~~~
Released: 2024-xx-xx

Notes:

Features:

Bugfixes:
--------------------------------------------------------------------------------

2. Commit changes in Git:

   git commit -a -m "Release version 0.6.4"
   git tag -s -f -m "Release 0.6.4" v0.6.4
   git push --tags
   git push

3. Upload to PyPI:

   python setup.py sdist
   twine upload dist/*

4. Do post release version bump in `RELEASE` and `debspawn/__init__.py`

5. Commit trivial changes:

   git commit -a -m "trivial: post release version bump"
   git push

debspawn-0.6.4/autoformat.sh

#!/usr/bin/env bash
set -e
BASEDIR=$(dirname "$0")
cd $BASEDIR

echo "=== ISort ==="
python -m isort .
python -m isort ./debspawn/dsrun

echo "=== Black ==="
python -m black .

debspawn-0.6.4/data/services/debspawn-clear-caches.service

# This file is part of debspawn.
#
# Debspawn is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.

[Unit]
Description=Clear debspawn caches
Wants=debspawn-clear-caches.timer

[Service]
Type=oneshot
ExecStart=@PREFIX@/bin/debspawn maintain --clear-caches
PrivateTmp=true
PrivateDevices=true
PrivateNetwork=true

debspawn-0.6.4/data/services/debspawn-clear-caches.timer

# This file is part of debspawn.
#
# Debspawn is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.

[Unit]
Description=Clear all debspawn caches regularly

[Timer]
OnCalendar=monthly
RandomizedDelaySec=12h
AccuracySec=20min
Persistent=true

[Install]
WantedBy=timers.target

debspawn-0.6.4/data/tmpfiles.d/debspawn.conf

# This file is part of debspawn.
#
# Debspawn is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.

# Ensure debspawn temporary directory is only owned by root and cleaned regularly
D /var/tmp/debspawn 0755 root root 2d

debspawn-0.6.4/debspawn.py

#!/usr/bin/env python3

import os
import sys

from debspawn import cli

thisfile = __file__
if not os.path.isabs(thisfile):
    thisfile = os.path.normpath(os.path.join(os.getcwd(), thisfile))

sys.exit(cli.run(thisfile, sys.argv[1:]))

debspawn-0.6.4/debspawn/__init__.py

# -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2022 Matthias Klumpp
#
# Licensed under the GNU Lesser General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see .
__appname__ = 'debspawn' __version__ = '0.6.4' debspawn-0.6.4/debspawn/aptcache.py000066400000000000000000000064051456520253100172610ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2018-2022 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . import os import shutil from glob import glob from pathlib import Path from .utils.misc import hardlink_or_copy class APTCache: ''' Manage cache of APT packages ''' def __init__(self, osbase): self._cache_dir = os.path.join(osbase.global_config.aptcache_dir, osbase.name) def merge_from_dir(self, tmp_cache_dir): ''' Merge in packages from a temporary cache ''' from random import choice from string import digits, ascii_lowercase Path(self._cache_dir).mkdir(parents=True, exist_ok=True) for pkg_fname in glob(os.path.join(tmp_cache_dir, '*.deb')): pkg_basename = os.path.basename(pkg_fname) pkg_cachepath = os.path.join(self._cache_dir, pkg_basename) if not os.path.isfile(pkg_cachepath): pkg_tmp_name = ( pkg_cachepath + '.tmp-' + ''.join(choice(ascii_lowercase + digits) for _ in range(8)) ) shutil.copy2(pkg_fname, pkg_tmp_name) try: os.rename(pkg_tmp_name, pkg_cachepath) except OSError: # maybe some other debspawn instance tried to add the package just now, # in that case we give up os.remove(pkg_tmp_name) def create_instance_cache(self, tmp_cache_dir): ''' Copy the cache to a temporary location for use in a new container instance. ''' Path(self._cache_dir).mkdir(parents=True, exist_ok=True) Path(tmp_cache_dir).mkdir(parents=True, exist_ok=True) for pkg_fname in glob(os.path.join(self._cache_dir, '*.deb')): pkg_cachepath = os.path.join(tmp_cache_dir, os.path.basename(pkg_fname)) if not os.path.isfile(pkg_cachepath): hardlink_or_copy(pkg_fname, pkg_cachepath) def clear(self): ''' Remove all cache contents. ''' Path(self._cache_dir).mkdir(parents=True, exist_ok=True) cache_size = len(glob(os.path.join(self._cache_dir, '*.deb'))) old_cache_dir = self._cache_dir.rstrip(os.sep) + '.old' os.rename(self._cache_dir, old_cache_dir) Path(self._cache_dir).mkdir(parents=True, exist_ok=True) shutil.rmtree(old_cache_dir) return cache_size def delete(self): ''' Remove cache completely - only useful when removing a base image completely. ''' shutil.rmtree(self._cache_dir) debspawn-0.6.4/debspawn/build.py000066400000000000000000000514111456520253100166050ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2018-2022 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. 
# # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . import os import platform import subprocess from glob import glob from collections.abc import Iterable from .nspawn import nspawn_run_persist, nspawn_run_helper_persist from .injectpkg import PackageInjector from .utils.env import ( ensure_root, get_tree_size, get_free_space, get_owner_uid_gid, switch_unprivileged, get_random_free_uid_gid, ) from .utils.log import ( input_bool, print_info, print_warn, print_error, print_bullet, print_header, print_section, capture_console_output, save_captured_console_output, ) from .utils.misc import ( cd, listify, temp_dir, safe_copy, format_filesize, version_noepoch, ) from .utils.command import safe_run class BuildError(Exception): """Package build failed with a generic error.""" def interact_with_build_environment( osbase, instance_dir, machine_name, *, pkg_dir_root, source_pkg_dir, aptcache_tmp, pkginjector, prev_exitcode, ) -> bool: '''Launch an interactive shell in the build environment''' # find the right directory to switch to pkg_dir = pkg_dir_root for f in glob(os.path.join(pkg_dir, '*')): if os.path.isdir(f): pkg_dir = f break print() print_info('Launching interactive shell in build environment.') if prev_exitcode != 0: print_info('The previous build step failed with exit code {}'.format(prev_exitcode)) else: print_info('The previous build step was successful.') print_info('Temporary location of package files on the host:\n => file://{}'.format(pkg_dir)) print_info('Press CTL+D to exit the interactive shell.') print() nspawn_flags = ['--bind={}:/srv/build/'.format(pkg_dir_root)] nspawn_run_persist( osbase, instance_dir, machine_name, chdir=os.path.join('/srv/build', os.path.basename(pkg_dir)), flags=nspawn_flags, tmp_apt_cache_dir=aptcache_tmp, pkginjector=pkginjector, syscall_filter=osbase.global_config.syscall_filter, verbose=True, ) print() copy_artifacts = input_bool( 'Should any generated build artifacts (binary/source packages, etc.) be saved?', default=False ) if copy_artifacts: print_bullet('Artifacts will be copied to the results directory.') else: print_bullet('Artifacts will not be kept.') if source_pkg_dir: copy_changes = input_bool( ( 'Should changes to the debian/ directory be copied back to the host?\n' 'This will OVERRIDE all changes made on files on the host.' ), default=False, ) if copy_changes: print_info('Cleaning up...') # clean the source tree. we intentionally ignore errors here. 
nspawn_run_persist( osbase, instance_dir, machine_name, chdir=os.path.join('/srv/build', os.path.basename(pkg_dir)), flags=nspawn_flags, command=['dpkg-buildpackage', '-T', 'clean'], tmp_apt_cache_dir=aptcache_tmp, pkginjector=pkginjector, ) print() print_info('Copying back changes...') known_files = {} dest_debian_dir = os.path.join(source_pkg_dir, 'debian') src_debian_dir = os.path.join(pkg_dir, 'debian') # get uid/gid of the user who invoked us o_uid, o_gid = get_owner_uid_gid() # collect list of existing packages for sdir, _, files in os.walk(dest_debian_dir): for f in files: fname = os.path.join(sdir, f) known_files[os.path.relpath(fname, dest_debian_dir)] = fname # walk through the source files, copying everything to the destination for sdir, _, files in os.walk(src_debian_dir): for f in files: fname = os.path.join(sdir, f) rel_fname = os.path.relpath(fname, src_debian_dir) dest_fname = os.path.normpath(os.path.join(dest_debian_dir, rel_fname)) dest_dir = os.path.dirname(dest_fname) if rel_fname in known_files: del known_files[rel_fname] if os.path.isdir(fname): print('New dir: {}'.format(rel_fname)) with switch_unprivileged(): os.makedirs(dest_fname, exist_ok=True) continue if not os.path.isdir(dest_dir): print('New dir: {}'.format(os.path.relpath(dest_dir, dest_debian_dir))) with switch_unprivileged(): os.makedirs(dest_dir, exist_ok=True) print('Copy: {}'.format(rel_fname)) safe_copy(fname, dest_fname) os.chown(dest_fname, o_uid, o_gid, follow_symlinks=False) for rel_fname, fname in known_files.items(): print('Delete: {}'.format(rel_fname)) os.remove(fname) print() else: print_bullet('Discarding build environment.') else: print_info('Can not copy back changes as original package directory is unknown.') return copy_artifacts def internal_execute_build( osbase, pkg_dir, build_only=None, *, qa_lintian=False, interact=False, source_pkg_dir=None, buildflags: list[str] = None, build_env: dict[str, str] = None, ): '''Perform the actual build on an extracted package directory''' assert not build_only or isinstance(build_only, str) if not pkg_dir: raise ValueError('Package directory is missing!') pkg_dir = os.path.normpath(pkg_dir) if not build_env: build_env = {} # get a fresh UID to give to our build user within the container builder_uid = get_random_free_uid_gid()[0] with osbase.new_instance() as (instance_dir, machine_name): # first, check basic requirements # instance dir and pkg dir are both temporary directories, so they # will be on the same filesystem configured as workspace for debspawn. # therefore we only check on directory. free_space = get_free_space(instance_dir) print_info('Free space in workspace: {}'.format(format_filesize(free_space))) # check for at least 512MiB - this is a ridiculously small amount, so the build will likely fail. # but with even less, even attempting a build is pointless. if (free_space / 2048) < 512: print_error('Not enough free space available in workspace.') return 8 # prepare the build. 
At this point, we only run trusted code and the container # has network access with temp_dir('pkgsync-' + machine_name) as pkgsync_tmp: # create temporary locations set up and APT cache sharing and package injection aptcache_tmp = os.path.join(pkgsync_tmp, 'aptcache') pkginjector = PackageInjector(osbase) if pkginjector.has_injectables(): pkginjector.create_instance_repo(os.path.join(pkgsync_tmp, 'pkginject')) # set up the build environment nspawn_flags = ['--bind={}:/srv/build/'.format(pkg_dir)] prep_flags = ['--build-prepare'] # if we force a suite and have injected packages, the injected packages # will never be picked up. if not pkginjector.has_injectables(): prep_flags.extend(['--suite', osbase.suite]) if build_only == 'arch': prep_flags.append('--arch-only') r = nspawn_run_helper_persist( osbase, instance_dir, machine_name, prep_flags, '/srv', build_uid=builder_uid, nspawn_flags=nspawn_flags, tmp_apt_cache_dir=aptcache_tmp, pkginjector=pkginjector, ) if r != 0: print_error('Build environment setup failed.') return False # run the actual build. At this point, code is less trusted, and we disable network access. nspawn_flags = ['--bind={}:/srv/build/'.format(pkg_dir), '-u', 'builder', '--private-network'] helper_flags = ['--build-run'] helper_flags.extend(['--suite', osbase.suite]) if buildflags: helper_flags.append('--buildflags={}'.format(';'.join(buildflags))) r = nspawn_run_helper_persist( osbase, instance_dir, machine_name, helper_flags, '/srv', build_uid=builder_uid, nspawn_flags=nspawn_flags, tmp_apt_cache_dir=aptcache_tmp, pkginjector=pkginjector, env_vars=build_env, syscall_filter=osbase.global_config.syscall_filter, ) # exit, unless we are in interactive mode if r != 0 and not interact: return False if qa_lintian and r == 0: # running Lintian was requested, so do so. # we use Lintian from the container, so we validate with the validator from # the OS the package was actually built against nspawn_flags = ['--bind={}:/srv/build/'.format(pkg_dir)] r = nspawn_run_helper_persist( osbase, instance_dir, machine_name, ['--run-qa', '--lintian'], '/srv', build_uid=builder_uid, nspawn_flags=nspawn_flags, tmp_apt_cache_dir=aptcache_tmp, pkginjector=pkginjector, ) if r != 0: print_error('QA failed.') return False print() # extra blank line after Lintian output if interact: ri = interact_with_build_environment( osbase, instance_dir, machine_name, pkg_dir_root=pkg_dir, source_pkg_dir=source_pkg_dir, aptcache_tmp=aptcache_tmp, pkginjector=pkginjector, prev_exitcode=r, ) # if we exit with a non-True result, we stop here and don't proceed # with the next steps that save artifacts. if not ri: return False build_dir_size = get_tree_size(pkg_dir) print_info( 'This build required {} of dedicated disk space.'.format(format_filesize(build_dir_size)) ) return True def print_build_detail(osbase, pkgname, version): print_info('Package: {}'.format(pkgname)) print_info('Version: {}'.format(version)) print_info('Distribution: {}'.format(osbase.suite)) print_info('Architecture: {}'.format(osbase.arch)) print_info() def _read_source_package_details(): out, err, ret = safe_run(['dpkg-parsechangelog']) if ret != 0: raise BuildError('Running dpkg-parsechangelog failed: {}{}'.format(out, err)) pkg_sourcename = None pkg_version = None for line in out.split('\n'): if line.startswith('Source: '): pkg_sourcename = line[8:].strip() elif line.startswith('Version: '): pkg_version = line[9:].strip() if not pkg_sourcename or not pkg_version: print_error('Unable to determine source package name or source package version. 
Can not continue.') return None, None, None pkg_version_dsc = version_noepoch(pkg_version) dsc_fname = '{}_{}.dsc'.format(pkg_sourcename, pkg_version_dsc) return pkg_sourcename, pkg_version, dsc_fname def _get_build_flags(build_only=None, include_orig=False, maintainer=None, extra_flags: Iterable[str] = None): import shlex buildflags = [] extra_flags = listify(extra_flags) if build_only: if build_only == 'binary': buildflags.append('-b') elif build_only == 'arch': buildflags.append('-B') elif build_only == 'indep': buildflags.append('-A') elif build_only == 'source': buildflags.append('-S') else: print_error('Invalid build-only flag "{}". Can not continue.'.format(build_only)) return False, [] if include_orig: buildflags.append('-sa') if maintainer: buildflags.append('-m{}'.format(maintainer.replace(';', ','))) buildflags.append('-e{}'.format(maintainer.replace(';', ','))) for flag_raw in extra_flags: buildflags.extend(shlex.split(flag_raw)) return True, buildflags def _sign_result(results_dir, spkg_name, spkg_version, build_arch, build_only): print_section('Signing Package') spkg_version_noepoch = version_noepoch(spkg_version) sign_arch = 'source' if build_only == 'source' else build_arch changes_basename = '{}_{}_{}.changes'.format(spkg_name, spkg_version_noepoch, sign_arch) with switch_unprivileged(): proc = subprocess.run(['debsign', os.path.join(results_dir, changes_basename)], check=False) if proc.returncode != 0: print_error('Signing failed.') return False return True def _print_system_info(): from . import __version__ from .utils.misc import current_time_string print_info( 'debspawn {version} on {host} at {time}'.format( version=__version__, host=platform.node(), time=current_time_string() ) ) def build_from_directory( osbase, pkg_dir, *, sign=False, build_only=None, include_orig=False, maintainer=None, clean_source=False, qa_lintian=False, interact=False, log_build=True, extra_dpkg_flags: list[str] = None, build_env: dict[str, str] = None, ): ensure_root() osbase.ensure_exists() extra_dpkg_flags = listify(extra_dpkg_flags) if interact and log_build: print_warn('Build log and interactive mode can not be enabled at the same time. Disabling build log.') print() log_build = False # capture console output if we should log the build if log_build: capture_console_output() if not pkg_dir: pkg_dir = os.getcwd() pkg_dir = os.path.abspath(pkg_dir) r, buildflags = _get_build_flags(build_only, include_orig, maintainer, extra_dpkg_flags) if not r: return False _print_system_info() print_header('Package build (from directory)') print_section('Creating source package') with cd(pkg_dir): with switch_unprivileged(): deb_files_fname = os.path.join(pkg_dir, 'debian', 'files') if os.path.isfile(deb_files_fname): deb_files_fname = None # the file already existed, we don't need to clean it up later pkg_sourcename, pkg_version, dsc_fname = _read_source_package_details() if not pkg_sourcename: return False cmd = ['dpkg-buildpackage', '-S', '--no-sign'] # d/rules clean requires build dependencies installed if run on the host # we avoid that by default, unless explicitly requested if not clean_source: cmd.append('-nc') proc = subprocess.run(cmd, check=False) if proc.returncode != 0: return False # remove d/files file that was created when generating the source package. # we only clean up the file if it didn't exist prior to us running the command. 
if deb_files_fname: try: os.remove(deb_files_fname) except OSError: pass print_header('Package build') print_build_detail(osbase, pkg_sourcename, pkg_version) success = False with temp_dir(pkg_sourcename) as pkg_tmp_dir: with cd(pkg_tmp_dir): cmd = ['dpkg-source', '-x', os.path.join(pkg_dir, '..', dsc_fname)] proc = subprocess.run(cmd, check=False) if proc.returncode != 0: return False success = internal_execute_build( osbase, pkg_tmp_dir, build_only, qa_lintian=qa_lintian, interact=interact, source_pkg_dir=pkg_dir, buildflags=buildflags, build_env=build_env, ) # copy build results if success: osbase.retrieve_artifacts(pkg_tmp_dir) # save buildlog, if we generated one log_fname = os.path.join( osbase.results_dir, '{}_{}_{}.buildlog'.format(pkg_sourcename, version_noepoch(pkg_version), osbase.arch), ) save_captured_console_output(log_fname) # exit, there is nothing more to do if no package was built if not success: return False # sign the resulting package if sign: r = _sign_result(osbase.results_dir, pkg_sourcename, pkg_version, osbase.arch, build_only) if not r: return False print_info('Done.') return True def build_from_dsc( osbase, dsc_fname, *, sign=False, build_only=None, include_orig=False, maintainer=None, qa_lintian: bool = False, interact: bool = False, log_build: bool = True, extra_dpkg_flags: Iterable[str] = None, build_env: dict[str, str] = None, ): ensure_root() osbase.ensure_exists() extra_dpkg_flags = listify(extra_dpkg_flags) if interact and log_build: print_warn('Build log and interactive mode can not be enabled at the same time. Disabling build log.') print() log_build = False # capture console output if we should log the build if log_build: capture_console_output() r, buildflags = _get_build_flags(build_only, include_orig, maintainer, extra_dpkg_flags) if not r: return False _print_system_info() success = False dsc_fname = os.path.abspath(os.path.normpath(dsc_fname)) tmp_prefix = os.path.basename(dsc_fname).replace('.dsc', '').replace(' ', '-') with temp_dir(tmp_prefix) as pkg_tmp_dir: with cd(pkg_tmp_dir): cmd = ['dpkg-source', '-x', dsc_fname] proc = subprocess.run(cmd, check=False) if proc.returncode != 0: return False pkg_srcdir = None for f in glob('./*'): if os.path.isdir(f): pkg_srcdir = f break if not pkg_srcdir: print_error('Unable to find source directory of extracted package.') return False with cd(pkg_srcdir): pkg_sourcename, pkg_version, dsc_fname = _read_source_package_details() if not pkg_sourcename: return False print_header('Package build') print_build_detail(osbase, pkg_sourcename, pkg_version) success = internal_execute_build( osbase, pkg_tmp_dir, build_only, qa_lintian=qa_lintian, interact=interact, buildflags=buildflags, build_env=build_env, ) # copy build results if success: osbase.retrieve_artifacts(pkg_tmp_dir) # save buildlog, if we generated one log_fname = os.path.join( osbase.results_dir, '{}_{}_{}.buildlog'.format(pkg_sourcename, version_noepoch(pkg_version), osbase.arch), ) save_captured_console_output(log_fname) # build log is saved, but no artifacts are available, so there's nothing more to do if not success: return False # sign the resulting package if sign: r = _sign_result(osbase.results_dir, pkg_sourcename, pkg_version, osbase.arch, build_only) if not r: return False print_info('Done.') return True debspawn-0.6.4/debspawn/cli.py000066400000000000000000000554101456520253100162600ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2018-2022 Matthias Klumpp # # Licensed under the GNU Lesser General Public License 
Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . import os import sys import logging as log from argparse import HelpFormatter, ArgumentParser from .utils import print_error from .config import GlobalConfig from .osbase import OSBase from .utils.env import set_owning_user, set_unicode_allowed def init_config(options): ''' Create a new :GlobalConfig from command-line options. ''' gconf = GlobalConfig() gconf.load(options.config) # check if we are forbidden from using unicode - otherwise we build # with unicode enabled by default if options.no_unicode: set_unicode_allowed(False) else: import locale current_encoding = locale.getpreferredencoding() if current_encoding.lower() != 'utf-8': log.warning( ( 'Building with unicode support, but your environment does not seem to support unicode. ' '(Encoding is {})' ).format(current_encoding) ) set_unicode_allowed(True) if options.owner: info = options.owner.split(':') if len(info) > 2: print_error('You can only use one colon to split user:group when using the --owner flag.') sys.exit(1) if len(info) == 2: user = info[0] group = info[1] else: user = info[0] group = None set_owning_user(user, group) return gconf def check_print_version(options): if options.show_version: from . 
import __version__ print(__version__) sys.exit(0) def command_create(options): '''Create new container image''' check_print_version(options) if not options.name: print_error('Need at least a container name (suite name) to bootstrap!') sys.exit(1) gconf = init_config(options) components = None if options.components: components = options.components.split(',') extra_suites = [] if options.extra_suites: extra_suites = options.extra_suites.strip().split(' ') osbase = OSBase( gconf, options.suite, options.arch, variant=options.variant, base_suite=options.base_suite, custom_name=options.name, ) r = osbase.create( options.mirror, components, extra_suites=extra_suites, extra_source_lines=options.extra_source_lines, allow_recommends=options.allow_recommends, with_init=options.with_init, ) if not r: sys.exit(2) def command_delete(options): '''Delete container image''' check_print_version(options) if not options.name: print_error('No container image name was specified!') sys.exit(1) gconf = init_config(options) osbase = OSBase(gconf, options.suite, options.arch, options.variant, custom_name=options.name) r = osbase.delete() if not r: sys.exit(2) def command_update(options): '''Update container image''' check_print_version(options) if not options.name: print_error('Need at least a container image name for update!') sys.exit(1) gconf = init_config(options) osbase = OSBase(gconf, options.suite, options.arch, options.variant, custom_name=options.name) if options.recreate: r = osbase.recreate() else: r = osbase.update() if not r: sys.exit(2) def command_list(options): '''List container images''' from .osbase import print_container_base_image_info check_print_version(options) gconf = init_config(options) print_container_base_image_info(gconf) def command_build(options): '''Build a package in a new volatile container''' from .build import build_from_dsc, build_from_directory check_print_version(options) if not options.name: print_error('Need at least a container image or suite name for building!') sys.exit(1) gconf = init_config(options) osbase = OSBase(gconf, options.suite, options.arch, options.variant, custom_name=options.name) # prepare user-defined environment variables env_vars = {} if options.env_vars: for kv in options.env_vars: p = kv.split('=', 1) if len(p) != 2: print('Environment variable definition `{}` is invalid!'.format(kv)) print('Can not continue.') sys.exit(1) env_vars[p[0]] = p[1] # prepare user-defined buildflags buildflags = [] if options.buildflags: buildflags = options.buildflags.split(';') if not options.target and os.path.isdir(options.name): print( 'A directory is given as parameter, but you are missing a container-name parameter to build for.' 
) print('Can not continue.') sys.exit(1) # override globally configured output directory with # a custom one defined on the CLI if options.results_dir: osbase.results_dir = options.results_dir if not options.target or os.path.isdir(options.target): r = build_from_directory( osbase, options.target, sign=options.sign, build_only=options.build_only, include_orig=options.include_orig, maintainer=options.maintainer, clean_source=options.clean_source, qa_lintian=options.lintian, interact=options.interact, log_build=not options.no_buildlog, extra_dpkg_flags=buildflags, build_env=env_vars, ) else: r = build_from_dsc( osbase, options.target, sign=options.sign, build_only=options.build_only, include_orig=options.include_orig, maintainer=options.maintainer, qa_lintian=options.lintian, interact=options.interact, log_build=not options.no_buildlog, extra_dpkg_flags=buildflags, build_env=env_vars, ) if not r: sys.exit(2) def command_login(options): '''Open interactive session in a container''' check_print_version(options) if not options.name: print_error('Need at least a container image name!') sys.exit(1) gconf = init_config(options) osbase = OSBase(gconf, options.suite, options.arch, options.variant, custom_name=options.name) allowed = [] if options.allow: allowed = [s.strip() for s in options.allow.split(',')] r = osbase.login(options.persistent, allowed=allowed, boot=options.boot) if not r: sys.exit(2) def command_run(options, custom_command): '''Run arbitrary command in container session''' check_print_version(options) if not options.name: print_error('Need at least a container image name!') sys.exit(1) gconf = init_config(options) osbase = OSBase( gconf, options.suite, options.arch, options.variant, custom_name=options.name, cachekey=options.cachekey, ) allowed = [] if options.allow: allowed = [s.strip() for s in options.allow.split(',')] bind_build_dir = options.bind_build_dir if bind_build_dir == 'y': bind_build_dir = 'ro' elif bind_build_dir == 'rw' or bind_build_dir == 'ro': pass else: bind_build_dir = 'n' r = osbase.run( custom_command, options.build_dir, options.artifacts_dir, boot=options.boot, init_command=options.init_command, copy_command=options.external_commad, header_msg=options.header, bind_build_dir=bind_build_dir, allowed=allowed, ) if not r: sys.exit(2) def command_maintain(options): '''Execute global maintenance actions''' check_print_version(options) gconf = init_config(options) if options.migrate: from .maintain import maintain_migrate maintain_migrate(gconf) return if options.clear_caches: from .maintain import maintain_clear_caches maintain_clear_caches(gconf) return if options.update_all: from .maintain import maintain_update_all maintain_update_all(gconf) return if options.purge: from .maintain import maintain_purge maintain_purge(gconf, options.yes) return if options.status: from .maintain import maintain_print_status maintain_print_status(gconf) return print_error('No maintenance action selected!') sys.exit(1) class CustomArgparseFormatter(HelpFormatter): def _split_lines(self, text, width): if text.startswith('CF|'): return text[3:].splitlines() return HelpFormatter._split_lines(self, text, width) def add_container_select_arguments(parser): parser.add_argument( '--variant', action='store', dest='variant', default=None, help='Set the bootstrap script variant (use `none` to select no variant).', ) parser.add_argument( '-a', '--arch', action='store', dest='arch', default=None, help='The architecture of the container.' 
) parser.add_argument( '--suite', action='store', dest='suite', default=None, help='Explicitly set a suite name (instead of having it derived from the container name).', ) parser.add_argument( 'name', action='store', nargs='?', default=None, help='The name of the container image (usually a distribution suite name).', ) def create_parser(formatter_class=None): '''Create debspawn CLI argument parser''' if not formatter_class: formatter_class = CustomArgparseFormatter parser = ArgumentParser(description='Build in nspawn containers', formatter_class=formatter_class) subparsers = parser.add_subparsers(dest='sp_name', title='subcommands') # generic arguments parser.add_argument( '-c', '--config', action='store', dest='config', default=None, help='Path to the global config file.' ) parser.add_argument('--verbose', action='store_true', dest='verbose', help='Enable debug messages.') parser.add_argument( '--no-unicode', action='store_true', dest='no_unicode', help='Disable unicode support.' ) parser.add_argument( '--version', action='store_true', dest='show_version', help='Display the version of debspawn itself.' ) parser.add_argument( '--owner', action='store', dest='owner', default=None, help=('Set the user name/uid and group/gid separated by a colon ' 'whose behalf we are acting.'), ) # 'create' command sp = subparsers.add_parser('create', help='Create new container image') add_container_select_arguments(sp) sp.add_argument( '--mirror', action='store', dest='mirror', default=None, help='Set a specific mirror to bootstrap from.', ) sp.add_argument( '--components', action='store', dest='components', default=None, help='A comma-separated list of archive components to enable in the newly created image.', ) sp.add_argument( '--base-suite', action='store', dest='base_suite', default=None, help=( 'A full suite that forms the base of the selected partial suite ' '(e.g. for -updates and -backports).' ), ) sp.add_argument( '--extra-suites', action='store', dest='extra_suites', default=None, help=( 'Space-separated list of additional suites that should also be added to the ' 'sources.list file.' ), ) sp.add_argument( '--extra-sourceslist-lines', action='store', dest='extra_source_lines', default=None, help=( 'Lines that should be added to the build environments source.list verbatim. ' 'Separate lines by linebreaks.' ), ) sp.add_argument( '--allow-recommends', action='store', dest='allow_recommends', default=None, help=( 'Do not disable APT installing "recommends"-type dependencies by default, and instead use ' 'the default behavior for full, normal system installations with "recommends" enabled.' ), ) sp.add_argument( '--with-init', action='store_true', dest='with_init', help='Include an init system in this image, so it is bootable.', ) sp.set_defaults(func=command_create) # 'delete' command sp = subparsers.add_parser('delete', help='Remove a container image') add_container_select_arguments(sp) sp.set_defaults(func=command_delete) # 'update' command sp = subparsers.add_parser('update', help='Update a container image') add_container_select_arguments(sp) sp.add_argument( '--recreate', action='store_true', dest='recreate', help=( 'Re-create the container image from scratch using the settings used to create it previously, ' 'instead of just updating it.' 
), ) sp.set_defaults(func=command_update) # 'list' command sp = subparsers.add_parser('list', help='List available container images', aliases=['ls']) sp.set_defaults(func=command_list) # 'build' command sp = subparsers.add_parser( 'build', help='Build a package in an isolated environment', formatter_class=formatter_class, aliases=['b'], ) add_container_select_arguments(sp) sp.add_argument('--sign', action='store_true', dest='sign', help='Sign the resulting package.') sp.add_argument( '--only', choices=['binary', 'arch', 'indep', 'source'], dest='build_only', help=( 'CF|Select only a specific set of packages to be built. Choices are:\n' 'binary: Build only binary packages, no source files are to be built and/or distributed.\n' 'arch: Build only architecture-specific binary packages.\n' 'indep: Build only architecture-independent (arch:all) binary packages.\n' 'source: Do a source-only build, no binary packages are made.' ), ) sp.add_argument( '--include-orig', action='store_true', dest='include_orig', help='Forces the inclusion of the original source.', ) sp.add_argument( '--buildflags', action='store', dest='buildflags', help='Set flags passed through to dpkg-buildpackage as semicolon-separated list.', ) sp.add_argument( '--results-dir', action='store', dest='results_dir', help='Override the configured results directory and return artifacts at a custom location.', ) sp.add_argument( '--maintainer', action='store', dest='maintainer', help=( 'Set the name and email address of the maintainer for this package and upload, rather than ' 'using the information from the source tree\'s control file or changelog.' ), ) sp.add_argument( '--clean-source', action='store_true', dest='clean_source', help=( 'Run the d/rules clean target outside of the container. This means the package build ' 'dependencies need to be installed on the host system when building from a ' 'source directory.' ), ) sp.add_argument( '--lintian', action='store_true', dest='lintian', help='Run the Lintian static analysis tool for Debian packages after the package is built.', ) sp.add_argument( '--no-buildlog', action='store_true', dest='no_buildlog', help='Do not write a build log.' ) sp.add_argument( '-i', '--interact', action='store_true', dest='interact', help=( 'Run an interactive shell in the build environment after build. This implies `--no-buildlog` ' 'and disables the log.' ), ) sp.add_argument( '-e', '--setenv', action='append', dest='env_vars', help=( 'Set an environment variable for the build process. Takes a `key=value` pair any may be ' 'defined multiple times to set different environment variables.' ), ) sp.add_argument( 'target', action='store', nargs='?', default=None, help='The source package file or source directory to build.', ) sp.set_defaults(func=command_build) # 'login' command sp = subparsers.add_parser('login', help='Open interactive session in a container') add_container_select_arguments(sp) sp.add_argument( '--persistent', action='store_true', dest='persistent', help='Make changes done in the session persistent.', ) sp.add_argument( '--allow', action='store', dest='allow', help=( 'List one or more additional permissions to grant the container. Takes a comma-separated ' 'list of capability names.' 
), ) sp.add_argument( '--boot', action='store_true', dest='boot', help='Boot container image (requires the image to contain an init system).', ) sp.set_defaults(func=command_login) # 'run' command sp = subparsers.add_parser('run', help='Run arbitrary command in an ephemeral container') add_container_select_arguments(sp) sp.add_argument( '--artifacts-out', action='store', dest='artifacts_dir', default=None, help='Directory on the host where artifacts can be stored. Mounted to /srv/artifacts in the guest.', ) sp.add_argument( '--build-dir', action='store', dest='build_dir', default=None, help='Select a host directory that gets copied to /srv/build.', ) sp.add_argument( '--bind-build-dir', action='store', dest='bind_build_dir', default='n', choices=['y', 'n', 'ro', 'rw'], help=( 'Bindmount build directory instead of copying it. Mounts read-only by default, but can mount ' 'as writable as well if \'rw\' is passed as value.' ), ) sp.add_argument( '--cachekey', action='store', dest='cachekey', default=None, help=( 'If set, use the specified cache-ID to store an initialized container image for faster ' 'initialization times.\n' 'This may mean that the command passed in `--init-command` is skipped if the cache ' 'already existed.' ), ) sp.add_argument( '--init-command', action='store', dest='init_command', default=None, help='The command or command script used to set up the container.', ) sp.add_argument( '-x', '--external-command', action='store_true', dest='external_commad', help=( 'If set, the command script(s) will be copied from the host to the container ' 'and then executed.' ), ) sp.add_argument( '--header', action='store', dest='header', default=None, help='Name of the task that is run, will be printed as header.', ) sp.add_argument( '--allow', action='store', dest='allow', help=( 'List one or more additional permissions to grant the container. Takes a comma-separated ' 'list of capability names.' ), ) sp.add_argument( '--boot', action='store_true', dest='boot', help='Boot container image (requires the image to contain an init system).', ) sp.add_argument('command', action='store', nargs='*', default=None, help='The command to run.') # 'maintain' command sp = subparsers.add_parser('maintain', help='Execute various maintenance actions, affecting all images') sp.add_argument( '-y', '--yes', action='store_true', dest='yes', help='Perform dangerous actions without asking twice.' 
) sp.add_argument( '--migrate', action='store_true', dest='migrate', help='Migrate any settings or configuration changes to the current version of debspawn.', ) sp.add_argument( '--update-all', action='store_true', dest='update_all', help='Update all container images that we know.', ) sp.add_argument( '--clear-caches', action='store_true', dest='clear_caches', help='Delete all cached packages for all images.', ) sp.add_argument( '--purge', action='store_true', dest='purge', help='Remove all images as well as any data associated with them.', ) sp.add_argument( '--status', action='store_true', dest='status', help='Display a status summary about this installation, highlighting potential issues.', ) sp.set_defaults(func=command_maintain) return parser def run(mainfile, args): if len(args) == 0: print_error('Need a subcommand to proceed!') sys.exit(1) parser = create_parser() # special case, so 'run' can understand which arguments are for debspawn and which are # for the command to be executed custom_command = None if args[0] == 'run': for i, arg in enumerate(args): if arg == '---': if i + 1 == len(args): print_error('No command was given after "---", can not continue.') sys.exit(1) custom_command = args[i + 1 :] args = args[:i] break args = parser.parse_args(args) check_print_version(args) if args.sp_name == 'run': if not custom_command: custom_command = args.command command_run(args, custom_command) else: if not hasattr(args, 'func'): print_error('Unknown or no subcommand was provided. Can not proceed.') sys.exit(1) args.func(args) debspawn-0.6.4/debspawn/config.py000066400000000000000000000131761456520253100167610ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2018-2022 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . import os import sys import tomlkit thisfile = __file__ if not os.path.isabs(thisfile): thisfile = os.path.normpath(os.path.join(os.getcwd(), thisfile)) __all__ = ['GlobalConfig'] class GlobalConfig: ''' Global configuration singleton affecting all of Debspawn. ''' _instance = None class __GlobalConfig: def load(self, fname=None): if not fname: fname = '/etc/debspawn/global.toml' cdata = {} if os.path.isfile(fname): with open(fname) as f: try: cdata = tomlkit.load(f) except tomlkit.exceptions.ParseError as e: print( 'Unable to parse global configuration (global.toml): {}'.format(str(e)), file=sys.stderr, ) sys.exit(8) self._dsrun_path = os.path.normpath(os.path.join(thisfile, '..', 'dsrun')) if not os.path.isfile(self._dsrun_path): print( 'Debspawn is not set up properly: Unable to find file "{}". 
Can not continue.'.format( self._dsrun_path ), file=sys.stderr, ) sys.exit(4) self._osroots_dir = cdata.get('OSImagesDir', '/var/lib/debspawn/images/') self._results_dir = cdata.get('ResultsDir', '/var/lib/debspawn/results/') self._aptcache_dir = cdata.get('APTCacheDir', '/var/lib/debspawn/aptcache/') self._injected_pkgs_dir = cdata.get('InjectedPkgsDir', '/var/lib/debspawn/injected-pkgs/') self._temp_dir = cdata.get('TempDir', '/var/tmp/debspawn/') self._default_bootstrap_variant = cdata.get('DefaultBootstrapVariant', 'buildd') self._allow_unsafe_perms = cdata.get('AllowUnsafePermissions', False) self._cache_packages = bool(cdata.get('CachePackages', True)) self._bootstrap_tool = cdata.get('BootstrapTool', 'debootstrap') self._syscall_filter = cdata.get('SyscallFilter', 'compat') if self._syscall_filter == 'compat': # permit some system calls known to be needed by packages that sbuild & Co. # build without problems. self._syscall_filter = ['@memlock', '@pkey', '@clock', '@cpu-emulation'] elif self._syscall_filter == 'nspawn-default': # make no additional changes, so nspawn's built-in defaults are used self._syscall_filter = [] else: if type(self._syscall_filter) is not list: print( ( 'Configuration error (global.toml): Entry "SyscallFilter" needs to be either a ' 'string value ("compat" or "nspawn-default"), or a list of permissible ' 'system call names as listed by the syscall-filter command of systemd-analyze(1)' ), file=sys.stderr, ) sys.exit(8) @property def dsrun_path(self) -> str: return self._dsrun_path @dsrun_path.setter def dsrun_path(self, v): self._dsrun_path = v @property def osroots_dir(self) -> str: return self._osroots_dir @property def results_dir(self) -> str: return self._results_dir @property def aptcache_dir(self) -> str: return self._aptcache_dir @property def injected_pkgs_dir(self) -> str: return self._injected_pkgs_dir @property def temp_dir(self) -> str: return self._temp_dir @property def default_bootstrap_variant(self) -> str: return self._default_bootstrap_variant @property def syscall_filter(self) -> list: """Customize which syscalls should be filtered.""" return self._syscall_filter @property def allow_unsafe_perms(self) -> bool: """Whether usage of unsafe permissions is allowed.""" return self._allow_unsafe_perms @property def cache_packages(self) -> bool: """Whether APT packages should be cached by debspawn.""" return self._cache_packages @property def bootstrap_tool(self) -> str: """The chroot bootstrap tool that we should use.""" return self._bootstrap_tool def __init__(self, fname=None): if not GlobalConfig._instance: GlobalConfig._instance = GlobalConfig.__GlobalConfig() GlobalConfig._instance.load(fname) def __getattr__(self, name): return getattr(self._instance, name) debspawn-0.6.4/debspawn/dsrun000077500000000000000000000351451456520253100162230ustar00rootroot00000000000000#!/usr/bin/python3 # -*- coding: utf-8 -*- # # Copyright (C) 2018-2022 Matthias Klumpp # # SPDX-License-Identifier: LGPL-3.0-or-later # IMPORTANT: This file is placed within a Debspawn container. # The containers only contain a minimal set of packages, and only a reduced # installation of Python is available via the python3-minimal package. # This file must be self-contained and only depend on modules available # in that Python installation. # It must also not depend on any Python 3 feature introduced after version 3.5. # See /usr/share/doc/python3.*-minimal/README.Debian for a list of permitted # modules. 
# Additionally, the CLI API of this file should remain as stable as possible, # to not introduce odd behavior if a container image wasn't updated and is used # with a newer debspawn version. import os import pwd import sys import time import subprocess from glob import glob from argparse import ArgumentParser from contextlib import contextmanager # the user performing builds in the container BUILD_USER = 'builder' # the directory where we build a package BUILD_DIR = '/srv/build' # additional packages to be used when building EXTRAPKG_DIR = '/srv/extra-packages' # directory that may or may not exist, but must never be written to INVALID_DIR = '/run/invalid' # # Globals # unicode_enabled = True color_enabled = True def run_command(cmd, env=None, *, check=True): if isinstance(cmd, str): cmd = cmd.split(' ') proc_env = env if proc_env: proc_env = os.environ.copy() proc_env.update(env) p = subprocess.run(cmd, env=proc_env, check=False) if p.returncode != 0: if check: print('Command `{}` failed.'.format(' '.join(cmd)), file=sys.stderr) sys.exit(p.returncode) else: return False return True def run_apt_command(cmd): if isinstance(cmd, str): cmd = cmd.split(' ') env = {'DEBIAN_FRONTEND': 'noninteractive'} apt_cmd = ['apt-get', '-uyq', '-o Dpkg::Options::="--force-confnew"'] apt_cmd.extend(cmd) if cmd == ['update']: # retry an apt update a few times, to protect a bit against bad # network connections or a flaky mirror / internal build queue repo for i in range(0, 3): is_last = i == 2 if run_command(apt_cmd, env, check=is_last): break print('APT update failed, retrying...') time.sleep(5) else: run_command(apt_cmd, env) def print_textbox(title, tl, hline, tr, vline, bl, br): def write_utf8(s): sys.stdout.buffer.write(s.encode('utf-8')) tlen = len(title) write_utf8('\n{}'.format(tl)) write_utf8(hline * (10 + tlen)) write_utf8('{}\n'.format(tr)) write_utf8('{} {}'.format(vline, title)) write_utf8(' ' * 8) write_utf8('{}\n'.format(vline)) write_utf8(bl) write_utf8(hline * (10 + tlen)) write_utf8('{}\n'.format(br)) sys.stdout.flush() def print_header(title): if unicode_enabled: print_textbox(title, '╔', '═', '╗', '║', '╚', '╝') else: print_textbox(title, '+', '=', '+', '|', '+', '+') def print_section(title): if unicode_enabled: print_textbox(title, '┌', '─', '┐', '│', '└', '┘') else: print_textbox(title, '+', '-', '+', '|', '+', '+') @contextmanager def eatmydata(): try: # FIXME: We just override the env vars here, appending to them would # be much cleaner.
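# Illustrative note (not part of upstream debspawn): a cleaner variant would likely extend any pre-existing values instead of replacing them, roughly like this: # prev_preload = os.environ.get('LD_PRELOAD', '') # os.environ['LD_PRELOAD'] = ('libeatmydata.so ' + prev_preload).strip() # (and similarly prepend '/usr/lib/libeatmydata' to LD_LIBRARY_PATH, restoring the previous values in the finally block instead of deleting the variables).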
os.environ['LD_LIBRARY_PATH'] = '/usr/lib/libeatmydata' os.environ['LD_PRELOAD'] = 'libeatmydata.so' yield finally: del os.environ['LD_LIBRARY_PATH'] del os.environ['LD_PRELOAD'] def ensure_no_nonexistent_dirs(): nonexistent_dirs = ('/nonexistent', INVALID_DIR) for path in nonexistent_dirs: if os.path.exists(path): if os.geteuid() == 0: run_command('rm -r {}'.format(path)) continue if path == INVALID_DIR: # ensure invalid dir has no permissions try: os.chmod(INVALID_DIR, 0o000) except PermissionError: print('WARNING: Directory {} exists and is writable.'.format(INVALID_DIR), file=sys.stderr) else: print('WARNING: Directory {} exists and can not be removed!'.format(path), file=sys.stderr) def drop_privileges(): import grp if os.geteuid() != 0: return builder_gid = grp.getgrnam(BUILD_USER).gr_gid builder_uid = pwd.getpwnam(BUILD_USER).pw_uid os.setgroups([]) os.setgid(builder_gid) os.setuid(builder_uid) def update_container(builder_uid): with eatmydata(): run_apt_command('update') run_apt_command('full-upgrade') run_apt_command(['install', '--no-install-recommends', 'build-essential', 'dpkg-dev', 'fakeroot', 'eatmydata']) run_apt_command(['--purge', 'autoremove']) run_apt_command('clean') try: pwd.getpwnam(BUILD_USER) except KeyError: print('No "{}" user, creating it.'.format(BUILD_USER)) run_command('useradd -M -f -1 -d {} --uid {} {}' .format(INVALID_DIR, builder_uid, BUILD_USER)) run_command('mkdir -p /srv/build') run_command('chown {} /srv/build'.format(BUILD_USER)) # ensure the non existent directory is gone even if it was # created accidentally ensure_no_nonexistent_dirs() return True def prepare_run(): print_section('Preparing container') with eatmydata(): run_apt_command('update') run_apt_command('full-upgrade') return True def _generate_hashes(filename): import hashlib hash_md5 = hashlib.md5() hash_sha256 = hashlib.sha256() file_size = 0 with open(filename, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): hash_md5.update(chunk) hash_sha256.update(chunk) file_size = f.tell() return hash_md5.hexdigest(), hash_sha256.hexdigest(), file_size def prepare_package_build(arch_only=False, suite=None): from datetime import datetime, timezone print_section('Preparing container for build') with eatmydata(): run_apt_command('update') run_apt_command('full-upgrade') run_apt_command(['install', '--no-install-recommends', 'build-essential', 'dpkg-dev', 'fakeroot']) # check if we have extra packages to register with APT if os.path.exists(EXTRAPKG_DIR) and os.path.isdir(EXTRAPKG_DIR): if os.listdir(EXTRAPKG_DIR): with eatmydata(): run_apt_command(['install', '--no-install-recommends', 'apt-utils']) print() print('Using injected packages as additional APT package source.') packages_index_fname = os.path.join(EXTRAPKG_DIR, 'Packages') os.chdir(EXTRAPKG_DIR) with open(packages_index_fname, 'wt') as f: proc = subprocess.Popen(['apt-ftparchive', 'packages', '.'], cwd=EXTRAPKG_DIR, stdout=f) ret = proc.wait() if ret != 0: print('ERROR: Unable to generate temporary APT repository for injected packages.', file=sys.stderr) sys.exit(2) with open(os.path.join(EXTRAPKG_DIR, 'Release'), 'wt') as f: release_tmpl = '''Archive: local-pkg-inject Origin: LocalInjected Label: LocalInjected Acquire-By-Hash: no Component: main Date: {date} MD5Sum: {md5_hash} {size} Packages SHA256: {sha256_hash} {size} Packages ''' md5_hash, sha256_hash, size = _generate_hashes(packages_index_fname) f.write(release_tmpl.format( date=datetime.now(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S +0000'), md5_hash=md5_hash, 
sha256_hash=sha256_hash, size=size) ) with open('/etc/apt/sources.list', 'a') as f: f.write('deb [trusted=yes] file://{} ./\n'.format(EXTRAPKG_DIR)) with eatmydata(): # make APT aware of the new packages, update base packages if needed run_apt_command('update') run_apt_command('full-upgrade') # ensure we are in our build directory at this point os.chdir(BUILD_DIR) run_command('chown -R {} /srv/build'.format(BUILD_USER)) for f in glob('./*'): if os.path.isdir(f): os.chdir(f) break print_section('Installing package build-dependencies') with eatmydata(): cmd = ['build-dep'] if arch_only: cmd.append('--arch-only') cmd.append('./') run_apt_command(cmd) return True def build_package(buildflags=None, suite=None): drop_privileges() print_section('Build') os.chdir(BUILD_DIR) for f in glob('./*'): if os.path.isdir(f): os.chdir(f) break cmd = ['dpkg-buildpackage'] if suite: cmd.append('--changes-option=-DDistribution={}'.format(suite)) if buildflags: cmd.extend(buildflags) run_command(cmd) # run_command will exit the whole program if the command failed, # so we can return True here (everything went fine if we are here) return True def run_qatasks(qa_lintian=True): ''' Run QA tasks on a built package immediately after build (currently Lintian) ''' os.chdir(BUILD_DIR) for f in glob('./*'): if os.path.isdir(f): os.chdir(f) break if qa_lintian: print_section('QA: Prepare') if qa_lintian: # install Lintian if Lintian check was requested run_apt_command(['install', 'lintian']) print_section('QA: Lintian') drop_privileges() cmd = ['lintian', '-I', # infos by default '--pedantic', # pedantic hints by default, '--tag-display-limit', '0', # display all tags found (even if that may be a lot occasionally) ] run_command(cmd) # run_command will exit the whole program if the command failed, # so we can return True here (everything went fine if we are here) return True def setup_environment(builder_uid=None, use_color=True, use_unicode=True, *, is_update=False): os.environ['LANG'] = 'C.UTF-8' if use_unicode else 'C' os.environ['LC_ALL'] = 'C.UTF-8' if use_unicode else 'C' os.environ['HOME'] = '/nonexistent' os.environ['TERM'] = 'xterm-256color' if use_color else 'xterm-mono' os.environ['SHELL'] = '/bin/sh' del os.environ['LOGNAME'] # ensure no directories exists that shouldn't be there ensure_no_nonexistent_dirs() if builder_uid and builder_uid > 0 and os.geteuid() == 0: # we are root and got a UID to change the BUILD_USER to try: pwd.getpwnam(BUILD_USER) except KeyError: if not is_update: print('WARNING: No "{}" user found in this container!'.format(BUILD_USER), file=sys.stderr) return run_command('usermod -u {} {}'.format(builder_uid, BUILD_USER)) def main(): if not os.environ.get('container'): print('This helper script must be run in a systemd-nspawn container.') return 1 parser = ArgumentParser(description='Debspawn helper script') parser.add_argument('--no-color', action='store_true', dest='no_color', help='Disable terminal colors.') parser.add_argument('--no-unicode', action='store_true', dest='no_unicode', help='Disable unicode support.') parser.add_argument('--buid', action='store', type=int, dest='builder_uid', help='Designated UID of the build user within the container.') parser.add_argument('--update', action='store_true', dest='update', help='Initialize the container.') parser.add_argument('--arch-only', action='store_true', dest='arch_only', default=None, help='Only get arch-dependent packages (used when satisfying build dependencies).') parser.add_argument('--build-prepare', action='store_true', 
dest='build_prepare', help='Prepare building a Debian package.') parser.add_argument('--build-run', action='store_true', dest='build_run', help='Build a Debian package.') parser.add_argument('--lintian', action='store_true', dest='qa_lintian', help='Run Lintian on the generated package.') parser.add_argument('--buildflags', action='store', dest='buildflags', default=None, help='Flags passed to dpkg-buildpackage.') parser.add_argument('--suite', action='store', dest='suite', default=None, help='The suite we are building for (may be inferred if not set).') parser.add_argument('--prepare-run', action='store_true', dest='prepare_run', help='Prepare container image for generic script run.') parser.add_argument('--run-qa', action='store_true', dest='run_qatasks', help='Run QA tasks (only Lintian currently) against a package.') options = parser.parse_args(sys.argv[1:]) # initialize environment defaults global unicode_enabled, color_enabled unicode_enabled = not options.no_unicode color_enabled = not options.no_color setup_environment(options.builder_uid, color_enabled, unicode_enabled, is_update=options.update) if options.update: r = update_container(options.builder_uid) if not r: return 2 elif options.build_prepare: r = prepare_package_build(options.arch_only, options.suite) if not r: return 2 elif options.build_run: buildflags = [] if options.buildflags: buildflags = [s.strip('\'" ') for s in options.buildflags.split(';')] r = build_package(buildflags, options.suite) if not r: return 2 elif options.prepare_run: r = prepare_run() if not r: return 2 elif options.run_qatasks: r = run_qatasks(qa_lintian=options.qa_lintian) if not r: return 2 else: print('ERROR: No action specified.', file=sys.stderr) return 1 return 0 if __name__ == '__main__': sys.exit(main()) debspawn-0.6.4/debspawn/injectpkg.py000066400000000000000000000100401456520253100174550ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2019-2022 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . import os from glob import glob from pathlib import Path from contextlib import contextmanager from .utils import temp_dir, print_info, hardlink_or_copy class PackageInjector: ''' Inject packages from external sources into the container APT environment. 
''' def __init__(self, osbase): self._pkgs_basedir = osbase.global_config.injected_pkgs_dir self._pkgs_specific_dir = os.path.join(self._pkgs_basedir, osbase.name) self._has_injectables = None self._instance_repo_dir = None def has_injectables(self): '''Return True if we actually have any packages ready to inject''' if type(self._has_injectables) is bool: return self._has_injectables if os.path.exists(self._pkgs_basedir) and os.path.isdir(self._pkgs_basedir): for f in os.listdir(self._pkgs_basedir): if os.path.isfile(os.path.join(self._pkgs_basedir, f)): self._has_injectables = True return True if os.path.exists(self._pkgs_specific_dir) and os.path.isdir(self._pkgs_specific_dir): for f in os.listdir(self._pkgs_specific_dir): if os.path.isfile(os.path.join(self._pkgs_specific_dir, f)): self._has_injectables = True return True self._has_injectables = False return False def create_instance_repo(self, tmp_repo_dir): ''' Create a temporary location where all injected packages for this container are copied to. ''' Path(self._pkgs_basedir).mkdir(parents=True, exist_ok=True) Path(tmp_repo_dir).mkdir(parents=True, exist_ok=True) print_info('Copying injected packages to instance location') self._instance_repo_dir = tmp_repo_dir # copy/link injected packages specific to this environment if os.path.isdir(self._pkgs_specific_dir): for pkg_fname in glob(os.path.join(self._pkgs_specific_dir, '*.deb')): pkg_path = os.path.join(tmp_repo_dir, os.path.basename(pkg_fname)) if not os.path.isfile(pkg_path): hardlink_or_copy(pkg_fname, pkg_path) # copy/link injected packages used by all environments for pkg_fname in glob(os.path.join(self._pkgs_basedir, '*.deb')): pkg_path = os.path.join(tmp_repo_dir, os.path.basename(pkg_fname)) if not os.path.isfile(pkg_path): hardlink_or_copy(pkg_fname, pkg_path) @property def instance_repo_dir(self) -> str: return self._instance_repo_dir @contextmanager def package_injector(osbase, machine_name=None): ''' Create a package injector as context manager and make it create a new temporary instance repo. ''' if not machine_name: from random import choice from string import digits, ascii_lowercase nid = ''.join(choice(ascii_lowercase + digits) for _ in range(4)) machine_name = '{}-{}'.format(osbase.name, nid) pi = PackageInjector(osbase) if not pi.has_injectables(): yield pi else: with temp_dir('pkginject-' + machine_name) as injectrepo_tmp: pi.create_instance_repo(injectrepo_tmp) yield pi debspawn-0.6.4/debspawn/maintain.py000066400000000000000000000177551456520253100173230ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2018-2022 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . 
import os import sys import json import shutil from glob import glob from .config import GlobalConfig from .osbase import OSBase from .utils.env import ensure_root from .utils.log import ( print_info, print_warn, print_error, print_bullet, print_section, print_bool_item, ) def ensure_rmtree_symattack_protection(): '''Exit the program with an error if rmtree does not protect against symlink attacks''' if not shutil.rmtree.avoids_symlink_attacks: print_error('Will not continue: rmtree does not run in symlink-attack protected mode.') sys.exit(1) def maintain_migrate(gconf: GlobalConfig): '''Migrate configuration from older versions of debspawn to the latest version''' ensure_root() # migrate old container images directory, if needed images_dir = gconf.osroots_dir if not os.path.isdir(images_dir): old_images_dir = '/var/lib/debspawn/containers' if os.path.isdir(old_images_dir): print_info('Migrating images directory...') shutil.move(old_images_dir, images_dir) def maintain_clear_caches(gconf: GlobalConfig): '''Delete all cache data for all images''' ensure_root() ensure_rmtree_symattack_protection() aptcache_dir = gconf.aptcache_dir if os.path.isdir(aptcache_dir): for cdir in glob(os.path.join(aptcache_dir, '*')): print_info('Removing APT cache for: {}'.format(os.path.basename(cdir))) if os.path.isdir(cdir): shutil.rmtree(cdir) else: os.remove(cdir) dcache_dir = os.path.join(gconf.osroots_dir, 'dcache') if os.path.isdir(dcache_dir): print_info('Removing image derivatives cache.') shutil.rmtree(dcache_dir) def maintain_purge(gconf: GlobalConfig, force: bool = False): '''Remove all images as well as any data associated with them''' ensure_root() ensure_rmtree_symattack_protection() if not force: print_warn( ( 'This action will delete ALL your images as well as their configuration, build results and other ' 'associated data and will clear all data from the directories you may have configured as default.' ) ) delete_all = False while True: try: in_res = input('Do you really want to continue? 
[y/N]: ') except EOFError: in_res = 'n' print() if not in_res: delete_all = False break elif in_res.lower() == 'y': delete_all = True break elif in_res.lower() == 'n': delete_all = False break if not delete_all: print_info('Purge action aborted.') return print_warn('Deleting all images, image configuration, build results and state data.') for sdir in [gconf.osroots_dir, gconf.results_dir, gconf.aptcache_dir, gconf.injected_pkgs_dir]: if not os.path.isdir(sdir): continue if sdir.startswith('/home/') or sdir.startswith('/usr/'): continue print_info('Purging: {}'.format(sdir)) for d in glob(os.path.join(sdir, '*')): if os.path.isdir(d): shutil.rmtree(d) else: os.remove(d) default_state_dir = '/var/lib/debspawn/' if os.path.isdir(default_state_dir): print_info('Removing: {}'.format(default_state_dir)) shutil.rmtree(default_state_dir) def maintain_update_all(gconf: GlobalConfig): '''Update all container images that we know.''' ensure_root() osroots_dir = gconf.osroots_dir tar_files = [] if os.path.isdir(osroots_dir): tar_files = list(glob(os.path.join(osroots_dir, '*.tar.zst'))) if not tar_files: print_info('No container base images have been found!') return failed_images = [] nodata_images = [] first_entry = True for tar_fname in tar_files: img_basepath = os.path.splitext(os.path.splitext(tar_fname)[0])[0] config_fname = img_basepath + '.json' imgid = os.path.basename(img_basepath) # read configuration data if not os.path.isfile(config_fname): nodata_images.append(imgid) continue with open(config_fname, 'rt') as f: cdata = json.loads(f.read()) if not first_entry: print() first_entry = False print_bullet('Update: {}'.format(imgid), indent=1, large=True) osbase = OSBase( gconf, cdata['Suite'], cdata['Architecture'], cdata.get('Variant'), custom_name=os.path.basename(img_basepath), ) r = osbase.update() if not r: print_error('Failed to update {}'.format(imgid)) failed_images.append(imgid) if nodata_images or failed_images: print() for imgid in nodata_images: print_warn('Could not auto-update image {}: Configuration data is missing.'.format(imgid)) if failed_images: print_error('Failed to update image(s): {}'.format(', '.join(failed_images))) sys.exit(1) def maintain_print_status(gconf: GlobalConfig): ''' Print status information about this Debspawn installation that may be useful for debugging issues. ''' import platform from . 
import __version__ from .nspawn import systemd_version, systemd_detect_virt from .osbase import bootstrap_tool_version, print_container_base_image_info print('Debspawn Status Report', end='') sys.stdout.flush() # read distribution information os_release = {} if os.path.exists('/etc/os-release'): with open('/etc/os-release') as f: for line in f: k, v = line.rstrip().split("=") os_release[k] = v.strip('"') print_section('Host System') print('OS:', os_release.get('NAME', 'Unknown'), os_release.get('VERSION', '')) print('Platform:', platform.platform(aliased=True)) print('Virtualization:', systemd_detect_virt()) print('Systemd-nspawn version:', systemd_version()) print('Bootstrap tool:', '{} {}'.format(gconf.bootstrap_tool, bootstrap_tool_version(gconf))) print_section('Container image list') print_container_base_image_info(gconf) print_section('Debspawn') print('Version:', __version__) print_bool_item( 'Tmpfiles.d configuration:', os.path.isfile('/usr/lib/tmpfiles.d/debspawn.conf'), text_true='installed', text_false='missing', ) print_bool_item( 'Monthly cache cleanup timer:', os.path.isfile('/lib/systemd/system/debspawn-clear-caches.timer'), text_true='available', text_false='missing', ) print_bool_item( 'Manual pages:', len(glob('/usr/share/man/man1/debspawn*.1.*')) >= 8, text_true='installed', text_false='missing', ) if not os.path.isfile('/etc/debspawn/global.toml'): print('Global configuration: default') else: print('Global configuration:') with open('/etc/debspawn/global.toml', 'r') as f: for line in f: print(' ', line) debspawn-0.6.4/debspawn/nspawn.py000066400000000000000000000326601456520253100170210ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2018-2022 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . 
import os import typing as T import platform import subprocess from .utils import ( safe_run, temp_dir, print_info, print_warn, print_error, run_forwarded, ) from .injectpkg import PackageInjector from .utils.env import unicode_allowed, colored_output_allowed from .utils.command import run_command __systemd_version = None def systemd_version(): global __systemd_version if __systemd_version: return __systemd_version __systemd_version = -1 try: out, _, _ = safe_run(['systemd-nspawn', '--version']) parts = out.split(' ', 2) if len(parts) >= 2: __systemd_version = int(parts[1]) except Exception as e: print_warn('Unable to determine systemd version: {}'.format(e)) return __systemd_version def systemd_detect_virt(): vm_name = 'unknown' try: out, _, _ = run_command(['systemd-detect-virt']) vm_name = out.strip() except Exception as e: print_warn('Unable to determine current virtualization: {}'.format(e)) return vm_name def systemd_version_atleast(expected_version: int): v = systemd_version() # we always assume we are running the highest version, # if we failed to determine the right systemd version if v < 0: return True if v >= expected_version: return True return False def get_nspawn_personality(osbase): ''' Return the systemd-nspawn container personality for the given combination of host architecture and base OS. This allows running x86 builds on amd64 machines. ''' import fnmatch if platform.machine() == 'x86_64' and fnmatch.filter([osbase.arch], 'i?86'): return 'x86' return None def _execute_sdnspawn( osbase, parameters, machine_name, *, boot: bool = False, allow_permissions: list[str] = None, syscall_filter: list[str] = None, env_vars: dict[str, str] = None, private_users: bool = False, nowait: bool = False, ) -> T.Union[subprocess.CompletedProcess, subprocess.Popen]: ''' Execute systemd-nspawn with the given parameters. Mess around with cgroups if necessary. ''' import sys if not allow_permissions: allow_permissions = [] if not syscall_filter: syscall_filter = [] if not env_vars: env_vars = {} capabilities = [] full_dev_access = False full_proc_access = False ro_kmods_access = False kvm_access = False all_privileges = False for perm in allow_permissions: perm = perm.lower() if perm.startswith('cap_') or perm == 'all': if perm == 'all': capabilities.append(perm) print_warn('Container retains all privileges.') all_privileges = True else: capabilities.append(perm.upper()) elif perm == 'full-dev': full_dev_access = True elif perm == 'full-proc': full_proc_access = True elif perm == 'read-kmods': ro_kmods_access = True elif perm == 'kvm': kvm_access = True else: print_info('Unknown allowed permission: {}'.format(perm)) if ( capabilities or full_dev_access or full_proc_access or kvm_access ) and not osbase.global_config.allow_unsafe_perms: print_error( 'Configuration does not permit usage of additional and potentially dangerous permissions. Exiting.'
) sys.exit(9) cmd = ['systemd-nspawn'] cmd.extend(['-M', machine_name]) if boot: # if we boot the container, we also register it with machinectl, otherwise # we run an unregistered container with the command as PID2 cmd.append('-b') cmd.append('--notify-ready=yes') else: cmd.append('--register=no') cmd.append('-a') if private_users: cmd.append('-U') # User namespaces with --private-users=pick --private-users-chown, if possible # never try to bindmount /etc/localtime cmd.append('--timezone=copy') if full_dev_access: cmd.extend(['--bind', '/dev']) if systemd_version_atleast(244): cmd.append('--console=pipe') cmd.extend(['--property=DeviceAllow=block-* rw', '--property=DeviceAllow=char-* rw']) if kvm_access and not full_dev_access: if os.path.exists('/dev/kvm'): cmd.extend(['--bind', '/dev/kvm']) cmd.extend(['--property=DeviceAllow=/dev/kvm rw']) else: print_warn( 'Access to KVM requested, but /dev/kvm does not exist on the host. Is virtualization supported?' ) if full_proc_access: cmd.extend(['--bind', '/proc']) if not all_privileges: print_warn('Container has access to host /proc') if ro_kmods_access: cmd.extend(['--bind-ro', '/lib/modules/']) cmd.extend(['--bind-ro', '/boot/']) if capabilities: cmd.extend(['--capability', ','.join(capabilities)]) if syscall_filter: cmd.extend(['--system-call-filter', ' '.join(syscall_filter)]) for v_name, v_value in env_vars.items(): cmd.extend(['-E', '{}={}'.format(v_name, v_value)]) # add custom parameters cmd.extend(parameters) if nowait: return subprocess.Popen(cmd, shell=False, stdin=subprocess.DEVNULL) else: return run_forwarded(cmd) def nspawn_run_persist( osbase, base_dir, machine_name, chdir, command: T.Union[list[str], str] = None, flags: T.Union[list[str], str] = None, *, tmp_apt_cache_dir: str = None, pkginjector: PackageInjector = None, allowed: list[str] = None, syscall_filter: list[str] = None, env_vars: dict[str, str] = None, private_users: bool = False, boot: bool = False, verbose: bool = False, ): if isinstance(command, str): command = command.split(' ') elif not command: command = [] if isinstance(flags, str): flags = flags.split(' ') elif not flags: flags = [] personality = get_nspawn_personality(osbase) def run_nspawn_with_aptcache(aptcache_tmp_dir): params = [ '--chdir={}'.format(chdir), '--link-journal=no', ] if aptcache_tmp_dir: params.append('--bind={}:/var/cache/apt/archives/'.format(aptcache_tmp_dir)) if pkginjector and pkginjector.instance_repo_dir: params.append('--bind={}:/srv/extra-packages/'.format(pkginjector.instance_repo_dir)) if personality: params.append('--personality={}'.format(personality)) params.extend(flags) params.extend(['-{}D'.format('' if verbose else 'q'), base_dir]) # nspawn can not run a command in a booted container on its own if not boot: params.extend(command) sdns_nowait = boot and command # ensure the temporary apt cache is up-to-date if aptcache_tmp_dir: osbase.aptcache.create_instance_cache(aptcache_tmp_dir) # run command in container ns_proc = _execute_sdnspawn( osbase, params, machine_name, allow_permissions=allowed, syscall_filter=syscall_filter, env_vars=env_vars, private_users=private_users, boot=boot, nowait=sdns_nowait, ) if not sdns_nowait: ret = ns_proc.returncode else: try: import time # the container is (hopefully) running now, but let's check for that time_ac_start = time.time() container_booted = False while (time.time() - time_ac_start) < 60: scisr_out, _, _ = run_command( [ 'systemd-run', '-GP', '--wait', '-qM', machine_name, 'systemctl', 'is-system-running', ] ) # check if we are 
actually running, try again later if not if scisr_out.strip() in ('running', 'degraded'): print() container_booted = True break time.sleep(0.5) if container_booted: sdr_cmd = [ 'systemd-run', '-GP', '--wait', '-qM', machine_name, '--working-directory', chdir, ] + command proc = run_forwarded(sdr_cmd) ret = proc.returncode else: ret = 7 print_error('Timed out while waiting for the container to boot.') finally: run_forwarded(['machinectl', 'poweroff', machine_name]) try: ns_proc.wait(30) except subprocess.TimeoutExpired: ns_proc.terminate() # archive APT cache, so future runs of this command are faster (unless disabled in configuration) if aptcache_tmp_dir: osbase.aptcache.merge_from_dir(aptcache_tmp_dir) return ret if not osbase.cache_packages: # APT package caching was explicitly disabled by the user ret = run_nspawn_with_aptcache(None) elif tmp_apt_cache_dir: # we will be reusing an externally provided temporary APT cache directory ret = run_nspawn_with_aptcache(tmp_apt_cache_dir) else: # we will create our own temporary APT cache dir with temp_dir('aptcache-' + machine_name) as aptcache_tmp: ret = run_nspawn_with_aptcache(aptcache_tmp) return ret def nspawn_run_ephemeral( osbase, base_dir, machine_name, chdir, command: T.Union[list[str], str] = None, flags: T.Union[list[str], str] = None, allowed: list[str] = None, syscall_filter: list[str] = None, env_vars: dict[str, str] = None, private_users: bool = False, boot: bool = False, ): if isinstance(command, str): command = command.split(' ') elif not command: command = [] if isinstance(flags, str): flags = flags.split(' ') elif not flags: flags = [] personality = get_nspawn_personality(osbase) params = ['--chdir={}'.format(chdir), '--link-journal=no'] if personality: params.append('--personality={}'.format(personality)) params.extend(flags) params.extend(['-qxD', base_dir]) params.extend(command) return _execute_sdnspawn( osbase, params, machine_name, allow_permissions=allowed, syscall_filter=syscall_filter, env_vars=env_vars, private_users=private_users, ).returncode def nspawn_make_helper_cmd(flags, build_uid: int): if isinstance(flags, str): flags = flags.split(' ') cmd = ['/usr/lib/debspawn/dsrun'] if not colored_output_allowed(): cmd.append('--no-color') if not unicode_allowed(): cmd.append('--no-unicode') if build_uid > 0: cmd.append('--buid={}'.format(build_uid)) cmd.extend(flags) return cmd def nspawn_run_helper_ephemeral( osbase, base_dir, machine_name, helper_flags, chdir='/tmp', *, build_uid: int, nspawn_flags: T.Union[list[str], str] = None, allowed: list[str] = None, env_vars: dict[str, str] = None, private_users: bool = False, ): cmd = nspawn_make_helper_cmd(helper_flags, build_uid) return nspawn_run_ephemeral( osbase, base_dir, machine_name, chdir, cmd, flags=nspawn_flags, allowed=allowed, env_vars=env_vars, private_users=private_users, ) def nspawn_run_helper_persist( osbase, base_dir, machine_name, helper_flags, chdir='/tmp', *, build_uid: int, nspawn_flags: T.Union[list[str], str] = None, tmp_apt_cache_dir=None, pkginjector=None, allowed: list[str] = None, syscall_filter: list[str] = None, env_vars: dict[str, str] = None, private_users: bool = False, ): cmd = nspawn_make_helper_cmd(helper_flags, build_uid) return nspawn_run_persist( osbase, base_dir, machine_name, chdir, cmd, nspawn_flags, tmp_apt_cache_dir=tmp_apt_cache_dir, pkginjector=pkginjector, allowed=allowed, syscall_filter=syscall_filter, env_vars=env_vars, private_users=private_users, )
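# Illustrative sketch (not part of upstream debspawn): roughly how callers such as the build code are expected to combine the helpers above, assuming `osbase` is an OSBase instance and `instance_dir` points to an unpacked container image tree: # machine_name = osbase.new_nspawn_machine_name() # ret = nspawn_run_persist(osbase, instance_dir, machine_name, '/srv/build', ['dpkg-buildpackage', '-b'], syscall_filter=osbase.global_config.syscall_filter) # if ret != 0: # print_error('Command failed in container {}.'.format(machine_name))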
debspawn-0.6.4/debspawn/osbase.py000066400000000000000000001305631456520253100167700ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2018-2022 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . import os import json import shutil import typing as T import subprocess from pathlib import Path from contextlib import contextmanager from .utils import ( listify, temp_dir, print_info, print_warn, print_error, print_header, print_section, format_filesize, ) from .config import GlobalConfig from .nspawn import nspawn_run_persist, nspawn_run_helper_persist from .aptcache import APTCache from .utils.env import ensure_root, get_owner_uid_gid, get_random_free_uid_gid from .utils.misc import safe_copy, maybe_remove, rmtree_mntsafe from .utils.command import safe_run from .utils.zstd_tar import ensure_tar_zstd, compress_directory, decompress_tarball def bootstrap_tool_version(gconf=None): if not gconf: gconf = GlobalConfig() ds_version = 'unknown' try: out, _, _ = safe_run([gconf.bootstrap_tool, '--version']) parts = out.strip().split(' ', 2) ds_version = parts[0 if len(parts) < 2 else 1] except Exception as e: print_warn('Unable to determine bootstrap tool version: {}'.format(e)) return ds_version class OSBase: ''' Describes an OS base registered with debspawn ''' def __init__( self, gconf: GlobalConfig, suite: str, arch: str, variant=None, *, base_suite=None, custom_name=None, cachekey=None, ): self._gconf = gconf self._suite = suite self._base_suite = base_suite self._arch = arch # if variant is not passed, we use the configured default self._variant = variant if variant else gconf.default_bootstrap_variant if self._variant == 'none': # "none" is an alias to "don't set a variant when invoking debootstrap" self._variant = None self._custom_name = custom_name self._name = self._make_name() self._results_dir = self._gconf.results_dir self._cachekey = cachekey if self._cachekey: self._cachekey = self._cachekey.replace(' ', '') self._parameters_checked = False self._aptcache = APTCache(self) # debootstrap-compatible tools that we know about self._known_bootstrap_tools = ('debootstrap', 'mmdebstrap', 'qemu-debootstrap') # get a fresh UID to give to our build user within the container self._builder_uid = get_random_free_uid_gid()[0] # ensure we can (de)compress zstd tarballs ensure_tar_zstd() def _make_name(self): '''Configure a unique-ish name based on user defined data, and tweak the custom name and suite values to match.''' if self._custom_name and not self._suite: # if we have a custom name but no suite name, the custom name is treated # as our suite name *if* no image exists with the custom name # (this is for backwards compatibility) self._name = self._custom_name if not self.exists(): self._suite = self._custom_name self._custom_name = None if self._custom_name == self._suite: self._custom_name = None if not self._arch: out, _, ret 
= safe_run(['dpkg', '--print-architecture']) if ret != 0: raise RuntimeError('Running dpkg --print-architecture failed: {}'.format(out)) self._arch = out.strip() if self._custom_name: return self._custom_name elif self._variant: return '{}-{}-{}'.format(self._suite, self._variant, self._arch) else: return '{}-{}'.format(self._suite, self._arch) def _custom_name_parameter_check(self): '''Read parameters in case a custom name was passed, and perform basic sanity checks.''' import sys if self._parameters_checked: return if not self._custom_name: return config_fname = self.get_config_location() if not os.path.isfile(config_fname): print_error('No configuration data found for image "{}"!'.format(self.name)) sys.exit(3) with open(config_fname, 'rt') as f: cdata = json.loads(f.read()) c_suite = cdata.get('Suite', self.suite) if not self._suite: # if no suite was set, but we have one in the manifest file, # we will always use it to fill in the gap self._suite = c_suite if self.suite != c_suite: print_error( 'Expected suite name "{}" for image "{}", but got "{}" instead.'.format( cdata.get('Suite'), self.name, self.suite ) ) sys.exit(1) c_arch = cdata.get('Architecture', self.arch) c_variant = cdata.get('Variant', self.variant) if self.arch and self.arch != c_arch: print_warn( ( 'Expected architecture "{}" for image "{}", but got "{}" instead. ' 'Using expected value.' ).format(c_arch, self.name, self.arch) ) if self.variant and self.variant != c_variant: print_warn( ( 'Expected variant "{}" for image "{}", but got "{}" instead. ' 'Using expected value.' ).format(c_variant, self.name, self.variant) ) self._arch = c_arch self._variant = c_variant self._parameters_checked = True @property def name(self) -> str: return self._name @property def suite(self) -> str: return self._suite @property def base_suite(self) -> str: return self._base_suite @property def arch(self) -> str: return self._arch @property def variant(self) -> str: return self._variant @property def global_config(self): return self._gconf @property def aptcache(self): return self._aptcache @property def cache_packages(self) -> bool: return self._gconf.cache_packages @property def has_base_suite(self) -> bool: return True if self.base_suite and self.base_suite != self.suite else False @property def results_dir(self): Path(self._results_dir).mkdir(parents=True, exist_ok=True) return self._results_dir @results_dir.setter def results_dir(self, path): self._results_dir = path Path(self._results_dir).mkdir(exist_ok=True) def _copy_helper_script(self, osroot_path): script_location = os.path.join(osroot_path, 'usr', 'lib', 'debspawn') Path(script_location).mkdir(parents=True, exist_ok=True) script_fname = os.path.join(script_location, 'dsrun') if os.path.isfile(script_fname): os.remove(script_fname) shutil.copy2(self._gconf.dsrun_path, script_fname) os.chmod(script_fname, 0o0755) def get_image_location(self): return os.path.join(self._gconf.osroots_dir, '{}.tar.zst'.format(self.name)) def get_image_cache_dir(self): cache_img_dir = os.path.join(self._gconf.osroots_dir, 'dcache', self.name) Path(cache_img_dir).mkdir(parents=True, exist_ok=True) return cache_img_dir def get_cache_image_location(self): if not self._cachekey: return None return os.path.join(self.get_image_cache_dir(), '{}.tar.zst'.format(self._cachekey)) def get_config_location(self): return os.path.join(self._gconf.osroots_dir, '{}.json'.format(self.name)) def exists(self): return os.path.isfile(self.get_image_location()) def cacheimg_exists(self): location = 
self.get_cache_image_location() if not location: return False return os.path.isfile(location) def ensure_exists(self): ''' Ensure the container image exists, and terminate the program with an error code in case it does not. ''' import sys if not self._load_existent(): print_error( 'The container image for "{}" does not exist. Please create it first.'.format(self.name) ) sys.exit(3) def _load_existent(self) -> bool: '''Check if image exists, and if so load some essential data and return True.''' # ensure the set config values are sane if the user is using a custom container name if self._custom_name: self._custom_name_parameter_check() return self.exists() def new_nspawn_machine_name(self): import platform from random import choice from string import digits, ascii_lowercase nid = ''.join(choice(ascii_lowercase + digits) for _ in range(4)) # on Linux, the maximum hostname length is 64, so we simple set this as general default for # debspawn here. # shorten the hostname part or replace the suffix, depending on what is longer. # This should only ever matter if the hostname of the system already is incredibly long uniq_suffix = '{}-{}'.format(self.name, nid) if len(uniq_suffix) > 48: uniq_suffix = ''.join(choice(ascii_lowercase + digits) for _ in range(12)) node_name_prefix = platform.node()[: 63 - len(uniq_suffix)] return '{}-{}'.format(node_name_prefix, uniq_suffix) def _write_config_json( self, mirror, components, extra_suites, extra_source_lines, *, allow_recommends: bool, with_init: bool ): ''' Create configuration file for this container base image ''' print_info('Saving configuration settings.') data: T.Dict[str, T.Union[str, bool]] = { 'Name': self.name, 'CustomName': self._custom_name, 'Suite': self.suite, 'BaseSuite': self.base_suite, 'Architecture': self.arch, } if self.variant: data['Variant'] = self.variant if mirror: data['Mirror'] = mirror if components: data['Components'] = components if extra_suites: data['ExtraSuites'] = extra_suites if extra_source_lines: data['ExtraSourceLines'] = extra_source_lines if allow_recommends: data['AllowRecommends'] = True if with_init: data['IncludesInit'] = True data['BootstrapTool'] = self._gconf.bootstrap_tool with open(self.get_config_location(), 'wt') as f: f.write(json.dumps(data, sort_keys=True, indent=4)) f.write('\n') def _clear_image_tree(self, image_dir): '''Clear files from a directory tree that we don't want in the tarball.''' if os.path.ismount(image_dir): print_warn('Preparing OS tree for compression, but /dev is still mounted.') return for sdir, _, files in os.walk(os.path.join(image_dir, 'dev')): for f in files: fname = os.path.join(sdir, f) if os.path.lexists(fname) and not os.path.isdir(fname) and not os.path.ismount(fname): os.remove(fname) def _remove_unwanted_files(self, instance_dir): '''Delete unwanted files from a base image''' # drop resolv.conf: Some OSes set this to a symlink, which will lead to nowhere # in the container and may cause issues when bindmounting over it maybe_remove(os.path.join(instance_dir, 'etc', 'resolv.conf')) # our APT proxy connfiguration should also not be stored, we will reset it every time maybe_remove(os.path.join(instance_dir, 'etc', 'apt', 'apt.conf.d', '98debspawn_proxy')) # drop machine-id file, if one exists maybe_remove(os.path.join(instance_dir, 'etc', 'machine-id')) # drop logfiles which we want to reset maybe_remove(os.path.join(instance_dir, 'var', 'log', 'lastlog')) maybe_remove(os.path.join(instance_dir, 'var', 'log', 'faillog')) # drop APT package caches, we do not want them in 
the tarballs apt_pkg_cache_dir = os.path.join(instance_dir, 'var', 'cache', 'apt', 'archives') if os.path.isdir(apt_pkg_cache_dir): rmtree_mntsafe(apt_pkg_cache_dir, ignore_errors=True) def _setup_apt_proxy(self, instance_dir): '''Setup APT proxy configuration. APT needs special treatment even in the container to work with a company proxy (yuck!), so we do this here so the user does not need to care about it. ''' http_proxy = os.getenv('HTTP_PROXY', os.getenv('http_proxy')) https_proxy = os.getenv('HTTPS_PROXY', os.getenv('https_proxy')) if not http_proxy and not https_proxy: return if http_proxy: http_proxy = http_proxy.replace('"', '') if https_proxy: https_proxy = https_proxy.replace('"', '') proxyconf_fname = os.path.join(instance_dir, 'etc', 'apt', 'apt.conf.d', '98debspawn_proxy') with open(proxyconf_fname, 'w') as f: if http_proxy: f.write('Acquire::http::Proxy "{}";\n'.format(http_proxy)) if https_proxy: f.write('Acquire::https::Proxy "{}";\n'.format(https_proxy)) def _setup_etchosts(self, instance_dir): '''Set /etc/hosts for the container. Many packages spawn webservers as part of their tests, which need to resolve "localhost" even when on our private network. So we need to provide /etc/hosts on our own, so this works even with systemd-nspawn's private network. ''' hosts_fname = os.path.join(instance_dir, 'etc', 'hosts') with open(hosts_fname, 'w') as f: f.write( '127.0.0.1 localhost\n' '\n' '# The following lines are desirable for IPv6 capable hosts\n' '::1 localhost ip6-localhost ip6-loopback\n' 'ff02::1 ip6-allnodes\n' 'ff02::2 ip6-allrouters\n' ) def _setup_apt_repo_preferences(self, instance_dir, preferred_suites): '''Setup APT repository preferences. APT somtimes wants to install packages from older repositories to satisfy dependencies, even though we must build with versions from the newer repositories. This especially is the case for `NotAutomatic` suites like "experimental", where packages would not get auto-installed from. Passing "-t " to the APT commands restricts the solver way too much, so valid dependency chains are no chosen for partial suites if the suites don't contain all dependencies as well. This is a compromise to make APT do the right thing, by simply setting all suites to the same priority. NOTE: With suites like experimental, we may need to set the preferences later if we want to avoid experimental packages being added to the base image. That comes with its own difficulties though, so we avoid that for now. 
''' # enusre all suites are represented in the "preferred suites" list if not preferred_suites: preferred_suites = [] if self.has_base_suite: if self.base_suite not in preferred_suites: preferred_suites.insert(0, self.base_suite) if self.suite not in preferred_suites: preferred_suites.insert(0, self.suite) aptpref_fname = os.path.join(instance_dir, 'etc', 'apt', 'preferences.d', '10debspawn') with open(aptpref_fname, 'w') as f: first = True for suite in preferred_suites: if not first: f.write('\n') first = False priority = 500 if suite == self.suite: priority = 600 f.write(('Package: *\n' 'Pin: release a={}\n' 'Pin-Priority: {}\n').format(suite, priority)) # we *always* prefer locally injected packages above anything else f.write('\nPackage: *\nPin: release o=LocalInjected\nPin-Priority: 1000\n') def _create_internal( self, mirror=None, components=None, *, extra_suites: list[str] = None, extra_source_lines: str = None, allow_recommends: bool = False, with_init: bool = False, show_header: bool = True, ): '''Create new container base image (internal method)''' if self.exists(): print_error('An image already exists for this configuration. Can not create a new one.') return False if not extra_suites: extra_suites = [] bootstrap_tool_exe = self._gconf.bootstrap_tool if not shutil.which(bootstrap_tool_exe): print_error('Unable to find executable for bootstrap tool "{}".'.format(bootstrap_tool_exe)) return False # ensure image location exists Path(self._gconf.osroots_dir).mkdir(parents=True, exist_ok=True) if show_header: print_header('Creating new base: {} [{}]'.format(self.suite, self.arch)) else: print_section('Creating new base: {} [{}]'.format(self.suite, self.arch)) print('Bootstrap tool:', '{} {}'.format(bootstrap_tool_exe, bootstrap_tool_version(self._gconf))) if bootstrap_tool_exe not in self._known_bootstrap_tools: print_warn('Using unfamiliar bootstrap tool: {}'.format(bootstrap_tool_exe)) if self._custom_name: print('Custom name: {}'.format(self.name)) print('Using mirror: {}'.format(mirror if mirror else 'default')) if self.variant: print('Variant: {}'.format(self.variant)) if with_init: print('Includes init: yes') include_pkgs = ['passwd', 'python3-minimal', 'eatmydata'] if with_init: include_pkgs.append('systemd-sysv') cmd = [bootstrap_tool_exe, '--arch={}'.format(self.arch), '--include=' + ','.join(include_pkgs)] if components: cmd.append('--components={}'.format(','.join(components))) if self.variant: cmd.append('--variant={}'.format(self.variant)) with temp_dir() as tdir: bootstrap_suite = self.suite if self.has_base_suite: bootstrap_suite = self.base_suite cmd.extend([bootstrap_suite, tdir]) print('Bootstrap suite: {}'.format(bootstrap_suite)) if extra_suites: print('Additional suites: {}'.format(', '.join(extra_suites))) if extra_source_lines: print('Custom sources.list lines will be added:') for line in extra_source_lines.split('\\n'): print(' {}'.format(line)) if mirror: cmd.append(mirror) print_section('Bootstrap') proc = subprocess.run(cmd, check=False) if proc.returncode != 0: return False # create helper script runner self._copy_helper_script(tdir) # if we bootstrapped the base suite, add the primary suite to # sources.list. 
We also add any explicit extra suites and source lines if self.has_base_suite or extra_suites or extra_source_lines: import re sourceslist_fname = os.path.join(tdir, 'etc', 'apt', 'sources.list') if not mirror: with open(sourceslist_fname, 'r') as f: contents = f.read() matches = re.findall( 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', contents, ) if not matches: print_error('Unable to detect default APT repository URL (no regex matches).') return False mirror = matches[0] if not mirror: print_error('Unable to detect default APT repository URL.') return False if not components: # FIXME: We should really be more clever here, e.g. depend on python-apt # and parse sources.list properly components = ['main'] with open(sourceslist_fname, 'a') as f: if self.has_base_suite: f.write( 'deb {mirror} {suite} {components}\n'.format( mirror=mirror, suite=self.suite, components=' '.join(components) ) ) if extra_suites: f.write('\n') for esuite in extra_suites: if esuite == self.suite or esuite == bootstrap_suite: # don't add existing suites multiple times continue f.write( 'deb {mirror} {esuite} {components}\n'.format( mirror=mirror, esuite=esuite, components=' '.join(components) ) ) if extra_source_lines: f.write('\n') for line in extra_source_lines.split('\\n'): f.write('{}\n'.format(line.strip())) # set preference suites in dependency resolution self._setup_apt_repo_preferences(tdir, extra_suites) # write our default APT settings for this container aptconf_fname = os.path.join(tdir, 'etc', 'apt', 'apt.conf.d', '99debspawn') with open(aptconf_fname, 'w') as f: # fail immediately with a proper exit code when e.g. apt update fails, # so we can retry and don't silently use old packages # (only available with newer APT versions) f.write('APT::Update::Error-Mode "any";\n') f.write('APT::AutoRemove::SuggestsImportant "false";\n') f.write('APT::AutoRemove::RecommendsImportant "false";\n') f.write('Acquire::Languages "none";\n') if not allow_recommends: f.write('APT::Install-Recommends "false";\n') f.write('APT::Install-Suggests "false";\n') if with_init: # if we are allowing to boot this container, add a passwordless root login # hack so the user gets a root shell instead of a login prompt getty_override_fname = os.path.join( tdir, 'etc', 'systemd', 'system', 'console-getty.service.d', 'autorootlogin.conf' ) os.makedirs(os.path.dirname(getty_override_fname), exist_ok=True) with open(getty_override_fname, 'w') as f: # fail immediately with a proper exit code when e.g. 
apt update fails, # so we can retry and don't silently use old packages # (only available with newer APT versions) f.write( ( '[Service]\n' 'ExecStart=\n' 'ExecStartPre=-/usr/bin/sed -i \'/pam_loginuid.so/d\' /etc/pam.d/login\n' 'ExecStart=-/sbin/agetty --autologin root --noclear ' '--keep-baud - 115200,38400,9600 $TERM\n' ) ) # delete unwanted files, especially resolv.conf as a broken one will # mess with the next step self._remove_unwanted_files(tdir) # configure APT proxy, so the configure operation will work behind proxys self._setup_apt_proxy(tdir) # setup hosts self._setup_etchosts(tdir) print_section('Configure') if ( nspawn_run_helper_persist( self, tdir, self.new_nspawn_machine_name(), '--update', build_uid=self._builder_uid ) != 0 ): return False # drop any unwanted files (again) before building the tarball self._remove_unwanted_files(tdir) print_section('Creating Tarball') self._clear_image_tree(tdir) compress_directory(tdir, self.get_image_location()) # store configuration settings, so we can later recreate this tarball # or just display information about it self._write_config_json( mirror, components, extra_suites, extra_source_lines, allow_recommends=allow_recommends, with_init=with_init, ) return True def create( self, mirror: str = None, components: list[str] = None, *, extra_suites: list[str] = None, extra_source_lines: str = None, allow_recommends: bool = False, with_init: bool = False, ): '''Create new container base image (internal method)''' ensure_root() if self.exists(): print_error('This configuration has already been created. You can only delete or update it.') return False ret = self._create_internal( mirror=mirror, components=components, extra_suites=extra_suites, extra_source_lines=extra_source_lines, allow_recommends=allow_recommends, with_init=with_init, show_header=True, ) if ret: print_info('Done.') return ret def delete(self): '''Remove container base image''' ensure_root() if not self._load_existent(): print_error('Can not delete "{}": This configuration does not exist.'.format(self.name)) return False print_header('Removing base image {}'.format(self.name)) print_section('Deleting cache') # remove packages cache cache_size = self._aptcache.clear() print_info('Removed {} cached packages.'.format(cache_size)) self._aptcache.delete() # remove cached images shutil.rmtree(self.get_image_cache_dir()) print_info('Cache directory removed.') print_section('Deleting base tarball') os.remove(self.get_image_location()) config_fname = self.get_config_location() if os.path.isfile(config_fname): print_section('Deleting configuration manifest') os.remove(config_fname) print_info('Done.') return True @contextmanager def new_instance(self, basename=None): with temp_dir() as tdir: if self.cacheimg_exists(): image_fname = self.get_cache_image_location() else: image_fname = self.get_image_location() decompress_tarball(image_fname, tdir) self._setup_apt_proxy(tdir) yield tdir, self.new_nspawn_machine_name() def make_instance_permanent(self, instance_dir): '''Add changes done in the current instance to the main tarball of this OS tree, replacing it.''' # remove unwanted files from the tarball self._clear_image_tree(instance_dir) if self._cachekey: tarball_name = self.get_cache_image_location() else: tarball_name = self.get_image_location() tarball_name_old = '{}.old'.format(tarball_name) if os.path.isfile(tarball_name): os.replace(tarball_name, tarball_name_old) compress_directory(instance_dir, tarball_name) if os.path.isfile(tarball_name_old): os.remove(tarball_name_old) 
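# With the old backup copy removed, report the size of the freshly compressed tarball.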
tar_size = os.path.getsize(tarball_name) if self._cachekey: print_info( 'New compressed tarball size (for {}) is {}'.format(self._cachekey, format_filesize(tar_size)) ) else: print_info('New compressed tarball size is {}'.format(format_filesize(tar_size))) def update(self): '''Update container base image''' ensure_root() if not self._load_existent(): print_error('Can not update "{}": This configuration does not exist.'.format(self.name)) return False print_header('Updating container image') with self.new_instance() as (instance_dir, _): # ensure helper script runner exists and is up to date self._copy_helper_script(instance_dir) print_section('Update') if ( nspawn_run_helper_persist( self, instance_dir, self.new_nspawn_machine_name(), '--update', build_uid=self._builder_uid, ) != 0 ): return False # setup hosts self._setup_etchosts(instance_dir) # drop unwanted files from the image self._remove_unwanted_files(instance_dir) print_section('Recreating tarball') self.make_instance_permanent(instance_dir) print_section('Cleaning up cache') cache_size = self._aptcache.clear() print_info('Removed {} cached packages.'.format(cache_size)) # remove now-outdated cached images shutil.rmtree(self.get_image_cache_dir()) print_info('Done.') return True def recreate(self): '''Recreate a container base image''' ensure_root() if not self._load_existent(): print_error('Can not recreate "{}": The image does not exist.'.format(self.name)) return False config_fname = self.get_config_location() if not os.path.isfile(config_fname): print_error( 'Can not recreate "{}": Unable to find configuration data for this image.'.format(self.name) ) return False print_header('Recreating container image') # read configuration data with open(config_fname, 'rt') as f: cdata: T.Dict[str, T.Union[str, bool]] = json.loads(f.read()) self._name = cdata.get('Name', self.name) self._custom_name = cdata.get('CustomName', self._custom_name) self._suite = cdata.get('Suite', self.suite) self._base_suite = cdata.get('BaseSuite', self.base_suite) self._arch = cdata.get('Architecture', self.arch) self._variant = cdata.get('Variant', self.variant) mirror = cdata.get('Mirror') components = cdata.get('Components') extra_suites = cdata.get('ExtraSuites', []) extra_source_lines = cdata.get('ExtraSourceLines') allow_recommends = cdata.get('AllowRecommends', False) with_init = cdata.get('IncludesInit', False) print_section('Deleting cache') cache_size = self._aptcache.clear() print_info('Removed {} cached packages.'.format(cache_size)) self._aptcache.delete() print_info('Cache directory removed.') # move old image tarball out of the way image_name = self.get_image_location() image_name_old = self.get_image_location() + '.old' if os.path.isfile(image_name_old): print_info('Removing cruft image') os.remove(image_name_old) os.rename(image_name, image_name_old) print_info('Old tarball moved.') # ty to create the tarball again try: ret = self._create_internal( mirror=mirror, components=components, extra_suites=extra_suites, extra_source_lines=extra_source_lines, allow_recommends=allow_recommends, with_init=with_init, show_header=False, ) except Exception as e: print_error('Error while trying to create image: {}'.format(str(e))) ret = False if ret: if os.path.isfile(image_name_old): print_info('Removing old image') os.remove(image_name_old) print_info('Removing outdated cached images') shutil.rmtree(self.get_image_cache_dir()) print_info('Done.') return True else: print_info('Restoring old tarball') if os.path.isfile(image_name): print_info('Removing 
failed new image') os.remove(image_name) os.rename(image_name_old, image_name) print_info('Recreation failed.') return False def login(self, persistent=False, *, allowed: list[str] = None, boot: bool = False): '''Interactive shell login into the container''' ensure_root() if not self._load_existent(): print_info('Can not enter "{}": This configuration does not exist.'.format(self.name)) return False print_header( 'Login (persistent changes) for {}'.format(self.name) if persistent else 'Login for {}'.format(self.name) ) with self.new_instance() as (instance_dir, _): # ensure helper script runner exists and is up to date self._copy_helper_script(instance_dir) # run an interactive shell in the new container nspawn_run_persist( self, instance_dir, self.new_nspawn_machine_name(), '/srv', verbose=True, allowed=allowed, boot=boot, ) if persistent: print_section('Recreating tarball') self.make_instance_permanent(instance_dir) else: print_info('Changes discarded.') print_info('Done.') return True def retrieve_artifacts(self, src_dir: str, dest_dir: T.Optional[str] = None): from glob import glob print_section('Retrieving build artifacts') if not dest_dir: dest_dir = self.results_dir o_uid, o_gid = get_owner_uid_gid() acount = 0 for f in glob(os.path.join(src_dir, '*.*')): if os.path.isfile(f): target_fname = os.path.join(dest_dir, os.path.basename(f)) safe_copy(f, target_fname) os.chown(target_fname, o_uid, o_gid, follow_symlinks=False) acount += 1 print_info('Copied {} files.'.format(acount)) def _copy_command_script_to_instance_dir(self, instance_dir: str, command_script: str) -> T.Optional[str]: ''' Copy a script from the host to the current instance directory and make it executable. Contains the path to the executable script as seen from inside the container. ''' host_script = os.path.abspath(command_script) if not os.path.isfile(host_script): return None script_location = os.path.join(instance_dir, 'srv', 'tmp') Path(script_location).mkdir(parents=True, exist_ok=True) script_fname = os.path.join(script_location, os.path.basename(host_script)) if os.path.isfile(script_fname): os.remove(script_fname) shutil.copy2(host_script, script_fname) os.chmod(script_fname, 0o0755) return os.path.join('/srv', 'tmp', os.path.basename(host_script)) def run( self, command, build_dir, artifacts_dir, boot: bool = False, init_command=None, copy_command=False, header_msg=None, bind_build_dir: T.Optional[str] = None, allowed: list[str] = None, ): '''Run an arbitrary command or script in the container''' ensure_root() if not self._load_existent(): print_error('Can not run command in "{}": The base image does not exist.'.format(self.name)) return False if len(command) <= 0: print_error('No command was given. Can not continue.') return False if isinstance(init_command, str): if init_command: import shlex init_command = shlex.split(init_command) init_command = listify(init_command) allowed = listify(allowed) if bind_build_dir == 'n': bind_build_dir = None # ensure we have absolute paths if build_dir: build_dir = os.path.normpath(os.path.abspath(build_dir)) if artifacts_dir: artifacts_dir = os.path.normpath(os.path.abspath(artifacts_dir)) if self._cachekey and init_command and not self.cacheimg_exists(): print_header('Preparing template for `{}`'.format(self._cachekey)) # we do not have a cached image prepared, let's do that now! 
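# The block below builds the cache image: unpack a fresh instance, run the container
# setup helper plus the (optional) init command, then store the resulting tree via
# make_instance_permanent() under self._cachekey, so later runs with the same key can
# reuse the prepared image instead of repeating the initialization.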
with self.new_instance() as (instance_dir, machine_name): # ensure helper script runner exists and is up to date self._copy_helper_script(instance_dir) if copy_command: # copy initialization script from host to container host_script = init_command[0] init_command[0] = self._copy_command_script_to_instance_dir(instance_dir, host_script) if not init_command[0]: print_error( ( 'Unable to find initialization script "{}", ' 'can not copy it to the container. Exiting.' ).format(host_script) ) return False r = nspawn_run_helper_persist( self, instance_dir, machine_name, '--prepare-run', '/srv', build_uid=self._builder_uid ) if r != 0: print_error('Container setup failed.') return False # we do not want some permissions to be in effect here, # as they may have unwanted effects on the final cached image banned_permissions = ['full-dev', 'full-proc', 'read-kmods'] filtered_allowed = [] for perm in allowed: if perm not in banned_permissions: filtered_allowed.append(perm) init_nspawn_flags = [] if build_dir: if not bind_build_dir: shutil.copytree( build_dir, os.path.join(instance_dir, 'srv', 'build'), dirs_exist_ok=True ) else: if bind_build_dir == 'rw': init_nspawn_flags = ['--bind={}:/srv/build/'.format(build_dir)] elif bind_build_dir == 'ro': init_nspawn_flags = ['--bind-ro={}:/srv/build/'.format(build_dir)] r = nspawn_run_persist( self, instance_dir, machine_name, '/srv', init_command, init_nspawn_flags, allowed=filtered_allowed, ) if r != 0: return False print_info('Storing prepared image in cache') self.make_instance_permanent(instance_dir) if header_msg: print_header(header_msg) if self._cachekey and init_command and self.cacheimg_exists(): print_info('Using cached container image `{}`'.format(self._cachekey)) with self.new_instance() as (instance_dir, machine_name): # ensure helper script runner exists and is up to date self._copy_helper_script(instance_dir) if copy_command: # copy the script from the host into our container and execute it there host_script = command[0] command[0] = self._copy_command_script_to_instance_dir(instance_dir, host_script) if not command[0]: print_error( ('Unable to find script "{}", can not copy it to the container. ' 'Exiting.').format( host_script ) ) return False r = nspawn_run_helper_persist( self, instance_dir, machine_name, '--prepare-run', '/srv', build_uid=self._builder_uid ) if r != 0: print_error('Container setup failed.') return False # Create a few directories we may use for bindmounting if some allow-flags are set, # and which are not commonly present in the base image. # This is only needed for `run` actions, and regular package builds should not require # bindmounts to these directories. 
os.makedirs(os.path.join(instance_dir, 'lib', 'modules'), exist_ok=True) os.makedirs(os.path.join(instance_dir, 'boot'), exist_ok=True) os.makedirs(os.path.join(instance_dir, 'srv', 'artifacts'), exist_ok=True) print_section('Running Task') nspawn_flags = [] chdir = '/srv' if build_dir: chdir = '/srv/build' if not bind_build_dir: shutil.copytree(build_dir, os.path.join(instance_dir, 'srv', 'build'), dirs_exist_ok=True) else: if bind_build_dir == 'rw': nspawn_flags.extend(['--bind={}:/srv/build/'.format(build_dir)]) elif bind_build_dir == 'ro': nspawn_flags.extend(['--bind-ro={}:/srv/build/'.format(build_dir)]) r = nspawn_run_persist( self, instance_dir, machine_name, chdir, command, nspawn_flags, allowed=allowed, boot=boot ) if r != 0: return False # copy results to target directory self.retrieve_artifacts(os.path.join(instance_dir, 'srv', 'artifacts'), artifacts_dir) print_info('Done.') return True def print_container_base_image_info(gconf): ''' Search for all available container base images and list information about them. ''' from glob import glob osroots_dir = gconf.osroots_dir tar_files = [] if os.path.isdir(osroots_dir): tar_files = list(glob(os.path.join(osroots_dir, '*.tar.zst'))) if not tar_files: print_info('No container base images have been found!') return False tar_files_len = len(tar_files) for i, tar_fname in enumerate(tar_files): img_basepath = os.path.splitext(os.path.splitext(tar_fname)[0])[0] config_fname = img_basepath + '.json' imgid = os.path.basename(img_basepath) print('[{}]'.format(imgid)) cache_files = list(glob(os.path.join(osroots_dir, 'dcache', imgid, '*.tar.zst'))) cached_names = [] for cfile in cache_files: cname = os.path.basename(os.path.splitext(os.path.splitext(cfile)[0])[0]) cached_names.append(cname) # read configuration data if it exists if os.path.isfile(config_fname): with open(config_fname, 'rt') as f: cdata = json.loads(f.read()) for key, value in cdata.items(): if type(value) is list: value = '; '.join(value) print('{} = {}'.format(key, value)) tar_size = os.path.getsize(tar_fname) print('Size = {}'.format(format_filesize(tar_size))) if cached_names: print('CachedImages = {}'.format('; '.join(cached_names))) if i != tar_files_len - 1: print() debspawn-0.6.4/debspawn/utils/000077500000000000000000000000001456520253100162725ustar00rootroot00000000000000debspawn-0.6.4/debspawn/utils/__init__.py000066400000000000000000000026001456520253100204010ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . 
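# Re-export the most commonly used helpers from the sibling modules (env, log, misc,
# command) so callers can simply do `from .utils import print_info, temp_dir, ...`;
# the public surface is listed in __all__ below.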
from .env import unicode_allowed, colored_output_allowed from .log import print_info, print_warn, print_error, print_header, print_section from .misc import ( cd, listify, temp_dir, rmtree_mntsafe, systemd_escape, format_filesize, hardlink_or_copy, ) from .command import safe_run, run_forwarded __all__ = [ 'print_info', 'print_warn', 'print_error', 'print_header', 'print_section', 'colored_output_allowed', 'unicode_allowed', 'safe_run', 'run_forwarded', 'listify', 'temp_dir', 'cd', 'hardlink_or_copy', 'format_filesize', 'rmtree_mntsafe', 'systemd_escape', ] debspawn-0.6.4/debspawn/utils/command.py000066400000000000000000000061441456520253100202670ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2016-2022 Matthias Klumpp # Copyright (C) 2012-2013 Paul Tagliamonte # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . import sys import shlex import subprocess from .log import TwoStreamLogger class SubprocessError(Exception): def __init__(self, out, err, ret, cmd): self.out = out self.err = err self.ret = ret self.cmd = cmd super(SubprocessError, self).__init__('%s: %d\n%s' % (str(self.cmd), self.ret, str(self.err))) def __str__(self): return '%s: %d\n%s' % (str(self.cmd), self.ret, str(self.err)) # Input may be a byte string, a unicode string, or a file-like object def run_command(command, input=None): if not isinstance(command, list): command = shlex.split(command) if not input: input = None elif isinstance(input, str): input = input.encode('utf-8') elif not isinstance(input, bytes): input = input.read() try: pipe = subprocess.Popen( command, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) except OSError: return (None, None, -1) (output, stderr) = pipe.communicate(input=input) (output, stderr) = (c.decode('utf-8', errors='ignore') for c in (output, stderr)) return (output, stderr, pipe.returncode) def safe_run(cmd, input=None, expected=0): if not isinstance(expected, tuple): expected = (expected,) out, err, ret = run_command(cmd, input=input) if ret not in expected: raise SubprocessError(out, err, ret, cmd) return out, err, ret def run_forwarded(command): ''' Run a command, forwarding all output to the current stdout as well as to our build-logger in case we have one set previously. 
''' if not isinstance(command, list): command = shlex.split(command) if isinstance(sys.stdout, TwoStreamLogger): proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # ensure output is written to our file as well as stdout (as sys.stdout may be a redirect) while True: line = proc.stdout.readline() if proc.poll() is not None: break sys.stdout.write(str(line, 'utf-8', 'replace')) return proc else: return subprocess.run(command, check=False) debspawn-0.6.4/debspawn/utils/env.py000066400000000000000000000140521456520253100174360ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2017-2022 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . import os import sys import shutil from contextlib import contextmanager _unicode_allowed = True # store whether we are allowed to use unicode _owner_uid = 0 # uid of the user on whose behalf we are running _owner_gid = 0 # gid of the user on whose behalf we are running def set_owning_user(user, group=None): ''' Set the user on whose behalf we are running. This is useful so we can drop privileges to the perticular user in many cases. ''' from grp import getgrnam from pwd import getpwnam, getpwuid if user.isdecimal(): uid = int(user) else: uid = getpwnam(user).pw_uid if not group: gid = getpwuid(uid).pw_gid elif group.isdecimal(): gid = int(group) else: gid = getgrnam(group).gr_gid global _owner_uid global _owner_gid _owner_uid = uid _owner_gid = gid def ensure_root(): ''' Ensure we are running as root and all code following this function is privileged. ''' if os.geteuid() == 0: return args = sys.argv.copy() owner_set = any(a.startswith('--owner=') for a in sys.argv) if owner_set: # we don't override an owner explicitly set by the user args = sys.argv.copy() else: args = [sys.argv[0]] # set flag to tell the new process who it can impersonate # for unprivileged actions. It it is root, just omit the flag. uid = os.getuid() gid = os.getgid() if uid != 0 or gid != 0: args.append('--owner={}:{}'.format(uid, gid)) args.extend(sys.argv[1:]) def filter_env_far(result, name): value = os.environ.get(name) if not value: return result.append('{}={}'.format(name, shlex.quote(value))) if shutil.which('sudo'): # Filter "good" environment variables that we want to have after running sudo. 
# Most of those are standard variables affecting debsign bahevior later, in case # the user has requested signing import shlex env = [] filter_env_far(env, 'DEBEMAIL') filter_env_far(env, 'DEBFULLNAME') filter_env_far(env, 'GPGKEY') filter_env_far(env, 'GPG_AGENT_INFO') filter_env_far(env, 'HTTP_PROXY') filter_env_far(env, 'HTTPS_PROXY') filter_env_far(env, 'http_proxy') filter_env_far(env, 'https_proxy') os.execvp("sudo", ["sudo"] + env + args) else: print('This command needs to be run as root.') sys.exit(1) @contextmanager def switch_unprivileged(): ''' Run actions using the unprivileged user ID on the behalf of which we are running. This is NOT a security feature! ''' import pwd if _owner_uid == 0 and _owner_gid == 0: # we can't really do much here, we have to run # as root, as we don't know an unprivileged user # to switch to yield else: orig_egid = os.getegid() orig_euid = os.geteuid() orig_home = os.environ.get('HOME') if not orig_home: orig_home = pwd.getpwuid(os.getuid()).pw_dir try: os.setegid(_owner_gid) os.seteuid(_owner_uid) os.environ['HOME'] = pwd.getpwuid(_owner_uid).pw_dir yield finally: os.setegid(orig_egid) os.seteuid(orig_euid) os.environ['HOME'] = orig_home def get_owner_uid_gid(): return _owner_uid, _owner_gid def get_random_free_uid_gid(): '''Get a random unused UID and GID for the current system.''' import pwd import random uid = 1000 gid = 1000 for pw in pwd.getpwall(): if pw.pw_name == 'nobody': continue if pw.pw_uid > uid: uid = pw.pw_uid if pw.pw_gid > gid: gid = pw.pw_gid # we can not use an extremely large number here, as otherwise the container's # lastlog/faillog will grow to insane sizes r = random.randint(100, 2048) return uid + r, gid + r def colored_output_allowed(): return (hasattr(sys.stdout, "isatty") and sys.stdout.isatty()) or ( 'TERM' in os.environ and os.environ['TERM'] == 'ANSI' ) def unicode_allowed(): return _unicode_allowed def set_unicode_allowed(val): global _unicode_allowed _unicode_allowed = val def get_free_space(path): ''' Return free space of :path ''' real_path = os.path.realpath(path) stat = os.statvfs(real_path) # get free space in MiB. free_space = float(stat.f_bsize * stat.f_bavail) return free_space def get_tree_size(path): ''' Return total size of files in path and subdirs. If is_dir() or stat() fails, print an error message to stderr and assume zero size (for example, file has been deleted). ''' total = 0 for entry in os.scandir(path): try: is_dir = entry.is_dir(follow_symlinks=False) except OSError as error: print('Error calling is_dir():', error, file=sys.stderr) continue if is_dir: total += get_tree_size(entry.path) else: try: total += entry.stat(follow_symlinks=False).st_size except OSError as error: print('Error calling stat():', error, file=sys.stderr) return total debspawn-0.6.4/debspawn/utils/log.py000066400000000000000000000134451456520253100174340ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2017-2022 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . import os import re import sys from .env import unicode_allowed from .misc import safe_copy def console_supports_color(): ''' Returns True if the running system's terminal supports color, and False otherwise. ''' is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() return 'ANSICON' in os.environ or is_a_tty def print_textbox(title, tl, hline, tr, vline, bl, br): def write_utf8(s): sys.stdout.buffer.write(s.encode('utf-8')) tlen = len(title) write_utf8('\n{}'.format(tl)) write_utf8(hline * (10 + tlen)) write_utf8('{}\n'.format(tr)) write_utf8('{} {}'.format(vline, title)) write_utf8(' ' * 8) write_utf8('{}\n'.format(vline)) write_utf8(bl) write_utf8(hline * (10 + tlen)) write_utf8('{}\n'.format(br)) sys.stdout.flush() def print_header(title): if unicode_allowed(): print_textbox(title, '╔', '═', '╗', '║', '╚', '╝') else: print_textbox(title, '+', '═', '+', '|', '+', '+') def print_section(title): if unicode_allowed(): print_textbox(title, '┌', '─', '┐', '│', '└', '┘') else: print_textbox(title, '+', '-', '+', '|', '+', '+') def print_info(*arg): ''' Prints an information message and ensures that it shows up on stdout immediately. ''' print(*arg) sys.stdout.flush() def print_warn(*arg): ''' Prints an information message and ensures that it shows up on stdout immediately. ''' if console_supports_color(): print('\033[93m/!\\\033[0m', *arg) else: print('/!\\', *arg) sys.stdout.flush() def print_error(*arg): ''' Prints an information message and ensures that it shows up on stdout immediately. ''' if console_supports_color(): print('\033[91mERROR:\033[0m', *arg, file=sys.stderr) else: print('ERROR:', *arg, file=sys.stderr) sys.stderr.flush() def print_bullet(*arg, large: bool = False, indent: int = 0): ''' Prints a bullet point to the console, with a set indentation and style. ''' if unicode_allowed(): b = '●' if large else '•' else: b = '*' print((' ' * indent) + b, *arg) def print_bool_item(prefix: str, b: bool, text_true: str = 'yes', text_false: str = 'no'): ''' Prints a (colored, if possible) boolean item with a given prefix. ''' if console_supports_color(): s = '\033[92m{}\033[0m'.format(text_true) if b else '\033[91m{}\033[0m'.format(text_false) else: s = text_true if b else text_false if prefix: print(prefix, s) else: print(prefix) sys.stdout.flush() def input_bool(question_text, default=False) -> bool: """As user a Yes/No question.""" if default: default_info = '[Y/n]' else: default_info = '[y/N]' while True: try: in_str = input('{} {}:'.format(question_text, default_info)) except EOFError: return default if in_str == 'y' or in_str == 'Y': return True elif in_str == 'n' or in_str == 'N': return False elif not in_str: return default class TwoStreamLogger: ''' Permits logging messages to stdout/stderr as well as to a file. 
''' class Buffer: def __init__(self, fstream, cstream): self._fstream = fstream self._cstream = cstream def write(self, message): self._fstream.write(str(message, 'utf-8', 'replace')) self._cstream.buffer.write(message) def __init__(self, fstream, cstream, fflush_always=False): self._fstream = fstream self._cstream = cstream self._fflush_always = fflush_always self._colorsub = re.compile('\x1b\\[(K|.*?m)') self.buffer = TwoStreamLogger.Buffer(fstream, cstream) def write(self, message): # write message to console self._cstream.write(message) if self._fflush_always: self.flush() # write message to file, stripping ANSI colors self._fstream.write(self._colorsub.sub('', message)) def flush(self): self._cstream.flush() self._fstream.flush() def copy_to(self, fname): self.flush() safe_copy(self._fstream.name, fname, preserve_mtime=False) def isatty(self): return self._cstream.isatty() def capture_console_output(): ''' Direct console output to a file as well as to the original stdout/stderr terminal. ''' from tempfile import NamedTemporaryFile logfile = NamedTemporaryFile(mode='a', prefix='ds_', suffix='.log') nstdout = TwoStreamLogger(logfile, sys.stdout) nstderr = TwoStreamLogger(logfile, sys.stderr, True) sys.stdout = nstdout sys.stderr = nstderr def save_captured_console_output(fname): from .env import get_owner_uid_gid if hasattr(sys.stdout, 'copy_to'): o_uid, o_gid = get_owner_uid_gid() sys.stdout.copy_to(fname) os.chown(fname, o_uid, o_gid) debspawn-0.6.4/debspawn/utils/misc.py000066400000000000000000000250401456520253100176000ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2017-2022 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . import os import sys import stat import fcntl import shutil import typing as T import subprocess from pathlib import Path from contextlib import contextmanager from ..config import GlobalConfig class MountError(Exception): """Error while dealing with mountpoints.""" def listify(item: T.Any): ''' Return a list of :item, unless :item already is a lit. ''' if not item: return [] return item if isinstance(item, list) else [item] @contextmanager def cd(where): ncwd = os.getcwd() try: yield os.chdir(where) finally: os.chdir(ncwd) def random_string(prefix: T.Optional[str] = None, count: int = 8): ''' Create a string of random alphanumeric characters of a given length, separated with a hyphen from an optional prefix. 
''' from random import choice from string import digits, ascii_lowercase if count <= 0: count = 1 rdm_id = ''.join(choice(ascii_lowercase + digits) for _ in range(count)) if prefix: return '{}-{}'.format(prefix, rdm_id) return rdm_id def systemd_escape(name: str) -> T.Optional[str]: '''Escape a string using systemd's escaping rules.''' from .command import run_command out, _, ret = run_command(['systemd-escape', name]) if ret != 0: return None return out.strip() @contextmanager def temp_dir(basename=None): '''Context manager for a temporary directory in debspawn's temp-dir location. This function will also ensure that we will not jump into possibly still bind-mounted directories upon deletion, and will unmount those directories instead. ''' dir_name = random_string(basename) temp_basedir = GlobalConfig().temp_dir if not temp_basedir: temp_basedir = '/var/tmp/debspawn/' tmp_path = os.path.join(temp_basedir, dir_name) Path(tmp_path).mkdir(parents=True, exist_ok=True) fd = os.open(tmp_path, os.O_RDONLY) # we hold a shared lock on the directory to prevent systemd-tmpfiles # from deleting it, just in case we are building something for days try: if fd > 0: fcntl.flock(fd, fcntl.LOCK_SH | fcntl.LOCK_NB) except (IOError, OSError): print('WARNING: Unable to lock temporary directory {}'.format(tmp_path), file=sys.stderr) sys.stderr.flush() try: yield tmp_path finally: try: fcntl.flock(fd, fcntl.LOCK_UN) rmtree_mntsafe(tmp_path) finally: if fd > 0: os.close(fd) def safe_copy(src, dst, *, preserve_mtime: bool = True): ''' Attempt to safely copy a file, by atomically replacing the destination and protecting against symlink attacks. ''' dst_tmp = random_string(dst + '.tmp') try: if preserve_mtime: shutil.copy2(src, dst_tmp) else: shutil.copy(src, dst_tmp) if os.path.islink(dst): os.remove(dst) os.replace(dst_tmp, dst) finally: try: os.remove(dst_tmp) except OSError: pass def maybe_remove(f): '''Delete a file if it exists, but do nothing if it doesn't.''' try: os.remove(f) except OSError: pass def format_filesize(num, suffix='B'): for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: if abs(num) < 1024.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1024.0 return "%.1f%s%s" % (num, 'Yi', suffix) def current_time_string(): '''Get the current time as human-readable string.''' from datetime import datetime, timezone utc_dt = datetime.now(timezone.utc) return utc_dt.astimezone().strftime('%Y-%m-%d %H:%M:%S UTC%z') def version_noepoch(version): '''Return version from :version without epoch.''' version_noe = version if ':' in version_noe: version_noe = version_noe.split(':', 1)[1] return version_noe def hardlink_or_copy(src, dst): '''Hardlink a file :src to :dst or copy the file in case linking is not possible''' try: os.link(src, dst) except (PermissionError, OSError): shutil.copy2(src, dst) def is_mountpoint(path) -> bool: '''Check if :path is a mountpoint. Unlike os.path.ismount, this function will also consider bindmountpoints. 
This function may be slow ''' if not os.path.exists(path): return False if os.path.ismount(path): return True ret = subprocess.run(['findmnt', '-M', str(path)], capture_output=True, check=False) if ret.returncode == 0: return True return False def bindmount(from_path, to_path): '''Create a bindmount point.''' cmd = ['mount', '--bind', from_path, to_path] ret = subprocess.run(cmd, capture_output=True, check=False) if ret.returncode != 0: raise MountError('Unable to create bindmount {} -> {}'.format(from_path, to_path)) def umount(path, lazy: bool = True): '''Try to unmount a path.''' cmd = ['umount'] if lazy: cmd.append('-l') cmd.append(path) ret = subprocess.run(cmd, capture_output=True, check=False) if ret.returncode != 0: raise MountError('Unable to umount path {}'.format(path)) # try again if the mountpoint is still there, as # overmounting may have happened if is_mountpoint(path): umount(path, lazy=lazy) def _rmtree_mntsafe_fd(topfd, path, onerror): try: with os.scandir(topfd) as scandir_it: entries = list(scandir_it) except OSError as err: err.filename = path onerror(os.scandir, path, sys.exc_info()) return for entry in entries: fullname = os.path.join(path, entry.name) try: is_dir = entry.is_dir(follow_symlinks=False) except OSError: is_dir = False else: if is_dir: try: orig_st = entry.stat(follow_symlinks=False) is_dir = stat.S_ISDIR(orig_st.st_mode) except OSError: onerror(os.lstat, fullname, sys.exc_info()) continue if is_dir: if is_mountpoint(fullname): try: umount(fullname) orig_st = os.stat(fullname, follow_symlinks=False) except Exception: onerror(umount, fullname, sys.exc_info()) continue try: dirfd = os.open(entry.name, os.O_RDONLY, dir_fd=topfd) except OSError: onerror(os.open, fullname, sys.exc_info()) else: try: if os.path.samestat(orig_st, os.fstat(dirfd)): _rmtree_mntsafe_fd(dirfd, fullname, onerror) try: os.rmdir(entry.name, dir_fd=topfd) except OSError: onerror(os.rmdir, fullname, sys.exc_info()) else: try: # This can only happen if someone replaces # a directory with a symlink after the call to # os.scandir or stat.S_ISDIR above. raise OSError('Cannot call rmtree on a symbolic link') except OSError: onerror(os.path.islink, fullname, sys.exc_info()) finally: os.close(dirfd) else: try: os.unlink(entry.name, dir_fd=topfd) except OSError: onerror(os.unlink, fullname, sys.exc_info()) def rmtree_mntsafe(path, ignore_errors=False, onerror=None): '''Recursively delete a directory tree, unmounting mount points if possible. This function is based on shutil.rmtree, but will not jump into mount points, but instead try to unmount them and if that fails leave them alone. This prevents data loss in case bindmounts were set carelessly. If ignore_errors is set, errors are ignored; otherwise, if onerror is set, it is called to handle the error with arguments (func, path, exc_info) where func is platform and implementation dependent; path is the argument to that function that caused it to fail; and exc_info is a tuple returned by sys.exc_info(). If ignore_errors is false and onerror is None, an exception is raised. ''' if ignore_errors: # pylint: disable=function-redefined def onerror(*args): pass elif onerror is None: # pylint: disable=misplaced-bare-raise def onerror(*args): raise # While the unsafe rmtree works fine on bytes, the fd based does not. 
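# Decode byte paths up front, since the fd-based traversal used below does not work on bytes.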
if isinstance(path, bytes): path = os.fsdecode(path) if os.path.ismount(path): try: umount(path) except Exception: onerror(umount, path, sys.exc_info()) return # Note: To guard against symlink races, we use the standard # lstat()/open()/fstat() trick. try: orig_st = os.lstat(path) except Exception: onerror(os.lstat, path, sys.exc_info()) return try: fd = os.open(path, os.O_RDONLY) except Exception: onerror(os.open, path, sys.exc_info()) return try: if os.path.samestat(orig_st, os.fstat(fd)): _rmtree_mntsafe_fd(fd, path, onerror) try: os.rmdir(path) except OSError: onerror(os.rmdir, path, sys.exc_info()) else: try: # symlinks to directories are forbidden, see bug #1669 raise OSError("Cannot call rmtree on a symbolic link") except OSError: onerror(os.path.islink, path, sys.exc_info()) finally: os.close(fd) debspawn-0.6.4/debspawn/utils/zstd_tar.py000066400000000000000000000040061456520253100204760ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2018-2022 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . import shutil from .command import run_command class TarError(Exception): """Generic error while using tar/zstd.""" def ensure_tar_zstd(): '''Check if the required binaries for compression are available''' if not shutil.which('zstd'): raise TarError( ( 'The "zsdt" binary was not found, we can not compress tarballs. ' 'Please install zstd to continue!' ) ) if not shutil.which('tar'): raise TarError( 'The "tar" binary was not found, we can not create tarballs. Please install tar to continue!' 
) def compress_directory(dirname, tarname): '''Compress a directory to a given tarball''' cmd = ['tar', '-C', dirname, '-I', 'zstd', '-cf', tarname, '.'] out, err, ret = run_command(cmd) if ret != 0: raise TarError('Unable to create tarball "{}":\n{}{}'.format(tarname, out, err)) def decompress_tarball(tarname, dirname): '''Compress a directory to a given tarball''' cmd = ['tar', '-C', dirname, '-I', 'zstd', '-xf', tarname] out, err, ret = run_command(cmd) if ret != 0: raise TarError('Unable to decompress tarball "{}":\n{}{}'.format(tarname, out, err)) debspawn-0.6.4/docs/000077500000000000000000000000001456520253100142575ustar00rootroot00000000000000debspawn-0.6.4/docs/__init__.py000066400000000000000000000000001456520253100163560ustar00rootroot00000000000000debspawn-0.6.4/docs/assemble_man.py000066400000000000000000000111121456520253100172530ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright (C) 2018-2022 Matthias Klumpp # # SPDX-License-Identifier: LGPL-3.0-or-later import os import sys from functools import reduce from subprocess import check_call from xml.sax.saxutils import escape as xml_escape sys.path.append("..") class DocbookEditor: def __init__(self): self._replacements = {} def add_substvar(self, name, replacement): self._replacements['@{}@'.format(name)] = replacement def register_command_flag_synopsis(self, actions, command_name): flags_text = '' flags_entries = '' for item in actions: options_text = xml_escape('|'.join(item.option_strings)) flags_text += '{}\n'.format(options_text) oid = item.option_strings[0] desc_text = None if oid == '-h': desc_text = 'Print brief help information about available commands.' if command_name != 'create': if oid == '--variant': desc_text = 'Set the variant of the selected image, that was used for bootstrapping.' elif oid == '-a': desc_text = 'The architecture of the base image that should be selected.' if not desc_text: desc_text = item.help desc_text = xml_escape(desc_text) if desc_text.startswith('CF|'): desc_text = desc_text[3:] desc_text = desc_text.replace('binary:', ':', 1) desc_text = desc_text.replace('arch:', ':', 1) desc_text = desc_text.replace('indep:', ':', 1) desc_text = desc_text.replace('source:', ':', 1) flags_entries += ''' {} {} '''.format( options_text, desc_text ) self.add_substvar('{}_FLAGS_SYNOPSIS'.format(command_name.upper()), flags_text) self.add_substvar('{}_FLAGS_ENTRIES'.format(command_name.upper()), flags_entries) def process_file(self, input_fname, output_fname): with open(input_fname, 'r') as f: template_content = f.read() result = reduce( lambda x, y: x.replace(y, self._replacements[y]), self._replacements, template_content ) with open(output_fname, 'w') as f: f.write(result) return output_fname def generate_docbook_pages(build_dir): from debspawn.cli import create_parser build_dir = os.path.abspath(build_dir) parser = create_parser() editor = DocbookEditor() editor.register_command_flag_synopsis(parser._get_optional_actions(), 'BASE') xml_manpages = [] xml_manpages.append(editor.process_file('docs/debspawn.1.xml', os.path.join(build_dir, 'debspawn.1.xml'))) for command, sp in parser._get_positional_actions()[0]._name_parser_map.items(): editor.register_command_flag_synopsis(sp._get_optional_actions(), command) template_fname = 'docs/debspawn-{}.1.xml'.format(command) if not os.path.isfile(template_fname): if command in ['ls', 'b']: continue # the ls and b shorthands need to manual page print('Manual page template {} is missing! 
Skipping it.'.format(template_fname)) continue xml_manpages.append( editor.process_file(template_fname, os.path.join(build_dir, os.path.basename(template_fname))) ) return xml_manpages def create_manpage(xml_src, out_dir): man_name = os.path.splitext(os.path.basename(xml_src))[0] out_fname = os.path.join(out_dir, man_name) print('Generating manual page {}'.format(man_name)) check_call( [ 'xsltproc', '--nonet', '--stringparam', 'man.output.quietly', '1', '--stringparam', 'funcsynopsis.style', 'ansi', '--stringparam', 'man.th.extra1.suppress', '1', '-o', out_fname, 'http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl', xml_src, ] ) return out_fname if __name__ == '__main__': generate_docbook_pages('/tmp') debspawn-0.6.4/docs/debspawn-build.1.xml000066400000000000000000000130221456520253100200360ustar00rootroot00000000000000 18 August, 2018"> ]> &command; 2018-2022 Matthias Klumpp Debspawn &date; &pagename; 1 &pagename; Build Debian packages in a container &command; @BUILD_FLAGS_SYNOPSIS@ SUITE DIR|DSC_FILE Description Build a Debian package from a directory or source package *.dsc file. debspawn will create a new container for the respective build using the base image specified, build the package and return build artifacts in the default output directory /var/lib/debspawn/results/ unless a different location was specified via the flag. Downloaded packages that are build dependencies are cached and will be reused on subsequent builds if possible. You can inject packages into the build environment that are not available in the preconfigured APT repositories by placing them in /var/lib/debspawn/injected-pkgs/${container-name}, or in /var/lib/debspawn/injected-pkgs/ to make a package available in all environments. Internally, debspawn will build a transient package repository with the respective packages and add it as a package source for APT. If you want to debug the package build process, you can pass the flag to debspawn. This will open an interactive root shell in the build environment post-build, no matter whether the build failed or succeeded. After investigating the issue / building the package manually, the shell can be exited and the user is asked whether debspawn should copy back the changes made in the packages' debian/ directory to the host to make them permanent. Please keep in mind that while interactive mode is enabled, no build log can be created. Examples You can build a package from its source directory, or just by passing a plain .dsc file to &command;. If the result should be automatically signed, the flag needs to be passed too: $ cd ~/packages/hello $ &command; sid --sign $ &command; --arch=i386 cosmic ./hello_2.10-1.dsc You can also build packages using git-buildpackage and debspawn. In this case the flag is also used to perform a Lintian static analysis check in the container after build: $ gbp buildpackage --git-builder='debspawn b sid --lintian --sign' To debug a build issue interactively, the flag can be used: $ &command; sid --interact Options @BUILD_FLAGS_ENTRIES@ Differences to sbuild On Debian, sbuild is the primary tool used for package building, which uses different technology. So naturally, the question is whether the sbuild build environments and the debspawn build environments are be identical or at least compatible. Due to the different technology used, there may be subtle differences between sbuild chroots and debspawn containers. 
The differences should not have any impact on package builds, and any such occurrence is highly likely a bug in the package's build process. If you think it is not, please file a bug against Debspawn. We try to be as close to sbuild's default environment as possible, but unfortunately can not make any guarantees. One way the build environment of debspawn differs from Debian's default sbuild setup intentionally is in its consistent use of unicode. By default, debspawn will ensure that unicode is always available and enabled. If you do not want this behavior, you can pass the flag to &command; to disable unicode in the tool itself and in the build environment. See Also debspawn-update(1), debspawn-create(1), dpkg-buildpackage(1). AUTHOR This manual page was written by Matthias Klumpp mak@debian.org. debspawn-0.6.4/docs/debspawn-create.1.xml000066400000000000000000000052401456520253100202050ustar00rootroot00000000000000 18 August, 2018"> ]> &command; 2018-2022 Matthias Klumpp Debspawn &date; &pagename; 1 &pagename; Create new container images &command; @CREATE_FLAGS_SYNOPSIS@ NAME Description Create a new base image for a suite known to debootstrap(1). The image will later be used to spawn ephemeral containers in which packages can be built. Examples You can easily create images for any suite that has a script in debootstrap. For example, to create a Debian Unstable image for your current machine architecture, you can use: $ &command; unstable A more advanced example, for building on Ubuntu 18.10 on the x86 architecture: $ &command; --arch=i386 cosmic The suite name is inferred from the container image name given as positional parameter. If it can not be inferred, you will need to pass the parameter with the primary suite name for this image. If a is passed and no is set, the image name will automatically be assumed to be for an overlay suite, which may not always be the desired result. Options NAME The name of the container image to create (usually the name of the suite). @CREATE_FLAGS_ENTRIES@ See Also debspawn-build(1), debootstrap(1), systemd-nspawn(1). AUTHOR This manual page was written by Matthias Klumpp mak@debian.org. debspawn-0.6.4/docs/debspawn-delete.1.xml000066400000000000000000000033211456520253100202020ustar00rootroot00000000000000 18 August, 2018"> ]> &command; 2018-2022 Matthias Klumpp Debspawn &date; &pagename; 1 &pagename; Remove a container image &command; @DELETE_FLAGS_SYNOPSIS@ NAME Description Remove an image known to debspawn and clear all data related to it. This explicitly includes any cached data, but does not include generated build artifacts that may still exist in the results directory. Options NAME The name of the container image to delete (usually a distribution suite name). @DELETE_FLAGS_ENTRIES@ See Also debspawn-create(1). AUTHOR This manual page was written by Matthias Klumpp mak@debian.org. debspawn-0.6.4/docs/debspawn-list.1.xml000066400000000000000000000027461456520253100177250ustar00rootroot00000000000000 18 August, 2018"> ]> &command; 2018-2022 Matthias Klumpp Debspawn &date; &pagename; 1 &pagename; List information about container images &command; @LIST_FLAGS_SYNOPSIS@ SUITE Description This command will list detailed information about all currently registered container images that Debspawn can use as build environments. Options @LIST_FLAGS_ENTRIES@ See Also debspawn-create(1), debspawn-update(1). AUTHOR This manual page was written by Matthias Klumpp mak@debian.org. 
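The DocBook templates above (and the ones that follow) are not shipped verbatim: docs/assemble_man.py, shown earlier in this section, fills in their @..._FLAGS_SYNOPSIS@ and @..._FLAGS_ENTRIES@ placeholders from the argparse definitions and then renders the result with xsltproc. A minimal sketch of driving that pipeline by hand, assuming it is run from the debspawn source root with xsltproc and the DocBook XSL stylesheets installed (the temporary directory prefix is only illustrative):

import tempfile
from docs.assemble_man import create_manpage, generate_docbook_pages

# Fill the @..._FLAGS_*@ substvars in every docs/debspawn-*.1.xml template,
# then render each resulting DocBook file into a manual page with xsltproc.
with tempfile.TemporaryDirectory(prefix='ds-man-') as build_dir:
    for xml_page in generate_docbook_pages(build_dir):
        print(create_manpage(xml_page, build_dir))

This mirrors what make_manpages() in install-sysdata.py (later in this section) does at installation time.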
debspawn-0.6.4/docs/debspawn-login.1.xml000066400000000000000000000034311456520253100200520ustar00rootroot00000000000000 18 August, 2018"> ]> &command; 2018-2022 Matthias Klumpp Debspawn &date; &pagename; 1 &pagename; Open interactive shell session in a container &command; @LOGIN_FLAGS_SYNOPSIS@ NAME Description This command enters an interactive shell session in a container that is normally used for building. This can be useful to inspect the build environment, or to manually customize the container image for special applications if the flag is set. Options NAME The name of the container image (usually a distribution suite name). @LOGIN_FLAGS_ENTRIES@ See Also debspawn(1), systemd-nspawn(1). AUTHOR This manual page was written by Matthias Klumpp mak@debian.org. debspawn-0.6.4/docs/debspawn-maintain.1.xml000066400000000000000000000047521456520253100205510ustar00rootroot00000000000000 18 August, 2018"> ]> &command; 2018-2022 Matthias Klumpp Debspawn &date; &pagename; 1 &pagename; Run various maintenance actions &command; @MAINTAIN_FLAGS_SYNOPSIS@ Description Perform various maintenance actions on debspawn. Actions this subcommand allows will affect generic settings of debspawn or all of its container images at once. It can also be used to display general, useful information about the system and debspawn installation to help with finding setup issues. Examples You can update all container images that debspawn knows of in one go: $ &command; --update-all If you want to get information about the current debspawn installation (useful when reporting an issue against it), the option will print a status summary and highlight issues: $ &command; --status You can clear all caches for all images to free up disk space (missing data will be downloaded or regenerated again when it is needed): $ &command; --clear-caches Options @MAINTAIN_FLAGS_ENTRIES@ See Also debspawn-build(1), debootstrap(1), systemd-nspawn(1). AUTHOR This manual page was written by Matthias Klumpp mak@debian.org. debspawn-0.6.4/docs/debspawn-run.1.xml000066400000000000000000000040741456520253100175520ustar00rootroot00000000000000 18 August, 2018"> ]> &command; 2018-2022 Matthias Klumpp Debspawn &date; &pagename; 1 &pagename; Run arbitrary commands in debspawn container session &command; @RUN_FLAGS_SYNOPSIS@ NAME COMMAND Description This subcommand allows you to run arbitrary commands in an ephemeral debspawn container, using the same environment that is normally used for building packages. &command; is explicitly designed to be used by other automation tools for custom applications, and usually you will want to use debspawn build instead to build Debian packages. Options NAME The name of the container image (usually a distribution suite name). COMMAND The command to run. @RUN_FLAGS_ENTRIES@ See Also debspawn-build(1). AUTHOR This manual page was written by Matthias Klumpp mak@debian.org. debspawn-0.6.4/docs/debspawn-update.1.xml000066400000000000000000000046041456520253100202270ustar00rootroot00000000000000 18 August, 2018"> ]> &command; 2018-2022 Matthias Klumpp Debspawn &date; &pagename; 1 &pagename; Update a container image &command; @UPDATE_FLAGS_SYNOPSIS@ NAME Description Update a container base image. This achieves the same thing as running apt update && apt full-upgrade on the base image and making the changes permanent. Additionally, &command; will prune all caches and ensure all required packages and scripts are installed in the container image. 
Running &command; on the images that are in use about once a week ensures builds will happen faster, due to less changes that have to be done prior to each build. Examples Updating images is easy, you just pass the same arguments you used for creating them, but use the update subcommand instead: $ &command; sid $ &command; --arch=i386 cosmic Options NAME The name of the container image (usually a distribution suite name). @UPDATE_FLAGS_ENTRIES@ See Also debspawn-create(1), debspawn-build(1). AUTHOR This manual page was written by Matthias Klumpp mak@debian.org. debspawn-0.6.4/docs/debspawn.1.xml000066400000000000000000000227171456520253100167540ustar00rootroot00000000000000 18 August, 2018"> ]> debspawn 2018-2022 Matthias Klumpp Debspawn &date; debspawn 1 &package; Build in nspawn containers &package; @BASE_FLAGS_SYNOPSIS@ Description This manual page documents the &package; command. &package; is a tool to build Debian packages in an isolated environment, using nspawn containers. By using containers, Debspawn can isolate builds from the host system much better than a regular chroot could. It also allows for more advanced features to manage builds, for example setting resource limits for individual builds. Please keep in mind that Debspawn is not a security feature! While it provides a lot of isolation from the host system, you should not run arbitrary untrusted code with it. The usual warnings for all technology based on Linux containers apply here. See systemd-nspawn(1) for more information on the container solution Debspawn uses. Debspawn also allows one to run arbitrary custom commands in its environment. This is useful to execute a variety of non-package build and QA actions that make sense to be run in the same environment in which packages are usually built. For more information about the Debspawn project, you can visit its project page. Subcommands &package; actions are invoked via subcommands. Refer to their individual manual pages for further details. Create a new container base image for a specific suite, architecture and variant. A custom mirror location can also be provided. For details, see debspawn-create(1). List information about all container image that Debspawn knows on the current host. For details, see debspawn-list(1). Delete a container base image and all data associated with it. For details, see debspawn-delete(1). Update a container base image, ensuring all packages are up to date and the image is set up properly for use with debspawn. For details, see debspawn-update(1). Build a Debian package in an isolated environment. For details, see debspawn-build(1). Get an interactive shell session in a container. For details, see debspawn-login(1). Run arbitrary commands in debspawn container session. This is primarily useful for using &package; to isolate non-package build processes. For details, see debspawn-run(1). Flags @BASE_FLAGS_ENTRIES@ Configuration Configuration is read from an optional TOML file, located at /etc/debspawn/global.toml or a location specified with . Specifying a config file on the command line will skip loading of the global, system-wide configuration. The following keys are valid at the document root level, all are optional: Location for stored container images. Default output directory for build artifacts on successful builds. Location for debspawn's package cache. Package files placed in the root of this directory are available to all containers to satisfy build dependencies, while ones placed in subdirectories with the OS image name (e.g. 
sid-arm64) will only be available to the specified container. Temporary data location (Default: /var/tmp/debspawn/). Set a default variant used for bootstrapping with debootstrap that gets used if no variant is explicitly set when creating a new image. Set to none to make "no variant" the default. (Default: buildd) Set the system call filter used by &package; containers. This will take a list of system call names or set names as described in the "System Call Filtering" section of systemd.exec(5). It also recognizes the special string-only values compat and nspawn-default, where compat will allow enough system calls to permit many builds and tests that would run in a regular sbuild(1) chroot to work with &package; as well. By setting nspawn-default, the more restrictive defaults of systemd-nspawn(1) are applied. (Default: compat) Boolean option. If set to true, unsafe options can be used for building software via &package; run, such as making the host's /dev and /proc filesystems available from within the container. See the --allow option of &package; run for more details. (Default: false) Boolean option. If set to false, &package; will not manage its own local cache of APT packages, but will instead always try to download them. It is only recommended to change this option if you are already running a separate APT package repository mirror or a caching proxy such as apt-cacher-ng(8). (Default: true) Set the bootstrap tool that should be used for bootstrapping new images. The tool should have an interface compatible with debootstrap(8). This option allows one to use alternative tools like mmdebstrap(1) with &package;. (Default: debootstrap) See Also dpkg-buildpackage(1), systemd-nspawn(1), sbuild(1). AUTHOR This manual page was written by Matthias Klumpp mak@debian.org. debspawn-0.6.4/install-sysdata.py000077500000000000000000000104061456520253100170210ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright (C) 2018-2022 Matthias Klumpp # # SPDX-License-Identifier: LGPL-3.0-or-later # # This is a helper script to install additional configuration and documentation into # system locations, which Python's setuptools and pip will not usually let us install. # import os import sys import shutil from pathlib import Path from argparse import ArgumentParser from tempfile import TemporaryDirectory try: import pkgconfig except ImportError: print() print( ( 'Unable to import pkgconfig. Please install the module ' '(apt install python3-pkgconfig or pip install pkgconfig) ' 'to continue.' 
) ) print() sys.exit(4) from docs.assemble_man import create_manpage, generate_docbook_pages class Installer: def __init__(self, root: str = None, prefix: str = None): if not root: root = os.environ.get('DESTDIR') if not root: root = '/' self.root = root if not prefix: prefix = '/usr/local' if self.root == '/' else '/usr' if prefix.startswith('/'): prefix = prefix[1:] self.prefix = prefix def install(self, src, dst, replace_vars=False): if dst.startswith('/'): dst = dst[1:] dst_full = os.path.join(self.root, dst, os.path.basename(src)) else: dst_full = os.path.join(self.root, self.prefix, dst, os.path.basename(src)) Path(os.path.dirname(dst_full)).mkdir(mode=0o755, parents=True, exist_ok=True) if replace_vars: with open(src, 'r') as f_src: with open(dst_full, 'w') as f_dst: for line in f_src: f_dst.write(line.replace('@PREFIX@', '/' + self.prefix)) else: shutil.copy(src, dst_full) os.chmod(dst_full, 0o644) print('{}\t\t{}'.format(os.path.basename(src), dst_full)) def chdir_to_source_root(): thisfile = __file__ if not os.path.isabs(thisfile): thisfile = os.path.normpath(os.path.join(os.getcwd(), thisfile)) os.chdir(os.path.dirname(thisfile)) def make_manpages(temp_dir): '''Build manual pages''' # check for xsltproc, we need it to build manual pages if not shutil.which('xsltproc'): print('The "xsltproc" binary was not found. Please install it to continue!') sys.exit(1) build_dir = os.path.join(temp_dir, 'docbook') Path(build_dir).mkdir(parents=True, exist_ok=True) pages = generate_docbook_pages(build_dir) man_files = [] for page in pages: man_files.append(create_manpage(page, temp_dir)) return man_files def install_data(temp_dir: str, root_dir: str, prefix_dir: str): chdir_to_source_root() print('Checking dependencies') if not pkgconfig.installed('systemd', '>= 240'): print('Systemd is not installed on this system. Please make systemd available to continue.') sys.exit(4) print('Generating manual pages') manpage_files = make_manpages(temp_dir) print('Installing data') inst = Installer(root_dir, prefix_dir) sd_tmpfiles_dir = pkgconfig.variables('systemd')['tmpfilesdir'] sd_system_unit_dir = pkgconfig.variables('systemd')['systemdsystemunitdir'] man_dir = os.path.join('share', 'man', 'man1') inst.install('data/tmpfiles.d/debspawn.conf', sd_tmpfiles_dir) inst.install('data/services/debspawn-clear-caches.timer', sd_system_unit_dir) inst.install('data/services/debspawn-clear-caches.service', sd_system_unit_dir, replace_vars=True) for mf in manpage_files: inst.install(mf, man_dir) def main(): parser = ArgumentParser(description='Debspawn system data installer') parser.add_argument( '--root', action='store', dest='root', default=None, help='Root directory to install into.' 
) parser.add_argument( '--prefix', action='store', dest='prefix', default=None, help='Directory prefix (usually `/usr` or `/usr/local`).', ) options = parser.parse_args(sys.argv[1:]) with TemporaryDirectory(prefix='dsinstall-') as temp_dir: install_data(temp_dir, options.root, options.prefix) return 0 if __name__ == '__main__': sys.exit(main()) debspawn-0.6.4/lint.sh000077500000000000000000000010671456520253100146400ustar00rootroot00000000000000#!/usr/bin/env bash set -e BASEDIR=$(dirname "$0") cd $BASEDIR echo "=== Flake8 ===" python -m flake8 ./ --statistics python -m flake8 debspawn/dsrun --statistics echo "✓" echo "=== Pylint ===" python -m pylint -f colorized ./debspawn python -m pylint -f colorized ./debspawn/dsrun python -m pylint -f colorized ./tests ./data python -m pylint -f colorized setup.py install-sysdata.py echo "✓" echo "=== MyPy ===" python -m mypy . python -m mypy ./debspawn/dsrun echo "✓" echo "=== Isort ===" isort --diff . echo "✓" echo "=== Black ===" black --diff . debspawn-0.6.4/pyproject.toml000066400000000000000000000021341456520253100162430ustar00rootroot00000000000000[project] name = "debspawn" description = "Debian package builder and build helper using systemd-nspawn" authors = [ {name = "Matthias Klumpp", email = "matthias@tenstral.net"}, ] license = {text="LGPL-3.0-or-later"} readme = "README.md" requires-python = ">=3.9" dynamic = ['version'] [project.urls] Documentation = "https://github.com/lkhq/debspawn" Source = "https://github.com/lkhq/debspawn" [build-system] requires = ["setuptools", "wheel", "pkgconfig"] build-backend = "setuptools.build_meta" [tool.pylint.master] [tool.pylint.format] max-line-length = 120 [tool.pylint."messages control"] disable = [ 'C', 'R', 'fixme', 'unused-argument', 'global-statement', 'logging-format-interpolation', 'attribute-defined-outside-init', 'protected-access', 'broad-except', 'redefined-builtin', 'unspecified-encoding', ] [tool.pylint.reports] score = 'no' [tool.isort] py_version = 39 profile = "black" multi_line_output = 3 skip_gitignore = true length_sort = true atomic = true [tool.black] target-version = ['py39'] line-length = 110 skip-string-normalization = true debspawn-0.6.4/setup.cfg000066400000000000000000000001331456520253100151450ustar00rootroot00000000000000[flake8] max-line-length = 120 ignore = E203,W503 [metadata] description_file = README.md debspawn-0.6.4/setup.py000077500000000000000000000104301456520253100150420ustar00rootroot00000000000000#!/usr/bin/env python3 import os import sys import shutil import platform from subprocess import check_call from setuptools import setup from setuptools.command.install_scripts import install_scripts as install_scripts_orig sys.path.append(os.getcwd()) from debspawn import __appname__, __version__ # noqa: E402 thisfile = __file__ if not os.path.isabs(thisfile): thisfile = os.path.normpath(os.path.join(os.getcwd(), thisfile)) source_root = os.path.dirname(thisfile) class install_scripts(install_scripts_orig): def _check_command(self, command): if not shutil.which(command): print( 'The "{}" binary was not found. 
Please install it to continue!'.format(command), file=sys.stderr, ) sys.exit(1) def _check_commands_available(self): '''Check if certain commands are available that debspawn needs to work.''' self._check_command('systemd-nspawn') self._check_command('findmnt') self._check_command('zstd') self._check_command('debootstrap') self._check_command('dpkg') def run(self): if platform.system() == 'Windows': super().run() return if not self.skip_build: self.run_command('build_scripts') self.outfiles = [] if self.dry_run: return # We want the files to be installed without a suffix on Unix self.mkpath(self.install_dir) for infile in self.get_inputs(): infile = os.path.basename(infile) in_built = os.path.join(self.build_dir, infile) in_stripped = infile[:-3] if infile.endswith('.py') else infile outfile = os.path.join(self.install_dir, in_stripped) # NOTE: Mode is preserved by default self.copy_file(in_built, outfile) self.outfiles.append(outfile) # try to install configuration snippets, manual pages and other external data bin_install_dir = str(self.install_dir) if '/usr/' in bin_install_dir: install_root = bin_install_dir.split('/usr/', 1)[0] prefix = '/usr/local' if '/usr/local/' in bin_install_dir else '/usr' sysdata_install_script = os.path.join(source_root, 'install-sysdata.py') if os.path.isfile(sysdata_install_script) and os.path.isdir(install_root): check_call( [sys.executable, sysdata_install_script, '--root', install_root, '--prefix', prefix] ) else: print('Unable to install externally managed data!', file=sys.stderr) else: print( ( '\n\n ------------------------\n' 'Unable to install external configuration and manual pages!\n' 'While these files are not essential to work with debspawn, they will improve how it runs ' 'or are useful as documentation. Please install these files manually by running the ' '`install-sysdata.py` script from debspawn\'s source directory manually as root.\n' 'Installing these external files is not possible when installing e.g. with pip. If `setup.py` is ' 'used directly we make an attempt to install the files, but this attempt has failed.' '\n ------------------------\n\n' ), file=sys.stderr, ) cmdclass = { 'install_scripts': install_scripts, } packages = [ 'debspawn', 'debspawn.utils', ] package_data = {'': ['debspawn/dsrun']} scripts = ['debspawn.py'] install_requires = ['tomlkit>=0.8'] setup( name=__appname__, version=__version__, author="Matthias Klumpp", author_email="matthias@tenstral.net", description='Easily build Debian packages in systemd-nspawn containers', license="LGPL-3.0+", url="https://github.com/lkhq/debspawn", long_description=open(os.path.join(source_root, 'README.md')).read(), long_description_content_type='text/markdown', # python_requires='>=3.9', platforms=['any'], zip_safe=False, include_package_data=True, # packages=packages, cmdclass=cmdclass, package_data=package_data, scripts=scripts, install_requires=install_requires, ) debspawn-0.6.4/tests/000077500000000000000000000000001456520253100144715ustar00rootroot00000000000000debspawn-0.6.4/tests/__init__.py000066400000000000000000000021331456520253100166010ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2019-2020 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. 
# # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . import os import sys thisfile = __file__ if not os.path.isabs(thisfile): thisfile = os.path.normpath(os.path.join(os.getcwd(), thisfile)) source_root = os.path.normpath(os.path.join(os.path.dirname(thisfile), '..')) sys.path.append(os.path.normpath(source_root)) __all__ = ['source_root'] debspawn-0.6.4/tests/conftest.py000066400000000000000000000056411456520253100166760ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2019-2020 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . import os import sys import pytest # pylint: disable=redefined-outer-name @pytest.fixture(scope='session', autouse=True) def gconfig(): ''' Ensure the global config object is set up properly for unit-testing. ''' import shutil import debspawn.cli from . import source_root debspawn.cli.__mainfile = os.path.join(source_root, 'debspawn.py') class MockOptions: config = None no_unicode = False owner = None gconf = debspawn.cli.init_config(MockOptions()) test_tmp_dir = '/tmp/debspawn-test/' shutil.rmtree(test_tmp_dir, ignore_errors=True) os.makedirs(test_tmp_dir) gconf._instance._osroots_dir = os.path.join(test_tmp_dir, 'containers/') gconf._instance._results_dir = os.path.join(test_tmp_dir, 'results/') gconf._instance._aptcache_dir = os.path.join(test_tmp_dir, 'aptcache/') gconf._instance._injected_pkgs_dir = os.path.join(test_tmp_dir, 'injected-pkgs/') return gconf @pytest.fixture(scope='session', autouse=True) def ensure_root(): ''' Ensure we run with superuser permissions. ''' if os.geteuid() != 0: print('The testsuite has to be run with superuser permissions in order to create nspawn instances.') sys.exit(1) @pytest.fixture(scope='session') def build_arch(): ''' Retrieve the current architecture we should build packages for. 
''' from debspawn.utils.command import safe_run out, _, ret = safe_run(['dpkg', '--print-architecture']) assert ret == 0 arch = out.strip() if not arch: arch = 'amd64' # assume amd64 as default return arch @pytest.fixture(scope='session') def testing_container(gconfig, build_arch): ''' Create a container for Debian stable used for default tests ''' from debspawn.osbase import OSBase suite = 'stable' variant = 'minbase' components = ['main', 'contrib', 'non-free'] extra_suites = [] osbase = OSBase(gconfig, suite, build_arch, variant=variant, base_suite=None) r = osbase.create(None, components, extra_suites=extra_suites) assert r return (suite, build_arch, variant) debspawn-0.6.4/tests/test_cud.py000066400000000000000000000027641456520253100166660ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2019-2020 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . from debspawn.osbase import OSBase def test_container_create_delete(gconfig, testing_container): # the "default" container is created by a fixture. # what we actually want to do here in future is create and # delete containers with special settings pass def test_container_update(gconfig, testing_container): '''Update a container''' suite, arch, variant = testing_container osbase = OSBase(gconfig, suite, arch, variant) assert osbase.update() def test_container_recreate(gconfig, testing_container): '''Test recreating a container''' suite, arch, variant = testing_container osbase = OSBase(gconfig, suite, arch, variant) assert osbase.recreate() debspawn-0.6.4/tests/test_utils.py000066400000000000000000000054541456520253100172520ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Copyright (C) 2019-2022 Matthias Klumpp # # Licensed under the GNU Lesser General Public License Version 3 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the license, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software. If not, see . 
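# The tests below exercise the low-level mount helpers (bindmount, umount,
# is_mountpoint, rmtree_mntsafe) from debspawn.utils.misc. Like the rest of
# the suite they create real bind mounts, so they only work when run with
# superuser permissions (see the ensure_root fixture in conftest.py above).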
import os import tempfile from debspawn.utils.misc import umount, bindmount, is_mountpoint, rmtree_mntsafe def test_bindmount_umount(gconfig): with tempfile.TemporaryDirectory() as src_tmpdir1: with tempfile.TemporaryDirectory() as src_tmpdir2: with tempfile.TemporaryDirectory() as dest_tmpdir: bindmount(src_tmpdir1, dest_tmpdir) assert is_mountpoint(dest_tmpdir) bindmount(src_tmpdir2, dest_tmpdir) assert is_mountpoint(dest_tmpdir) # sanity check open(os.path.join(src_tmpdir2, 'test'), 'a').close() assert os.path.isfile(os.path.join(dest_tmpdir, 'test')) # umount is supposed to unmount everything, even overmounted directories umount(dest_tmpdir) assert not is_mountpoint(dest_tmpdir) def test_rmtree_mntsafe(gconfig): mnt_tmpdir = tempfile.TemporaryDirectory().name dest_tmpdir = tempfile.TemporaryDirectory().name # create directory structure and files to delete mp_dir = os.path.join(dest_tmpdir, 'subdir', 'mountpoint') mount_subdir = os.path.join(mnt_tmpdir, 'subdir_in_mount') os.makedirs(mp_dir) os.makedirs(mount_subdir) open(os.path.join(dest_tmpdir, 'file1.txt'), 'a').close() open(os.path.join(mp_dir, 'file_below_mountpoint.txt'), 'a').close() open(os.path.join(mnt_tmpdir, 'file_in_mount.txt'), 'a').close() open(os.path.join(mount_subdir, 'file_in_mount_subdir.txt'), 'a').close() # create bindmount bindmount(mnt_tmpdir, mp_dir) assert is_mountpoint(mp_dir) # try to delete the directory structure containing bindmounts rmtree_mntsafe(dest_tmpdir) # verify assert not os.path.exists(dest_tmpdir) assert os.path.isfile(os.path.join(mnt_tmpdir, 'file_in_mount.txt')) assert os.path.isfile(os.path.join(mount_subdir, 'file_in_mount_subdir.txt')) # cleanup mounted dir rmtree_mntsafe(mnt_tmpdir) assert not os.path.exists(mnt_tmpdir)
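The test module above stops at the mount helpers; the zstd tar helpers from the start of this section (compress_directory / decompress_tarball) can be exercised in the same pytest style. A minimal round-trip sketch follows — the import path is an assumption and may need adjusting to wherever those helpers actually live, and a working tar/zstd installation is required (setup.py earlier in this section checks for the zstd binary):

import os
import tempfile

def test_tarball_roundtrip(gconfig):
    # Assumed import location for the helpers shown at the start of this section.
    from debspawn.utils.zstd_tar import compress_directory, decompress_tarball

    with tempfile.TemporaryDirectory() as src_dir, tempfile.TemporaryDirectory() as work_dir:
        # Create a small payload to archive.
        with open(os.path.join(src_dir, 'hello.txt'), 'w') as f:
            f.write('hello')

        # compress_directory shells out to `tar -C <dir> -I zstd -cf <tarball> .`
        tarball = os.path.join(work_dir, 'data.tar.zst')
        compress_directory(src_dir, tarball)
        assert os.path.isfile(tarball)

        # decompress_tarball shells out to `tar -C <dir> -I zstd -xf <tarball>`;
        # the target directory must already exist.
        extract_dir = os.path.join(work_dir, 'extracted')
        os.makedirs(extract_dir)
        decompress_tarball(tarball, extract_dir)
        assert os.path.isfile(os.path.join(extract_dir, 'hello.txt'))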