pax_global_header00006660000000000000000000000064144476125310014521gustar00rootroot0000000000000052 comment=8b1e02e8f6e3e3a19a7c06d14b30c152b2d21397 depthcharge-tools-0.6.2/000077500000000000000000000000001444761253100151425ustar00rootroot00000000000000depthcharge-tools-0.6.2/.gitignore000066400000000000000000000000411444761253100171250ustar00rootroot00000000000000bin *.pyc __pycache__ *.egg-info depthcharge-tools-0.6.2/COPYRIGHT000066400000000000000000000071721444761253100164440ustar00rootroot00000000000000Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: depthcharge-tools Upstream-Contact: Alper Nebi Yasak Source: https://github.com/alpernebbi/depthcharge-tools/ License: GPL-2+ Files: * Copyright: 2019-2023 Alper Nebi Yasak License: GPL-2+ Files: systemd/depthchargectl-bless.service Comment: This file is modified from systemd's systemd-bless-boot.service. Author attributions are derived from systemd git log. Copyright: 2019 Alper Nebi Yasak 2018 Lennart Poettering License: LGPL-2.1+ systemd is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. . On Debian systems, the complete text of the GNU Lesser General Public License version 2.1 can be found in ‘/usr/share/common-licenses/LGPL-2.1’. Files: systemd/90-depthcharge-tools.install Comment: This file is modified from systemd's 90-loaderentry.install. Author attributions are derived from systemd git log. Copyright: 2022 Alper Nebi Yasak 2022 Michael Biebl 2022 Antonio Alvarez Feijoo 2021-2022 наб 2020 Kir Kolyshkin 2020 Jörg Thalheim 2019 Marc-Antoine Perennou 2019-2022 Zbigniew Jędrzejewski-Szmek 2018 Javier Martinez Canillas 2018-2019 Mike Auty 2017-2021 Yu Watanabe 2014 Michael Chapman 2014-2022 Lennart Poettering 2013 Tom Gundersen 2013-2016 Harald Hoyer License: LGPL-2.1+ systemd is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. . systemd is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. . You should have received a copy of the GNU Lesser General Public License along with systemd; If not, see . . On Debian systems, the complete text of the GNU Lesser General Public License version 2.1 can be found in ‘/usr/share/common-licenses/LGPL-2.1’. License: GPL-2+ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. . This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. . You should have received a copy of the GNU General Public License along with this program. If not, see . On Debian systems, the complete text of the GNU General Public License version 2 can be found in "/usr/share/common-licenses/GPL-2". 
# vi: set ft=debcopyright : depthcharge-tools-0.6.2/LICENSE000066400000000000000000000432541444761253100161570ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. 
(Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. depthcharge-tools-0.6.2/MANIFEST.in000066400000000000000000000005241444761253100167010ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools python package manifest # Copyright (C) 2021 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. include COPYRIGHT LICENSE *.rst recursive-include completions * recursive-include init.d * recursive-include systemd * depthcharge-tools-0.6.2/README.rst000066400000000000000000000226671444761253100166460ustar00rootroot00000000000000.. SPDX-License-Identifier: GPL-2.0-or-later .. depthcharge-tools README file .. Copyright (C) 2019-2023 Alper Nebi Yasak .. See COPYRIGHT and LICENSE files for full copyright information. ================= Depthcharge-Tools ================= This project is a collection of tools that ease and automate interacting with depthcharge_, the Chrome OS bootloader. Depthcharge is built into the firmware of Chrome OS boards, uses a custom verified boot flow and usually cannot boot other operating systems as is. This means someone who wants to use e.g. Debian_ on these boards need to either replace the firmware or work their system into `the format depthcharge expects`_. These tools are about the latter. Right now these are developed on and tested with only a few boards, but everything will attempt to work on other boards based on my best guesses. .. _depthcharge: https://chromium.googlesource.com/chromiumos/platform/depthcharge .. _the format depthcharge expects: https://chromium.googlesource.com/chromiumos/docs/+/HEAD/disk_format.md#Google-ChromeOS-devices .. _Debian: https://www.debian.org/ mkdepthcharge ============= The mkdepthcharge_ tool is intended to wrap mkimage_ and vbutil_kernel_ to provide reasonable defaults to them, hide their idiosyncrasies and automate creating a depthcharge-bootable partition image appropriate for the running architecture. An example invocation on a Samsung Chromebook Plus (v1, arm64) could be:: $ mkdepthcharge -o depthcharge.img --compress lzma \ --cmdline "console=tty1 root=/dev/mmcblk0p2 rootwait" -- \ /boot/vmlinuz.gz /boot/initrd.img rk3399-gru-kevin.dtb Here, mkdepthcharge would automatically extract and recompress the kernel, create a FIT image, put command line parameters into a file, create an empty bootloader, and provide defaults for vboot keys and other arguments while building the partition image. .. _mkdepthcharge: https://github.com/alpernebbi/depthcharge-tools/blob/master/mkdepthcharge.rst .. _mkimage: https://dyn.manpages.debian.org/jump?q=unstable/mkimage .. _vbutil_kernel: https://dyn.manpages.debian.org/jump?q=unstable/vbutil_kernel depthchargectl ============== The depthchargectl_ tool goes a step further and aims to fully automate bootable image creation and Chrome OS kernel partition management, even the board-specific and distro-specific parts. With proper integration with your distribution, depthchargectl can keep your system bootable across kernel and initramfs changes without any interaction on your part. Even without such integration, a single command automates most of the work:: # Use --allow-current if you only have one Chrome OS kernel partition. $ sudo depthchargectl write --allow-current Building depthcharge image for board 'Samsung Chromebook Plus' ('kevin'). 
Built depthcharge image for kernel version '5.10.0-6-arm64'. Wrote image '/boot/depthcharge/5.10.0-6-arm64.img' to partition '/dev/mmcblk1p1'. Set partition '/dev/mmcblk1p1' as next to boot. # After a reboot, you or an init service should run this. $ sudo depthchargectl bless Set partition '/dev/mmcblk1p1' as successfully booted. .. _depthchargectl: https://github.com/alpernebbi/depthcharge-tools/blob/master/depthchargectl.rst Installation ============ This depends on the ``pkg_resources`` Python package which is usually distributed with ``setuptools``. The tools can run a number of programs when necessary, which should be considered dependencies: - ``futility`` (``vbutil_kernel``), ``cgpt``, ``crossystem`` - ``mkimage``, ``fdtget``, ``fdtput`` - ``lz4``, ``lzma`` - ``gzip``, ``lzop``, ``bzip2``, ``xz``, ``zstd`` (optional, for unpacking compressed ``/boot/vmlinuz``) The ``rst2man`` program (from ``docutils``) should be used to convert the ``mkdepthcharge.rst`` and ``depthchargectl.rst`` files to manual pages. However, this is not automated here and has to be done manually. This project (or at least ``depthchargectl``) is meant to be integrated into your operating system by its maintainers, and the best way to install it is through your OS' package manager whenever possible. Debian ------ An official `depthcharge-tools Debian package`_ is available upstream, since Debian 12 (bookworm). You can install it like any other package:: $ sudo apt install depthcharge-tools It includes the necessary system hooks and services to make and keep your Chromebook bootable, enabled by default. These however do not trigger on the depthcharge-tools installation, but on kernel and initramfs changes. To trigger these hooks manually, run:: $ sudo update-initramfs -u .. _depthcharge-tools Debian package: https://packages.debian.org/sid/depthcharge-tools Alpine Linux ------------ Thanks to the efforts in supporting `postmarketOS on ChromeOS Devices`_, there is an official `depthcharge-tools package for Alpine Linux`_. You should be able to install it as:: $ sudo apk add depthcharge-tools However, this doesn't include any system hooks or services to keep your Chromebook bootable. .. _postmarketOS on ChromeOS Devices: https://wiki.postmarketos.org/wiki/Chrome_OS_devices .. _depthcharge-tools package for Alpine Linux: https://pkgs.alpinelinux.org/package/edge/testing/x86/depthcharge-tools Pip --- Python binary wheels are uploaded to PyPI_, and it should be possible to install the python package using `pip`. However, this does not install the manual pages, bash/zsh completions, systemd/init.d service files, and OS-specific kernel/initramfs hooks. You can install in `--user` mode, but this makes it quite hard to use `depthchargectl` as root. As root privileges are necessary to manipulate system block devices this limits you a bit:: $ pip install --user depthcharge-tools Although inadvisable, you can install as root to overcome that caveat. Alternatively, see the `PYTHONPATH` hack in one of the later sections. .. _PyPI: https://pypi.org/project/depthcharge-tools/ Configuration ============= You can configure depthcharge-tools with the |CONFIG_FILE| file, or by putting similar fragments in the |CONFIGD_DIR| directory. See the config.ini_ file for the built-in default configuration. Settings in the ``[depthcharge-tools]`` section are the global defaults from which all commands inherit. Other than that, config sections have inheritence based on their names i.e. 
those in the form of ``[a/b/c]`` inherit from ``[a/b]`` which also inherits from ``[a]``. Each subcommand reads its config from such a subsection. Currently the following configuration options are available:: [depthcharge-tools] enable-system-hooks: Write/remove images on kernel/initramfs changes vboot-keyblock: The kernel keyblock file for verifying and signing images vboot-private-key: The private key (.vbprivk) for signing images vboot-public-key: The public key for (.vbpubk) verifying images [depthchargectl] board: Codename of a board to build and check images for ignore-initramfs: Do not include an initramfs in the image images-dir: Directory to store built images kernel-cmdline: Kernel commandline parameters to use zimage-initramfs-hack = How to support initramfs on x86 boards For longer explanations check the manual pages of each command for options named the same as these. .. |CONFIG_FILE| replace:: ``/etc/depthcharge-tools/config`` .. |CONFIGD_DIR| replace:: ``/etc/depthcharge-tools/config.d`` .. _config.ini: https://github.com/alpernebbi/depthcharge-tools/blob/master/depthcharge_tools/config.ini Installation for development ============================ If you want to use development versions, you can clone this repository and install using pip:: $ pip3 install --user -e /path/to/depthcharge-tools Hopefully, you should be able to use depthchargectl with just that:: $ depthchargectl build --output depthcharge.img Building depthcharge image for board 'Samsung Chromebook Plus' ('kevin'). Built depthcharge image for kernel version '5.10.0-6-arm64'. depthchargectl.img Most ``depthchargectl`` functionality needs root as it handles disks and partitions, and you need special care while invoking as root:: $ depthchargectl() { sudo PYTHONPATH=/path/to/depthcharge-tools \ python3 -m depthcharge_tools.depthchargectl "$@" } $ depthchargectl list /dev/mmcblk0 S P T PATH 1 2 0 /dev/mmcblk0p2 1 1 0 /dev/mmcblk0p4 0 0 15 /dev/mmcblk0p6 Or you can add a similar invocation to the /usr/local/bin files, so that it's available to both normal users and root:: $ sudo tee /usr/local/bin/depthchargectl < This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see See COPYRIGHT and LICENSE files for full copyright information. depthcharge-tools-0.6.2/completions/000077500000000000000000000000001444761253100174765ustar00rootroot00000000000000depthcharge-tools-0.6.2/completions/_depthchargectl.bash000066400000000000000000000205461444761253100234640ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools depthchargectl bash completions # Copyright (C) 2020-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. 
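# Descriptive note (added): the _depthchargectl__* helpers below add
# candidate completions to COMPREPLY: paths via compgen -f, block
# devices from lsblk, kernel versions and board codenames queried from
# the depthcharge_tools Python package, and the current kernel command
# line taken from /proc/cmdline.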
_depthchargectl__file() { COMPREPLY+=($(compgen -f -- "$cur")) compopt -o filenames if [ "${#COMPREPLY[@]}" -eq 1 ]; then if [ -d "${COMPREPLY[0]}" ]; then compopt -o nospace COMPREPLY=("${COMPREPLY[0]}/") elif [ -f "${COMPREPLY[0]}" ]; then compopt +o nospace fi fi } 2>/dev/null _depthchargectl__timestamp() { local timestamp="$(date "+%s")" COMPREPLY+=($(compgen -W "$timestamp" -- "$cur")) } 2>/dev/null _depthchargectl__disk() { local disks="$(lsblk -o "PATH" -n -l)" COMPREPLY+=($(compgen -W "$disks" -- "$cur")) } 2>/dev/null _depthchargectl__root() { local root="$(findmnt --fstab -n -o SOURCE "/")" COMPREPLY+=($(compgen -W "$root" -- "$cur")) } 2>/dev/null _depthchargectl__boot() { local boot="$(findmnt --fstab -n -o SOURCE "/boot")" COMPREPLY+=($(compgen -W "$boot" -- "$cur")) } 2>/dev/null _depthchargectl__cmdline() { local cmdline="$(cat /proc/cmdline | sed -e 's/\(cros_secure\|kern_guid\)[^ ]* //g')" COMPREPLY+=($(compgen -W "$cmdline" -- "$cur")) } 2>/dev/null _depthchargectl__kernel() { if command -v _kernel_versions >/dev/null 2>/dev/null; then _kernel_versions else local script="from depthcharge_tools.utils.platform import installed_kernels" "$script;kernels = (k.release for k in installed_kernels());" "$script;print(*sorted(filter(None, kernels)));" COMPREPLY+=($(compgen -W "$(python3 -c "$script")" -- "$cur")) fi } 2>/dev/null _depthchargectl__boards() { # later local script="import re" script="$script;from depthcharge_tools import boards_ini" script="$script;boards = re.findall(\"codename = (.+)\", boards_ini)" script="$script;print(*sorted(boards))" COMPREPLY+=($(compgen -W "$(python3 -c "$script")" -- "$cur")) } 2>/dev/null _depthchargectl() { COMPREPLY=() local cur="${COMP_WORDS[COMP_CWORD]}" local prev="${COMP_WORDS[COMP_CWORD-1]}" local global_opts=(-h --help -V --version -v --verbose --tmpdir --root) local config_opts=( --config --board --images-dir --vboot-keyblock --vboot-public-key --vboot-private-key --kernel-cmdline --ignore-initramfs ) local cmds=(bless build config check list remove target write) case "$prev" in --root) _depthchargectl__root; _depthchargectl__disk; return ;; --root-mountpoint) _depthchargectl__file; return ;; --boot-mountpoint) _depthchargectl__file; return ;; --tmpdir) _depthchargectl__file; return ;; --config) _depthchargectl__file; return ;; --board) _depthchargectl__boards; return ;; --images-dir) _depthchargectl__file; return ;; --vboot-keyblock) _depthchargectl__file; return ;; --vboot-public-key) _depthchargectl__file; return ;; --vboot-private-key) _depthchargectl__file; return ;; --kernel-cmdline) _depthchargectl__cmdline; return ;; --ignore-initramfs) : ;; --zimage-initramfs-hack) COMPREPLY+=($(compgen -W "set-init-size pad-vmlinuz none" -- "$cur")) ;; --) ;; *) ;; esac local cmd for cmd in "${COMP_WORDS[@]}"; do case "$cmd" in bless) _depthchargectl_bless; break ;; build) _depthchargectl_build; break ;; config) _depthchargectl_config; break ;; check) _depthchargectl_check; break ;; list) _depthchargectl_list; break ;; remove) _depthchargectl_remove; break ;; target) _depthchargectl_target; break ;; write) _depthchargectl_write; break ;; *) cmd="" ;; esac done if [ -z "$cmd" ]; then COMPREPLY+=($(compgen -W "${cmds[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${global_opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${config_opts[*]}" -- "$cur")) fi } _depthchargectl_bless() { local opts=(--bad --oneshot -i --partno) case "$prev" in -i|--partno) return ;; *) _depthchargectl__disk ;; esac COMPREPLY+=($(compgen -W "${opts[*]}" -- "$cur")) 
COMPREPLY+=($(compgen -W "${global_opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${config_opts[*]}" -- "$cur")) } _depthchargectl_build() { local opts=( --description --root --compress --timestamp -o --output --kernel-release --kernel --initramfs --fdtdir --dtbs ) case "$prev" in --description) if [ -f /etc/os-release ]; then local name="$(. /etc/os-release; echo "$NAME")" COMPREPLY+=($(compgen -W "$name" -- "$cur")) fi return ;; --root) _depthchargectl__root; _depthchargectl__disk; return ;; --compress) local compress=(none lz4 lzma) COMPREPLY+=($(compgen -W "${compress[*]}" -- "$cur")) return ;; --timestamp) _depthchargectl__timestamp; return;; -o|--output) _depthchargectl__file; return ;; --kernel-release) _depthchargectl__kernel; return ;; --kernel) _depthchargectl__file; return ;; --initramfs) _depthchargectl__file; return ;; --fdtdir) _depthchargectl__file; return ;; --dtbs) _depthchargectl__file; return ;; *) _depthchargectl__kernel;; esac COMPREPLY+=($(compgen -W "${opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${global_opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${config_opts[*]}" -- "$cur")) } _depthchargectl_config() { local opts=(--section --default) case "$prev" in --section) return ;; --default) return ;; *) ;; esac COMPREPLY+=($(compgen -W "${opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${global_opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${config_opts[*]}" -- "$cur")) } _depthchargectl_check() { _depthchargectl__file COMPREPLY+=($(compgen -W "${global_opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${config_opts[*]}" -- "$cur")) } _depthchargectl_list() { local opts=(-a --all-disks -c --count -n --noheadings -o --output) local outputs=(A ATTRIBUTE S SUCCESSFUL T TRIES P PRIORITY PATH DISK DISKPATH PARTNO SIZE) case "$prev" in -o|--output) compopt -o nospace case "$cur" in *,) COMPREPLY+=($(compgen -W "${outputs[*]}" -P "$cur" -- "")) ;; *,*) COMPREPLY+=($(compgen -W "${outputs[*]}" -P "${cur%,*}," -- "${cur##*,}")) ;; *) COMPREPLY+=($(compgen -W "${outputs[*]}" -- "$cur")) ;; esac ;; *) _depthchargectl__disk COMPREPLY+=($(compgen -W "${opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${global_opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${config_opts[*]}" -- "$cur")) ;; esac } _depthchargectl_remove() { local opts=(-f --force) _depthchargectl__file _depthchargectl__kernel COMPREPLY+=($(compgen -W "${opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${global_opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${config_opts[*]}" -- "$cur")) } _depthchargectl_target() { local opts=(-s --min-size --allow-current -a --all-disks) local sizes=(8M 16M 32M 64M 128M 256M 512M) case "$prev" in -s|--min-size) COMPREPLY+=($(compgen -W "${sizes[*]}" -- "$cur")) ;; *) _depthchargectl__disk COMPREPLY+=($(compgen -W "${opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${global_opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${config_opts[*]}" -- "$cur")) ;; esac } _depthchargectl_write() { local opts=(-f --force -t --target --no-prioritize --allow-current) case "$prev" in -t|--target) _depthchargectl__disk ;; *) _depthchargectl__kernel _depthchargectl__file COMPREPLY+=($(compgen -W "${opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${global_opts[*]}" -- "$cur")) COMPREPLY+=($(compgen -W "${config_opts[*]}" -- "$cur")) ;; esac } complete -F _depthchargectl depthchargectl # vim: filetype=sh depthcharge-tools-0.6.2/completions/_depthchargectl.zsh000066400000000000000000000155241444761253100233530ustar00rootroot00000000000000#compdef depthchargectl # SPDX-License-Identifier: 
GPL-2.0-or-later # depthcharge-tools depthchargectl zsh completions # Copyright (C) 2020-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. function _depthchargectl { _arguments -C \ {-h,--help}'[Show a help message.]' \ {-v,--verbose}'[Print more detailed output.]' \ {-V,--version}'[Print program version.]' \ --tmpdir'[Directory to keep temporary files.]:temp dir:_directories' \ --root'[Root device or mountpoint of the system to work on]:root device:{_depthchargectl__root; _depthchargectl__disk}' \ --root-mountpoint'[Root mountpoint of the system to work on]:root mountpoint:_directories' \ --boot-mountpoint'[Boot mountpoint of the system to work on]:boot mountpoint:_directories' \ --config'[Additional configuration file to read]:config file:_files' \ --board'[Assume running on the specified board]:board codenames:{_depthchargectl__board;}' \ --images-dir'[Directory to store built images]:images dir:_directories' \ --vboot-keyblock'[Keyblock file to include in images]:keyblock file:_files' \ --vboot-public-key'[Public key file to verify images]:vbpubk file:_files' \ --vboot-private-key'[Private key file to include in images]:vbprivk file:_files' \ --kernel-cmdline'[Command line options for the kernel]:kernel cmdline:{_depthchargectl__cmdline;}' \ --ignore-initramfs'[Do not include initramfs in images]' \ --zimage-initramfs-hack'[Initramfs support hack choice for zimage format]:zimage hack:(set-init-size pad-vmlinuz none)' \ '1:command:(bless build config check list remove target write)' \ '*::arg:->args' \ ; case "$state:$line[1]" in args:bless) _arguments -S \ --bad'[Set the partition as unbootable]' \ --oneshot'[Set the partition to be tried once]' \ {-i,--partno}'[Partition number in the given disk image]:number:()' \ ':disk or partition:{_depthchargectl__disk}' \ ; ;; args:build) _arguments -S \ --description'[Human-readable description for the image]:image description:($(source /etc/os-release; echo "$NAME"))' \ --root'[Root device to add to kernel cmdline]:root device:{_depthchargectl__root; _depthchargectl__disk}' \ --compress'[Compression types to attempt]:compress:(none lz4 lzma)' \ --timestamp'[Build timestamp for the image]:timestamp:($(date "+%s"))' \ {-o,--output}'[Output image to path instead of storing in images-dir]:output path:_files' \ --kernel-release'[Release name for the kernel used in image name]:kernel release:{_depthchargectl__kernel;}' \ --kernel'[Kernel executable]:kernel:_files' \ --initramfs'[Ramdisk image]:*:initramfs:_files' \ --fdtdir'[Directory to search device-tree binaries for the board]:fdtdir:_directories' \ --dtbs'[Device-tree binary files to use instead of searching fdtdir]:*:dtb files:_files' \ ':kernel version:{_depthchargectl__kernel}' \ ; ;; args:config) _arguments -S \ --section'[Config section to work on.]' \ --default'[Value to return if key does not exist in section.]' \ ':config key:' \ ; ;; args:check) _arguments -S \ ':image file:_files' \ ; ;; args:list) local outputspec='{_values -s , "description" "A" "ATTRIBUTE" "S" "SUCCESSFUL" "T" "TRIES" "P" "PRIORITY" "PATH" "DISKPATH" "DISK" "PARTNO" "SIZE"}' _arguments -S \ {-n,--noheadings}'[Do not print column headings.]' \ {-a,--all-disks}'[List partitions on all disks.]' \ {-c,--count}'[Print only the count of partitions.]' \ {-o,--output}'[Comma separated list of columns to output.]:columns:'"$outputspec" \ '*::disk or partition:{_depthchargectl__disk}' \ ; ;; args:remove) _arguments -S \ {-f,--force}'[Allow disabling the current partition.]' \ '::kernel 
version or image file:{_depthchargectl__kernel; _files}' \ ; ;; args:target) _arguments -S \ {-s,--min-size}'[Target partitions larger than this size.]:bytes:(8M 16M 32M 64M 128M 256M 512M)' \ --allow-current'[Allow targeting the currently booted part.]' \ {-a,--all-disks}'[Target partitions on all disks.]' \ '*::disk or partition:{_depthchargectl__disk}' \ ; ;; args:write) _arguments -S \ {-f,--force}'[Write image even if it cannot be verified.]' \ {-t,--target}'[Specify a disk or partition to write to.]:disk or partition:{_depthchargectl__disk}' \ --no-prioritize'[Do not set any flags on the partition]' \ --allow-current'[Allow overwriting the current partition]' \ '::kernel version or image file:{_depthchargectl__kernel; _files}' \ ; ;; *) : ;; esac } function _depthchargectl__kernel { if command -v linux-version >/dev/null 2>/dev/null; then local kversions=($(linux-version list)) _describe 'kernel version' kversions else local script=( 'from depthcharge_tools.utils.platform import installed_kernels;' 'kernels = (k.release for k in installed_kernels());' 'print(*sorted(filter(None, kernels)));' ) local kversions=($(python3 -c "$script")) _describe 'kernel version' kversions fi } 2>/dev/null function _depthchargectl__disk { local disks=($(lsblk -o "PATH" -n -l)) 2>/dev/null _describe 'disk or partition' disks } 2>/dev/null function _depthchargectl__board { local script=( 'import re;' 'from depthcharge_tools import boards_ini;' 'boards = re.findall("codename = (.+)", boards_ini);' 'print(*sorted(boards));' ) local boards=($(python3 -c "$script")) _describe 'board codenames' boards } 2>/dev/null function _depthchargectl__cmdline { local cmdline=($(cat /proc/cmdline | sed -e 's/\(cros_secure\|kern_guid\)[^ ]* //g')) _describe 'kernel cmdline' cmdline } 2>/dev/null function _depthchargectl__root { local root=($(findmnt --fstab -n -o SOURCE "/")) _describe root root } 2>/dev/null function _depthchargectl__boot { local boot=($(findmnt --fstab -n -o SOURCE "/boot")) _describe boot boot } 2>/dev/null _depthchargectl "$@" depthcharge-tools-0.6.2/completions/_mkdepthcharge.bash000066400000000000000000000052061444761253100233050ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools mkdepthcharge bash completions # Copyright (C) 2020-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. 
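# Descriptive note (added): options naming files (--vmlinuz,
# --initramfs, --dtbs, --output and the vboot key files) complete as
# paths, while fixed-choice options such as --arch, --format and
# --compress complete from the known value lists defined below.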
_mkdepthcharge__file() { COMPREPLY+=($(compgen -f -- "$cur")) compopt -o filenames if [ "${#COMPREPLY[@]}" -eq 1 ]; then if [ -d "${COMPREPLY[0]}" ]; then compopt -o nospace COMPREPLY=("${COMPREPLY[0]}/") elif [ -f "${COMPREPLY[0]}" ]; then compopt +o nospace fi fi } 2>/dev/null _mkdepthcharge__cmdline() { cmdline="$(cat /proc/cmdline | sed -e 's/\(cros_secure\|kern_guid\)[^ ]* //g')" COMPREPLY+=($(compgen -W "$cmdline" -- "$cur")) } 2>/dev/null _mkdepthcharge() { COMPREPLY=() local cur="${COMP_WORDS[COMP_CWORD]}" local prev="${COMP_WORDS[COMP_CWORD-1]}" local opts=( -h --help -V --version -v --verbose -d --vmlinuz -i --initramfs -b --dtbs -o --output --tmpdir -A --arch --format -C --compress -n --name --kernel-start --ramdisk-load-address --patch-dtbs --no-patch-dtbs --pad-vmlinuz --no-pad-vmlinuz --set-init-size --no-set-init-size -c --cmdline --kern-guid --no-kern-guid --bootloader --keydir --keyblock --signprivate --signpubkey ) case "$prev" in -d|--vmlinuz) _mkdepthcharge__file ;; -i|--initramfs) _mkdepthcharge__file ;; -b|--dtbs) _mkdepthcharge__file ;; -o|--output) _mkdepthcharge__file ;; -A|--arch) COMPREPLY+=($(compgen -W "arm arm64 aarch64 x86 x86_64 amd64" -- "$cur")) ;; --format) COMPREPLY+=($(compgen -W "fit zimage" -- "$cur")) ;; -C|--compress) COMPREPLY+=($(compgen -W "none lz4 lzma" -- "$cur")) ;; -n|--name) if [ -f /etc/os-release ]; then local name="$(. /etc/os-release; echo "$NAME")" COMPREPLY+=($(compgen -W "$name" -- "$cur")) fi ;; --kernel-start) : ;; --ramdisk-load-address) : ;; -c|--cmdline) _mkdepthcharge__cmdline ;; --tmpdir) _mkdepthcharge__file ;; --bootloader) _mkdepthcharge__file ;; --keydir) _mkdepthcharge__file ;; --keyblock) _mkdepthcharge__file ;; --signprivate) _mkdepthcharge__file ;; --signpubkey) _mkdepthcharge__file ;; --) _mkdepthcharge__file ;; *) COMPREPLY+=($(compgen -W "${opts[*]}" -- "$cur")) _mkdepthcharge__file ;; esac } complete -F _mkdepthcharge mkdepthcharge # vim: filetype=sh depthcharge-tools-0.6.2/completions/_mkdepthcharge.zsh000066400000000000000000000054241444761253100231760ustar00rootroot00000000000000#compdef mkdepthcharge # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools mkdepthcharge zsh completions # Copyright (C) 2020-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. 
function _mkdepthcharge { _arguments -S \ {-d,--vmlinuz}'[Kernel executable]:vmlinuz file:_files' \ {-i,--initramfs}'[Ramdisk image]:*:initrd files:_files' \ {-b,--dtbs}'[Device-tree binary files]:*:dtbs files:_files' \ {-h,--help}'[Show a help message.]' \ {-v,--verbose}'[Print more detailed output.]' \ {-V,--version}'[Print program version.]' \ --tmpdir'[Directory to keep temporary files.]:temp dir:_directories' \ --kernel-start'[Start of depthcharge kernel buffer in memory.]:kernel start:_numbers' \ {-o,--output}'[Write resulting image to FILE.]:output:_files' \ {-A,--arch}'[Architecture to build for.]:arch:(arm arm64 aarch64 x86 x86_64 amd64)' \ --format'[Kernel image format to use.]:format:(fit zimage)' \ {-C,--compress}'[Compress vmlinuz with lz4 or lzma.]:compression:(none lz4 lzma)' \ {-n,--name}'[Description of vmlinuz to put in the FIT.]:description:($(source /etc/os-release; echo "$NAME"))' \ --ramdisk-load-address'[Add load address to FIT ramdisk image section.]:ramdisk load address:_numbers' \ --patch-dtbs'[Add linux,initrd properties to device-tree binary files.]' \ --no-patch-dtbs'[Do not add linux,initrd properties to device-tree binary files.]' \ --pad-vmlinuz'[Pad the vmlinuz file for safe decompression]' \ --no-pad-vmlinuz'[Do not pad the vmlinuz file for safe decompression]' \ --set-init-size'[Set init-size boot param for safe decompression]' \ --no-set-init-size'[Do not set init-size boot param for safe decompression]' \ '*'{-c,--cmdline}'[Command-line parameters for the kernel.]:*:kernel cmdline:{_mkdepthcharge__cmdline}' \ --kern-guid'[Prepend kern_guid=%U to the cmdline.]' \ --no-kern-guid'[Do not prepend kern_guid=%U to the cmdline.]' \ --bootloader'[Bootloader stub binary to use.]:bootloader file:_files' \ --keydir'[Directory containing vboot keys to use.]:keys dir:_directories' \ --keyblock'[The key block file (.keyblock).]:keyblock file:_files' \ --signprivate'[Private key (.vbprivk) to sign the image.]:vbprivk file:_files' \ --signpubkey'[Public key (.vbpubk) to verify the image.]:vbpubk file:_files' \ ':vmlinuz file:_files' \ '*:initrd or dtb files:_files' \ ; } function _mkdepthcharge__cmdline { local cmdline=($(cat /proc/cmdline | sed -e 's/\(cros_secure\|kern_guid\)[^ ]* //g')) _describe 'kernel cmdline' cmdline } 2>/dev/null _mkdepthcharge "$@" depthcharge-tools-0.6.2/depthcharge_tools/000077500000000000000000000000001444761253100206405ustar00rootroot00000000000000depthcharge-tools-0.6.2/depthcharge_tools/__init__.py000066400000000000000000000034221444761253100227520ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools __init__.py # Copyright (C) 2020-2021 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. 
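# Descriptive note (added): this module determines the package version
# (__version__) from pkg_resources, setup.py or git, and exposes the
# bundled config.ini and boards.ini resources as strings along with the
# list of system-wide configuration files read by the tools.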
import glob
import logging
import pathlib
import pkg_resources
import re
import subprocess

logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())


def get_version():
    version = None

    pkg_path = pkg_resources.resource_filename(__name__, '')
    pkg_path = pathlib.Path(pkg_path).resolve()

    try:
        self = pkg_resources.require(__name__)[0]
        version = self.version

    except pkg_resources.DistributionNotFound:
        setup_py = pkg_path.parent / "setup.py"
        if setup_py.exists():
            version = re.findall(
                'version=(\'.+\'|".+"),',
                setup_py.read_text(),
            )[0].strip('"\'')

        if (pkg_path.parent / ".git").exists():
            proc = subprocess.run(
                ["git", "-C", pkg_path, "describe"],
                stdout=subprocess.PIPE,
                encoding="utf-8",
                check=False,
            )
            if proc.returncode == 0:
                tag, *local = proc.stdout.split("-")
                if local:
                    version = "{}+{}".format(tag, ".".join(local))
                else:
                    version = tag

    if version is not None:
        return pkg_resources.parse_version(version)


__version__ = get_version()

config_ini = pkg_resources.resource_string(__name__, "config.ini")
config_ini = config_ini.decode("utf-8")

boards_ini = pkg_resources.resource_string(__name__, "boards.ini")
boards_ini = boards_ini.decode("utf-8")

config_files = [
    *glob.glob("/etc/depthcharge-tools/config"),
    *glob.glob("/etc/depthcharge-tools/config.d/*"),
]
depthcharge-tools-0.6.2/depthcharge_tools/boards.ini
# SPDX-License-Identifier: GPL-2.0-or-later

# depthcharge-tools boards database
# Copyright (C) 2021-2022 Alper Nebi Yasak
# See COPYRIGHT and LICENSE files for full copyright information.

# The format of this boards.ini file is not stable. Instead of relying
# on the section names and hierarchy here, you should add your own fully
# specified sections with a unique codename, then set that codename as
# your board, both in /etc/depthcharge-tools/config.d/*.
For example: # # [depthchargectl] # board = custom-kevin # # [boards/custom/kevin] # arch = arm64 # codename = custom-kevin # dt-compatible = google,kevin # image-format = fit # image-max-size = 512 MiB # name = Samsung Chromebook Plus (Custom coreboot) # # [boards/custom/minnie] # arch = arm # codename = custom-minnie # dt-compatible = google,veyron-minnie # image-format = fit # image-max-size = 32 MiB # name = ASUS Chromebook Flip C100PA (Libreboot r20160907) # [boards/amd64] arch = amd64 codename = amd64-generic loads-zimage-ramdisk = False image-start-address = 0x100000 image-format = zimage [boards/amd64/alderlake] codename = chipset-adl [boards/amd64/alderlake/brya] codename = brya image-max-size = 512 MiB [boards/amd64/alderlake/brya/anahera] codename = anahera hwid-match = ^ANAHERA-TYOO.* name = HP Elite c640 14" G3 Chromebook (Enterprise) [boards/amd64/alderlake/brya/banshee] codename = banshee hwid-match = ^BANSHEE-UZTQ.* name = Framework Laptop Chromebook Edition [boards/amd64/alderlake/brya/brask] codename = brask [boards/amd64/alderlake/brya/brask/kinox] codename = kinox hwid-match = ^KINOX-WGMJ.* name = Lenovo ThinkCentre M60q Chromebox [boards/amd64/alderlake/brya/brask/kuldax] codename = kuldax hwid-match = ^KULDAX-DGSC.* name = ASUS Chromebox (Enterprise) 5 (CN67) [boards/amd64/alderlake/brya/brask/moli] codename = moli hwid-match = ^MOLI-.* name = Acer Chromebox CXI5 [boards/amd64/alderlake/brya/crota] codename = crota hwid-match = ^CROTA-LPXP.* name = Dell Latitude 5430 Chromebook [boards/amd64/alderlake/brya/crota/crota360] codename = crota360 hwid-match = ^CROTA360-QYFB.* name = Dell Latitude 5430 2-in-1 Chromebook [boards/amd64/alderlake/brya/felwinter] codename = felwinter hwid-match = ^FELWINTER-BDXJ.* name = ASUS Chromebook (Enterprise) Flip CX5 (CX5601) [boards/amd64/alderlake/brya/gimble] codename = gimble hwid-match = ^GIMBLE-PVHI.* name = HP Chromebook x360 14 inch [boards/amd64/alderlake/brya/kano] codename = kano [boards/amd64/alderlake/brya/kano/0] hwid-match = ^KANO-VJXU.* name = Acer Chromebook (Enterprise) Spin 714 (CP714-1WN) [boards/amd64/alderlake/brya/kano/1] hwid-match = ^KANO-.* name = Acer Chromebook (Enterprise) Spin 714 (CP714-2WN) [boards/amd64/alderlake/brya/mithrax] codename = mithrax [boards/amd64/alderlake/brya/mithrax/0] hwid-match = ^MITHRAX-HKVS.* name = ASUS Chromebook (Enterprise) CX34, CB34 Flip (CX3401, CX3401FBA, CB3401) [boards/amd64/alderlake/brya/mithrax/1] hwid-match = ^MITHRAX-ISVS.* name = Asus Chromebook Vibe CX34 Flip (CX3401, CX3401FBA) [boards/amd64/alderlake/brya/osiris] codename = osiris hwid-match = ^OSIRIS-ATFE.* name = Acer Chromebook 516 GE (CBG516-1H) [boards/amd64/alderlake/brya/primus] codename = primus hwid-match = ^PRIMUS-ZPIS.* name = ThinkPad C14 Gen 1 Chromebook [boards/amd64/alderlake/brya/redrix] codename = redrix hwid-match = ^REDRIX-CLQY .* name = HP Elite Dragonfly Chromebook [boards/amd64/alderlake/brya/taeko] codename = taeko hwid-match = ^TAEKO.* name = Lenovo (IdeaPad) Flex 5i Chromebook (14", 7) [boards/amd64/alderlake/brya/taniks] codename = taniks hwid-match = ^TANIKS.* name = Lenovo IdeaPad Gaming Chromebook 16 [boards/amd64/alderlake/brya/tarlo] codename = tarlo hwid-match = ^TARLO[ |-].* name = Lenovo (IdeaPad) 5i Chromebook (16", 7) [boards/amd64/alderlake/brya/vell] codename = vell hwid-match = ^VELL-SVGZ .* name = HP Dragonfly Pro Chromebook [boards/amd64/alderlake/brya/volmar] codename = volmar hwid-match = ^VOLMAR-PGRU.* name = Acer Chromebook Vero 514 
[boards/amd64/alderlake/brya/volmar/zavala] codename = zavala hwid-match = ^ZAVALA-PSHU.* name = Acer Chromebook Vero 712 (CV872, CV872T) [boards/amd64/alderlake-n] codename = chipset-adln [boards/amd64/alderlake-n/nissa] codename = nissa image-format = zimage image-max-size = 512 MiB [boards/amd64/alderlake-n/nissa/craask] codename = craask hwid-match = ^CRAASK-HULX.* name = Acer Chromebook Spin 512 (R856T, R856T-TCO, R856TN, R856TN-TCO, R856LT, R856LT-TCO, R856LTN, R856LTN-TCO) [boards/amd64/alderlake-n/nissa/craask/craaskbowl] codename = craaskbowl hwid-match = ^CRAASKBOWL-GSKT.* name = Acer Chromebook Spin 511 (R756T, R756T-TCO, R756TN, R756TN-TCO, R756LT, R756LT-TCO, R756LTN, R756LTN-TCO) [boards/amd64/alderlake-n/nissa/craask/craaskvin] codename = craaskvin hwid-match = ^CRAASKVIN-HOWA.* name = Acer Chromebook 511 (C736, C736T, C736L, C736LT) [boards/amd64/alderlake-n/nissa/pujjo] codename = pujjo [boards/amd64/alderlake-n/nissa/pujjo/0] hwid-match = ^PUJJO-KTLR.* name = Lenovo 500e Yoga Chromebook Gen 4 [boards/amd64/alderlake-n/nissa/pujjo/1] hwid-match = ^PUJJO-DCCV.* name = Lenovo (IdeaPad) Flex 3i Chromebook (12", 8) [boards/amd64/alderlake-n/nissa/pujjo/pujjoteen] codename = pujjoteen [boards/amd64/alderlake-n/nissa/pujjo/pujjoteen/0] hwid-match = ^PUJJOTEEN(15W)?[-| ].* name = Lenovo 14e Chromebook Gen 3 (15w) [boards/amd64/alderlake-n/nissa/pujjo/pujjoteen/1] hwid-match = ^PUJJOTEEN(15W)?[-| ].* name = Lenovo 14e Chromebook Gen 3 Chromebook [boards/amd64/alderlake-n/nissa/xivu] codename = xivu hwid-match = ^XIVU-YAZN.* name = ASUS Chromebook CR11 (CR1102C) [boards/amd64/alderlake-n/nissa/xivu/xivu360] codename = xivu360 hwid-match = ^XIVU360-HRQS.* name = ASUS Chromebook CR11 (CR1102F) [boards/amd64/alderlake-n/nissa/yaviks] codename = yaviks hwid-match = ^YAVIKS-BVSW.* name = HP Chromebook 15.6” [boards/amd64/apollolake] codename = chipset-apl [boards/amd64/apollolake/coral] codename = coral image-max-size = 32 MiB [boards/amd64/apollolake/coral/astronaut] codename = astronaut hwid-match = ^ASTRONAUT .* name = Acer Chromebook 11 (C732, C732T, C732L, C732LT) [boards/amd64/apollolake/coral/babymako] codename = babymako hwid-match = ^BABYMAKO .* name = ASUS Chromebook C403 [boards/amd64/apollolake/coral/babymega] codename = babymega hwid-match = ^BABYMEGA .* [boards/amd64/apollolake/coral/babymega/0] hwid-match = ^BABYMEGA [ABCD].* name = ASUS Chromebook C223 [boards/amd64/apollolake/coral/babymega/1] hwid-match = ^(BABYMEGA [EF].*|BABYMEGA [G].{11}B.*) name = ASUS Chromebook CX1100CNA [boards/amd64/apollolake/coral/babytiger] codename = babytiger hwid-match = ^BABYTIGER .* [boards/amd64/apollolake/coral/babytiger/0] hwid-match = ^BABYTIGER [ABC].* name = ASUS Chromebook C523 [boards/amd64/apollolake/coral/babytiger/1] hwid-match = ^(BABYTIGER [DE].*|BABYTIGER [F].{11}B.*) name = ASUS Chromebook CX1500CNA [boards/amd64/apollolake/coral/blacktip] codename = blacktip [boards/amd64/apollolake/coral/blacktip/0] hwid-match = ^BLACKTIP [A-Z0-9][A-Z0-9]B-([A-Z0-9\-])+ name = CTL Chromebook NL7 [boards/amd64/apollolake/coral/blacktip/1] hwid-match = ^BLACKTIP [A-Z0-9][A-Z0-9]C-([A-Z0-9\-])+ name = Edxis Chromebook 11 [boards/amd64/apollolake/coral/blacktip/2] hwid-match = ^BLACKTIP [A-Z0-9][A-Z0-9]E-([A-Z0-9\-])+ name = Lanix Chromebook C116 [boards/amd64/apollolake/coral/blacktip/3] hwid-match = ^BLACKTIP [A-Z0-9][A-Z0-9]F-([A-Z0-9\-])+ name = Multilaser Chromebook M11C-PC912 [boards/amd64/apollolake/coral/blacktip/4] hwid-match = ^BLACKTIP [A-Z0-9][A-Z0-9]D-([A-Z0-9\-])+ name = 
Positivo Chromebook N2110 [boards/amd64/apollolake/coral/blacktip360] codename = blacktip360 [boards/amd64/apollolake/coral/blacktip360/0] hwid-match = ^BLACKTIP360 [A-Z0-9][A-Z0-9]B-([A-Z0-9\-])+ name = CTL Chromebook NL7, NL7T-360, NL7TW-360 [boards/amd64/apollolake/coral/blacktip360/1] hwid-match = ^BLACKTIP360 [A-Z0-9][A-Z0-9]C-([A-Z0-9\-])+ name = Edxis Chromebook X11 [boards/amd64/apollolake/coral/blacktip360/2] hwid-match = ^BLACKTIP360 [A-Z0-9][A-Z0-9]F-([A-Z0-9\-])+ name = Lanix Chromebook C116 [boards/amd64/apollolake/coral/blacktip360/3] hwid-match = ^BLACKTIP360 [A-Z0-9][A-Z0-9]G-([A-Z0-9\-])+ name = Multilaser Chromebook M11HC-PC911 [boards/amd64/apollolake/coral/blacktip360/4] hwid-match = ^BLACKTIP360 [A-Z0-9][A-Z0-9]D-([A-Z0-9\-])+ name = Positivo Chromebook N2112 [boards/amd64/apollolake/coral/blacktip360/5] hwid-match = ^BLACKTIP360 [A-Z0-9][A-Z0-9]E-([A-Z0-9\-])+ name = Viglen Chromebook 360C [boards/amd64/apollolake/coral/blacktiplte] codename = blacktiplte hwid-match = ^BLACKTIPLTE [A-Z0-9][A-Z0-9]B-([A-Z0-9\-])+ name = CTL Chromebook NL7 LTE [boards/amd64/apollolake/coral/blue] codename = blue hwid-match = ^BLUE .* name = Acer Chromebook 15 (CB315-1H, CB315-1HT) [boards/amd64/apollolake/coral/bruce] codename = bruce hwid-match = ^BRUCE .* name = Acer Chromebook Spin 15 (CP315) [boards/amd64/apollolake/coral/epaulette] codename = epaulette hwid-match = ^EPAULETTE .* name = Acer Chromebook 514 [boards/amd64/apollolake/coral/lava] codename = lava hwid-match = ^LAVA .* name = Acer Chromebook Spin 11 (CP311-1H, CP311-1HN) [boards/amd64/apollolake/coral/nasher] codename = nasher hwid-match = ^NASHER .* name = Dell Chromebook 11 (5190) [boards/amd64/apollolake/coral/nasher360] codename = nasher360 hwid-match = ^NASHER360 .* name = Dell Chromebook 11 2-in-1 (5190) [boards/amd64/apollolake/coral/rabbid] codename = rabbid hwid-match = ^RABBID .* name = ASUS Chromebook C403 [boards/amd64/apollolake/coral/rabbid/0] hwid-match = ^RABBID [ABCD].* name = ASUS Chromebook C423 [boards/amd64/apollolake/coral/rabbid/1] hwid-match = ^(RABBID [EF].*|RABBID [G].{11}B.*) name = ASUS Chromebook CX1400CNA [boards/amd64/apollolake/coral/robo] codename = robo hwid-match = ^ROBO .* name = Lenovo 100e Chromebook [boards/amd64/apollolake/coral/robo360] codename = robo360 hwid-match = ^ROBO360 .* name = Lenovo 500e Chromebook [boards/amd64/apollolake/coral/santa] codename = santa hwid-match = ^SANTA .* name = Acer Chromebook 11 (CB311-8H, CB311-8HT) [boards/amd64/apollolake/coral/whitetip] codename = whitetip [boards/amd64/apollolake/coral/whitetip/0] hwid-match = ^WHITETIP [A-Z0-9][A-Z0-9]D-([A-Z0-9\-])+ name = CTL Chromebook J41, J41T [boards/amd64/apollolake/coral/whitetip/1] hwid-match = ^WHITETIP [A-Z0-9][A-Z0-9]G-([A-Z0-9\-])+ name = PCmerge Chromebook AL116 [boards/amd64/apollolake/coral/whitetip/2] hwid-match = ^WHITETIP [A-Z0-9][A-Z0-9]E-([A-Z0-9\-])+ name = Prowise Chromebook Eduline [boards/amd64/apollolake/coral/whitetip/3] hwid-match = ^WHITETIP [A-Z0-9][A-Z0-9]F-([A-Z0-9\-])+ name = Sector 5 E3 Chromebook [boards/amd64/apollolake/coral/whitetip/4] hwid-match = ^WHITETIP [A-Z0-9][A-Z0-9]H-([A-Z0-9\-])+ name = Viglen Chromebook 11C [boards/amd64/apollolake/coral/whitetip/5] hwid-match = ^WHITETIP DEVELOPMENT [boards/amd64/apollolake/reef] codename = reef image-max-size = 32 MiB [boards/amd64/apollolake/reef/0] hwid-match = ^REEF \w{3}-C\w{2}(-\w{3})* name = ASUS Chromebook Flip C213 [boards/amd64/apollolake/reef/1] hwid-match = ^REEF \w{3}-B\w{2}(-\w{3})* name = Acer Chromebook Spin 11 
(R751T, CP511) [boards/amd64/apollolake/reef/pyro] codename = pyro hwid-match = ^PYRO .* name = Lenovo ThinkPad 11e 4th Gen Chromebook [boards/amd64/apollolake/reef/sand] codename = sand hwid-match = ^SAND .* name = Acer Chromebook 15 (CB515-1H, CB515-1HT) [boards/amd64/apollolake/reef/snappy] codename = snappy [boards/amd64/apollolake/reef/snappy/0] hwid-match = ^SNAPPY ([A-Z0-9])+-B[A-Z0-9][A-Z0-9]-([A-Z0-9])+-([A-Z0-9\-])+ name = HP Chromebook 11/11a G6 EE [boards/amd64/apollolake/reef/snappy/1] hwid-match = ^SNAPPY ([A-Z0-9])+-C[A-Z0-9][A-Z0-9]-([A-Z0-9])+-([A-Z0-9\-])+ name = HP Chromebook 14 G5 [boards/amd64/apollolake/reef/snappy/2] hwid-match = ^SNAPPY ([A-Z0-9])+-[^BC].* name = HP Chromebook x360 11 G1 EE [boards/amd64/baytrail] codename = chipset-byt [boards/amd64/baytrail/rambi] codename = rambi hwid-match = ^RAMBI .* image-max-size = 16 MiB [boards/amd64/baytrail/rambi/banjo] codename = banjo hwid-match = ^BANJO .* image-max-size = 16 MiB name = Acer Chromebook 15 (CB3-531) [boards/amd64/baytrail/rambi/candy] codename = candy hwid-match = ^CANDY .* name = Dell Chromebook 11 (3120) [boards/amd64/baytrail/rambi/clapper] codename = clapper hwid-match = ^CLAPPER .* name = Lenovo N20 Chromebook [boards/amd64/baytrail/rambi/enguarde] codename = enguarde [boards/amd64/baytrail/rambi/enguarde/0] hwid-match = ^ENGUARDE \w{3}-\w{3}-\w{2}K(-\w{3})*$ name = ASI Chromebook [boards/amd64/baytrail/rambi/enguarde/1] hwid-match = ^ENGUARDE \w{3}-\w{3}-\w{2}A(-\w{3})*$ name = CTL N6 Education Chromebook [boards/amd64/baytrail/rambi/enguarde/10] hwid-match = ^ENGUARDE \w{3}-\w{3}-\w{2}H(-\w{3})*$ name = True IDC Chromebook [boards/amd64/baytrail/rambi/enguarde/11] hwid-match = ^ENGUARDE \w{3}-\w{3}-\w{2}I(-\w{3})*$ name = Videonet Chromebook [boards/amd64/baytrail/rambi/enguarde/12] hwid-match = ^ENGUARDE \w{3}-\w{3}-\w{2}J(-\w{3})*$ name = eduGear Chromebook R [boards/amd64/baytrail/rambi/enguarde/2] hwid-match = ^ENGUARDE \w{3}-\w{3}-\w{2}G(-\w{3})*$ name = Crambo Chromebook [boards/amd64/baytrail/rambi/enguarde/3] hwid-match = ^ENGUARDE DEVELOPMENT [boards/amd64/baytrail/rambi/enguarde/4] hwid-match = ^ENGUARDE \w{3}-\w{3}-\w{2}D(-\w{3})*$ name = Edxis Education Chromebook (NL6) [boards/amd64/baytrail/rambi/enguarde/5] hwid-match = ^ENGUARDE \w{3}-\w{3}-\w{2}L(-\w{3})*$ name = JP Sa Couto Chromebook [boards/amd64/baytrail/rambi/enguarde/6] hwid-match = ^ENGUARDE \w{3}-\w{3}-\w{2}E(-\w{3})*$ name = Lenovo N21 Chromebook [boards/amd64/baytrail/rambi/enguarde/7] hwid-match = ^ENGUARDE \w{3}-\w{3}-\w{2}B(-\w{3})*$ name = M&A Chromebook [boards/amd64/baytrail/rambi/enguarde/8] hwid-match = ^ENGUARDE \w{3}-\w{3}-\w{2}F(-\w{3})*$ name = RGS Education Chromebook [boards/amd64/baytrail/rambi/enguarde/9] hwid-match = ^ENGUARDE \w{3}-\w{3}-\w{2}C(-\w{3})*$ name = Senkatel C1101 Chromebook [boards/amd64/baytrail/rambi/expresso] codename = expresso [boards/amd64/baytrail/rambi/expresso/0] hwid-match = ^EXPRESSO \w{3}-\w{3}-\w{2}B(-\w{3})*$ name = Bobicus Chromebook 11 [boards/amd64/baytrail/rambi/expresso/1] hwid-match = ^EXPRESSO DEVELOPMENT [boards/amd64/baytrail/rambi/expresso/2] hwid-match = ^EXPRESSO \w{3}-\w{3}-\w{2}C(-\w{3})*$ name = Edxis Chromebook [boards/amd64/baytrail/rambi/expresso/3] hwid-match = ^EXPRESSO \w{3}-\w{3}-\w{2}A(-\w{3})*$|^EXPRESSO \w{3}-\w{3}-\w{3}-A\w{2}(-\w{3})*$ name = HEXA Chromebook Pi [boards/amd64/baytrail/rambi/glimmer] codename = glimmer hwid-match = ^GLIMMER .* name = Lenovo ThinkPad 11e Chromebook [boards/amd64/baytrail/rambi/gnawty] codename = gnawty 
[boards/amd64/baytrail/rambi/gnawty/0] hwid-match = ^GNAWTY \w{3}-\w{3}-\w{3}$|^GNAWTY \w{3}-\w{3}-\w{2}A(-\w{3})*$ name = Acer Chromebook 11 (CB3-111, C730, C730E) [boards/amd64/baytrail/rambi/gnawty/1] hwid-match = ^GNAWTY \w{3}-\w{3}-\w{2}B(-\w{3})+$ name = Acer Chromebook 11 (CB3-131, C735) [boards/amd64/baytrail/rambi/heli] codename = heli hwid-match = ^HELI .* name = Haier Chromebook 11 G2 [boards/amd64/baytrail/rambi/kip] codename = kip [boards/amd64/baytrail/rambi/kip/0] name = HP Chromebook 11 2100-2199, 11 G3 [boards/amd64/baytrail/rambi/kip/1] hwid-match = ^KIP [ABC].* name = HP Chromebook 11 G3/G4/G4 EE [boards/amd64/baytrail/rambi/kip/2] hwid-match = ^KIP [DE].* name = HP Chromebook 14 G4 [boards/amd64/baytrail/rambi/ninja] codename = ninja hwid-match = ^NINJA .* name = AOpen Chromebox Commercial [boards/amd64/baytrail/rambi/orco] codename = orco hwid-match = ^ORCO .* name = Lenovo 100S Chromebook [boards/amd64/baytrail/rambi/quawks] codename = quawks hwid-match = ^QUAWKS .* name = ASUS Chromebook C300 [boards/amd64/baytrail/rambi/squawks] codename = squawks hwid-match = ^SQUAWKS .* name = ASUS Chromebook C200 [boards/amd64/baytrail/rambi/sumo] codename = sumo hwid-match = ^SUMO .* name = AOpen Chromebase Commercial [boards/amd64/baytrail/rambi/swanky] codename = swanky hwid-match = ^SWANKY .* name = Toshiba Chromebook 2 [boards/amd64/baytrail/rambi/winky] codename = winky hwid-match = ^WINKY .* name = Samsung Chromebook 2 11" - XE500C12 [boards/amd64/braswell] codename = chipset-bsw [boards/amd64/braswell/strago] codename = strago image-max-size = 32 MiB [boards/amd64/braswell/strago/banon] codename = banon hwid-match = ^BANON .* name = Acer Chromebook 15 (CB3-532) [boards/amd64/braswell/strago/celes] codename = celes hwid-match = ^CELES .* name = Samsung Chromebook 3 [boards/amd64/braswell/strago/cyan] codename = cyan hwid-match = ^CYAN .* name = Acer Chromebook R11 (CB5-132T, C738T) [boards/amd64/braswell/strago/edgar] codename = edgar hwid-match = ^EDGAR .* name = Acer Chromebook 14 (CB3-431) [boards/amd64/braswell/strago/kefka] codename = kefka [boards/amd64/braswell/strago/kefka/0] hwid-match = ^KEFKA \w{3}-[^B].* name = Dell Chromebook 11 (3180) [boards/amd64/braswell/strago/kefka/1] hwid-match = ^KEFKA \w{3}-B\w{2}(-\w{3})* name = Dell Chromebook 11 2-in-1 (3189) [boards/amd64/braswell/strago/reks] codename = reks [boards/amd64/braswell/strago/reks/0] hwid-match = ^REKS \w{3}-\w{3}-\w{3}-[^BCD]\w{2}(-\w{3})*$ name = Lenovo N22 Chromebook [boards/amd64/braswell/strago/reks/1] hwid-match = ^REKS \w{3}-\w{3}-\w{3}-D\w{2}(-\w{3})*$ name = Lenovo N23 Chromebook [boards/amd64/braswell/strago/reks/2] hwid-match = ^REKS \w{3}-\w{3}-\w{3}-C\w{2}(-\w{3})*$ name = Lenovo N23 Chromebook (Touch) [boards/amd64/braswell/strago/reks/3] hwid-match = ^REKS \w{3}-\w{3}-\w{3}-B\w{2}(-\w{3})*$ name = Lenovo N42 Chromebook [boards/amd64/braswell/strago/relm] codename = relm [boards/amd64/braswell/strago/relm/0] hwid-match = ^RELM ([A-Z2-9])+-((H[2-9][A-Z2-7]-([A-Z2-9\-])+)|(F9[A-Z2-7]-([A-Z2-9])+-[A-Z2-7][2-9][A-DI-LQ-TYZ23]-[A-P]([A-Z2-9\-])+)) name = Acer Chromebook 11 N7 (C731, C731T) [boards/amd64/braswell/strago/relm/1] hwid-match = ^RELM ([A-Z0-9])+-B[A-Z0-9][A-Z0-9]-([A-Z0-9\-])+ name = CTL NL61 Chromebook [boards/amd64/braswell/strago/relm/2] hwid-match = ^RELM ([A-Z0-9])+-E[A-Z0-9][A-Z0-9]-([A-Z0-9\-])+ name = EduGear Chromebook R [boards/amd64/braswell/strago/relm/3] hwid-match = ^RELM ([A-Z0-9])+-C[A-Z0-9][A-Z0-9]-([A-Z0-9\-])+ name = Edxis Education Chromebook (NL6D) 
[boards/amd64/braswell/strago/relm/4] hwid-match = ^RELM ([A-Z0-9])+-G[A-Z0-9][A-Z0-9]-([A-Z0-9\-])+ name = HP Chromebook 11 G5 EE [boards/amd64/braswell/strago/relm/5] hwid-match = ^RELM ([A-Z2-9])+-((F8[A-Z2-7])|(F9[A-Z2-7]-([A-Z2-9])+-[A-Z2-7][2-9][E-HM-PU-X4-7]))-([A-Z2-9\-])+ name = Mecer V2 Chromebook [boards/amd64/braswell/strago/relm/6] hwid-match = ^RELM ([A-Z0-9])+-A[A-Z0-9][A-Z0-9]-([A-Z0-9\-])+ [boards/amd64/braswell/strago/relm/7] hwid-match = ^RELM ([A-Z0-9])+-I[A-Z0-9][A-Z0-9]-([A-Z0-9\-])+ name = Positivo Chromebook C216B [boards/amd64/braswell/strago/setzer] codename = setzer hwid-match = ^SETZER .* name = HP Chromebook 11 G5 [boards/amd64/braswell/strago/terra] codename = terra [boards/amd64/braswell/strago/terra/0] hwid-match = ^TERRA \w{3}-\w{3}-\w{3}-A.* name = ASUS Chromebook C202SA [boards/amd64/braswell/strago/terra/1] hwid-match = ^TERRA \w{3}-\w{3}-\w{3}-B.* name = ASUS Chromebook C300SA, C301SA [boards/amd64/braswell/strago/ultima] codename = ultima hwid-match = ^ULTIMA .* name = Lenovo ThinkPad 11e 3rd Gen Chromebook [boards/amd64/braswell/strago/wizpig] codename = wizpig [boards/amd64/braswell/strago/wizpig/0] hwid-match = ^WIZPIG \w{3}-\w{3}-\w{2}A(-\w{3})* name = CTL J5 Chromebook [boards/amd64/braswell/strago/wizpig/1] hwid-match = ^WIZPIG \w{3}-\w{3}-\w{2}B(-\w{3})* name = Edugear CMT Chromebook [boards/amd64/braswell/strago/wizpig/2] hwid-match = ^WIZPIG \w{3}-\w{3}-\w{2}C(-\w{3})* name = Haier Chromebook 11 C [boards/amd64/braswell/strago/wizpig/3] hwid-match = ^WIZPIG \w{3}-\w{3}-\w{2}I(-\w{3})* name = Multilaser Chromebook M11C [boards/amd64/braswell/strago/wizpig/4] hwid-match = ^WIZPIG \w{3}-\w{3}-\w{2}H(-\w{3})* name = PCMerge Chromebook PCM-116T-432B [boards/amd64/braswell/strago/wizpig/5] hwid-match = ^WIZPIG \w{3}-\w{3}-\w{2}G(-\w{3})* name = Prowise Chromebook Proline [boards/amd64/braswell/strago/wizpig/6] hwid-match = ^WIZPIG \w{3}-\w{3}-\w{2}D(-\w{3})* name = Viglen Chromebook 360 [boards/amd64/broadwell] codename = chipset-bdw [boards/amd64/broadwell/auron] codename = auron image-max-size = 16 MiB [boards/amd64/broadwell/auron/buddy] codename = buddy hwid-match = ^BUDDY .* name = Acer Chromebase 24 [boards/amd64/broadwell/auron/buddy/cfm] codename = buddy-cfm [boards/amd64/broadwell/auron/gandof] codename = gandof hwid-match = ^GANDOF .* name = Toshiba Chromebook 2 (2015 Edition) [boards/amd64/broadwell/auron/lulu] codename = lulu hwid-match = ^LULU .* name = Dell Chromebook 13 (7310) [boards/amd64/broadwell/auron/paine] codename = paine hwid-match = ^PAINE .* name = Acer Chromebook 11 (C740) [boards/amd64/broadwell/auron/samus] codename = samus hwid-match = ^SAMUS .* name = Google Chromebook Pixel (2015) [boards/amd64/broadwell/auron/yuna] codename = yuna hwid-match = ^YUNA .* name = Acer Chromebook 15 (CB5-571, C910) [boards/amd64/broadwell/jecht] codename = jecht image-max-size = 16 MiB [boards/amd64/broadwell/jecht/guado] codename = guado hwid-match = ^GUADO .* name = ASUS Chromebox 2 (CN62) [boards/amd64/broadwell/jecht/guado/cfm] codename = guado-cfm [boards/amd64/broadwell/jecht/rikku] codename = rikku hwid-match = ^RIKKU .* name = Acer Chromebox CXI2, CXV2 [boards/amd64/broadwell/jecht/rikku/cfm] codename = rikku-cfm [boards/amd64/broadwell/jecht/tidus] codename = tidus hwid-match = ^TIDUS .* name = Lenovo ThinkCentre Chromebox [boards/amd64/cezanne] codename = chipset-cezanne [boards/amd64/cezanne/guybrush] codename = guybrush image-max-size = 512 MiB [boards/amd64/cezanne/guybrush/dewatt] codename = dewatt hwid-match = ^DEWATT[-| 
].* name = Acer Chromebook Spin 514 (CP514-3H, CP514-3HH, CP514-3WH) [boards/amd64/cezanne/guybrush/nipperkin] codename = nipperkin hwid-match = ^NIPPERKIN[-| ].* name = HP Elite c645 G2 Chromebook [boards/amd64/cometlake] codename = chipset-cml [boards/amd64/cometlake/drallion] codename = drallion hwid-match = ^DRALLION-.* image-max-size = 512 MiB name = Dell Latitude 7410 Chromebook Enterprise [boards/amd64/cometlake/drallion360] codename = drallion360 hwid-match = ^DRALLION360-.* image-max-size = 512 MiB name = Dell Latitude 7410 Chromebook Enterprise [boards/amd64/cometlake/hatch] codename = hatch image-max-size = 512 MiB [boards/amd64/cometlake/hatch/akemi] codename = akemi hwid-match = ^AKEMI-AOKF .* name = Lenovo IdeaPad Flex 5i Chromebook (13", 5) [boards/amd64/cometlake/hatch/dragonair] codename = dragonair hwid-match = ^DRAGONAIR-.* name = HP Chromebook x360 14c [boards/amd64/cometlake/hatch/dratini] codename = dratini hwid-match = ^DRATINI-.* name = HP Pro c640 Chromebook (Enterprise) [boards/amd64/cometlake/hatch/helios] codename = helios hwid-match = ^HELIOS-.* name = ASUS Chromebook Flip C436FA [boards/amd64/cometlake/hatch/jinlon] codename = jinlon hwid-match = ^JINLON-YTGY .* name = HP Elite c1030 Chromebook, HP Chromebook x360 13c [boards/amd64/cometlake/hatch/kindred] codename = kindred hwid-match = ^KINDRED-.* name = Acer Chromebook 712 (C871) [boards/amd64/cometlake/hatch/kled] codename = kled hwid-match = ^KLED-.* name = Acer Chromebook Spin 713 (CP713-2W) [boards/amd64/cometlake/hatch/kohaku] codename = kohaku hwid-match = ^KOHAKU.* name = Samsung Galaxy Chromebook [boards/amd64/cometlake/hatch/nightfury] codename = nightfury hwid-match = ^NIGHTFURY[-| ].* name = Samsung Galaxy Chromebook 2 [boards/amd64/cometlake/puff] codename = puff image-max-size = 512 MiB [boards/amd64/cometlake/puff/ambassador] codename = ambassador [boards/amd64/cometlake/puff/ambassador/0] hwid-match = ^AMBASSADOR-ANWJ.* name = ASUS Meet Compute System (Intel 10th Gen) [boards/amd64/cometlake/puff/ambassador/1] hwid-match = ^AMBASSADOR-NCZK.* name = CTL Meet Compute System (Intel 10th Gen) [boards/amd64/cometlake/puff/ambassador/2] hwid-match = ^AMBASSADOR-.* name = Meet Compute System (Intel 10th Gen) [boards/amd64/cometlake/puff/ambassador/genesis] codename = genesis hwid-match = ^GENESIS-.* name = Meet Compute System - Series One (Intel 10th Gen) [boards/amd64/cometlake/puff/ambassador/moonbuggy] codename = moonbuggy hwid-match = ^MOONBUGGY-.* name = Series One Board 65 [boards/amd64/cometlake/puff/ambassador/scout] codename = scout hwid-match = ^SCOUT-.* name = Series One Desk 27 [boards/amd64/cometlake/puff/dooly] codename = dooly hwid-match = ^DOOLY-SBGV.* name = HP Chromebase 21.5" All-in-One Desktop [boards/amd64/cometlake/puff/duffy] codename = duffy hwid-match = ^DUFFY-.* name = ASUS Chromebox 4 [boards/amd64/cometlake/puff/faffy] codename = faffy hwid-match = ^FAFFY-.* name = ASUS Fanless Chromebox [boards/amd64/cometlake/puff/kaisa] codename = kaisa hwid-match = ^KAISA.* name = Acer Chromebox CXI4 [boards/amd64/cometlake/puff/noibat] codename = noibat hwid-match = ^NOIBAT-.* name = HP Chromebox G3 [boards/amd64/cometlake/puff/wyvern] codename = wyvern [boards/amd64/cometlake/puff/wyvern/0] hwid-match = ^WYVERN-JLOF.* name = CTL Chromebox (Enterprise) CBx2 [boards/amd64/cometlake/puff/wyvern/1] hwid-match = ^WYVERN-JOTV.* name = Promethean Chromebox 2 [boards/amd64/cometlake/puff/wyvern/2] hwid-match = ^WYVERN-JRVR.* name = ViewSonic NMP760 Chromebox 
[boards/amd64/cometlake/puff/wyvern/3] hwid-match = ^WYVERN-HOIZ .* name = Wyvern MobLab [boards/amd64/geminilake] codename = chipset-glk [boards/amd64/geminilake/octopus] codename = octopus image-max-size = 512 MiB [boards/amd64/geminilake/octopus/ampton] codename = ampton hwid-match = ^AMPTON .* name = ASUS Chromebook Flip C214 (EDU), C234 (Canada) [boards/amd64/geminilake/octopus/apel] codename = apel hwid-match = ^APEL .* name = ASUS Chromebook C204 [boards/amd64/geminilake/octopus/apele] codename = apele hwid-match = ^APELE .* name = ASUS Chromebook CX1101 (CX1101CMA) [boards/amd64/geminilake/octopus/bloog] codename = bloog hwid-match = ^BLOOG .* name = HP Chromebook x360 12b [boards/amd64/geminilake/octopus/blooglet] codename = blooglet hwid-match = ^BLOOGLET .* name = HP Chromebook 14a-na0 [boards/amd64/geminilake/octopus/blooguard] codename = blooguard [boards/amd64/geminilake/octopus/blooguard/0] hwid-match = ^BLOOGUARD [DEF].{11}A.* name = HP Chromebook x360 14a [boards/amd64/geminilake/octopus/blooguard/1] hwid-match = ^BLOOGUARD [ABC].* name = HP Chromebook x360 14b [boards/amd64/geminilake/octopus/blorb] codename = blorb hwid-match = ^BLORB .* name = Acer Chromebook 315 (CB315) [boards/amd64/geminilake/octopus/bluebird] codename = bluebird hwid-match = ^BLUEBIRD .* name = Samsung Chromebook 4 [boards/amd64/geminilake/octopus/bobba] codename = bobba [boards/amd64/geminilake/octopus/bobba/0] hwid-match = ^(BOBBA [ABC].*|BOBBA [G].{11}A.*) name = Acer Chromebook 311 (C733, C733U, C733T) [boards/amd64/geminilake/octopus/bobba/1] hwid-match = ^(BOBBA [DEF].*)|(BOBBA [G].{11}B.*) name = Acer Chromebook 311 (CB311-9H, CB311-9HT) [boards/amd64/geminilake/octopus/bobba360] codename = bobba360 [boards/amd64/geminilake/octopus/bobba360/0] hwid-match = ^(BOBBA360 [DE].*)|(BOBBA360 [F].{11}B.*) name = Acer Chromebook Spin 311 (CP311-2H, CP311-2HN) [boards/amd64/geminilake/octopus/bobba360/1] hwid-match = ^(BOBBA360 [ABC].*)|(BOBBA360 [F].{11}A.*) name = Acer Chromebook Spin 511 (R752T, R752TN) [boards/amd64/geminilake/octopus/casta] codename = casta hwid-match = ^CASTA .* name = Samsung Chromebook 4+ [boards/amd64/geminilake/octopus/dood] codename = dood hwid-match = ^DOOD .* name = NEC Chromebook Y2 [boards/amd64/geminilake/octopus/dorp] codename = dorp hwid-match = ^DORP .* name = HP Chromebook 14 G6 [boards/amd64/geminilake/octopus/droid] codename = droid [boards/amd64/geminilake/octopus/droid/0] hwid-match = ^DROID [D-F].* name = Acer Chromebook 314 (C933L/LT) [boards/amd64/geminilake/octopus/droid/1] hwid-match = ^DROID [A-C][0-9][^N]-.* name = Acer Chromebook 314 (CB314) [boards/amd64/geminilake/octopus/droid/2] hwid-match = ^DROID [A-C][0-9]N-.* name = Packard Bell Chromebook 314 (PCB314) [boards/amd64/geminilake/octopus/fleex] codename = fleex hwid-match = ^FLEEX .* name = Dell Chromebook 3100 [boards/amd64/geminilake/octopus/foob] codename = foob [boards/amd64/geminilake/octopus/foob/0] hwid-match = ^FOOB \w{3}-\w{3}-\w{3}-B\w{2}-.* name = CTL Chromebook VX11, VX11T [boards/amd64/geminilake/octopus/foob/1] hwid-match = ^FOOB \w{3}-\w{3}-\w{3}-C\w{2}-.* name = Poin2 Chromebook 11P [boards/amd64/geminilake/octopus/foob360] codename = foob360 [boards/amd64/geminilake/octopus/foob360/0] hwid-match = ^FOOB360 \w{3}-\w{3}-\w{3}-B\w{2}-.* name = CTL Chromebook VX11T [boards/amd64/geminilake/octopus/foob360/1] hwid-match = ^FOOB360 \w{3}-\w{3}-\w{3}-C\w{2}-.* name = Poin2 Chromebook 11P [boards/amd64/geminilake/octopus/garfour] codename = garfour hwid-match = ^GARFOUR 
\w{3}-\w{3}-\w{3}-B\w{2}-.* name = CTL Chromebook NL81, NL81T [boards/amd64/geminilake/octopus/garg] codename = garg [boards/amd64/geminilake/octopus/garg/0] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-L\w{2}-.* name = ADVAN Chromebook 116 [boards/amd64/geminilake/octopus/garg/1] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-P\w{2}-.* name = Axioo Chromebook [boards/amd64/geminilake/octopus/garg/2] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-E\w{2}-.* name = Baicells Chromebook BB01 [boards/amd64/geminilake/octopus/garg/3] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-B\w{2}-.* name = CTL Chromebook NL71/CT/LTE [boards/amd64/geminilake/octopus/garg/4] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-M\w{2}-.* name = EVERCOSS Chromebook CB1/CB1A [boards/amd64/geminilake/octopus/garg/5] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-C\w{2}-.* name = Edxis Chromebook 11 (S20-C) [boards/amd64/geminilake/octopus/garg/6] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-J\w{2}-.* name = JOI Chromebook C100 [boards/amd64/geminilake/octopus/garg/7] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-F\w{2}-.* name = Multilaser Chromebook M11C-PC914 [boards/amd64/geminilake/octopus/garg/8] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-D\w{2}-.* name = Pixart Rxart Chromebook [boards/amd64/geminilake/octopus/garg/9] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-G\w{2}-.* name = Poin2 Chromebook 11A [boards/amd64/geminilake/octopus/garg/10] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-H\w{2}-.* name = Positivo Chromebook N2210 [boards/amd64/geminilake/octopus/garg/11] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-O\w{2}-.* name = SPC Chromebook X1 Mini [boards/amd64/geminilake/octopus/garg/12] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-I\w{2}-.* name = Sector 5 E4 LTE Chromebook [boards/amd64/geminilake/octopus/garg/13] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-K\w{2}-.* name = WS Chromebook A101 [boards/amd64/geminilake/octopus/garg/14] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-R\w{2}-.* name = XO Chromebook [boards/amd64/geminilake/octopus/garg/15] hwid-match = ^GARG \w{3}-\w{3}-\w{3}-N\w{2}-.* name = Zyrex Chromebook (M432-1, M432-128), Zyrex Chromebook 360-1 [boards/amd64/geminilake/octopus/garg360] codename = garg360 [boards/amd64/geminilake/octopus/garg360/0] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-L\w{2}-.* name = ADVAN Chromebook 116 [boards/amd64/geminilake/octopus/garg360/1] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-H\w{2}-.* name = Ascon Chromebook 11A [boards/amd64/geminilake/octopus/garg360/2] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-P\w{2}-.* name = Axioo Chromebook 360 [boards/amd64/geminilake/octopus/garg360/3] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-E\w{2}-.* name = Baicells Chromebook BB01 [boards/amd64/geminilake/octopus/garg360/4] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-B\w{2}-.* name = CTL Chromebook NL71T/TW/TWB [boards/amd64/geminilake/octopus/garg360/5] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-M\w{2}-.* name = EVERCOSS Chromebook CB1A [boards/amd64/geminilake/octopus/garg360/6] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-C\w{2}-.* name = Edxis Chromebook 11 (S20-X) [boards/amd64/geminilake/octopus/garg360/7] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-J\w{2}-.* name = JOI Chromebook C100 [boards/amd64/geminilake/octopus/garg360/8] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-F\w{2}-.* name = Multilaser Chromebook M11HC-PC915 [boards/amd64/geminilake/octopus/garg360/9] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-D\w{2}-.* name = Pixart Rxart Chromebook [boards/amd64/geminilake/octopus/garg360/10] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-G\w{2}-.* name = Poin2 Chromebook 11A [boards/amd64/geminilake/octopus/garg360/11] hwid-match 
= ^GARG360 \w{3}-\w{3}-\w{3}-I\w{2}-.* name = Positivo Chromebook N2212 [boards/amd64/geminilake/octopus/garg360/12] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-O\w{2}-.* name = SPC Chromebook X1 Mini [boards/amd64/geminilake/octopus/garg360/13] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-K\w{2}-.* name = WS Chromebook A101 [boards/amd64/geminilake/octopus/garg360/14] hwid-match = ^GARG360 \w{3}-\w{3}-\w{3}-N\w{2}-.* name = Zyrex Chromebook 360 [boards/amd64/geminilake/octopus/grabbiter] codename = grabbiter hwid-match = ^GRABBITER .* name = Dell Chromebook 3100 2-in-1 [boards/amd64/geminilake/octopus/laser14] codename = laser14 [boards/amd64/geminilake/octopus/laser14/0] hwid-match = ^LASER14 [D|E|G].{11}B.* name = Lenovo IdeaPad 3 CB 14IGL05 [boards/amd64/geminilake/octopus/laser14/1] hwid-match = ^LASER14 [ABC][A-Z0-9].*|^LASER14 [E].{11}A.* name = Lenovo Chromebook S340-14 (Touch) [boards/amd64/geminilake/octopus/lick] codename = lick hwid-match = ^LICK .* name = Lenovo Ideapad 3 Chromebook 11IGL05 [boards/amd64/geminilake/octopus/meep] codename = meep hwid-match = ^MEEP .* name = HP Chromebook x360 11 G2 EE [boards/amd64/geminilake/octopus/mimrock] codename = mimrock hwid-match = ^MIMROCK .* name = HP Chromebook 11 G7 EE [boards/amd64/geminilake/octopus/nospike] codename = nospike hwid-match = ^NOSPIKE .* name = ASUS Chromebook C424 [boards/amd64/geminilake/octopus/orbatrix] codename = orbatrix hwid-match = ^ORBATRIX [ABCD].* name = Dell Chromebook 3400 [boards/amd64/geminilake/octopus/phaser] codename = phaser hwid-match = ^PHASER .* name = Lenovo 100e Chromebook 2nd Gen [boards/amd64/geminilake/octopus/phaser360] codename = phaser360 [boards/amd64/geminilake/octopus/phaser360/0] hwid-match = ^PHASER360 [G|H].{11}C.* name = Lenovo IdeaPad Flex 3 CB 11IGL05 [boards/amd64/geminilake/octopus/phaser360/1] hwid-match = ^PHASER360 [ABCD][A-Z0-9].*|^PHASER360 [G|H|K|L|M].{11}B.* name = Lenovo 300e/500e Chromebook 2nd Gen [boards/amd64/geminilake/octopus/phaser360/2] hwid-match = ^PHASER360 [E|F][A-Z0-9].*|^PHASER360 [G|H|K|L|M|N].{11}C.* name = Lenovo Chromebook C340-11, 300e, 500e 2nd Gen [boards/amd64/geminilake/octopus/phaser360/3] hwid-match = ^PHASER360 [G|H|K].{11}A.* name = NEC Chromebook Y1 [boards/amd64/geminilake/octopus/phaser360/4] hwid-match = ^PHASER360 [G|H|K|L|M].{11}D.* name = NEC Chromebook Y1 Gen 2 [boards/amd64/geminilake/octopus/sparky] codename = sparky hwid-match = ^SPARKY .* name = Acer Chromebook 512 (C851, C851T) [boards/amd64/geminilake/octopus/sparky360] codename = sparky360 hwid-match = ^SPARKY360 .* name = Acer Chromebook Spin 512 (R851TN) [boards/amd64/geminilake/octopus/vorticon] codename = vorticon hwid-match = ^VORTICON .* name = Chromebook 11 G8 EE [boards/amd64/geminilake/octopus/vortininja] codename = vortininja hwid-match = ^VORTININJA .* name = Chromebook x360 11 G3 EE [boards/amd64/haswell] codename = chipset-hsw [boards/amd64/haswell/beltino] codename = beltino image-max-size = 16 MiB [boards/amd64/haswell/beltino/mccloud] codename = mccloud hwid-match = ^MCCLOUD .* name = Acer Chromebox [boards/amd64/haswell/beltino/monroe] codename = monroe [boards/amd64/haswell/beltino/monroe/0] hwid-match = ^MONROE .4.* name = LG Chromebase (22CB25S) [boards/amd64/haswell/beltino/monroe/1] hwid-match = ^MONROE .[23].* name = LG Chromebase (22CV241) [boards/amd64/haswell/beltino/panther] codename = panther hwid-match = ^PANTHER .* name = ASUS Chromebox (CN60) [boards/amd64/haswell/beltino/tricky] codename = tricky hwid-match = ^TRICKY .* name = Dell Chromebox 
[boards/amd64/haswell/beltino/zako] codename = zako hwid-match = ^ZAKO .* name = HP Chromebox G1 [boards/amd64/haswell/slippy] codename = slippy image-max-size = 16 MiB [boards/amd64/haswell/slippy/falco] codename = falco hwid-match = ^FALCO [ABC].* name = HP Chromebook 14 q000-q099, 14-SMB Atheros [boards/amd64/haswell/slippy/falco/li] codename = falco_li hwid-match = ^FALCO (D|LI-).* name = HP Chromebook 14 q000-q099 WP2, 14-SMB Intel Corp [boards/amd64/haswell/slippy/leon] codename = leon hwid-match = ^LEON .* name = Toshiba Chromebook [boards/amd64/haswell/slippy/peppy] codename = peppy hwid-match = ^PEPPY .* name = Acer Chromebook 11 (C720, C720P) [boards/amd64/haswell/slippy/wolf] codename = wolf hwid-match = ^WOLF .* name = Dell Chromebook 11 [boards/amd64/ivybridge] codename = chipset-ivb [boards/amd64/ivybridge/link] codename = link hwid-match = ^LINK .* image-max-size = 8 MiB name = Google Chromebook Pixel [boards/amd64/ivybridge/parrot] codename = parrot_ivb hwid-match = ^PARROT .*-E .* image-max-size = 8 MiB name = Acer C7 Chromebook IVB [boards/amd64/ivybridge/stout] codename = stout hwid-match = ^STOUT .* image-max-size = 8 MiB name = Lenovo Thinkpad X131e Chromebook [boards/amd64/jasperlake] codename = chipset-jsl [boards/amd64/jasperlake/dedede] codename = dedede image-max-size = 512 MiB [boards/amd64/jasperlake/dedede/blipper] codename = blipper hwid-match = ^BLIPPER-SBBR .* name = Lenovo 3i-15 Chromebook [boards/amd64/jasperlake/dedede/blipper/beetley] codename = beetley hwid-match = BEETLEY.* name = Lenovo (IdeaPad) Flex 3i Chromebook 15 (15, 7) [boards/amd64/jasperlake/dedede/boten] codename = boten hwid-match = ^BOTEN-YGHA.* name = Lenovo 500e Chromebook Gen 3 [boards/amd64/jasperlake/dedede/boten/bookem] codename = bookem hwid-match = ^BOOKEM-LPEW.* name = Lenovo 100e Chromebook Gen 3 (Intel) [boards/amd64/jasperlake/dedede/boten/botenflex] codename = botenflex hwid-match = ^BOTENFLEX.* name = Lenovo (IdeaPad) Flex 3i-11 Chromebook (11", 6) [boards/amd64/jasperlake/dedede/bugzzy] codename = bugzzy hwid-match = ^BUGZZY.* name = Galaxy Chromebook 2 360 [boards/amd64/jasperlake/dedede/cret] codename = cret hwid-match = ^CRET-BKLL.* name = Dell Chromebook 3110 [boards/amd64/jasperlake/dedede/cret/cret360] codename = cret360 hwid-match = ^CRET360-HXIQ.* name = Dell Chromebook 3110 2-in-1 [boards/amd64/jasperlake/dedede/drawcia] codename = drawcia hwid-match = ^DRAWCIA-CFUL.* name = HP Chromebook x360 11 G4 EE [boards/amd64/jasperlake/dedede/drawcia/drawlat] codename = drawlat hwid-match = ^DRAWLAT-EKWL.* name = HP Chromebook 11 G9 EE [boards/amd64/jasperlake/dedede/drawcia/drawman] codename = drawman hwid-match = ^DRAWMAN-RHDN.* name = HP Chromebook 14 G7 [boards/amd64/jasperlake/dedede/drawcia/drawper] codename = drawper hwid-match = ^DRAWPER-OIXD.* name = HP Fortis 14 G10 Chromebook [boards/amd64/jasperlake/dedede/galtic] codename = galtic hwid-match = ^GALTIC-MOIP.* name = ASUS Chromebook CX1 [boards/amd64/jasperlake/dedede/galtic/galith] codename = galith hwid-match = ^GALITH-HFKU.* name = ASUS Chromebook CX1500 (CX1500CKA) [boards/amd64/jasperlake/dedede/galtic/galith360] codename = galith360 hwid-match = ^GALITH360-DSCL.* name = ASUS Chromebook Flip CX1500 (CX1500FKA) [boards/amd64/jasperlake/dedede/galtic/gallop] codename = gallop hwid-match = ^GALLOP-FOBB.* name = ASUS Chromebook CX1700CKA [boards/amd64/jasperlake/dedede/galtic/galnat] codename = galnat hwid-match = ^GALNAT-RGDH.* name = ASUS Chromebook CX1 CX1102 [boards/amd64/jasperlake/dedede/galtic/galnat360] 
codename = galnat360 hwid-match = ^GALNAT360.* name = ASUS Chromebook Flip CX1 (CX1102) [boards/amd64/jasperlake/dedede/galtic/galtic360] codename = galtic360 hwid-match = ^GALTIC360-RAKQ.* name = ASUS Chromebook Flip CX1400 (CX1400FKA) [boards/amd64/jasperlake/dedede/kracko] codename = kracko hwid-match = ^KRACKO-WPBT .* name = CTL Chromebook NL72 [boards/amd64/jasperlake/dedede/kracko/kracko360] codename = kracko360 [boards/amd64/jasperlake/dedede/kracko/kracko360/0] hwid-match = ^KRACKO360-BLXA .* name = CTL Chromebook NL72T [boards/amd64/jasperlake/dedede/kracko/kracko360/1] hwid-match = ^KRACKO360-LGAB .* name = LG Chromebook 11TC50Q, 11TQ50Q [boards/amd64/jasperlake/dedede/lantis] codename = lantis hwid-match = ^LANTIS-MEXL.* name = HP Chromebook 14a-na1 [boards/amd64/jasperlake/dedede/lantis/landia] codename = landia hwid-match = ^LANDIA-DSQH.* name = HP Chromebook x360 14a-ca1 [boards/amd64/jasperlake/dedede/lantis/landrid] codename = landrid hwid-match = ^LANDRID-ZMHB.* name = HP Chromebook 15a-na0 [boards/amd64/jasperlake/dedede/madoo] codename = madoo hwid-match = ^MADOO.* name = HP Chromebook x360 14b [boards/amd64/jasperlake/dedede/magolor] codename = magolor hwid-match = ^MAGOLOR-DUKI.* name = Acer Chromebook Spin 511 (R753T, R753TN, R752T-R) [boards/amd64/jasperlake/dedede/magolor/magister] codename = magister hwid-match = ^MAGISTER-RNPH.* name = Acer Chromebook Spin 314 (CP314-1H, CP314-1HN) [boards/amd64/jasperlake/dedede/magolor/maglet] codename = maglet hwid-match = ^MAGLET-CFGF .* name = Acer Chromebook 512 (C852) [boards/amd64/jasperlake/dedede/magolor/maglia] codename = maglia hwid-match = ^MAGLIA-VYRC.* name = Acer Chromebook Spin 512 (R853TA, R853TNA) [boards/amd64/jasperlake/dedede/magolor/maglith] codename = maglith hwid-match = ^MAGLITH-STMU.* name = Acer Chromebook 511 (C734, C734T, C733-R, C733T-R) [boards/amd64/jasperlake/dedede/magolor/magma] codename = magma hwid-match = ^MAGMA-QZPR.* name = Acer Chromebook 315 (CB315-4H, CB315-4HT) [boards/amd64/jasperlake/dedede/magolor/magneto] codename = magneto [boards/amd64/jasperlake/dedede/magolor/magneto/0] hwid-match = ^MAGNETO-BWYB.* name = Acer Chromebook 314 (CB314-3H, CB314-3HT, C934, C934T) [boards/amd64/jasperlake/dedede/magolor/magneto/1] hwid-match = ^MAGNETO-SGGB.* name = Packard Bell Chromebook 314 [boards/amd64/jasperlake/dedede/magolor/magneto/2] hwid-match = ^MAGNETO-SGGB.* name = Packard Bell Chromebook 314 (PCB314-2, PCB314-2T) [boards/amd64/jasperlake/dedede/magolor/magpie] codename = magpie hwid-match = ^MAGPIE-TQAU.* name = Acer Chromebook 317 [boards/amd64/jasperlake/dedede/metaknight] codename = metaknight hwid-match = ^METAKNIGHT-GNDV.* name = NEC Chromebook Y3 [boards/amd64/jasperlake/dedede/oscino] codename = oscino hwid-match = ^OSCINO-JZWV.* name = HP Fortis x360 11 G3 J Chromebook (Enterprise) [boards/amd64/jasperlake/dedede/pirika] codename = pirika [boards/amd64/jasperlake/dedede/pirika/0] hwid-match = ^PIRIKA-NPXS .* name = Axioo Chromebook P14 [boards/amd64/jasperlake/dedede/pirika/1] hwid-match = ^PIRIKA-BMAD .* name = CTL Chromebook Enterprise, PX14E, PX14EX, PX14EXT [boards/amd64/jasperlake/dedede/pirika/2] hwid-match = ^PIRIKA-XAJY .* name = Gateway Chromebook 14 [boards/amd64/jasperlake/dedede/pirika/pasara] codename = pasara hwid-match = ^PASARA-TZNR .* name = Gateway Chromebook 15 [boards/amd64/jasperlake/dedede/pirika/pirette] codename = pirette [boards/amd64/jasperlake/dedede/pirika/pirette/0] hwid-match = ^PIRETTE-LLJI .* name = Axioo Chromebook P11 
[boards/amd64/jasperlake/dedede/pirika/pirette/1] hwid-match = ^PIRETTE-RVKU .* name = CTL Chromebook PX11E [boards/amd64/jasperlake/dedede/pirika/pirette/2] hwid-match = ^PIRETTE-NGVJ .* name = SPC Chromebook Z1 Mini [boards/amd64/jasperlake/dedede/pirika/pirette/3] hwid-match = ^PIRETTE-UBKE .* name = Zyrex Chromebook M432-2 [boards/amd64/jasperlake/dedede/sasuke] codename = sasuke hwid-match = ^SASUKE-.* name = Galaxy Chromebook Go [boards/amd64/jasperlake/dedede/sasukette] codename = sasukette hwid-match = ^SASUKETTE-QACT .* name = Galaxy Chromebook Go 11 [boards/amd64/jasperlake/dedede/storo] codename = storo hwid-match = ^STORO-HIER.* name = ASUS Chromebook CR1100 (CR1100CKA) [boards/amd64/jasperlake/dedede/storo360] codename = storo360 hwid-match = ^STORO360-JLGJ.* name = ASUS Chromebook Flip CR1100 (CR1100FKA) [boards/amd64/kabylake] codename = chipset-kbl [boards/amd64/kabylake/fizz] codename = fizz hwid-match = ^FIZZ .* image-max-size = 32 MiB name = Chromebox Reference [boards/amd64/kabylake/fizz/endeavour] codename = endeavour hwid-match = ^ENDEAVOUR-HMDJ.* name = Meet Compute System - Series One [boards/amd64/kabylake/fizz/excelsior] codename = excelsior [boards/amd64/kabylake/fizz/excelsior/0] hwid-match = ^EXCELSIOR-URAR.* name = ASUS Meet Compute System [boards/amd64/kabylake/fizz/excelsior/1] hwid-match = ^EXCELSIOR-OOLH.* name = CTL Meet Compute System [boards/amd64/kabylake/fizz/jax] codename = jax [boards/amd64/kabylake/fizz/jax/0] hwid-match = ^JAX \w{3}-\w{3}-\w{1}4\w{1}(-\w{3})* name = AOpen Chromebox Commercial 2 [boards/amd64/kabylake/fizz/jax/1] hwid-match = ^JAX \w{3}-\w{3}-\w{1}5\w{1}(-\w{3})* name = Newline Chromebox A10 [boards/amd64/kabylake/fizz/kench] codename = kench hwid-match = ^KENCH .* name = HP Chromebox G2 [boards/amd64/kabylake/fizz/sion] codename = sion hwid-match = ^SION .* name = Acer Chromebox CXI3 [boards/amd64/kabylake/fizz/teemo] codename = teemo hwid-match = ^TEEMO .* name = ASUS Chromebox 3 (CN65) [boards/amd64/kabylake/fizz/teemo/cfm] codename = fizz-cfm [boards/amd64/kabylake/fizz/wukong] codename = wukong [boards/amd64/kabylake/fizz/wukong/0] hwid-match = ^WUKONG [A-Z0-9][A-Z0-9]D.* name = CTL Chromebox CBx1 [boards/amd64/kabylake/fizz/wukong/1] hwid-match = ^WUKONG [A-Z0-9][A-Z0-9](A|B|E) name = OWG [boards/amd64/kabylake/fizz/wukong/2] hwid-match = ^WUKONG [A-Z0-9][A-Z0-9]H.* name = Promethean Chromebox [boards/amd64/kabylake/fizz/wukong/3] hwid-match = ^WUKONG [A-Z0-9][A-Z0-9]I.* name = SMART Chromebox G3 [boards/amd64/kabylake/fizz/wukong/4] hwid-match = ^WUKONG [A-Z0-9][A-Z0-9]C.* name = ViewSonic NMP660 Chromebox [boards/amd64/kabylake/kalista] codename = kalista image-max-size = 512 MiB [boards/amd64/kabylake/kalista/karma] codename = karma hwid-match = ^KARMA .* name = Acer Chromebase CA24I2 [boards/amd64/kabylake/kalista/karma/cfm] codename = kalista-cfm [boards/amd64/kabylake/nami] codename = nami image-max-size = 32 MiB [boards/amd64/kabylake/nami/akali] codename = akali hwid-match = ^AKALI .* name = Acer Chromebook 13 (CB713-1W) [boards/amd64/kabylake/nami/akali360] codename = akali360 hwid-match = ^AKALI360 .* name = Acer Chromebook Spin 13 (CP713-1WN) [boards/amd64/kabylake/nami/bard] codename = bard hwid-match = ^BARD .* name = Acer Chromebook 715 (CB715-1W, CB715-1WT) [boards/amd64/kabylake/nami/ekko] codename = ekko hwid-match = ^EKKO .* name = Acer Chromebook 714 (CB714-1W, CB714-1WT) [boards/amd64/kabylake/nami/pantheon] codename = pantheon [boards/amd64/kabylake/nami/pantheon/0] hwid-match = ^PANTHEON 
[DEGHI][A-Z0-9].* name = Lenovo C340-15 Chromebook [boards/amd64/kabylake/nami/pantheon/1] hwid-match = ^PANTHEON [ABCF][A-Z0-9].* name = Lenovo Yoga C630 Chromebook [boards/amd64/kabylake/nami/sona] codename = sona hwid-match = ^SONA .* name = HP Chromebook x360 14 [boards/amd64/kabylake/nami/syndra] codename = syndra hwid-match = ^SYNDRA .* name = HP Chromebook 15 [boards/amd64/kabylake/nami/vayne] codename = vayne hwid-match = ^VAYNE .* name = Dell Inspiron Chromebook 14 2-in-1 (7486) [boards/amd64/kabylake/poppy] codename = poppy image-max-size = 32 MiB [boards/amd64/kabylake/poppy/atlas] codename = atlas hwid-match = ^ATLAS .* image-max-size = 512 MiB name = Google Pixelbook Go [boards/amd64/kabylake/poppy/eve] codename = eve hwid-match = ^EVE .* name = Google Pixelbook [boards/amd64/kabylake/poppy/nautilus] codename = nautilus [boards/amd64/kabylake/poppy/nautilus/0] hwid-match = ^NAUTILUS [A-Z2-7][2-9][A-Z2-7]-[A-Z2-7][2-9][A-Z2-7]-[A-Z2-7][2-9][Q-Z2-7].* name = Samsung Chromebook Plus (LTE) [boards/amd64/kabylake/poppy/nautilus/1] hwid-match = ^NAUTILUS [A-Z2-7][2-9][A-Z2-7]-[A-Z2-7][2-9][A-Z2-7]-[A-Z2-7][2-9][A-P].* name = Samsung Chromebook Plus (V2) [boards/amd64/kabylake/poppy/nocturne] codename = nocturne hwid-match = ^NOCTURNE .* image-max-size = 512 MiB name = Google Pixel Slate [boards/amd64/kabylake/poppy/soraka] codename = soraka hwid-match = ^SORAKA .* name = HP Chromebook x2 [boards/amd64/kabylake/rammus] codename = rammus image-max-size = 512 MiB [boards/amd64/kabylake/rammus/leona] codename = leona hwid-match = ^LEONA .* name = ASUS Chromebook C425 [boards/amd64/kabylake/rammus/shyvana] codename = shyvana [boards/amd64/kabylake/rammus/shyvana/0] hwid-match = ^SHYVANA.* ...(-...){2}-..([DHLPTX37]-.*|[CGKOSW26]-([^A]|A(2A-A)*([^2].-.|.[^A]-.|..-[^A])).*)$ name = ASUS Chromebook Flip C433 [boards/amd64/kabylake/rammus/shyvana/1] hwid-match = ^SHYVANA.* ...((-...){0,3}|(-...){2}-..([ABEFIJMNQRUVYZ45].*|[CGKOSW26]-A(2A-A)*..))$ name = ASUS Chromebook Flip C434 [boards/amd64/mendocino] codename = chipset-mendocino [boards/amd64/mendocino/skyrim] codename = skyrim image-max-size = 512 MiB [boards/amd64/mendocino/skyrim/frostflow] codename = frostflow hwid-match = ^FROSTFLOW-INHJ.* name = ASUS Chromebook CM34 Flip [boards/amd64/mendocino/skyrim/winterhold] codename = winterhold [boards/amd64/mendocino/skyrim/winterhold/whiterun] codename = whiterun hwid-match = ^WHITERUN-WPKT.* name = Dell Latitude 3445 Chromebook [boards/amd64/picasso] codename = chipset-picasso [boards/amd64/picasso/zork] codename = zork image-max-size = 512 MiB [boards/amd64/picasso/zork/berknip] codename = berknip hwid-match = ^BERKNIP-.* name = HP Pro c645 Chromebook Enterprise [boards/amd64/picasso/zork/dirinboz] codename = dirinboz hwid-match = ^DIRINBOZ-.* name = HP Chromebook 14a [boards/amd64/picasso/zork/ezkinil] codename = ezkinil hwid-match = ^EZKINIL-.* name = Acer Chromebook Spin 514 (CP514-1H, CP514-1W, CP514-1WH, CP514-1HH) [boards/amd64/picasso/zork/gumboz] codename = gumboz hwid-match = ^GUMBOZ-JPUQ.* name = HP Chromebook x360 14a [boards/amd64/picasso/zork/jelboz360] codename = jelboz360 hwid-match = ^JELBOZ360[-| ].* name = ASUS Chromebook Flip CM1 (CM1400) [boards/amd64/picasso/zork/morphius] codename = morphius hwid-match = ^MORPHIUS[-| ].* name = Lenovo ThinkPad C13 Yoga Chromebook (Enterprise) [boards/amd64/picasso/zork/vilboz] codename = vilboz hwid-match = ^VILBOZ-LKSP.* name = Lenovo 100e Chromebook Gen 3 [boards/amd64/picasso/zork/vilboz14] codename = vilboz14 hwid-match = 
VILBOZ14-GMOO .* name = Lenovo 14e Chromebook Gen 2 [boards/amd64/picasso/zork/vilboz360] codename = vilboz360 [boards/amd64/picasso/zork/vilboz360/0] hwid-match = ^VILBOZ360-ZHKO.* name = Lenovo 300e Chromebook Gen 3 [boards/amd64/picasso/zork/vilboz360/1] hwid-match = ^VILBOZ360-YQWT.* name = NEC Chromebook Y1 Gen3A [boards/amd64/picasso/zork/woomax] codename = woomax hwid-match = ^WOOMAX-.* name = ASUS Chromebook Flip CM5 [boards/amd64/pinetrail] codename = chipset-pinetrail [boards/amd64/pinetrail/alex] codename = alex hwid-match = SAMS ALEX (BETA|EPSILON|LAMBDA|NU|OMICRON|PI|THETA|XI)-.*|SAMS ALEX2 (BETA|CHI|EPSILON|LAMBDA|OMEGA|PHI|PSI|THETA)-.*|SAMS ALEXR (BETA|EPSILON|LAMDA|THETA)-.* image-max-size = 8 MiB name = Samsung Chromebook Series 5 US 3G only [boards/amd64/pinetrail/alex/he] codename = alex_he hwid-match = SAMS ALEX (ALPHA|DELTA|ETA|GAMMA|IOTA|KAPPA|MU|ZETA).*|SAMS ALEX2 (ALPHA|DELTA|ETA|GAMMA|IOTA|MU|RHO|SIGMA|TAU|UPSILON|ZETA).*|SAMS ALEXR (ALPHA|DELTA|ETA|KAPPA).* image-max-size = 8 MiB name = Samsung Chromebook Series 5 [boards/amd64/pinetrail/mario] codename = mario hwid-match = ^IEC MARIO .* image-max-size = 8 MiB name = Google Cr-48 [boards/amd64/pinetrail/zgb] codename = zgb hwid-match = ACER ZGB (DOGFOOD|EVT|GAMMA|IOTA|PVT GAMMA|RAMP GAMMA|ZETA).* image-max-size = 8 MiB name = Acer AC700-1529 3G [boards/amd64/pinetrail/zgb/he] codename = zgb_he hwid-match = ACER ZGB (ALPHA|BETA|DELTA|EPSILON|ETA|PVT ALPHA|PVT BETA|RAMP ALPHA|RAMP BETA|THETA).* image-max-size = 8 MiB name = Acer AC700-1099 Wifi [boards/amd64/reven] codename = reven hwid-match = ^REVEN($|-.*) name = Chrome OS Flex [boards/amd64/sandybridge] codename = chipset-snb [boards/amd64/sandybridge/butterfly] codename = butterfly hwid-match = ^BUTTERFLY .* image-max-size = 8 MiB name = HP Pavilion Chromebook 14 [boards/amd64/sandybridge/lumpy] codename = lumpy hwid-match = ^LUMPY .* image-max-size = 8 MiB name = Samsung Chromebook Series 5 550 [boards/amd64/sandybridge/parrot] codename = parrot hwid-match = ^PARROT .*-[ABCDFGH] .* image-max-size = 8 MiB name = Acer C7 Chromebook [boards/amd64/sandybridge/stumpy] codename = stumpy hwid-match = ^STUMPY .* image-max-size = 8 MiB name = Samsung Chromebox Series 3 [boards/amd64/skylake] codename = chipset-skl [boards/amd64/skylake/glados] codename = glados image-max-size = 32 MiB [boards/amd64/skylake/glados/caroline] codename = caroline hwid-match = ^CAROLINE .* name = Samsung Chromebook Pro [boards/amd64/skylake/glados/cave] codename = cave hwid-match = ^CAVE .* name = ASUS Chromebook Flip C302 [boards/amd64/skylake/glados/chell] codename = chell hwid-match = ^CHELL .* name = HP Chromebook 13 G1 [boards/amd64/skylake/kunimitsu] codename = kunimitsu image-max-size = 32 MiB [boards/amd64/skylake/kunimitsu/asuka] codename = asuka hwid-match = ^ASUKA .* name = Dell Chromebook 13 (3380) [boards/amd64/skylake/kunimitsu/lars] codename = lars [boards/amd64/skylake/kunimitsu/lars/0] hwid-match = ^LARS [DE]\w{2}-\w{3}-\w{3}-.3.* name = Acer Chromebook 11 (C771, C771T) [boards/amd64/skylake/kunimitsu/lars/1] hwid-match = ^LARS [ABCF-Z].* name = Acer Chromebook 14 for Work (CP5-471) [boards/amd64/skylake/kunimitsu/sentry] codename = sentry hwid-match = ^SENTRY .* name = Lenovo ThinkPad 13 [boards/amd64/stoneyridge] codename = chipset-stnyridge [boards/amd64/stoneyridge/grunt] codename = grunt image-max-size = 512 MiB [boards/amd64/stoneyridge/grunt/aleena] codename = aleena hwid-match = ^ALEENA[-| ].* name = Acer Chromebook 315 (CB315-2H) 
[boards/amd64/stoneyridge/grunt/barla] codename = barla hwid-match = ^BARLA[-| ].* name = HP Chromebook 11A G6 EE, 11A G8 EE [boards/amd64/stoneyridge/grunt/careena] codename = careena hwid-match = ^CAREENA[-| ].* name = HP Chromebook 14 db0000-db0999, 14A G5 [boards/amd64/stoneyridge/grunt/kasumi] codename = kasumi hwid-match = ^KASUMI[-| ].* name = Acer Chromebook 311 (C721) [boards/amd64/stoneyridge/grunt/kasumi360] codename = kasumi360 hwid-match = ^KASUMI360[-| ].* name = Acer Chromebook Spin 311 (R721T) [boards/amd64/stoneyridge/grunt/liara] codename = liara hwid-match = ^LIARA[-| ].* name = Lenovo 14e Chromebook, S345-14 [boards/amd64/stoneyridge/grunt/treeya] codename = treeya [boards/amd64/stoneyridge/grunt/treeya/0] hwid-match = ^TREEYA .*|^TREEYA-BAUA .* name = Lenovo 100e Gen 2 AST [boards/amd64/stoneyridge/grunt/treeya/1] hwid-match = ^TREEYA-QCDF .* name = Lenovo IdeaPad 3 Chromebook (11", 5) [boards/amd64/stoneyridge/grunt/treeya360] codename = treeya360 [boards/amd64/stoneyridge/grunt/treeya360/0] hwid-match = ^TREEYA360 .*|^TREEYA360-XFUX .* name = Lenovo 300e Gen 2 AST [boards/amd64/stoneyridge/grunt/treeya360/1] hwid-match = ^TREEYA360-TZIV .* name = NEC Chromebook Y1 Gen2A [boards/amd64/tigerlake] codename = chipset-tgl [boards/amd64/tigerlake/volteer] codename = volteer image-max-size = 512 MiB [boards/amd64/tigerlake/volteer/chronicler] codename = chronicler hwid-match = ^CHRONICLER-FYSO.* name = FMV Chromebook 14F, WM1/F3 [boards/amd64/tigerlake/volteer/collis] codename = collis hwid-match = ^COLLIS-WMMD.* name = ASUS Chromebook Flip CX3 [boards/amd64/tigerlake/volteer/copano] codename = copano hwid-match = ^COPANO-MRFF.* name = ASUS Chromebook Flip CX5 (CX5400) [boards/amd64/tigerlake/volteer/delbin] codename = delbin [boards/amd64/tigerlake/volteer/delbin/0] hwid-match = ^DELBIN-XHVI.* name = ASUS Chromebook Flip CX5 (CX5500), C536 [boards/amd64/tigerlake/volteer/delbin/1] hwid-match = ^DELBIN-NHYA.* name = ASUS Chromebook Vibe CX55 Flip [boards/amd64/tigerlake/volteer/drobit] codename = drobit hwid-match = ^DROBIT-QSIM.* name = ASUS Chromebook CX9 (CX9400) [boards/amd64/tigerlake/volteer/eldrid] codename = eldrid hwid-match = ^ELDRID-FCPG.* name = HP Chromebook x360 14c [boards/amd64/tigerlake/volteer/elemi] codename = elemi hwid-match = ^ELEMI-NZRH.* name = HP Pro c640 G2 Chromebook [boards/amd64/tigerlake/volteer/lindar] codename = lindar [boards/amd64/tigerlake/volteer/lindar/0] hwid-match = ^LINDAR-EDFZ.* name = Lenovo 5i-14 Chromebook [boards/amd64/tigerlake/volteer/lindar/1] hwid-match = ^LINDAR-LCDF.* name = Lenovo Slim 5 Chromebook [boards/amd64/tigerlake/volteer/lindar/lillipup] codename = lillipup hwid-match = ^LILLIPUP-MQUZ.* name = Lenovo IdeaPad Flex 5i Chromebook (13", 6) [boards/amd64/tigerlake/volteer/voema] codename = voema hwid-match = ^VOEMA-DHAS .* name = Acer Chromebook Spin 514 (CP514-2H) [boards/amd64/tigerlake/volteer/volet] codename = volet hwid-match = ^VOLET-XIYN.*|^VOLET-ZZCR.* name = Acer Chromebook 515 (CB515-1W, CB515-1WT) [boards/amd64/tigerlake/volteer/voxel] codename = voxel hwid-match = ^VOXEL-GFMQ.* name = Acer Chromebook Spin 713 (CP713-3W) [boards/amd64/tigerlake/volteer/voxel/volta] codename = volta hwid-match = ^VOLTA-OIFF .* name = Acer Chromebook 514 (CB514-1W, CB514-1WT) [boards/amd64/whiskeylake] codename = chipset-whl [boards/amd64/whiskeylake/sarien] codename = sarien hwid-match = ^SARIEN-.* image-max-size = 512 MiB name = Dell Latitude 5400 Chromebook Enterprise [boards/amd64/whiskeylake/sarien/arcada] codename = 
arcada hwid-match = ^ARCADA-.* name = Dell Latitude 5300 2-in-1 Chromebook Enterprise [boards/arm] arch = arm boots-lz4-kernel = False boots-lzma-kernel = False codename = arm-generic dt-compatible = google,.* image-format = fit [boards/arm/exynos5] codename = chipset-exynos5 dt-compatible = samsung,exynos5 [boards/arm/exynos5/daisy] codename = daisy dt-compatible = google,daisy image-max-size = 8 MiB image-start-address = 0x42000000 fit-ramdisk-load-address = 0x50000000 loads-dtb-off-by-one = True loads-fit-ramdisk = True [boards/arm/exynos5/daisy/skate] codename = skate dt-compatible = google,skate hwid-match = ^SKATE .* name = HP Chromebook 11 G2 [boards/arm/exynos5/daisy/snow] codename = snow dt-compatible = google,snow hwid-match = ^SNOW .* name = Samsung Chromebook (XE303C12) [boards/arm/exynos5/daisy/spring] codename = spring dt-compatible = google,spring hwid-match = ^SPRING .* name = HP Chromebook 11 G1 [boards/arm/exynos5/peach] codename = peach dt-compatible = google,peach image-max-size = 8 MiB image-start-address = 0x20008000 fit-ramdisk-load-address = 0x44000000 loads-dtb-off-by-one = True loads-fit-ramdisk = True [boards/arm/exynos5/peach/pi] codename = pi dt-compatible = google,pi hwid-match = ^PI .* name = Samsung Chromebook 2 13" (XE503C32) [boards/arm/exynos5/peach/pit] codename = pit dt-compatible = google,pit hwid-match = ^PIT ([^TEST.*]).* name = Samsung Chromebook 2 11" (XE503C12) [boards/arm/ipq4019] codename = chipset-ipq4019 dt-compatible = qcom,ipq4019 [boards/arm/ipq4019/gale] codename = gale dt-compatible = google,gale hwid-match = ^GALE .* image-max-size = 32 MiB name = Google WiFi loads-fit-ramdisk = False [boards/arm/ipq8064] codename = chipset-ipq8064 dt-compatible = qcom,ipq8064 [boards/arm/ipq8064/storm] codename = storm dt-compatible = google,storm image-max-size = 16 MiB image-start-address = 0x44000000 loads-fit-ramdisk = False [boards/arm/ipq8064/storm/arkham] codename = arkham dt-compatible = google,arkham hwid-match = ^ARKHAM .* name = OnHub SRT-AC1900 [boards/arm/ipq8064/storm/whirlwind] codename = whirlwind dt-compatible = google,whirlwind hwid-match = ^WHIRLWIND .* name = OnHub Router TGR1900 [boards/arm/rk3288] codename = chipset-rk3288 dt-compatible = rockchip,rk3288 [boards/arm/rk3288/veyron] codename = veyron dt-compatible = google,veyron image-max-size = 16 MiB image-start-address = 0x2000000 loads-fit-ramdisk = False [boards/arm/rk3288/veyron/fievel] codename = fievel dt-compatible = google,veyron-fievel hwid-match = ^FIEVEL .* name = AOpen Chromebox Mini [boards/arm/rk3288/veyron/jaq] codename = jaq dt-compatible = google,veyron-jaq [boards/arm/rk3288/veyron/jaq/0] hwid-match = ^JAQ \w{3}-\w{3}-\w{3}-A\w{2}(-\w{3})*$ name = Haier Chromebook 11 [boards/arm/rk3288/veyron/jaq/1] hwid-match = ^JAQ \w{3}-\w{3}-\w{3}-C\w{2}(-\w{3})*$ name = Lava Xolo Chromebook [boards/arm/rk3288/veyron/jaq/2] hwid-match = ^JAQ \w{3}-\w{3}-\w{3}-D\w{2}(-\w{3})*$ name = Medion Chromebook Akoya S2013 [boards/arm/rk3288/veyron/jaq/3] hwid-match = ^JAQ \w{3}-\w{3}-\w{3}-B\w{2}(-\w{3})*$ name = True IDC Chromebook 11 [boards/arm/rk3288/veyron/jerry] codename = jerry dt-compatible = google,veyron-jerry [boards/arm/rk3288/veyron/jerry/0] hwid-match = ^JERRY \w{3}-\w{3}-\w{3}-B\w{2}.* name = CTL J2, J4 Chromebook [boards/arm/rk3288/veyron/jerry/1] hwid-match = ^JERRY \w{3}-\w{3}-\w{3}-D\w{2}(-\w{3})*$ name = EduGear Chromebook K [boards/arm/rk3288/veyron/jerry/2] hwid-match = ^JERRY \w{3}-\w{3}-\w{3}-F\w{2}(-\w{3})*$ name = Epik 11.6" Chromebook ELB1101 
[boards/arm/rk3288/veyron/jerry/3] hwid-match = ^JERRY \w{3}-\w{3}-\w{3}-A\w{2}(-\w{3})*$ name = HiSense Chromebook 11 [boards/arm/rk3288/veyron/jerry/4] hwid-match = ^JERRY \w{3}-\w{3}-\w{3}-H\w{2}(-\w{3})*$ name = Mecer Chromebook [boards/arm/rk3288/veyron/jerry/5] hwid-match = ^JERRY \w{3}-\w{3}-\w{3}-E\w{2}(-\w{3})*$ name = NComputing Chromebook CX100 [boards/arm/rk3288/veyron/jerry/6] hwid-match = ^JERRY \w{3}-\w{3}-\w{3}-C\w{2}.* name = Poin2 Chromebook 11 [boards/arm/rk3288/veyron/jerry/7] hwid-match = ^JERRY \w{3}-\w{3}-\w{3}-G\w{2}(-\w{3})*$ name = Positivo Chromebook CH1190 [boards/arm/rk3288/veyron/jerry/8] hwid-match = ^JERRY \w{3}-\w{3}-\w{3}-I\w{2}(-\w{3})*$ name = VideoNet Chromebook BL10 [boards/arm/rk3288/veyron/mickey] codename = mickey dt-compatible = google,veyron-mickey hwid-match = ^MICKEY .* name = ASUS Chromebit CS10 [boards/arm/rk3288/veyron/mighty] codename = mighty dt-compatible = google,veyron-mighty [boards/arm/rk3288/veyron/mighty/0] hwid-match = ^MIGHTY \w{3}-\w{3}-\w{3}-D\w{2}(-\w{3})*$ name = EduGear Chromebook M [boards/arm/rk3288/veyron/mighty/1] hwid-match = ^MIGHTY \w{3}-\w{3}-\w{3}-A\w{2}(-\w{3})*$ name = Haier Chromebook 11e [boards/arm/rk3288/veyron/mighty/2] hwid-match = ^MIGHTY \w{3}-\w{3}-\w{3}-H\w{2}(-\w{3})*$ name = Lumos Education Chromebook [boards/arm/rk3288/veyron/mighty/3] hwid-match = ^MIGHTY \w{3}-\w{3}-\w{3}-I\w{2}(-\w{3})*$ name = MEDION Chromebook S2015 [boards/arm/rk3288/veyron/mighty/4] hwid-match = ^MIGHTY \w{3}-\w{3}-\w{3}-B\w{2}(-\w{3})*$ name = Nexian Chromebook 11.6" [boards/arm/rk3288/veyron/mighty/5] hwid-match = ^MIGHTY \w{3}-\w{3}-\w{3}-G\w{2}(-\w{3})*$ name = PCMerge Chromebook PCM-116E/PCM-116EB [boards/arm/rk3288/veyron/mighty/6] hwid-match = ^MIGHTY \w{3}-\w{3}-\w{3}-J\w{2}(-\w{3})*$ name = Prowise Chromebook Entryline [boards/arm/rk3288/veyron/mighty/7] hwid-match = ^MIGHTY \w{3}-\w{3}-\w{3}-E\w{2}(-\w{3})*$ name = Sector 5 E1 Rugged Chromebook [boards/arm/rk3288/veyron/mighty/8] hwid-match = ^MIGHTY \w{3}-\w{3}-\w{3}-F\w{2}(-\w{3})*$ name = Viglen Chromebook 11 [boards/arm/rk3288/veyron/minnie] codename = minnie dt-compatible = google,veyron-minnie hwid-match = ^MINNIE .* name = ASUS Chromebook Flip C100PA [boards/arm/rk3288/veyron/speedy] codename = speedy dt-compatible = google,veyron-speedy hwid-match = ^SPEEDY .* name = ASUS Chromebook C201PA [boards/arm/rk3288/veyron/speedy/libreboot] codename = speedy-libreboot hwid-match = None image-max-size = 32 MiB loads-fit-ramdisk = True name = ASUS Chromebook C201PA (Libreboot) [boards/arm/rk3288/veyron/tiger] codename = tiger dt-compatible = google,veyron-tiger hwid-match = ^TIGER .* name = AOpen Chromebase Mini [boards/arm/tegra124] codename = chipset-tegra124 dt-compatible = nvidia,tegra124 [boards/arm/tegra124/nyan] codename = nyan dt-compatible = google,nyan image-max-size = 16 MiB image-start-address = 0x81000000 loads-fit-ramdisk = False [boards/arm/tegra124/nyan/big] codename = big dt-compatible = google,nyan-big hwid-match = ^BIG .* name = Acer Chromebook 13 (CB5-311, C810) [boards/arm/tegra124/nyan/blaze] codename = blaze dt-compatible = google,nyan-blaze hwid-match = ^BLAZE .* name = HP Chromebook 14 G3 [boards/arm/tegra124/nyan/kitty] codename = kitty dt-compatible = google,nyan-kitty hwid-match = ^KITTY .* name = Acer Chromebase [boards/arm64] arch = arm64 boots-lz4-kernel = True boots-lzma-kernel = True codename = arm64-generic dt-compatible = google,.* image-format = fit loads-fit-ramdisk = True [boards/arm64/mt8173] codename = chipset-mt8173 dt-compatible = 
mediatek,mt8173 [boards/arm64/mt8173/oak] codename = oak dt-compatible = google,oak image-max-size = 32 MiB [boards/arm64/mt8173/oak/elm] codename = elm dt-compatible = google,elm hwid-match = ^ELM .* name = Acer Chromebook R13 (CB5-312T) [boards/arm64/mt8173/oak/hana] codename = hana dt-compatible = google,hana [boards/arm64/mt8173/oak/hana/0] hwid-match = ^HANA ([A-Z0-9])+-([A-Z0-9])+-[A-Z0-9][A-Z0-9]J-([A-Z0-9\\-])+ name = ASUS Chromebook C202XA [boards/arm64/mt8173/oak/hana/1] hwid-match = ^HANA ([A-Z0-9])+-([A-Z0-9])+-[A-Z0-9][A-Z0-9]H-([A-Z0-9\-])+ name = Lenovo 100e Chromebook 2nd Gen MTK [boards/arm64/mt8173/oak/hana/2] hwid-match = ^HANA ([A-Z0-9])+-([A-Z0-9])+-[A-Z0-9][A-Z0-9]I-([A-Z0-9\-])+ name = Lenovo 300e Chromebook 2nd Gen MTK [boards/arm64/mt8173/oak/hana/3] hwid-match = ^HANA ([A-Z0-9])+-([A-Z0-9])+-[A-Z0-9][A-Z0-9]A-([A-Z0-9\-])+ name = Lenovo 300e/N23 Yoga/Flex 11 Chromebook [boards/arm64/mt8173/oak/hana/4] hwid-match = ^HANA ([A-Z0-9])+-([A-Z0-9])+-[A-Z0-9][A-Z0-9]E-([A-Z0-9\-])+ name = Lenovo Ideapad C330 Chromebook [boards/arm64/mt8173/oak/hana/5] hwid-match = ^HANA ([A-Z0-9])+-([A-Z0-9])+-[A-Z0-9][A-Z0-9]F-([A-Z0-9\-])+ name = Lenovo Ideapad S330 Chromebook [boards/arm64/mt8173/oak/hana/6] hwid-match = ^HANA ([A-Z0-9])+-([A-Z0-9])+-[A-Z0-9][A-Z0-9][CD]-([A-Z0-9\-])+ name = Poin2 Chromebook 11C [boards/arm64/mt8173/oak/hana/7] hwid-match = ^HANA ([A-Z0-9])+-([A-Z0-9])+-[A-Z0-9][A-Z0-9]B-([A-Z0-9\-])+ name = Poin2 Chromebook 14 [boards/arm64/mt8173/oak/hana/8] hwid-match = ^HANA ([A-Z0-9])+-([A-Z0-9])+-[A-Z0-9][A-Z0-9]G-([A-Z0-9\-])+ name = Prowise Chromebook Eduline (360) [boards/arm64/mt8183] codename = chipset-mt8183 dt-compatible = mediatek,mt8183 [boards/arm64/mt8183/kukui] codename = kukui dt-compatible = google,kukui image-max-size = 512 MiB [boards/arm64/mt8183/kukui/jacuzzi] codename = jacuzzi dt-compatible = google,jacuzzi [boards/arm64/mt8183/kukui/jacuzzi/burnet] codename = burnet dt-compatible = google,burnet hwid-match = ^BURNET-.* name = HP Chromebook x360 11MK G3 EE [boards/arm64/mt8183/kukui/jacuzzi/burnet/esche] codename = esche hwid-match = ^ESCHE-.* name = HP Chromebook 11MK G9 EE [boards/arm64/mt8183/kukui/jacuzzi/damu] codename = damu dt-compatible = google,damu hwid-match = ^DAMU.* name = ASUS Chromebook Flip CM3200FVA [boards/arm64/mt8183/kukui/jacuzzi/fennel] codename = fennel dt-compatible = google,fennel hwid-match = ^FENNEL-.* name = Lenovo IdeaPad Flex 3 Chromebook [boards/arm64/mt8183/kukui/jacuzzi/fennel14] codename = fennel14 dt-compatible = google,fennel hwid-match = ^FENNEL14-.* name = Lenovo IdeaPad 3 Chromebook [boards/arm64/mt8183/kukui/jacuzzi/icarus] codename = icarus [boards/arm64/mt8183/kukui/jacuzzi/icarus/cozmo] codename = cozmo dt-compatible = google,cozmo hwid-match = ^COZMO.* name = Acer Chromebook 314 (CB314-2H, CB314-2HT) [boards/arm64/mt8183/kukui/jacuzzi/icarus/pico] codename = pico dt-compatible = google,pico hwid-match = ^PICO-EXEM.* name = Acer Chromebook Spin 311 - R722T [boards/arm64/mt8183/kukui/jacuzzi/juniper] codename = juniper dt-compatible = google,juniper hwid-match = ^JUNIPER-HVPU .* name = Acer Chromebook Spin 311 (CP311-3H) [boards/arm64/mt8183/kukui/jacuzzi/juniper/kenzo] codename = kenzo hwid-match = ^KENZO-IGRW.* name = Acer Chromebook 311 (CB311-11H/CB311-11HT) [boards/arm64/mt8183/kukui/jacuzzi/kappa] codename = kappa dt-compatible = google,kappa [boards/arm64/mt8183/kukui/jacuzzi/kappa/0] hwid-match = ^KAPPA-EWFK .* name = HP Chromebook 11.6 inch [boards/arm64/mt8183/kukui/jacuzzi/kappa/1] 
hwid-match = ^KAPPA-.* name = HP Chromebook 11a na0xxx [boards/arm64/mt8183/kukui/jacuzzi/makomo] codename = makomo dt-compatible = google,makomo hwid-match = ^MAKOMO-UTTX .* name = Lenovo 100e Gen 2 [boards/arm64/mt8183/kukui/jacuzzi/willow] codename = willow dt-compatible = google,willow [boards/arm64/mt8183/kukui/jacuzzi/willow/0] hwid-match = ^WILLOW-ZZCR .* name = Acer Chromebook 311 (C722, C722T) [boards/arm64/mt8183/kukui/jacuzzi/willow/1] hwid-match = ^WILLOW-TFIY .* name = Acer Chromebook 311 (C722, C722T) [boards/arm64/mt8183/kukui/kakadu] codename = kakadu dt-compatible = google,kadaku hwid-match = ^KAKADU-WFIQ.* name = ASUS Chromebook Detachable CM3 [boards/arm64/mt8183/kukui/katsu] codename = katsu dt-compatible = google,katsu hwid-match = ^KATSU.* name = ASUS Chromebook Detachable CZ1 [boards/arm64/mt8183/kukui/kodama] codename = kodama dt-compatible = google,kodama hwid-match = ^KODAMA.* name = Lenovo 10e Chromebook Tablet [boards/arm64/mt8183/kukui/krane] codename = krane dt-compatible = google,krane hwid-match = ^KRANE-ZDKS .* name = Lenovo (IdeaPad) Chromebook Duet [boards/arm64/mt8186] codename = chipset-mt8186 dt-compatible = mediatek,mt8186 [boards/arm64/mt8186/corsola] codename = corsola dt-compatible = google,corsola image-max-size = 512 MiB [boards/arm64/mt8186/corsola/steelix] codename = steelix hwid-match = ^STEELIX-VZSZ.* name = Lenovo 300e Yoga Chromebook Gen 4 [boards/arm64/mt8186/corsola/steelix/magneton] codename = magneton hwid-match = ^MAGNETON-LCKC.* name = Lenovo IP Slim 3 Chrome 14M868 [boards/arm64/mt8186/corsola/steelix/rusty] codename = rusty hwid-match = ^RUSTY-ZNCE.* name = Lenovo 100e Chromebook Gen 4 [boards/arm64/mt8186/corsola/tentacruel] codename = tentacruel hwid-match = ^TENTACRUEL-VAFH.* name = ASUS Chromebook CM14 Flip (CM1402F) [boards/arm64/mt8186/corsola/tentacruel/tentacool] codename = tentacool hwid-match = ^TENTACOOL-ZLJE.* name = ASUS Chromebook CM14 (CM1402C) [boards/arm64/mt8192] codename = chipset-mt8192 dt-compatible = mediatek,mt8192 [boards/arm64/mt8192/asurada] codename = asurada dt-compatible = google,asurada image-max-size = 512 MiB [boards/arm64/mt8192/asurada/hayato] codename = hayato dt-compatible = google,hayato hwid-match = ^HAYATO-YLRO.* name = ASUS Chromebook Flip CM3200FM1A [boards/arm64/mt8192/asurada/spherion] codename = spherion dt-compatible = google,spherion hwid-match = ^SPHERION.* name = Acer Chromebook 514 (CB514-2H, CB514-2HT) [boards/arm64/mt8195] codename = chipset-mt8195 dt-compatible = mediatek,mt8195 [boards/arm64/mt8195/cherry] codename = cherry dt-compatible = google,cherry image-max-size = 512 MiB [boards/arm64/mt8195/cherry/dojo] codename = dojo dt-compatible = google,dojo hwid-match = ^DOJO-EJPG.* name = HP Chromebook x360 13.3" (13b-ca0xxx) [boards/arm64/mt8195/cherry/tomato] codename = tomato dt-compatible = google,tomato hwid-match = ^TOMATO-LYVN.* name = ACER Chromebook (Enterprise) Spin 513 (CP513-2H) [boards/arm64/qc7180] codename = chipset-qc7180 dt-compatible = qcom,sc7180 [boards/arm64/qc7180/trogdor] codename = trogdor dt-compatible = google,trogdor image-max-size = 512 MiB [boards/arm64/qc7180/trogdor/kingoftown] codename = kingoftown dt-compatible = google,kingoftown hwid-match = ^KINGOFTOWN-KDDA.* name = HP Fortis 11 G9 Q Chromebook (Enterprise) [boards/arm64/qc7180/trogdor/lazor] codename = lazor dt-compatible = google,lazor(-rev\d+)?(-sku[02])? 
hwid-match = ^LAZOR.* name = Acer Chromebook Spin 513 (CP513-1H/1HL, R841T/LT) [boards/arm64/qc7180/trogdor/lazor/limozeen] codename = limozeen dt-compatible = google,lazor(-rev\d+)?(-sku[456])? hwid-match = ^LIMOZEEN.* name = Acer Chromebook 511 (C741L/C741LT) [boards/arm64/qc7180/trogdor/pazquel] codename = pazquel dt-compatible = google,pazquel [boards/arm64/qc7180/trogdor/pazquel/0] hwid-match = ^PAZQUEL-HGNV .* name = Libera-Merdeka Chromebook C100/C110/C120/C150 [boards/arm64/qc7180/trogdor/pazquel/1] hwid-match = ^PAZQUEL-OPNA .* name = Orbic Chromebook [boards/arm64/qc7180/trogdor/pompom] codename = pompom dt-compatible = google,pompom hwid-match = ^POMPOM-MZVS .* name = Dynabook Chromebook C1 [boards/arm64/qc7180/trogdor/strongbad] codename = strongbad dt-compatible = google,strongbad [boards/arm64/qc7180/trogdor/strongbad/coachz] codename = coachz dt-compatible = google,coachz hwid-match = ^COACHZ.* name = HP Chromebook x2 11c [boards/arm64/qc7180/trogdor/strongbad/homestar] codename = homestar dt-compatible = google,homestar hwid-match = ^HOMESTAR-MBLE.* name = Lenovo (IdeaPad) Chromebook Duet 5 [boards/arm64/qc7180/trogdor/strongbad/quackingstick] codename = quackingstick dt-compatible = google,quackingstick hwid-match = ^QUACKINGSTICK.* name = Acer Chromebook (Enterprise) Tab 510 [boards/arm64/qc7180/trogdor/strongbad/wormdingler] codename = wormdingler dt-compatible = google,wormdingler hwid-match = ^WORMDINGLER-JQAO.* name = Lenovo (IdeaPad) Chromebook Duet 3 (Education Edition) [boards/arm64/qcs404] codename = chipset-qcs404 dt-compatible = qcom,qcs404 [boards/arm64/qcs404/mistral] codename = mistral dt-compatible = google,mistral image-max-size = 512 MiB name = Nest Wifi router [boards/arm64/rk3399] codename = chipset-rk3399 dt-compatible = rockchip,rk3399 [boards/arm64/rk3399/gru] codename = gru dt-compatible = google,gru image-max-size = 32 MiB [boards/arm64/rk3399/gru/bob] codename = bob dt-compatible = google,bob hwid-match = ^BOB .* name = ASUS Chromebook Flip C101PA [boards/arm64/rk3399/gru/kevin] codename = kevin dt-compatible = google,kevin hwid-match = ^KEVIN .* name = Samsung Chromebook Plus (XE513C24) [boards/arm64/rk3399/gru/scarlet] codename = scarlet dt-compatible = google,scarlet hwid-match = ^SCARLET .* name = Acer Chromebook Tab 10 [boards/arm64/rk3399/gru/scarlet/dru] codename = dru hwid-match = ^DRU .* name = Acer Chromebook Tab 10 (D651N, D650N) [boards/arm64/rk3399/gru/scarlet/druwl] codename = druwl [boards/arm64/rk3399/gru/scarlet/druwl/0] hwid-match = ^DRUWL [A-Z0-9][A-Z0-9]D.* name = AOpen Chromebook Commercial Tab [boards/arm64/rk3399/gru/scarlet/druwl/1] hwid-match = ^DRUWL [A-Z0-9][A-Z0-9]C.* name = CTL Chromebook Tablet Tx1 for Education [boards/arm64/rk3399/gru/scarlet/druwl/2] hwid-match = ^DRUWL [A-Z0-9][A-Z0-9]B.* [boards/arm64/rk3399/gru/scarlet/dumo] codename = dumo hwid-match = ^DUMO .* name = ASUS Chromebook Tablet CT100 [boards/arm64/tegra210] codename = chipset-tegra210 dt-compatible = nvidia,tegra210 [boards/arm64/tegra210/foster] codename = foster dt-compatible = nvidia,foster image-max-size = 32 MiB name = NVIDIA Shield TV [boards/arm64/tegra210/smaug] codename = smaug dt-compatible = google,smaug image-max-size = 32 MiB name = Google Pixel C depthcharge-tools-0.6.2/depthcharge_tools/config.ini000066400000000000000000000011641444761253100226100ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools configuration # Copyright (C) 2021-2023 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for 
full copyright information. [depthcharge-tools] enable-system-hooks = True #vboot-keyblock = /etc/depthcharge-tools/kernel.keyblock #vboot-private-key = /etc/depthcharge-tools/kernel_data_key.vbprivk #vboot-public-key = /etc/depthcharge-tools/kernel_subkey.vbpubk [depthchargectl] #board = ignore-initramfs = False images-dir = /boot/depthcharge #kernel-cmdline = console=tty0 quiet splash zimage-initramfs-hack = set-init-size depthcharge-tools-0.6.2/depthcharge_tools/depthchargectl/000077500000000000000000000000001444761253100236215ustar00rootroot00000000000000depthcharge-tools-0.6.2/depthcharge_tools/depthchargectl/__init__.py000066400000000000000000000646361444761253100257510ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools depthchargectl command # Copyright (C) 2020-2023 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. import argparse import collections import configparser import copy import glob import logging import os import platform import re import shlex import tempfile from pathlib import Path from depthcharge_tools import ( __version__, config_ini, boards_ini, config_files, ) from depthcharge_tools.utils.argparse import ( Command, Argument, Group, Subparsers, ) from depthcharge_tools.utils.collections import ( ConfigDict, ) from depthcharge_tools.utils.os import ( Disks, ) from depthcharge_tools.utils.platform import ( Architecture, vboot_keys, cros_hwid, dt_compatibles, is_cros_boot, is_cros_libreboot, kernel_cmdline, proc_cmdline, ) from depthcharge_tools.utils.string import ( parse_bytesize, ) class Board: def __init__(self, config): self._config = config @property def name(self): name = self._config.get("name") if name is None: name = "Unnamed {} board".format(self.codename or 'unknown') return name @property def codename(self): return self._config.get("codename") @property def arch(self): return Architecture(self._config.get("arch")) @property def dt_compatible(self): pattern = self._config.get("dt-compatible") # Try to detect non-regex values and extend them to match any # rev/sku, but if a rev/sku is given match only the given one. 
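# For example, a plain value such as "google,hana" would be extended to
# "google,hana(-rev\d+)?(-sku\d+)?", while "google,hana-rev7" (illustrative
# revision) only gains the optional sku suffix. Anything containing
# characters outside [\w,-] is assumed to already be a regex and is
# compiled as-is.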
if pattern and re.fullmatch("[\w,-]+", pattern): prefix, rev, sku = re.fullmatch( "(.*?)(-rev\d+)?(-sku\d+)?", pattern, ).groups() pattern = "{}{}{}".format( prefix, rev or "(-rev\d+)?", sku or "(-sku\d+)?", ) if pattern: return re.compile(pattern) @property def hwid_match(self): pattern = self._config.get("hwid-match") if pattern in (None, "None", "none"): return None if pattern: return re.compile(pattern) @property def boots_lz4_kernel(self): return self._config.getboolean("boots-lz4-kernel", False) @property def boots_lzma_kernel(self): return self._config.getboolean("boots-lzma-kernel", False) @property def loads_zimage_ramdisk(self): return self._config.getboolean("loads-zimage-ramdisk", False) @property def loads_fit_ramdisk(self): return self._config.getboolean("loads-fit-ramdisk", False) @property def loads_dtb_off_by_one(self): return self._config.getboolean("loads-dtb-off-by-one", False) @property def fit_ramdisk_load_address(self): addr = self._config.get("fit-ramdisk-load-address", None) return parse_bytesize(addr) @property def image_start_address(self): addr = self._config.get("image-start-address", None) return parse_bytesize(addr) @property def image_max_size(self): max_size = self._config.get("image-max-size") if max_size in (None, "None", "none"): return float("inf") return parse_bytesize(max_size) @property def image_format(self): return self._config.get("image-format") class depthchargectl( Command, prog="depthchargectl", usage="%(prog)s [options] command ...", add_help=False, ): """Manage Chrome OS kernel partitions.""" _logger = logging.getLogger(__name__) config_section = "depthchargectl" @property def logger(self): # Set verbosity before logging messages self.verbosity return self._logger @Group def global_options(self): """Global options""" @global_options.add @Argument("-h", "--help", action="help") def print_help(self): """Show this help message.""" # type(self).parser.print_help() @global_options.add @Argument( "-V", "--version", action="version", version="depthcharge-tools %(prog)s {}".format(__version__), ) def version(self): """Print program version.""" return type(self).version.version % {"prog": type(self).prog} @global_options.add @Argument("-v", "--verbose", count=True) def verbosity(self, verbosity=0): """Print more detailed output.""" level = logging.WARNING - int(verbosity) * 10 self._logger.setLevel(level) return verbosity @global_options.add @Argument("--tmpdir", nargs=1) def tmpdir(self, dir_=None): """Directory to keep temporary files.""" if dir_ is None: dir_ = tempfile.TemporaryDirectory( prefix="depthchargectl-", ) dir_ = self.exitstack.enter_context(dir_) dir_ = Path(dir_) os.makedirs(dir_, exist_ok=True) self.logger.debug("Working in temp dir '{}'.".format(dir_)) return dir_ @global_options.add @Argument("--root", nargs=1) def root(self, root=None): """Root device or mountpoint of the system to work on.""" if root is None: cmdline = self.kernel_cmdline for c in cmdline: lhs, _, rhs = c.partition("=") if lhs.lower() == "root": root = rhs if root: self.logger.info( "Using root '{}' set in cmdline." .format(root) ) return str(root) self.logger.info( "Defaulting to current system root '/'." ) return Path("/").resolve() if os.path.ismount(Path(root).resolve()): self.logger.info( "Using root argument '{}' as the system to work on." .format(root) ) return Path(root).resolve() if root in (None, "", "none", "None"): self.logger.info( "Using no root argument for the kernel cmdline." 
.format(root) ) else: self.logger.info( "Using root argument '{}' as a device description." .format(root) ) return str(root) @global_options.add @Argument("--root-mountpoint", nargs=1, metavar="DIR") def root_mountpoint(self, mnt=None): """Root mountpoint of the system to work on.""" if mnt: mnt = Path(mnt).resolve() self.logger.info( "Using root mountpoint '{}' from given argument." .format(mnt) ) return mnt if self.root in ("", "None", "none", None): return Path("/").resolve() if isinstance(self.root, Path): return self.root disk = self.diskinfo.evaluate(self.root) mountpoints = sorted( self.diskinfo.mountpoints(disk), key=lambda p: len(p.parents), ) if len(mountpoints) > 1: mnt = mountpoints[0] self.logger.warning( "Choosing '{}' from multiple root mountpoints: {}." .format(mnt, ", ".join(str(m) for m in mountpoints)) ) return mnt elif mountpoints: mnt = mountpoints[0] if mnt != Path("/").resolve(): self.logger.info( "Using root mountpoint '{}'." .format(mnt) ) return mnt self.logger.warning( "Couldn't find root mountpoint, falling back to '/'." ) return Path("/").resolve() @global_options.add @Argument("--boot-mountpoint", nargs=1, metavar="DIR") def boot_mountpoint(self, boot=None): """Boot mountpoint of the system to work on.""" if boot: boot = Path(boot).resolve() self.logger.info( "Using boot mountpoint '{}' from given argument." .format(boot) ) return boot boot_str = self.diskinfo.by_mountpoint("/boot", fstab_only=True) device = self.diskinfo.evaluate(boot_str) mountpoints = sorted( self.diskinfo.mountpoints(device), key=lambda p: len(p.parents), ) if device and not mountpoints: self.logger.warning( "Boot partition '{}' for specified root is not mounted." .format(device) ) if len(mountpoints) > 1: self.logger.warning( "Choosing '{}' from multiple /boot mountpoints: {}." .format(mountpoints[0], ", ".join(str(m) for m in mountpoints)) ) if mountpoints: return mountpoints[0] root = self.root_mountpoint boot = (root / "boot").resolve() self.logger.info( "Couldn't find /boot in fstab, falling back to '{}'." .format(boot) ) if root != Path("/").resolve() and not boot.is_dir(): self.logger.warning( "Boot mountpoint '{}' does not exist for custom root." .format(boot) ) self.logger.warning( "Not falling back to the running system for boot mountpoint." ) return boot @Argument(dest=argparse.SUPPRESS, help=argparse.SUPPRESS) def diskinfo(self): # Break cyclic dependencies here yield Disks() root = self.root_mountpoint return Disks( fstab=(root / "etc" / "fstab"), crypttab=(root / "etc" / "crypttab"), ) @Group def config_options(self): """Configuration options""" # Update the values in the configparser object so that the # config subcommand can query e.g. the autodetected board. 
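# For example, after this update a plain "depthchargectl config board"
# call should report the codename detected at runtime (or "none")
# rather than whatever was literally written in the config files.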
self.config.update({ 'board': self.board.codename if self.board else "none", 'images-dir': str(self.images_dir), 'vboot-keyblock': str(self.vboot_keyblock), 'vboot-public-key': str(self.vboot_public_key), 'vboot-private-key': str(self.vboot_private_key), 'kernel-cmdline': " ".join(self.kernel_cmdline), 'ignore-initramfs': str(self.ignore_initramfs), 'zimage-initramfs-hack': str(self.zimage_initramfs_hack), }) if self.board is not None: def pattern(regex): try: return str(regex.pattern) except: return "None" self.config.update({ "arch": str(self.board.arch), "codename": str(self.board.codename), "boots-lz4-kernel": str(self.board.boots_lz4_kernel), "boots-lzma-kernel": str(self.board.boots_lzma_kernel), "dt-compatible": pattern(self.board.dt_compatible), "fit-ramdisk-load-address": str(self.board.fit_ramdisk_load_address), "hwid-match": pattern(self.board.hwid_match), "image-format": str(self.board.image_format), "image-max-size": str(self.board.image_max_size), "image-start-address": str(self.board.image_start_address), "loads-dtb-off-by-one": str(self.board.loads_dtb_off_by_one), "loads-fit-ramdisk": str(self.board.loads_fit_ramdisk), "loads-zimage-ramdisk": str(self.board.loads_zimage_ramdisk), "name": str(self.board.name), }) @config_options.add @Argument("--config", nargs=1) def config(self, file_=None): """Additional configuration file to read""" if isinstance(file_, configparser.SectionProxy): parser = file_.parser elif isinstance(file_, configparser.ConfigParser): parser = file_ file_ = None else: parser = configparser.ConfigParser( default_section="depthcharge-tools", dict_type=ConfigDict, ) parser.read_string(config_ini, source="config.ini") parser.read_string(boards_ini, source="boards.ini") try: for p in parser.read(config_files): self.logger.debug("Read config file '{}'.".format(p)) except configparser.ParsingError as err: self.logger.warning( "Config file '{}' could not be parsed." .format(err.filename) ) root = self.root_mountpoint boot = self.boot_mountpoint def fixup_path(f): p = Path(f).resolve() if f.startswith("/boot"): return str(boot / p.relative_to("/boot")) elif f.startswith("/"): return str(root / p.relative_to("/")) if root != Path("/").resolve(): extra_parser = configparser.ConfigParser() extra_files = [ *root.glob("etc/depthcharge-tools/config"), *root.glob("etc/depthcharge-tools/config.d/*"), ] try: for p in extra_parser.read(extra_files): self.logger.debug("Read config file '{}'.".format(p)) except configparser.ParsingError as err: self.logger.warning( "Config file '{}' could not be parsed." .format(err.filename) ) file_configs = [ "images-dir", "vboot-keyblock", "vboot-public-key", "vboot-private-key", ] for sect, conf in extra_parser.items(): for key, value in conf.items(): if key in file_configs: conf[key] = fixup_path(value) parser.read_dict(extra_parser) if self.config_section not in parser.sections(): if self.config_section != parser.default_section: parser.add_section(self.config_section) if isinstance(file_, collections.abc.Mapping): parser[self.config_section].update(file_) elif file_ is not None: try: read = parser.read([file_]) except configparser.ParsingError as err: raise ValueError( "Config file '{}' could not be parsed." .format(err.filename) ) if file_ not in read: raise ValueError( "Config file '{}' could not be read." 
.format(file_) ) return parser[self.config_section] @config_options.add @Argument("--board", nargs=1) def board(self, codename=""): """Assume we're running on the specified board""" if isinstance(codename, Board): return codename elif isinstance(codename, configparser.SectionProxy): return Board(codename) elif codename is None: return None boards = { sectname: Board(section) for sectname, section in self.config.parser.items() if sectname.startswith("boards/") } if not codename: codename = self.config.get("board", "") if codename in ("None", "none"): return None elif codename: parts = str(codename).lower().replace('-', '_').split('_') def codename_match(item): if item is None: return (len(parts) - 1, 0) sectname, board = item matchparts = sectname.split("/") + ( [] if board.codename is None else board.codename.lower().replace('-', '_').split('_') ) # Don't match sections without explicit codenames parent, _, _ = sectname.rpartition('/') if parent in boards and boards[parent].codename == board.codename: return (len(parts) - 1, float("inf")) # Some kind of a fuzzy match, how many parts of the # given codename exist in the parts of this config idx = len(parts) - 1 while parts and matchparts and idx >= 0: if parts[idx] == matchparts[-1]: idx -= 1 # Oldest boards have x86-alex_he etc. elif (parts[idx], matchparts[-1]) == ("x86", "amd64"): idx -= 1 matchparts.pop() # Avoid matching only on "libreboot" without actual board if parts[-1] == "libreboot" and idx == len(parts) - 2: return (len(parts) - 1, float("inf")) return (idx, len(sectname.split("/"))) match_groups = collections.defaultdict(list) for item in (None, *boards.items()): match_groups[codename_match(item)].append(item) score, matches = min(match_groups.items()) if not matches or None in matches: raise ValueError( "Unknown board codename '{}'." .format(codename) ) elif len(matches) > 1: raise ValueError( "Ambiguous board codename '{}' matches {}." .format(codename, [b.codename for s, b in matches]) ) sectname, board = matches[0] self.logger.info( "Assuming board '{}' ('{}') by codename argument or config." .format(board.name, board.codename) ) return board hwid = cros_hwid() def hwid_match(item): sectname, board = item try: return bool(re.match(board.hwid_match, hwid)) except: return False if hwid is not None: matches = tuple(filter(hwid_match, boards.items())) else: matches = () if matches: sectname, board = matches[0] if is_cros_libreboot(): libreboot_name = "{}/libreboot".format(sectname) libreboot_board = boards.get(libreboot_name, None) if libreboot_board: sectname = libreboot_name board = libreboot_board self.logger.info( "Detected board '{}' ('{}') by HWID." .format(board.name, board.codename) ) return board compatibles = dt_compatibles() def compat_preference(item): if item is None: return (len(compatibles), 0) sectname, board = item if board.dt_compatible is None: return (float("inf"), 0) for i, c in enumerate(compatibles): if board.dt_compatible.fullmatch(c): return (i, -len(sectname.split("/"))) else: return (float("inf"), -1) if compatibles is not None: match = min((None, *boards.items()), key=compat_preference) else: match = None if match is not None: sectname, board = match if is_cros_libreboot(): libreboot_name = "{}/libreboot".format(sectname) libreboot_board = boards.get(libreboot_name, None) if libreboot_board: sectname = libreboot_name board = libreboot_board self.logger.info( "Detected board '{}' ('{}') by device-tree compatibles." 
.format(board.name, board.codename) ) return board # This might actually be running on non-ChromeOS hardware. # Check this after the board detection code, because we might # also be running on e.g. RW_LEGACY but still with depthcharge. if not is_cros_boot(): return None # Use generic boards per cpu architecture, since we couldn't # detect this system as a proper board arch = platform.machine() if arch in Architecture.arm_32: sectname = "boards/arm" elif arch in Architecture.arm_64: sectname = "boards/arm64" elif arch in Architecture.x86: sectname = "boards/amd64" board = boards.get(sectname, None) if board is not None: self.logger.warning( "Assuming a generic board of architecture '{}'." .format(board.arch) ) return board raise ValueError( "Could not detect which board this is running on." ) @config_options.add @Argument("--images-dir", nargs=1) def images_dir(self, dir_=None): """Directory to store built images""" if dir_ is not None: return Path(dir_).resolve() dir_ = self.config.get("images-dir") if dir_ is None: raise ValueError( "Images directory is not specified" ) dir_ = Path(dir_).resolve() boot = self.boot_mountpoint if boot != Path("/boot").resolve() and dir_.is_relative_to("/boot"): return boot / dir_.relative_to("/boot") root = self.root_mountpoint if root != Path("/").resolve() and not dir_.is_relative_to(root): return root / dir_.relative_to("/") return dir_ @config_options.add @Argument("--vboot-keydir", nargs=1, help=argparse.SUPPRESS) def vboot_keydir(self, dir_=None): """Directory containing default vboot keys to use""" if dir_: keydir = vboot_keys(dir_, system=None)[0] if keydir: return Path(keydir).resolve() root = self.root_mountpoint if root != Path("/").resolve(): keydir = vboot_keys(root=root)[0] if keydir: return Path(keydir).resolve() keydir = vboot_keys()[0] if keydir: return Path(keydir).resolve() return None @config_options.add @Argument("--vboot-keyblock", nargs=1) def vboot_keyblock(self, keyblock=None): """Keyblock file to include in images""" if keyblock: return Path(keyblock).resolve() keyblock = self.config.get("vboot-keyblock") if keyblock: return Path(keyblock).resolve() if self.vboot_keydir: keyblock = self.vboot_keydir / "kernel.keyblock" if keyblock.exists(): return Path(keyblock).resolve() @config_options.add @Argument("--vboot-public-key", nargs=1) def vboot_public_key(self, signpubkey=None): """Public key file to verify images with""" if signpubkey: return Path(signpubkey).resolve() signpubkey = self.config.get("vboot-public-key") if signpubkey: return Path(signpubkey).resolve() if self.vboot_keydir: signpubkey = self.vboot_keydir / "kernel_subkey.vbpubk" if signpubkey.exists(): return Path(signpubkey).resolve() return signpubkey @config_options.add @Argument("--vboot-private-key", nargs=1) def vboot_private_key(self, signprivate=None): """Private key file to sign images with""" if signprivate: return Path(signprivate).resolve() signprivate = self.config.get("vboot-private-key") if signprivate: return Path(signprivate).resolve() if self.vboot_keydir: signprivate = self.vboot_keydir / "kernel_data_key.vbprivk" if signprivate.exists(): return Path(signprivate).resolve() return signprivate @config_options.add @Argument("--kernel-cmdline", nargs="+", metavar="CMD") def kernel_cmdline(self, *cmds): """Command line options for the kernel""" # Break cyclic dependencies here yield [] cmdline_src = "given options" if len(cmds) == 0: cmdline = self.config.get("kernel-cmdline") if cmdline is not None: cmds = shlex.split(cmdline) cmdline_src = "configuration" 
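# Keep falling back through the remaining sources: /etc/kernel/cmdline
# under the target root, and finally (only when working on the running
# system's root) /proc/cmdline with root, initrd, kern_guid, BOOT_IMAGE
# and cros_* entries filtered out.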
if len(cmds) == 0: cmds = kernel_cmdline(self.root_mountpoint) cmdline_src = "/etc/kernel/cmdline" if len(cmds) == 0: if self.root_mountpoint == Path("/").resolve(): cmds = [ cmd for cmd in proc_cmdline() if cmd.split("=", 1)[0] not in ( "root", "initrd", "kern_guid", "BOOT_IMAGE", "cros_secure", "cros_legacy", "cros_efi", ) ] cmdline_src = "/proc/cmdline" flat_cmds = [] for cmd in cmds: flat_cmds.extend(shlex.split(cmd)) if flat_cmds: self.logger.info( "Using kernel cmdline from {}: {}" .format(cmdline_src, " ".join(flat_cmds)) ) return flat_cmds @config_options.add @Argument("--ignore-initramfs", ignore=True) def ignore_initramfs(self, ignore=None): """Do not include initramfs in images""" if ignore is None: ignore = self.config.getboolean("ignore-initramfs", False) return ignore @config_options.add @Argument("--zimage-initramfs-hack", nargs=1, help=argparse.SUPPRESS) def zimage_initramfs_hack(self, hack=None): """Which initramfs support hack should be used for zimage format""" if hack is None: hack = self.config.get("zimage-initramfs-hack", "init-size") if hack in ("None", "none"): hack = None return hack @Subparsers() def command(self, cmd): """Supported subcommands""" def __call__(self): if hasattr(type(self), "list"): self.logger.info("No subcommand given, defaulting to list") return type(self).list() else: raise ValueError("No subcommand given") import depthcharge_tools.depthchargectl._bless import depthcharge_tools.depthchargectl._build import depthcharge_tools.depthchargectl._check import depthcharge_tools.depthchargectl._config import depthcharge_tools.depthchargectl._list import depthcharge_tools.depthchargectl._remove import depthcharge_tools.depthchargectl._target import depthcharge_tools.depthchargectl._write depthcharge-tools-0.6.2/depthcharge_tools/depthchargectl/__main__.py000066400000000000000000000005461444761253100257200ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools depthchargectl program # Copyright (C) 2021 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. from depthcharge_tools.depthchargectl import depthchargectl if __name__ == "__main__": depthchargectl.main() depthcharge-tools-0.6.2/depthcharge_tools/depthchargectl/_bless.py000066400000000000000000000171371444761253100254530ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools depthchargectl bless subcommand # Copyright (C) 2020-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. import argparse import logging import subprocess from depthcharge_tools import __version__ from depthcharge_tools.utils.argparse import ( Command, Argument, Group, CommandExit, ) from depthcharge_tools.utils.os import ( Disk, Partition, CrosPartition, ) from depthcharge_tools.utils.platform import ( is_cros_boot, ) from depthcharge_tools.depthchargectl import depthchargectl @depthchargectl.subcommand("bless") class depthchargectl_bless( depthchargectl, prog="depthchargectl bless", usage="%(prog)s [options] [DISK | PARTITION]", add_help=False, ): """Set the active or given partition as successfully booted.""" _logger = depthchargectl._logger.getChild("bless") config_section = "depthchargectl/bless" @depthchargectl.board.copy() def board(self, codename=""): """Assume we're running on the specified board""" # We can bless partitions without knowing the board. 
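# The GPT attribute flags that bless manipulates are not board-specific,
# so a failed detection is only logged and the board is treated as
# unknown (None).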
try: return super().board except Exception as err: self.logger.warning(err) return None @Group def positionals(self): """Positional arguments""" if self.disk is not None and self.partition is not None: raise ValueError( "Disk and partition arguments are mutually exclusive." ) device = self.disk or self.partition if isinstance(device, str): sys_device = self.diskinfo.evaluate(device) if sys_device is not None: self.logger.info( "Using argument '{}' as a block device." .format(device) ) device = sys_device else: self.logger.info( "Using argument '{}' as a disk image." .format(device) ) device = Disk(device) if isinstance(device, Disk): if self.partno is None: raise ValueError( "Partno argument is required for disks." ) partition = device.partition(self.partno) elif isinstance(device, Partition): if self.partno is not None and self.partno != device.partno: raise ValueError( "Partition and partno arguments are mutually exclusive." ) partition = device elif device is None: self.logger.info( "No partition given, defaulting to currently booted one." ) partition = self.diskinfo.by_kern_guid() if partition is None: if is_cros_boot(): raise ValueError( "Couldn't figure out the currently booted partition." ) else: raise ValueError( "A disk or partition argument is required when not " "booted with depthcharge." ) self.logger.info( "Working on partition '{}'." .format(partition) ) try: cros_partitions = partition.disk.cros_partitions() except subprocess.CalledProcessError as err: self.logger.debug( err, exc_info=self.logger.isEnabledFor(logging.DEBUG), ) raise ValueError( "Couldn't get partitions for disk '{}'." .format(partition.disk) ) from err if partition not in cros_partitions: raise ValueError( "Partition '{}' is not a ChromeOS Kernel partition" .format(partition) ) partition = CrosPartition(partition) self.partition = partition self.disk = partition.disk self.partno = partition.partno @positionals.add @Argument(nargs=0) def disk(self, disk=None): """Disk image to manage partitions of""" return disk @positionals.add @Argument def partition(self, partition=None): """ChromeOS Kernel partition device to manage""" return partition @Group def options(self): """Options""" @options.add @Argument("-i", "--partno", nargs=1) def partno(self, number=None): """Partition number in the given disk image""" try: if number is not None: number = int(number) except: raise TypeError( "Partition number must be a positive integer." ) if number is not None and not number > 0: raise ValueError( "Partition number must be a positive integer." ) return number @options.add @Argument("--bad", bad=True) def bad(self, bad=False): """Set the partition as unbootable""" return bad @options.add @Argument("--oneshot", oneshot=True) def oneshot(self, oneshot=False): """Set the partition to be tried once""" return oneshot def __call__(self): if self.bad == False: try: self.partition.tries = 1 except subprocess.CalledProcessError as err: raise CommandExit( "Failed to set remaining tries for partition '{}'." .format(self.partition) ) from err if self.oneshot == False: try: self.partition.successful = 1 except subprocess.CalledProcessError as err: raise CommandExit( "Failed to set success flag for partition '{}'." .format(self.partition) ) from err self.logger.warning( "Set partition '{}' as successfully booted." .format(self.partition) ) else: try: self.partition.successful = 0 except subprocess.CalledProcessError as err: raise CommandExit( "Failed to unset successful flag for partition '{}'." 
.format(self.partition) ) from err self.logger.warning( "Set partition '{}' as not yet successfully booted." .format(self.partition) ) try: self.partition.prioritize() except subprocess.CalledProcessError as err: raise CommandExit( "Failed to prioritize partition '{}'." .format(self.partition) ) from err self.logger.info( "Set partition '{}' as the highest-priority bootable part." .format(self.partition) ) else: try: self.partition.attribute = 0x000 except subprocess.CalledProcessError as err: raise CommandExit( "Failed to zero attributes for partition '{}'." .format(self.partition) ) from err self.logger.warning( "Set partition '{}' as a zero-priority unbootable part." .format(self.partition) ) global_options = depthchargectl.global_options config_options = depthchargectl.config_options depthcharge-tools-0.6.2/depthcharge_tools/depthchargectl/_build.py000066400000000000000000000465601444761253100254440ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools depthchargectl build subcommand # Copyright (C) 2020-2023 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. import argparse import collections import configparser import logging import os import shlex import textwrap from pathlib import Path from functools import lru_cache from depthcharge_tools import __version__ from depthcharge_tools.mkdepthcharge import mkdepthcharge from depthcharge_tools.utils.argparse import ( Command, Argument, Group, CommandExit, ) from depthcharge_tools.utils.os import ( Partition, ) from depthcharge_tools.utils.pathlib import ( copy, ) from depthcharge_tools.utils.platform import ( KernelEntry, cpu_microcode, vboot_keys, installed_kernels, root_requires_initramfs, ) from depthcharge_tools.utils.subprocess import ( fdtget, ) from depthcharge_tools.depthchargectl import depthchargectl class SizeTooBigError(CommandExit): def __init__(self): super().__init__( "Couldn't build a small enough image for this board.", returncode=4, ) class InitramfsSizeTooBigError(SizeTooBigError): def __init__(self): super(SizeTooBigError, self).__init__( "Couldn't build a small enough image for this board. " "This is usually solvable by making the initramfs smaller, " "check your OS's documentation on how to do so.", returncode=3, ) @depthchargectl.subcommand("build") class depthchargectl_build( depthchargectl, prog="depthchargectl build", usage="%(prog)s [options] [KERNEL_VERSION]", add_help=False, ): """Buld a depthcharge image for the running system.""" _logger = depthchargectl._logger.getChild("build") config_section = "depthchargectl/build" @Group def positionals(self): """Positional arguments""" @positionals.add @Argument def kernel_version(self, kernel_version=None): """Installed kernel version to build an image for.""" if isinstance(kernel_version, KernelEntry): return kernel_version kernels = installed_kernels( root=self.root_mountpoint, boot=self.boot_mountpoint, ) kernel_arches = self.board.arch.kernel_arches for k in list(kernels): if k.arch not in kernel_arches: self.logger.info( "Ignoring kernel '{}' incompatible with board arch." .format(k.release or "(unknown)") ) kernels.remove(k) if isinstance(kernel_version, str): kernel = max( (k for k in kernels if k.release == kernel_version), default=None, ) if kernel is None: raise ValueError( "Could not find an installed kernel for version '{}'." .format(kernel_version) ) elif kernels: kernel = max(kernels) else: self.logger.warning( "Could not find any installed kernel." 
) kernel = None return kernel @Group def options(self): """Options""" @depthchargectl.board.copy() def board(self, codename=""): board = super().board if board is None: raise ValueError( "Cannot build depthcharge images when no board is specified.", ) return board @depthchargectl.zimage_initramfs_hack.copy() def zimage_initramfs_hack(self, hack=None): hack = super().zimage_initramfs_hack if hack not in (None, "set-init-size", "pad-vmlinuz"): raise ValueError( "Unknown zimage initramfs support hack '{}'." .format(hack) ) return hack @Group def custom_kernel_options(self): """Custom kernel specification""" @custom_kernel_options.add @Argument("--kernel-release", nargs=1) def kernel_release(self, name=None): """Release name for the kernel used in image name""" if name is None and self.kernel_version is not None: if self.kernel == self.kernel_version.kernel: name = self.kernel_version.release return name @custom_kernel_options.add @Argument("--kernel", nargs=1) def kernel(self, file_=None): """Kernel executable""" if file_ is None and self.kernel_version is not None: file_ = self.kernel_version.kernel # vmlinuz is always mandatory if file_ is None and self.kernel_release is not None: raise ValueError( "No vmlinuz file found for version '{}'." .format(self.kernel_release) ) elif file_ is None: raise ValueError("No vmlinuz file found.") return Path(file_) @custom_kernel_options.add @Argument("--initramfs", nargs='+') def initrd(self, *files): """Ramdisk images""" # Trigger more important errors first self.kernel if not files and self.kernel_version is not None: if self.kernel == self.kernel_version.kernel: microcode = cpu_microcode(self.boot_mountpoint) files = [*microcode, self.kernel_version.initrd] if self.ignore_initramfs: for file in files: self.logger.warning( "Ignoring initramfs '{}' as configured." .format(file) ) return None if len(files) == 1 and files[0] in (None, "None", "none"): self.logger.warning("Not using initramfs.") return None # Initramfs is optional. if not files and self.kernel_release is not None: self.logger.info( "No initramfs file found for version '{}'." .format(self.kernel_release) ) return None elif not files: self.logger.info("No initramfs file found.") return None else: return [Path(file) for file in files] @custom_kernel_options.add @Argument("--fdtdir", nargs=1) def fdtdir(self, dir_=None): """Directory to search device-tree binaries for the board""" if dir_ is None and self.kernel_version is not None: if self.kernel == self.kernel_version.kernel: dir_ = self.kernel_version.fdtdir if dir_ is None: return None else: return Path(dir_) @custom_kernel_options.add @Argument("--dtbs", nargs="+", metavar="FILE") def dtbs(self, *files): """Device-tree binary files to use instead of searching fdtdir""" # Trigger more important errors first self.kernel # Device trees are optional based on board configuration. if self.board.dt_compatible and len(files) == 0: if self.fdtdir is None and self.kernel_release is not None: raise ValueError( "No dtb directory found for version '{}', " "but this board needs a dtb." .format(self.kernel_release) ) elif self.fdtdir is None: raise ValueError( "No dtb directory found, " "but this board needs a dtb." ) self.logger.info( "Searching '{}' for dtbs compatible with pattern '{}'." 
.format(self.fdtdir, self.board.dt_compatible.pattern) ) def is_compatible(dt_file): return any( self.board.dt_compatible.fullmatch(compat) for compat in fdtget.get( dt_file, "/", "compatible", default="", ).split() ) files = list(filter( is_compatible, self.fdtdir.glob("**/*.dtb"), )) if len(files) == 0: raise ValueError( "No dtb file compatible with pattern '{}' found in '{}'." .format(self.board.dt_compatible.pattern, self.fdtdir) ) else: files = [Path(f) for f in files] if self.board.image_format == "zimage" and len(files) != 0: raise ValueError( "Image format '{}' doesn't support dtb files." .format(self.board.image_format) ) return sorted(files, key=lambda f: f.name) @options.add @Argument("--description", nargs=1) def description(self, desc=None): """Human-readable description for the image""" if desc is None and self.kernel_version is not None: desc = self.kernel_version.description return desc @options.add @depthchargectl.root.copy("--root") def root(self, root=None): """Root device to add to the kernel cmdline""" if root in ("", "None", "none"): return None root = super().root if isinstance(root, Path): mnt = self.root_mountpoint root = self.diskinfo.by_mountpoint("/", fstab_only=True) if root: self.logger.info( "Using root '{}' set in '{}'." .format(root, mnt / "etc" / "fstab") ) return root dev = self.diskinfo.by_mountpoint(mnt) uuid = self.diskinfo.get_uuid(dev) if uuid: root = "UUID={}".format(uuid.upper()) self.logger.warning( "Using '{}' as root from currently mounted '{}'." .format(root, dev) ) return root if mnt != Path("/").resolve(): raise ValueError( "Couldn't convert mountpoint '{}' to a root cmdline." .format(mnt) ) raise ValueError( "Couldn't figure out a root cmdline for this system." .format(mnt) ) if not self.diskinfo.evaluate(root): self.logger.warning( "Using root '{}' but it's not a device or mountpoint." .format(root) ) return str(root) @depthchargectl.kernel_cmdline.copy() def kernel_cmdline(self, *cmds): # This is some deep magic that evaluates self.kernel_cmdline # according to the parent definition and sets it in self. cmdline = super().kernel_cmdline # This evaluates self.root with the above self.kernel_cmdline, # then continues here to set self.kernel_cmdline a second time. append_root = self.root is not None for c in list(cmdline): lhs, _, rhs = c.partition("=") if lhs.lower() != "root": continue if rhs == self.root: append_root = False continue if self.root is None: self.logger.warning( "Kernel cmdline has a root '{}', keeping it." .format(rhs) ) continue self.logger.warning( "Kernel cmdline has a different root '{}', removing it." .format(rhs) ) cmdline.remove(c) if append_root: self.logger.info( "Appending 'root={}' to kernel cmdline." .format(self.root) ) cmdline.append('root={}'.format(self.root)) if self.ignore_initramfs: self.logger.warning( "Ignoring initramfs as configured, " "appending 'noinitrd' to the kernel cmdline." .format(self.initrd) ) cmdline.append("noinitrd") # Linux kernel without an initramfs only supports certain # types of root parameters, check for them. if self.initrd is None and self.root is not None: if root_requires_initramfs(self.root): raise ValueError( "An initramfs is required for root '{}'." .format(self.root) ) return cmdline @options.add @Argument("--compress", nargs="+", metavar="TYPE") def compress(self, *compress): """Compression types to attempt.""" # Allowed compression levels. We will call mkdepthcharge by # hand multiple times for these. 
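# For example, with no --compress argument a board that has
# boots-lz4-kernel but not boots-lzma-kernel ends up trying
# ["none", "lz4"] in that order until the built image fits;
# zimage-format boards are limited to ["none"].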
for c in compress: if c not in ("none", "lz4", "lzma"): raise ValueError( "Unsupported compression type '{}'." .format(t) ) if len(compress) == 0: compress = ["none"] if self.board.boots_lz4_kernel: compress += ["lz4"] if self.board.boots_lzma_kernel: compress += ["lzma"] # zimage doesn't support compression if self.board.image_format == "zimage": compress = ["none"] return sorted(set(compress), key=compress.index) @options.add @Argument("--timestamp", nargs=1) def timestamp(self, seconds=None): """Build timestamp for the image""" if seconds is None: if "SOURCE_DATE_EPOCH" in os.environ: seconds = os.environ["SOURCE_DATE_EPOCH"] # Initramfs date is bound to be later than vmlinuz date, so # prefer that if possible. if seconds is None: if self.initrd is not None: seconds = max( int(initrd.stat().st_mtime) for initrd in self.initrd ) else: seconds = int(self.kernel.stat().st_mtime) if seconds is None: self.logger.error( "Couldn't determine a timestamp from initramfs " "nor vmlinuz." ) return seconds @options.add @Argument("-o", "--output", nargs=1) def output(self, path=None): """Output image to path instead of storing in images-dir""" if path is None: image_name = "{}.img".format(self.kernel_release or "default") path = self.images_dir / image_name return Path(path) def __call__(self): self.logger.warning( "Building depthcharge image for board '{}' ('{}')." .format(self.board.name, self.board.codename) ) self.logger.info( "Building for kernel version '{}'." .format(self.kernel_release or "(unknown)") ) # Images dir might not have been created at install-time os.makedirs(self.output.parent, exist_ok=True) # Build to a temporary file so we do not overwrite existing # images with an unbootable image. outtmp = self.tmpdir / "{}.tmp".format(self.output.name) # Try to keep output reproducible. if self.timestamp is not None: os.environ["SOURCE_DATE_EPOCH"] = str(self.timestamp) # Error early if initramfs is absolutely too big to fit initrd_size = ( sum(initrd.stat().st_size for initrd in self.initrd) if self.initrd is not None else 0 ) if initrd_size >= self.board.image_max_size: self.logger.error( "Initramfs alone is larger than the maximum image size." ) raise InitramfsSizeTooBigError() # The earliest boards apparently have an off-by-one error while # loading the chosen dtb, adding each file twice solves it. dtbs = ( self.dtbs if not self.board.loads_dtb_off_by_one else [dtb for dtb in self.dtbs for _ in (0, 1)] ) # Skip compress="none" if inputs wouldn't fit max image size compress_list = self.compress inputs_size = sum([ self.kernel.stat().st_size, initrd_size, *(dtb.stat().st_size for dtb in dtbs), ]) if inputs_size > self.board.image_max_size and "none" in compress_list: self.logger.info( "Inputs are too big, skipping uncompressed build." 
) compress_list.remove("none") if not compress_list: raise SizeTooBigError() # Avoid passing format-specific options unrelated to board format image_format_opts = { "image_format": self.board.image_format, } if self.board.image_format == "fit": image_format_opts["name"] = self.description image_format_opts["patch_dtbs"] = not self.board.loads_fit_ramdisk if self.board.fit_ramdisk_load_address is not None: image_format_opts["ramdisk_load_address"] = ( self.board.fit_ramdisk_load_address ) elif self.board.image_format == "zimage": if not self.board.loads_zimage_ramdisk: hack = self.zimage_initramfs_hack image_format_opts["pad_vmlinuz"] = (hack == "pad-vmlinuz") image_format_opts["set_init_size"] = (hack == "set-init-size") for compress in compress_list: self.logger.info("Trying with compression '{}'.".format(compress)) tmpdir = self.tmpdir / "mkdepthcharge-{}".format(compress) try: mkdepthcharge( arch=self.board.arch, cmdline=self.kernel_cmdline, compress=compress, dtbs=dtbs, **image_format_opts, kernel_start=self.board.image_start_address, initramfs=self.initrd, keyblock=self.vboot_keyblock, output=outtmp, signprivate=self.vboot_private_key, signpubkey=self.vboot_public_key, vmlinuz=self.kernel, tmpdir=tmpdir, verbosity=self.verbosity, ) except Exception as err: raise CommandExit( "Failed while creating depthcharge image.", ) from err if outtmp.stat().st_size < self.board.image_max_size: break self.logger.warning( "Image with compression '{}' is too big for this board." .format(compress) ) else: # The necessary zimage padding might be too big, actually # check if reducing the initramfs would make things fit. if outtmp.stat().st_size - initrd_size < self.board.image_max_size: raise InitramfsSizeTooBigError() else: raise SizeTooBigError() self.logger.info("Copying newly built image to output.") try: copy(outtmp, self.output) except PermissionError as err: raise PermissionError( "Couldn't copy to '{}', permission denied." .format(self.output) ) self.logger.warning( "Built depthcharge image for kernel version '{}'." .format(self.kernel_release or "(unknown)") ) return self.output global_options = depthchargectl.global_options config_options = depthchargectl.config_options depthcharge-tools-0.6.2/depthcharge_tools/depthchargectl/_check.py000066400000000000000000000146661444761253100254240ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools depthchargectl check subcommand # Copyright (C) 2020-2023 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. import argparse import logging from pathlib import Path from depthcharge_tools import __version__ from depthcharge_tools.utils.argparse import ( Command, Argument, Group, CommandExit, ) from depthcharge_tools.utils.subprocess import ( fdtget, vbutil_kernel, ) from depthcharge_tools.depthchargectl import depthchargectl class SizeTooBigError(CommandExit): def __init__(self, image, image_size, max_size): message = ( "Image '{}' ({} bytes) must be smaller than {} bytes." .format(image, image_size, max_size) ) self.image = image self.image_size = image_size self.max_size = max_size super().__init__(output=False, returncode=3, message=message) class NotADepthchargeImageError(CommandExit): def __init__(self, image): message = ( "Image '{}' is not a depthcharge image." 
.format(image) ) self.image = image super().__init__(output=False, returncode=4, message=message) class VbootSignatureError(CommandExit): def __init__(self, image): message = ( "Depthcharge image '{}' is not signed by the configured keys." .format(image) ) self.image = image super().__init__(output=False, returncode=5, message=message) class ImageFormatError(CommandExit): def __init__(self, image, board_format): message = ( "Image '{}' must be in '{}' format." .format(image, board_format) ) self.image = image self.board_format = board_format super().__init__(output=False, returncode=6, message=message) class MissingDTBError(CommandExit): def __init__(self, image, compat): message = ( "Image '{}' must have a device-tree binary compatible with pattern '{}'." .format(image, compat) ) self.image = image self.compat = compat super().__init__(output=False, returncode=7, message=message) @depthchargectl.subcommand("check") class depthchargectl_check( depthchargectl, prog="depthchargectl check", usage = "%(prog)s [options] IMAGE", add_help=False, ): """Check if a depthcharge image can be booted.""" _logger = depthchargectl._logger.getChild("check") config_section = "depthchargectl/check" @Group def positionals(self): """Positional arguments""" @positionals.add @Argument def image(self, image): """Depthcharge image to check validity of.""" image = Path(image) if not image.is_file(): raise ValueError("Image argument must be a file") return image @depthchargectl.board.copy() def board(self, codename=""): """Assume we're running on the specified board""" board = super().board if board is None: raise ValueError( "Cannot check depthcharge images when no board is specified.", ) return board @depthchargectl.zimage_initramfs_hack.copy() def zimage_initramfs_hack(self, hack=None): hack = super().zimage_initramfs_hack if hack not in (None, "set-init-size", "pad-vmlinuz"): raise ValueError( "Unknown zimage initramfs support hack '{}'." .format(hack) ) return hack def __call__(self): image = self.image self.logger.warning( "Verifying depthcharge image for board '{}' ('{}')." 
.format(self.board.name, self.board.codename) ) self.logger.info("Checking if image fits into size limit.") image_size = image.stat().st_size if image_size > self.board.image_max_size: raise SizeTooBigError( image, image_size, self.board.image_max_size, ) self.logger.info("Checking depthcharge image validity.") if vbutil_kernel( "--verify", image, check=False, ).returncode != 0: raise NotADepthchargeImageError(image) self.logger.info("Checking depthcharge image signatures.") if self.vboot_public_key is not None: if vbutil_kernel( "--verify", image, "--signpubkey", self.vboot_public_key, check=False, ).returncode != 0: raise VbootSignatureError(image) itb = self.tmpdir / "{}.itb".format(image.name) vbutil_kernel( "--get-vmlinuz", image, "--vmlinuz-out", itb, check=False, ) if self.board.image_format == "fit": self.logger.info("Checking FIT image format.") nodes = fdtget.subnodes(itb) if "images" not in nodes and "configurations" not in nodes: raise ImageFormatError(image, self.board.image_format) def is_compatible(dt_file, conf_path): return any( self.board.dt_compatible.fullmatch(compat) for compat in fdtget.get( dt_file, conf_path, "compatible", default="", ).split() ) self.logger.info("Checking included DTB binaries.") for conf in fdtget.subnodes(itb, "/configurations"): conf_path = "/configurations/{}".format(conf) if is_compatible(itb, conf_path): break dtb = fdtget.get(itb, conf_path, "fdt") dtb_path = "/images/{}".format(dtb) dtb_data = fdtget.get(itb, dtb_path, "data", type=bytes) dtb_file = self.tmpdir / "{}.dtb".format(conf) dtb_file.write_bytes(dtb_data) if is_compatible(dtb_file, "/"): break else: raise MissingDTBError( image, self.board.dt_compatible.pattern, ) self.logger.warning( "This command is incomplete, the image might be unbootable " "despite passing currently implemented checks." ) global_options = depthchargectl.global_options config_options = depthchargectl.config_options depthcharge-tools-0.6.2/depthcharge_tools/depthchargectl/_config.py000066400000000000000000000046351444761253100256070ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools depthchargectl config subcommand # Copyright (C) 2021-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. import argparse import logging from depthcharge_tools import __version__ from depthcharge_tools.utils.argparse import ( Command, Argument, Group, CommandExit, ) from depthcharge_tools.depthchargectl import depthchargectl @depthchargectl.subcommand("config") class depthchargectl_config( depthchargectl, prog="depthchargectl config", usage="%(prog)s [options] KEY", add_help=False, ): """Get depthchargectl configuration values.""" _logger = depthchargectl._logger.getChild("config") config_section = "depthchargectl/config" @depthchargectl.board.copy() def board(self, codename=""): """Assume we're running on the specified board""" # We can query configs without knowing the board. 
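# A failed detection is only logged as a warning here; the board-derived
# keys (name, arch, image-max-size, ...) are then simply left unset in
# the queried configuration.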
try: return super().board except Exception as err: self.logger.warning(err) return None @Group def positionals(self): """Positional arguments""" @positionals.add @Argument def key(self, key): """Config key to get value of.""" return key @Group def options(self): """Options""" @options.add @Argument("--section", nargs=1) def section(self, section=None): """Config section to work on.""" parser = self.config.parser if section is None: section = self.config.name if section not in parser.sections(): if section != parser.default_section: parser.add_section(section) return parser[section] @options.add @Argument("--default", nargs=1) def default(self, default=None): """Value to return if key doesn't exist in section.""" return default def __call__(self): if self.key not in self.section: if self.default is not None: return self.default else: raise KeyError( "Key '{}' not found in section '{}'." .format(self.key, self.section.name) ) return self.section[self.key] global_options = depthchargectl.global_options config_options = depthchargectl.config_options depthcharge-tools-0.6.2/depthcharge_tools/depthchargectl/_list.py000066400000000000000000000200151444761253100253030ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools depthchargectl list subcommand # Copyright (C) 2020-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. import argparse import logging import subprocess from depthcharge_tools import __version__ from depthcharge_tools.utils.argparse import ( Command, Argument, Group, CommandExit, ) from depthcharge_tools.utils.collections import ( TypedList ) from depthcharge_tools.utils.os import ( Disk, CrosPartition, ) from depthcharge_tools.depthchargectl import depthchargectl class CrosPartitions(TypedList(CrosPartition)): def __init__(self, partitions=None, columns=None, headings=True): super().__init__(partitions) if columns is None: if any(part.path is None for part in partitions): columns = ["S", "P", "T", "DISKPATH", "PARTNO"] else: columns = ["S", "P", "T", "PATH"] self._headings = headings self._columns = columns def _row(self, part): values = {} if set(self._columns).intersection(( "A", "S", "P", "T", "ATTRIBUTE", "SUCCESSFUL", "PRIORITY", "TRIES", )): flags = part.flags values.update({ "A": flags["attribute"], "S": flags["successful"], "P": flags["priority"], "T": flags["tries"], "ATTRIBUTE": flags["attribute"], "SUCCESSFUL": flags["successful"], "PRIORITY": flags["priority"], "TRIES": flags["tries"], }) if "SIZE" in self._columns: values["SIZE"] = part.size if part.path is not None: values["PATH"] = part.path if part.disk is not None and part.disk.path is not None: values["DISK"] = part.disk.path values["DISKPATH"] = part.disk.path if part.partno is not None: values["PARTNO"] = part.partno return [str(values.get(c, "")) for c in self._columns] def __str__(self): rows = [] if self._headings: rows.append(self._columns) parts = sorted(self, key=lambda p: p.path or p.disk.path) rows.extend(self._row(part) for part in parts) # Using tab characters makes things misalign when the data # widths vary, so find max width for each column from its data, # and format everything to those widths. 
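# For example (illustrative values): if the longest entry under PATH is
# "/dev/mmcblk1p2", that column is printed with a "{:14}" format;
# max(4, ...) keeps narrow columns at least 4 characters wide.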
widths = [max(4, *map(len, col)) for col in zip(*rows)] fmt = " ".join("{{:{w}}}".format(w=w) for w in widths) return "\n".join(fmt.format(*row) for row in rows) @depthchargectl.subcommand("list") class depthchargectl_list( depthchargectl, prog="depthchargectl list", usage="%(prog)s [options] [DISK ...]", add_help=False, ): """List ChromeOS kernel partitions.""" _logger = depthchargectl._logger.getChild("list") config_section = "depthchargectl/list" @depthchargectl.board.copy() def board(self, codename=""): """Assume we're running on the specified board""" # We can list partitions without knowing the board. try: return super().board except Exception as err: self.logger.warning(err) return None @Group def positionals(self): """Positional arguments""" @positionals.add @Argument(metavar="DISK") def disks(self, *disks): """Disks to check for ChromeOS kernel partitions.""" if self.all_disks: self.logger.info("Searching all disks.") disks = self.diskinfo.roots() elif disks: self.logger.info( "Searching real disks for {}." .format(", ".join(str(d) for d in disks)) ) images = [] for d in disks: if self.diskinfo.evaluate(d) is None: try: images.append(Disk(d)) except ValueError as err: self.logger.warning( err, exc_info=self.logger.isEnabledFor(logging.DEBUG), ) disks = [*self.diskinfo.roots(*disks), *images] else: self.logger.info("Searching bootable disks.") root = ( self.diskinfo.by_mountpoint("/", fstab_only=True) or self.diskinfo.by_mountpoint(self.root_mountpoint) ) boot = ( self.diskinfo.by_mountpoint("/boot", fstab_only=True) or self.diskinfo.by_mountpoint(self.boot_mountpoint) ) disks = self.diskinfo.roots(root, boot) if disks: self.logger.info( "Using disks: {}." .format(", ".join(str(d) for d in disks)) ) else: raise ValueError("Could not find any matching disks.") return disks @Group def options(self): """Options""" if self.count and self.output: raise ValueError( "Count and Output arguments are mutually exclusive." ) @options.add @Argument("-n", "--noheadings", headings=False) def headings(self, headings=True): """Don't print column headings.""" return headings @options.add @Argument("-a", "--all-disks", all_disks=True) def all_disks(self, all_disks=False): """List partitions on all disks.""" return all_disks valid_columns = { "ATTRIBUTE", "SUCCESSFUL", "PRIORITY", "TRIES", "A", "S", "P", "T", "PATH", "DISKPATH", "DISK", "PARTNO", "SIZE", } @options.add @Argument("-o", "--output", nargs=1, append=True) def output(self, *columns): """Comma separated list of columns to output.""" if len(columns) == 0: self.logger.info("Using default output format.") return None elif len(columns) == 1 and isinstance(columns[0], str): columns = columns[0] self.logger.info("Using output format '{}'.".format(columns)) else: columns = ",".join(columns) self.logger.info("Using output format '{}'.".format(columns)) columns = columns.split(',') invalid_columns = sorted( set(columns).difference(self.valid_columns), key=columns.index, ) if invalid_columns: raise ValueError( "Unsupported output columns '{}'." .format(invalid_columns) ) return columns @options.add @Argument("-c", "--count", count=True) def count(self, count=False): """Print only the count of partitions.""" return count def __call__(self): parts = [] error_disks = [] for disk in self.disks: try: parts.extend(disk.cros_partitions()) except subprocess.CalledProcessError as err: error_disks.append(disk) self.logger.debug( "Couldn't get partitions for disk '{}'." 
.format(disk) ) self.logger.debug( err, exc_info=self.logger.isEnabledFor(logging.DEBUG), ) if self.count: output = len(parts) else: output = CrosPartitions( parts, headings=self.headings, columns=self.output, ) if error_disks: return CommandExit( message=( "Couldn't get partitions for disks {}." .format(", ".join(str(d) for d in error_disks)) ), output=output, returncode=1, ) return output global_options = depthchargectl.global_options config_options = depthchargectl.config_options depthcharge-tools-0.6.2/depthcharge_tools/depthchargectl/_remove.py000066400000000000000000000205261444761253100256340ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools depthchargectl remove subcommand # Copyright (C) 2020-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. import argparse import logging import subprocess from pathlib import Path from depthcharge_tools import ( __version__, ) from depthcharge_tools.utils.argparse import ( Command, Argument, Group, CommandExit, ) from depthcharge_tools.depthchargectl import depthchargectl class BootedPartitionError(CommandExit): def __init__(self, partition): self.partition = partition super().__init__( "Refusing to disable currently booted partition '{}'." .format(partition) ) @depthchargectl.subcommand("remove") class depthchargectl_remove( depthchargectl, prog="depthchargectl remove", usage="%(prog)s [options] (KERNEL_VERSION | IMAGE)", add_help=False, ): """Remove images and disable partitions containing them.""" _logger = depthchargectl._logger.getChild("remove") config_section = "depthchargectl/remove" @depthchargectl.board.copy() def board(self, codename=""): """Assume we're running on the specified board""" # We can disable partitions without knowing the board. try: return super().board except Exception as err: self.logger.warning(err) return None @Group def positionals(self): """Positional arguments""" if self.image is not None and self.kernel_version is not None: raise ValueError( "Image and kernel_version arguments are mutually exclusive." ) if self.image is not None: image = self.image else: image = self.kernel_version if isinstance(image, str): # This can be run after the kernel is uninstalled, where the # version would no longer be valid, so don't check for that. # Instead just check if we have it as an image. img = (self.images_dir / "{}.img".format(image)).resolve() if img.parent == self.images_dir and img.is_file(): self.logger.info( "Disabling partitions for kernel version '{}'." .format(image) ) self.image = img self.kernel_version = image else: self.image = Path(image).resolve() self.kernel_version = None self.logger.info( "Disabling partitions for depthcharge image '{}'." .format(image) ) if not self.image.is_file(): raise TypeError( "Image to remove '{}' is not a file." .format(self.image) ) @positionals.add @Argument(dest=argparse.SUPPRESS, nargs=0) def kernel_version(self, kernel_version): """Installed kernel version to disable.""" return kernel_version @positionals.add @Argument def image(self, image): """Depthcharge image to disable.""" return image @Group def options(self): """Options""" @options.add @Argument("-f", "--force", force=True) def force(self, force=False): """Allow disabling the currently booted partition.""" return force def __call__(self): image = self.image # When called with --vblockonly vbutil_kernel creates a file of # size 64KiB == 0x10000. 
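# Take only the first 0x10000 bytes (the vblock header) of the image; partitions below are compared against just this region.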
image_vblock = image.read_bytes()[:0x10000] partitions = depthchargectl.list( root=self.root, root_mountpoint=self.root_mountpoint, boot_mountpoint=self.boot_mountpoint, config=self.config, board=self.board, tmpdir=self.tmpdir / "list", images_dir=self.images_dir, vboot_keyblock=self.vboot_keyblock, vboot_public_key=self.vboot_public_key, vboot_private_key=self.vboot_private_key, kernel_cmdline=self.kernel_cmdline, ignore_initramfs=self.ignore_initramfs, verbosity=self.verbosity, ) self.logger.info( "Searching for Chrome OS Kernel partitions containing '{}'." .format(image) ) badparts = [] error_disks = [] for part in partitions: self.logger.info("Checking partition '{}'.".format(part)) # It's OK to check only the vblock header, as that # contains signatures on the content and those will be # different if the content is different. with part.path.open("rb") as p: if p.read(0x10000) == image_vblock: try: if part.attribute: badparts.append(part) except subprocess.CalledProcessError as err: self.logger.warning( "Couldn't get attribute for partition '{}'." .format(part) ) self.logger.debug( err, exc_info=self.logger.isEnabledFor( logging.DEBUG, ), ) current = self.diskinfo.by_kern_guid() if current in badparts: if self.force: self.logger.warning( "Deactivating the currently booted partition '{}'. " "This might make your system unbootable." .format(current) ) else: raise BootedPartitionError(current) done_parts = [] error_parts = [] for part in badparts: self.logger.info("Deactivating '{}'.".format(part)) try: depthchargectl.bless( partition=part, bad=True, root=self.root, root_mountpoint=self.root_mountpoint, boot_mountpoint=self.boot_mountpoint, config=self.config, board=self.board, tmpdir=self.tmpdir / "bless", images_dir=self.images_dir, vboot_keyblock=self.vboot_keyblock, vboot_public_key=self.vboot_public_key, vboot_private_key=self.vboot_private_key, kernel_cmdline=self.kernel_cmdline, ignore_initramfs=self.ignore_initramfs, verbosity=self.verbosity, ) except Exception as err: error_parts.append(part) self.logger.debug( err, exc_info=self.logger.isEnabledFor(logging.DEBUG), ) continue done_parts.append(part) self.logger.warning("Deactivated '{}'.".format(part)) if image.parent == self.images_dir and not error_disks and not error_parts: self.logger.info( "Image '{}' is in images dir, deleting." .format(image) ) image.unlink() self.logger.warning("Deleted image '{}'.".format(image)) else: self.logger.info( "Not deleting image file '{}'." .format(image) ) output = badparts or None error_msg = [] if error_disks: error_msg.append( "Couldn't disable partitions for disks {}." .format(", ".join(str(d) for d in error_disks)) ) if error_parts: error_msg.append( "Couldn't disable partitions {}." .format(", ".join(str(d) for d in error_parts)) ) if error_msg: return CommandExit( message="\n".join(error_msg), output=done_parts, returncode=1, ) if not output: self.logger.warning( "No active partitions contain the given image." ) return output global_options = depthchargectl.global_options config_options = depthchargectl.config_options depthcharge-tools-0.6.2/depthcharge_tools/depthchargectl/_target.py000066400000000000000000000202021444761253100256140ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools depthchargectl target subcommand # Copyright (C) 2020-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. 
import argparse import logging import subprocess import sys import types from depthcharge_tools import __version__ from depthcharge_tools.utils.argparse import ( Command, Argument, Group, CommandExit, ) from depthcharge_tools.utils.os import ( Disk, CrosPartition, Partition, ) from depthcharge_tools.utils.string import ( parse_bytesize, ) from depthcharge_tools.depthchargectl import depthchargectl class NotABlockDeviceError(CommandExit): def __init__(self, device): message = ( "Target '{}' is not a valid block device." .format(device) ) self.device = device super().__init__(message=message, returncode=2) class NotCrosPartitionError(CommandExit): def __init__(self, partition): message = ( "Partition '{}' is not of type Chrome OS Kernel." .format(partition) ) self.partition = partition super().__init__(message=message, returncode=5) class BootedPartitionError(CommandExit): def __init__(self, partition): message = ( "Partition '{}' is the currently booted partition." .format(partition) ) self.partition = partition super().__init__(message=message, returncode=6) class PartitionSizeTooSmallError(CommandExit): def __init__(self, partition, part_size, min_size): message = ( "Partition '{}' ('{}' bytes) is smaller than '{}' bytes." .format(partition, part_size, min_size) ) self.partition = partition self.part_size = part_size self.min_size = min_size super().__init__(message=message, returncode=7) class NoUsableCrosPartition(CommandExit): def __init__(self): message = ( "No usable Chrome OS Kernel partition found " "for given input arguments." ) super().__init__(message=message, output=None) @depthchargectl.subcommand("target") class depthchargectl_target( depthchargectl, prog="depthchargectl target", usage="%(prog)s [options] [PARTITION | DISK ...]", add_help=False, ): """Choose or validate a ChromeOS Kernel partition to use.""" _logger = depthchargectl._logger.getChild("target") config_section = "depthchargectl/target" @depthchargectl.board.copy() def board(self, codename=""): """Assume we're running on the specified board""" # We can target partitions without knowing the board. try: return super().board except Exception as err: self.logger.warning(err) return None @Group def positionals(self): """Positional arguments""" disks = list(self.disks) partitions = list(self.partitions) # The inputs can be a mixed list of partitions and disks, # separate the two.
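# Anything that parses as a Partition moves to the partitions list; everything else stays a disk.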
for d in list(disks): try: partitions.append(Partition(d)) self.logger.info("Using target '{}' as a partition.".format(d)) disks.remove(d) except: pass self.disks = disks self.partitions = partitions @positionals.add @Argument(metavar="PARTITION", nargs=0) def partitions(self, *partitions): """Chrome OS kernel partition to validate.""" return partitions @positionals.add @Argument(metavar="DISK") def disks(self, *disks): """Disks to search for an appropriate Chrome OS kernel partition.""" return disks @Group def options(self): """Options""" @options.add @Argument("-s", "--min-size", nargs=1) def min_size(self, bytes_=None): """Target partitions larger than this size.""" if bytes_ is None: return 0x10000 return parse_bytesize(bytes_) @options.add @Argument("--allow-current", allow=True) def allow_current(self, allow=False): """Allow targeting the currently booted partition.""" return allow @options.add @Argument("-a", "--all-disks", all_disks=True) def all_disks(self, all_disks=False): """Target partitions on all disks.""" return all_disks def __call__(self): disks = list(self.disks) partitions = list(self.partitions) # We will need to check partitions against this if allow_current # is false. current = self.diskinfo.by_kern_guid() # Given a single partition, check if the partition is valid. if len(partitions) == 1 and len(disks) == 0: part = partitions[0] self.logger.info("Checking if target partition is writable.") if part.path is not None and not part.path.is_block_device(): raise NotABlockDeviceError(part.path) self.logger.info("Checking if targeted partition's disk is writable.") if not part.disk.path.is_block_device(): raise NotABlockDeviceError(part.disk.path) self.logger.info( "Checking if targeted partition's type is Chrome OS Kernel." ) if part not in part.disk.cros_partitions(): raise NotCrosPartitionError(part) self.logger.info( "Checking if targeted partition is currently booted one." ) if current is not None and not self.allow_current: if part.path == current.path: raise BootedPartitionError(part) self.logger.info( "Checking if targeted partition is bigger than given " "minimum size." ) if self.min_size is not None and part.size < self.min_size: raise PartitionSizeTooSmallError( part, part.size, self.min_size, ) # For arguments which are disks, search all their partitions. # If no disks or partitions were given, search bootable disks. # Search all disks if explicitly asked. if disks or not partitions or self.all_disks: partitions += depthchargectl.list( disks=disks, all_disks=self.all_disks, root=self.root, root_mountpoint=self.root_mountpoint, boot_mountpoint=self.boot_mountpoint, config=self.config, board=self.board, tmpdir=self.tmpdir / "list", images_dir=self.images_dir, vboot_keyblock=self.vboot_keyblock, vboot_public_key=self.vboot_public_key, vboot_private_key=self.vboot_private_key, kernel_cmdline=self.kernel_cmdline, ignore_initramfs=self.ignore_initramfs, verbosity=self.verbosity, ) good_partitions = [] for p in partitions: if self.min_size is not None and p.size < self.min_size: self.logger.warning( "Skipping partition '{}' as too small." .format(p) ) continue if current is not None and not self.allow_current: if p.path == current.path: self.logger.info( "Skipping currently booted partition '{}'." .format(p) ) continue self.logger.info("Partition '{}' is usable.".format(p)) good_partitions.append( CrosPartition(p.disk.path, partno=p.partno), ) # Get the least-successful, least-priority, least-tries-left # partition in that order of preference. 
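# min() relies on CrosPartition's ordering to pick that partition.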
if good_partitions: return min(good_partitions) else: return NoUsableCrosPartition() global_options = depthchargectl.global_options config_options = depthchargectl.config_options depthcharge-tools-0.6.2/depthcharge_tools/depthchargectl/_write.py000066400000000000000000000240771444761253100254760ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools depthchargectl write subcommand # Copyright (C) 2020-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. import argparse import logging import os import subprocess from pathlib import Path from depthcharge_tools import ( __version__, ) from depthcharge_tools.utils.argparse import ( Command, Argument, Group, CommandExit, ) from depthcharge_tools.utils.platform import ( KernelEntry, installed_kernels, ) from depthcharge_tools.depthchargectl import depthchargectl class ImageBuildError(CommandExit): def __init__(self, kernel_version=None): self.kernel_version = kernel_version if kernel_version is None: message = "Failed to build depthcharge image." else: message = ( "Failed to build depthcharge image for kernel version '{}'." .format(kernel_version) ) super().__init__(message=message) class NotBootableImageError(CommandExit): def __init__(self, image): self.image = image super().__init__( "Image '{}' is not bootable on this board." .format(image) ) class NoUsableCrosPartitionError(CommandExit): def __init__(self): super().__init__( "No usable Chrome OS Kernel partition found." ) @depthchargectl.subcommand("write") class depthchargectl_write( depthchargectl, prog="depthchargectl write", usage="%(prog)s [options] [KERNEL-VERSION | IMAGE]", add_help=False, ): """Write an image to a ChromeOS kernel partition.""" _logger = depthchargectl._logger.getChild("write") config_section = "depthchargectl/write" @depthchargectl.board.copy() def board(self, codename=""): """Assume we're running on the specified board""" # We can write images to partitions without knowing the board. # The image argument will become required if this returns None. try: return super().board except Exception as err: self.logger.warning(err) return None @Group def positionals(self): """Positional arguments""" if self.image is not None and self.kernel_version is not None: raise ValueError( "Image and kernel_version arguments are mutually exclusive" ) arg = self.image or self.kernel_version # Turn arg into a relevant KernelEntry if it's a kernel version # or a Path() if not if isinstance(arg, str): arg = max( (k for k in installed_kernels() if k.release == arg), default=Path(arg).resolve(), ) if isinstance(arg, KernelEntry): self.image = None self.kernel_version = arg elif isinstance(arg, Path): self.image = arg self.kernel_version = None if self.board is None and self.image is None: raise ValueError( "An image file is required when no board is specified." 
) @positionals.add @Argument(dest=argparse.SUPPRESS, nargs=0) def kernel_version(self, kernel_version): """Installed kernel version to write to disk.""" return kernel_version @positionals.add @Argument def image(self, image=None): """Depthcharge image to write to disk.""" return image @Group def options(self): """Options""" @options.add @Argument("-f", "--force", force=True) def force(self, force=False): """Write image even if it cannot be verified.""" return force @options.add @Argument("-t", "--target", metavar="DISK|PART") def target(self, target): """Specify a disk or partition to write to.""" return target @options.add @Argument("--no-prioritize", prioritize=False) def prioritize(self, prioritize=True): """Don't set any flags on the partition.""" return prioritize @options.add @Argument("--allow-current", allow=True) def allow_current(self, allow=False): """Allow overwriting the currently booted partition.""" return allow def __call__(self): if self.board is None: self.logger.warning( "Using given image '{}' without board-specific checks." .format(self.image) ) image = self.image elif self.image is not None: self.logger.info("Using given image '{}'." .format(self.image)) image = self.image try: depthchargectl.check( image=image, config=self.config, board=self.board, tmpdir=self.tmpdir / "check", images_dir=self.images_dir, vboot_keyblock=self.vboot_keyblock, vboot_public_key=self.vboot_public_key, vboot_private_key=self.vboot_private_key, kernel_cmdline=self.kernel_cmdline, ignore_initramfs=self.ignore_initramfs, verbosity=self.verbosity, ) except Exception as err: if self.force: self.logger.warning( "Image '{}' is not bootable on this board, " "continuing due to --force." .format(image) ) else: raise NotBootableImageError(image) from err else: # No image given, try creating one. try: image = depthchargectl.build_( kernel_version=self.kernel_version, root=self.root, root_mountpoint=self.root_mountpoint, boot_mountpoint=self.boot_mountpoint, config=self.config, board=self.board, tmpdir=self.tmpdir / "build", images_dir=self.images_dir, vboot_keyblock=self.vboot_keyblock, vboot_public_key=self.vboot_public_key, vboot_private_key=self.vboot_private_key, kernel_cmdline=self.kernel_cmdline, ignore_initramfs=self.ignore_initramfs, verbosity=self.verbosity, ) except Exception as err: raise ImageBuildError(self.kernel_version) from err # We don't want target to unconditionally avoid the current # partition since we will also check that here. But whatever we # choose must be bigger than the image we'll write to it. self.logger.info("Searching disks for a target partition.") try: target = depthchargectl.target( disks=[self.target] if self.target else [], min_size=image.stat().st_size, allow_current=self.allow_current, root=self.root, root_mountpoint=self.root_mountpoint, boot_mountpoint=self.boot_mountpoint, config=self.config, board=self.board, tmpdir=self.tmpdir / "target", images_dir=self.images_dir, vboot_keyblock=self.vboot_keyblock, vboot_public_key=self.vboot_public_key, vboot_private_key=self.vboot_private_key, kernel_cmdline=self.kernel_cmdline, ignore_initramfs=self.ignore_initramfs, verbosity=self.verbosity, ) except Exception as err: raise NoUsableCrosPartitionError() from err if target is None: raise NoUsableCrosPartitionError() self.logger.info("Targeted partition '{}'.".format(target)) # Check and warn if we targeted the currently booted partition, # as that usually means it's the only partition. 
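# by_kern_guid() resolves the currently booted partition, or None if it can't be determined.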
current = self.diskinfo.by_kern_guid() if current is not None and self.allow_current and target.path == current.path: self.logger.warning( "Overwriting the currently booted partition '{}'. " "This might make your system unbootable." .format(target) ) self.logger.info( "Writing image '{}' to partition '{}'." .format(image, target) ) target.write_bytes(image.read_bytes()) self.logger.warning( "Wrote image '{}' to partition '{}'." .format(image, target) ) if self.prioritize: self.logger.info( "Setting '{}' as the highest-priority bootable part." .format(target) ) try: depthchargectl.bless( partition=target, oneshot=True, root=self.root, root_mountpoint=self.root_mountpoint, boot_mountpoint=self.boot_mountpoint, config=self.config, board=self.board, tmpdir=self.tmpdir / "bless", images_dir=self.images_dir, vboot_keyblock=self.vboot_keyblock, vboot_public_key=self.vboot_public_key, vboot_private_key=self.vboot_private_key, kernel_cmdline=self.kernel_cmdline, ignore_initramfs=self.ignore_initramfs, verbosity=self.verbosity, ) except Exception as err: raise CommandExit( "Failed to set '{}' as the highest-priority bootable part." .format(target) ) from err self.logger.warning( "Set partition '{}' as next to boot." .format(target) ) return target global_options = depthchargectl.global_options config_options = depthchargectl.config_options depthcharge-tools-0.6.2/depthcharge_tools/mkdepthcharge.py000066400000000000000000001132671444761253100240320ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools mkdepthcharge program # Copyright (C) 2020-2023 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. import argparse import logging import os import platform import struct import subprocess import sys import tempfile from mmap import mmap from pathlib import Path from depthcharge_tools import __version__ from depthcharge_tools.utils.argparse import ( Command, Argument, Group, ) from depthcharge_tools.utils.pathlib import ( copy, decompress, ) from depthcharge_tools.utils.platform import ( Architecture, vboot_keys, ) from depthcharge_tools.utils.string import ( parse_bytesize, ) from depthcharge_tools.utils.subprocess import ( mkimage, vbutil_kernel, lz4, lzma, fdtget, fdtput, ) class mkdepthcharge( Command, prog="mkdepthcharge", usage="%(prog)s [options] -o FILE [--] [VMLINUZ] [INITRAMFS ...] [DTB ...]", add_help=False, ): """Build boot images for the ChromeOS bootloader.""" _logger = logging.getLogger(__name__) @property def logger(self): # Set verbosity before logging messages self.verbosity return self._logger # Inputs can have the same name and cause collisions in tmpdir. def _tempfile(self, name): f = self.tmpdir / name if not f.exists(): return f for i in range(9999): f = f.with_name("{}-idx{:04}".format(name, i)) if not f.exists(): return f raise FileExistsError(self.tmpdir / name) # Debian packs the arm64 kernel uncompressed, but the bindeb-pkg # kernel target packs it as gzip. So we'll try to decompress inputs. def _decompress(self, f): self.logger.info( "Trying to decompress file '{}'." .format(f) ) decomp = decompress(f, self._tempfile(f.name)) if decomp is not None: self.logger.info( "Decompressed input '{}' as '{}'." .format(f, decomp) ) return decomp or f # Copy inputs to tmpdir because mkimage wants modifiable files. 
def _copy(self, f): if self.tmpdir not in f.parents: f = copy(f, self._tempfile(f.name)) f.chmod(0o755) return f @Group def input_files(self): """Input files""" @input_files.add @Argument(help=argparse.SUPPRESS) def files(self, *files): """Input files (vmlinuz, initramfs, dtbs)""" vmlinuz = [] initramfs = [] dtbs = [] files = [Path(f).resolve() for f in files] for f in files: # Decompress files to run detection on content. decomp = self._decompress(f) with decomp.open("rb") as f_: head = f_.read(4096) # Portable Executable and ELF files if head.startswith(b"MZ") or head.startswith(b"ELF"): self.logger.info( "File '{}' identified as a vmlinuz." .format(f) ) vmlinuz.append(decomp) # Cpio files elif ( head.startswith(b"070701") or head.startswith(b"070702") or head.startswith(b"070707") ): self.logger.info( "File '{}' identified as an initramfs." .format(f) ) # Keep initramfs compressed initramfs.append(f) # Avoid name collision when copying initramfs to tmpdir if decomp != f: decomp.unlink() # Device-tree blobs elif head.startswith(b"\xd0\x0d\xfe\xed"): self.logger.info( "File '{}' identified as a device-tree blob." .format(f) ) dtbs.append(decomp) # Failed to detect, assume in the order in usage string elif len(vmlinuz) == 0: self.logger.info( "Assuming file '{}' is a vmlinuz." .format(f) ) vmlinuz.append(decomp) elif len(initramfs) == 0: self.logger.info( "Assuming file '{}' is an initramfs." .format(f) ) # Keep initramfs compressed initramfs.append(f) else: self.logger.info( "Assuming file '{}' is a device-tree blob." .format(f) ) dtbs.append(decomp) return { "vmlinuz": vmlinuz, "initramfs": initramfs, "dtbs": dtbs, } @input_files.add @Argument("-d", "--vmlinuz", nargs=1) def vmlinuz(self, vmlinuz=None): """Kernel executable""" files = self.files["vmlinuz"] if vmlinuz is not None: vmlinuz = Path(vmlinuz).resolve() self.logger.info( "Using file '{}' as a vmlinuz." .format(vmlinuz) ) files = [self._decompress(vmlinuz), *files] if len(files) == 0: raise ValueError( "vmlinuz argument is required." ) elif len(files) > 1: raise ValueError( "Can't build with multiple kernels" ) vmlinuz = self._copy(files[0]) return vmlinuz @input_files.add @Argument("-i", "--initramfs", metavar="INITRAMFS", nargs="+") def initramfs(self, *files): """Ramdisk images""" files = [ *(Path(f).resolve() for f in files if f is not None), *self.files["initramfs"], ] for file in files: self.logger.info( "Using file '{}' as an initramfs." .format(file) ) if len(files) > 1: self.logger.info( "Concatenating initramfs files as a single initramfs." ) initramfs = self._tempfile("merged-initramfs.img") with initramfs.open('xb') as merge: for file in files: merge.write(file.read_bytes()) elif len(files) == 1: initramfs = self._copy(files[0]) elif not files: initramfs = None return initramfs @input_files.add @Argument("-b", "--dtbs", metavar="DTB", nargs="+") def dtbs(self, *dtbs): """Device-tree binary file""" files = self.files["dtbs"] dtbs = [Path(dtb).resolve() for dtb in dtbs] for dtb in dtbs: self.logger.info( "Using file '{}' as a device-tree blob." .format(dtb) ) dtbs = [self._decompress(dtb) for dtb in dtbs] dtbs = [self._copy(dtb) for dtb in (*dtbs, *files)] return dtbs @Group def options(self): """Options""" # Check incompatible combinations if self.image_format == "zimage": if self.compress not in (None, "none"): raise ValueError( "Compress argument not supported with zimage format." ) if self.name is not None: raise ValueError( "Name argument not supported with zimage format." 
) if self.dtbs: raise ValueError( "Device tree files not supported with zimage format." ) @options.add @Argument("-h", "--help", action="help") def print_help(self): """Show this help message.""" # type(self).parser.print_help() @options.add @Argument( "-V", "--version", action="version", version="depthcharge-tools %(prog)s {}".format(__version__), ) def version(self): """Print program version.""" return type(self).version.version % {"prog": type(self).prog} @options.add @Argument("-v", "--verbose", count=True) def verbosity(self, verbosity=0): """Print more detailed output.""" level = logging.WARNING - int(verbosity) * 10 self._logger.setLevel(level) return verbosity @options.add @Argument("-o", "--output", required=True) def output(self, file_): """Write resulting image to FILE.""" # Output path is obviously required if file_ is None: raise ValueError( "Output argument is required." ) return Path(file_).resolve() @options.add @Argument("--tmpdir", nargs=1) def tmpdir(self, dir_=None): """Directory to keep temporary files.""" if dir_ is None: dir_ = tempfile.TemporaryDirectory( prefix="mkdepthcharge-", ) dir_ = self.exitstack.enter_context(dir_) dir_ = Path(dir_) os.makedirs(dir_, exist_ok=True) self.logger.debug("Working in temp dir '{}'.".format(dir_)) return dir_ @options.add @Argument("-A", "--arch", nargs=1) def arch(self, arch=None): """Architecture to build for.""" # We should be able to make an image for other architectures, but # the default should be whatever board the kernel is for. if arch is None: with self.vmlinuz.open("rb") as f: head = f.read(4096) if head[0x202:0x206] == b"HdrS": arch = Architecture("x86") elif head[0x38:0x3c] == b"ARM\x64": arch = Architecture("arm64") elif head[0x34:0x38] == b"\x45\x45\x45\x45": arch = Architecture("arm") self.logger.info( "Assuming CPU architecture '{}' from vmlinuz file." .format(arch) ) elif arch not in Architecture.all: raise ValueError( "Can't build images for unknown architecture '{}'" .format(arch) ) return Architecture(arch) @options.add @Argument("--format", nargs=1) def image_format(self, format_=None): """Kernel image format to use.""" # Default to architecture-specific formats. if format_ is None: if self.arch in Architecture.arm: format_ = "fit" elif self.arch in Architecture.x86: format_ = "zimage" self.logger.info("Assuming image format '{}'.".format(format_)) if format_ not in ("fit", "zimage"): raise ValueError( "Can't build images for unknown image format '{}'" .format(format_) ) return format_ @options.add @Argument("--kernel-start", nargs=1) def kernel_start(self, addr=None): """Start of depthcharge kernel buffer in memory.""" if addr is not None: return parse_bytesize(addr) if self.arch in Architecture.x86: return 0x100000 @options.add @Argument( "--no-pad-vmlinuz", pad=False, help=argparse.SUPPRESS, ) @Argument( "--pad-vmlinuz", pad=True, help="Pad vmlinuz for safe decompression.", ) def pad_vmlinuz(self, pad=None): """Pad vmlinuz for safe decompression.""" if pad is None: pad = ( self.image_format == "fit" and self.patch_dtbs ) return bool(pad) @Group def fit_options(self): """FIT image options""" @fit_options.add @Argument("-C", "--compress", nargs=1) def compress(self, type_=None): """Compress vmlinuz file before packing.""" # We need to pass "-C none" to mkimage or it assumes gzip. if type_ is None and self.image_format == "fit": type_ = "none" if type_ not in (None, "none", "lz4", "lzma"): raise ValueError( "Compression type '{}' is not supported." 
.format(type_) ) return type_ @fit_options.add @Argument("-n", "--name", nargs=1) def name(self, desc=None): """Description of vmlinuz to put in the FIT.""" # If we don't pass "-n " to mkimage, the kernel image # description is left blank. Other images get "unavailable" # as their description, so it looks better if we match that. if desc is None and self.image_format == "fit": desc = "unavailable" return desc @fit_options.add @Argument("--ramdisk-load-address", nargs=1) def ramdisk_load_address(self, addr=None): """Add load address to FIT ramdisk image section.""" if addr is not None: return parse_bytesize(addr) return None @fit_options.add @Argument( "--no-patch-dtbs", patch_dtbs=False, help=argparse.SUPPRESS, ) @Argument( "--patch-dtbs", patch_dtbs=True, help="Add linux,initrd properties to device-tree binary files.", ) def patch_dtbs(self, patch_dtbs=False): """Add linux,initrd properties to device-tree binary files.""" if ( patch_dtbs and self.kernel_start is None and self.ramdisk_load_address is None ): raise ValueError( "The kernel buffer start address or a ramdisk load address " "is required to patch DTB files for initramfs support." ) return bool(patch_dtbs) @Group def zimage_options(self): """zImage format options""" @zimage_options.add @Argument( "--no-set-init-size", init_size=False, help="Don't set init_size boot param.", ) @Argument( "--set-init-size", init_size=True, help=argparse.SUPPRESS, ) def set_init_size(self, init_size=None): """Set init_size boot param for safe decompression.""" if init_size is None: return ( self.image_format == "zimage" and self.initramfs is not None ) return bool(init_size) @Group def vboot_options(self): """Depthcharge image options""" keydirs = [] if self.keydir is not None: keydirs += [self.keydir] # If any of the arguments are given, search nearby for others if self.keyblock is not None: keydirs += [self.keyblock.parent] if self.signprivate is not None: keydirs += [self.signprivate.parent] if self.signpubkey is not None: keydirs += [self.signpubkey.parent] if None in (self.keyblock, self.signprivate, self.signpubkey): for d in sorted(set(keydirs), key=keydirs.index): self.logger.info( "Searching '{}' for vboot keys." .format(d) ) # Defaults to distro-specific paths for necessary files. keydir, keyblock, signprivate, signpubkey = vboot_keys(*keydirs) if keydir: self.logger.info( "Defaulting to keys from '{}' for missing arguments." .format(keydir) ) if self.keyblock is None: self.keyblock = keyblock if self.signprivate is None: self.signprivate = signprivate if self.signpubkey is None: self.signpubkey = signpubkey # We might still not have the vboot keys after all that. if self.keyblock is None: raise ValueError( "Couldn't find a usable keyblock file." ) elif not self.keyblock.is_file(): raise ValueError( "Keyblock file '{}' does not exist." .format(self.keyblock) ) else: self.logger.info( "Using keyblock file '{}'." .format(self.keyblock) ) if self.signprivate is None: raise ValueError( "Couldn't find a usable signprivate file." ) elif not self.signprivate.is_file(): raise ValueError( "Signprivate file '{}' does not exist." .format(self.signprivate) ) else: self.logger.info( "Using signprivate file '{}'." .format(self.signprivate) ) if self.signpubkey is None: self.logger.warning( "Couldn't find a usable signpubkey file." ) elif not self.signpubkey.is_file(): self.logger.warning( "Signpubkey file '{}' does not exist." .format(self.keyblock) ) self.signpubkey = None else: self.logger.info( "Using signpubkey file '{}'." 
.format(self.signpubkey) ) @vboot_options.add @Argument("-c", "--cmdline", append=True, nargs="+") def cmdline(self, *cmd): """Command-line parameters for the kernel.""" # If the cmdline is empty vbutil_kernel returns an error. We can use # "--" instead of putting a newline or a space into the cmdline. if len(cmd) == 0: cmdline = "--" elif len(cmd) == 1 and isinstance(cmd[0], str): cmdline = cmd[0] elif isinstance(cmd, (list, tuple)): cmdline = " ".join(cmd) # The firmware replaces any '%U' in the kernel cmdline with the # PARTUUID of the partition it booted from. Chrome OS uses # kern_guid=%U in their cmdline and it's useful information, so # prepend it to cmdline. if (self.kern_guid is None) or self.kern_guid: cmdline = " ".join(("kern_guid=%U", cmdline)) return cmdline @vboot_options.add @Argument( "--no-kern-guid", kern_guid=False, help="Don't prepend kern_guid=%%U to the cmdline." ) @Argument( "--kern-guid", kern_guid=True, help=argparse.SUPPRESS, ) def kern_guid(self, kern_guid=True): """Prepend kern_guid=%%U to the cmdline.""" return kern_guid @vboot_options.add @Argument("--bootloader", nargs=1) def bootloader(self, file_=None): """Bootloader stub binary to use.""" if file_ is not None: file_ = Path(file_).resolve() if ( self.image_format == "zimage" and self.initramfs is not None and file_ is not None ): raise ValueError( "Can't build images with both initramfs and " "bootloader stub for zimage format." ) return file_ @vboot_options.add @Argument("--keydir") def keydir(self, dir_): """Directory containing vboot keys to use.""" if dir_ is not None: dir_ = Path(dir_).resolve() return dir_ @vboot_options.add @Argument("--keyblock") def keyblock(self, file_): """The key block file (.keyblock).""" if file_ is not None: file_ = Path(file_).resolve() return file_ @vboot_options.add @Argument("--signprivate") def signprivate(self, file_): """Private key (.vbprivk) to sign the image.""" if file_ is not None: file_ = Path(file_).resolve() return file_ @vboot_options.add @Argument("--signpubkey") def signpubkey(self, file_): """Public key (.vbpubk) to verify the image.""" if file_ is not None: file_ = Path(file_).resolve() return file_ def __call__(self): vmlinuz = self.vmlinuz initramfs = self.initramfs bootloader = self.bootloader dtbs = self.dtbs tmpdir = self.tmpdir if bootloader is not None: bootloader = copy(bootloader, tmpdir) # Depthcharge on arm64 with FIT supports these two compressions. if self.compress == "lz4": self.logger.info("Compressing kernel with lz4.") vmlinuz = lz4.compress(vmlinuz, self._tempfile("vmlinuz.lz4")) elif self.compress == "lzma": self.logger.info("Compressing kernel with lzma.") vmlinuz = lzma.compress(vmlinuz, self._tempfile("vmlinuz.lzma")) elif self.compress not in (None, "none"): fmt = "Compression type '{}' is not supported." msg = fmt.format(compress) raise ValueError(msg) # vbutil_kernel --config argument wants cmdline as a file. cmdline_file = self._tempfile("kernel.args") cmdline_file.write_text(self.cmdline) # vbutil_kernel --bootloader argument is mandatory, but it's # unused in depthcharge except as a multiboot ramdisk. Prepare # this empty file as its replacement where necessary. empty = self._tempfile("empty.bin") empty.write_bytes(bytes(512)) # The kernel decompression overwrites parts of the buffer we # control while decompressing itself. We need to make sure we # don't place initramfs in that range. For that, we need to know # how offsets in file correspond to addresses in memory. 
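# The packed image puts 0x10000 bytes of keyblock/preamble before the kernel data, so file offset 0x10000 corresponds to kernel_start in memory; these helpers convert between the two.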
def addr_to_offs(addr, load_addr=self.kernel_start): return addr - load_addr + 0x10000 def offs_to_addr(offs, load_addr=self.kernel_start): return offs + load_addr - 0x10000 def align_up(size, align=0x1000): return ((size + align - 1) // align) * align # Size for a small padding, sometimes necessary in some # places for unknown reasons, added and set empirically. small_pad = 0x40000 if self.image_format == "fit": fit_image = self._tempfile("depthcharge.fit") initramfs_args = [] if initramfs is not None: initramfs_args += ["-i", initramfs] dtb_args = [] for dtb in dtbs: dtb_args += ["-b", dtb] # The subimage nodes can be @1 or -1. def subimage_by_type(fit_image, subimage_type): for subimage in fdtget.subnodes(fit_image, "/images"): node = "/images/{}".format(subimage) try: if fdtget.get(fit_image, node, "type") == subimage_type: return node except: continue # On later 32-bit ARM Chromebooks, the KERNEL_START address # can be very close to the where kernel decompresses itself # that the process overwrites the initramfs. The device-tree # is luckily copied away before then. We need to add some # vmlinuz padding to prevent this. if initramfs is not None and self.pad_vmlinuz: # We need the decompressed kernel size, not easy to get. # Try to find the compressed vmlinux inside vmlinuz, # then try to decompress it. data = vmlinuz.read_bytes() vmlinuz_size = len(data) decomp_size = -1 for fmt, magic in { "gzip": b'\x1f\x8b\x08', "xz": b'\xfd7zXZ\x00', "zstd": b'(\xb5/\xfd', "lzma": b'\x5d\x00\x00\x00', "lz4": b'\02!L\x18', "bzip2": b'BZh', "lzop": b'\x89\x4c\x5a', }.items(): offs = data.find(magic) while 0 < offs < vmlinuz_size: decomp = decompress(data[offs:], partial=True) if decomp: self.logger.info( "Found {} at {:#x} in vmlinuz, with size {:#x}." .format(fmt, offs, len(decomp)) ) decomp_size = max(decomp_size, len(decomp)) offs = data.find(magic, offs + 1) if decomp_size == -1: raise ValueError( "Couldn't find decompressed kernel inside vmlinuz." ) self.logger.info( "Vmlinuz size is {:#x}, {:#x} decompressed." .format(vmlinuz_size, decomp_size) ) # Decompression starts at start of physical memory, # calculated per AUTO_ZRELADDR. But first kernel copies # itself after where the decompressed copy would end. decomp_addr = self.kernel_start & 0xf8000000 safe_initrd_start = ( decomp_addr + decomp_size + vmlinuz_size + small_pad ) initrd_start = ( self.kernel_start + vmlinuz_size + sum(dtb.stat().st_size for dtb in self.dtbs) ) if initrd_start < safe_initrd_start: pad_to = align_up( vmlinuz_size + (safe_initrd_start - initrd_start) ) self.logger.info( "Padding vmlinuz to {:#x}." .format(pad_to) ) with vmlinuz.open("r+b") as f, mmap(f.fileno(), 0) as data: data.resize(pad_to) # The later 32-bit ARM Chromebooks use Depthcharge, but # their stock versions don't have the code to support FIT # ramdisks. But since we know the fixed KERNEL_START we can # deduce where the initramfs will be, and inject its address # into the DTBs the way Linux expects bootloaders to do. if initramfs is not None and self.patch_dtbs: # We'll probably never need this, as only old U-Boot builds # need a ramdisk load address and those can handle the # initrd properties fine. if self.ramdisk_load_address: initrd_start = self.ramdisk_load_address initrd_end = initrd_start + initramfs.stat().st_size else: # Allocate space for the properties we want to set, # adding them later would shift things around. 
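# Write placeholder zeros for now; the real addresses are patched in after measuring the temporary FIT image below.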
self.logger.info("Preparing dtb files for initramfs support.") for dtb in dtbs: fdtput.put(dtb, "/chosen", "linux,initrd-start", 0) fdtput.put(dtb, "/chosen", "linux,initrd-end", 0) # Make a temporary image and search for the initramfs # inside it, because I don't want to risk a wrong # estimate and don't want to mess with pylibfdt. self.logger.info("Packing files as temp FIT image:") tmp_image = self._tempfile("depthcharge.fit.tmp") proc = mkimage( "-f", "auto", "-A", self.arch.mkimage, "-T", "kernel", "-O", "linux", "-C", self.compress, "-n", self.name, *initramfs_args, *dtb_args, "-d", vmlinuz, tmp_image, ) self.logger.info(proc.stdout) # Mkimage breaks the config node key with -T kernel_noload. # Apparently this shifts things around as well, so... self.logger.info("Patching temp FIT for kernel_noload type.") fdtput.put( tmp_image, subimage_by_type(tmp_image, "kernel"), "type", "kernel_noload", ) with tmp_image.open("r+b") as f, mmap(f.fileno(), 0) as data: initrd_offset = data.find(initramfs.read_bytes()) self.logger.info( "Initramfs is at offset {:#x} in FIT image." .format(initrd_offset) ) initrd_start = initrd_offset + self.kernel_start initrd_end = initrd_start + initramfs.stat().st_size self.logger.info( "Initramfs should be at address {:#x} - {:#x} in memory." .format(initrd_start, initrd_end) ) self.logger.info("Patching dtb files for initramfs support.") for dtb in dtbs: fdtput.put(dtb, "/chosen", "linux,initrd-start", initrd_start) fdtput.put(dtb, "/chosen", "linux,initrd-end", initrd_end) self.logger.info("Packing files as FIT image:") proc = mkimage( "-f", "auto", "-A", self.arch.mkimage, "-T", "kernel", "-O", "linux", "-C", self.compress, "-n", self.name, *initramfs_args, *dtb_args, "-d", vmlinuz, fit_image, ) self.logger.info(proc.stdout) # Earlier 32-bit ARM Chromebooks use U-Boot, which needs a # usable load address for the FIT ramdisk image section. if initramfs is not None and self.ramdisk_load_address: self.logger.info("Patching FIT for ramdisk load address.") fdtput.put( fit_image, subimage_by_type(fit_image, "ramdisk"), "load", self.ramdisk_load_address, ) # Mkimage breaks the config node key with -T kernel_noload. self.logger.info("Patching FIT for kernel_noload type.") fdtput.put( fit_image, subimage_by_type(fit_image, "kernel"), "type", "kernel_noload", ) if ( initramfs is not None and self.patch_dtbs and self.ramdisk_load_address is None ): with fit_image.open("r+b") as f, mmap(f.fileno(), 0) as data: if initrd_offset != data.find(initramfs.read_bytes()): raise RuntimeError( "Initramfs FIT offset changed after rebuild." ) self.logger.info("Packing files as depthcharge image.") proc = vbutil_kernel( "--version", "1", "--arch", self.arch.vboot, "--vmlinuz", fit_image, "--config", cmdline_file, "--bootloader", bootloader or empty, "--keyblock", self.keyblock, "--signprivate", self.signprivate, "--pack", self.output, ) self.logger.info(proc.stdout) elif self.image_format == "zimage" and initramfs is None: self.logger.info("Packing files as depthcharge image.") proc = vbutil_kernel( "--version", "1", "--arch", self.arch.vboot, "--vmlinuz", vmlinuz, "--config", cmdline_file, "--bootloader", bootloader or empty, "--keyblock", self.keyblock, "--signprivate", self.signprivate, "--pack", self.output, ) self.logger.info(proc.stdout) elif self.image_format == "zimage": # bzImage header has the address the kernel will decompress # to, and the amount of memory it needs there to work. # See Documentation/x86/boot.rst in Linux tree for offsets. 
with vmlinuz.open("r+b") as f, mmap(f.fileno(), 0) as data: if data[0x202:0x206] != b"HdrS": raise ValueError( "Vmlinuz file is not a Linux kernel bzImage." ) pref_address, init_size = struct.unpack( " low_usable: pad_to = align_up(addr_to_offs(pref_address + init_size)) if pad_to > data.size(): self.logger.info( "Padding vmlinuz to size {:#x}" .format(pad_to) ) data.resize(pad_to) # vbutil_kernel picks apart the vmlinuz in ways I don't # really want to reimplement right now, so just call it. self.logger.info("Packing files as temporary image.") temp_img = self._tempfile("temp.img") proc = vbutil_kernel( "--version", "1", "--arch", self.arch.vboot, "--vmlinuz", vmlinuz, "--config", cmdline_file, "--bootloader", initramfs, "--keyblock", self.keyblock, "--signprivate", self.signprivate, "--pack", temp_img, ) self.logger.info(proc.stdout) # Do binary editing for now, until I get time to write # parsers for vboot_reference structs and kernel headers. with temp_img.open("r+b") as f, mmap(f.fileno(), 0) as data: if data[:8] != b"CHROMEOS": raise RuntimeError( "Unexpected output format from vbutil_kernel, " "expected 'CHROMEOS' magic at start of file." ) # File starts with a keyblock and a kernel preamble # immediately afterwards, and padding up to 0x10000. keyblock_size = struct.unpack( " # See COPYRIGHT and LICENSE files for full copyright information. import argparse import copy import contextlib import functools import inspect import logging import sys def filter_action_kwargs(kwargs, action="store"): """ Filter out the kwargs which argparse actions don't recognize. ArgumentParser.add_argument() raises an error on unknown kwargs, filter them out. Also unset any values that are None. """ action = kwargs.get("action", action) allowed = { "action", "dest", "nargs", "const", "default", "type", "choices", "required", "help", "metavar", } if action == "store": pass elif action == "store_const": allowed -= {"nargs", "type", "choices"} elif action == "store_true": allowed -= {"nargs", "const", "type", "choices", "metavar"} elif action == "store_false": allowed -= {"nargs", "const", "type", "choices", "metavar"} elif action == "append": pass elif action == "append_const": allowed -= {"nargs", "type", "choices"} elif action == "count": allowed -= {"nargs", "const", "type", "choices", "metavar"} elif action == "help": allowed = {"action", "dest", "default", "help"} elif action == "version": allowed = {"action", "version", "dest", "default", "help"} elif action is FunctionBindAction: allowed |= {"func", "append", "count", "args", "kwargs"} else: allowed = kwargs.keys() action_kwargs = {} other_kwargs = {} for key, value in kwargs.items(): if key in allowed: action_kwargs[key] = value else: other_kwargs[key] = value return action_kwargs, other_kwargs class FunctionBindAction(argparse.Action): def __init__( self, option_strings, dest, func, append=False, count=False, **kwargs, ): self.signature = inspect.signature(func) self.f_args = kwargs.pop("args", ()) self.f_kwargs = kwargs.pop("kwargs", {}) self.append = append self.count = count if append and kwargs.get("nargs", "*") == 0: raise ValueError( "'{}' action '{}' with append=True must be able to " "consume command-line arguments (nargs must not be 0)" .format(type(self).__name__, dest) ) if count and kwargs.get("nargs", 0) != 0: raise ValueError( "'{}' action '{}' with count=True can't consume any " "command-line arguments (nargs must be 0)" .format(type(self).__name__, dest) ) if count and append: raise ValueError( "'{}' action '{}' arguments 
append=True and count=True " "are incompatible." .format(type(self).__name__, dest) ) if (count or append) and (self.f_args or self.f_kwargs): raise NotImplementedError( "'{}' action '{}' with append=True or count=True " "does not support prebinding arguments yet." ) super_kwargs, _ = filter_action_kwargs(kwargs) super().__init__(option_strings, dest, **super_kwargs) def __call__(self, parser, namespace, values, option_string=None): if self.dest in (None, argparse.SUPPRESS): return current = getattr(namespace, self.dest, None) if self.nargs in ("?", None): values = [values] if self.append: args = current.args if current else () bound = self.signature.bind_partial(*args, *values) elif self.count: n = int(current.args[0]) if current else 0 bound = self.signature.bind(n + 1) else: bound = self.signature.bind( *self.f_args, *values, **self.f_kwargs, ) setattr(namespace, self.dest, bound) class _MethodDecorator: def __init__(self, *args, **kwargs): super().__init__() self.__name__ = None self.__self__ = None self.__func__ = None if args and callable(args[0]): self.wrap(args[0]) args = args[1:] self._args = args self._kwargs = kwargs def __get__(self, instance, owner): if self.__self__ is not None: return self if instance is None: bound = copy.copy(self) bound.__self__ = owner return bound if self.__name__ not in instance.__dict__: if self.__func__ is not None: return self.__func__.__get__(instance, owner) return None return instance.__dict__[self.__name__] def __set__(self, instance, value): instance.__dict__[self.__name__] = value def __set_name__(self, owner, name): self.__name__ = name @property def __call__(self): if self.__self__ is None: return self.wrap if self.__func__ is None: return None call = self.__func__.__get__(self, type(self)) self.__signature__ = inspect.signature(call) return call def wrap(self, func): if not callable(func): raise TypeError("Can't wrap non-callable objects") self.__func__ = func functools.update_wrapper(self, func) self.__name__ = func.__name__ return self def copy(self, *args, **kwargs): # Non-call decorator form if len(args) == 1 and callable(args[0]) and not kwargs: func, *args = args else: func = None args = (*self._args, *args) kwargs = {**self._kwargs, **kwargs} obj = type(self)(*args, **kwargs) obj.__func__ = None if func: obj.wrap(func) return obj class Argument(_MethodDecorator): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.group = None def wrap(self, wrapped): if isinstance(wrapped, Argument): group = Group() self.wrap(wrapped.__func__) group.wrap(wrapped.__func__) group.add(wrapped) group.add(self) # Don't duplicate help message wrapped.__doc__ = None self.__doc__ = None return group if isinstance(wrapped, Group): group = wrapped self.wrap(group.__func__) group.add(self) # Don't duplicate help message self.__doc__ = None return group return super().wrap(wrapped) def copy(self, *args, **kwargs): arg = super().copy(*args, **kwargs) arg.group = self.group return arg def __get__(self, instance, owner): arg = super().__get__(instance, owner) if isinstance(arg, inspect.BoundArguments): inputs = instance.__dict__.pop(self.__name__) func = super().__get__(instance, owner) instance.__dict__[self.__name__] = inputs if callable(func): try: outputs = func(*inputs.args, **inputs.kwargs) except AttributeError as err: raise RuntimeError( "Argument method raised AttributeError" ) from err if inspect.isgenerator(outputs): try: while True: instance.__dict__[self.__name__] = next(outputs) except StopIteration as err: outputs = err.value 
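# Cache the generator's return value as the final result for this attribute.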
instance.__dict__[self.__name__] = outputs return outputs return inputs return arg @property def __auto_kwargs(self): kwargs = {} if self.__func__ is not None: # Bind to anything to skip the "self" argument func = self.__func__.__get__(object(), object) act_kwargs, f_kwargs = filter_action_kwargs( self._kwargs, action=FunctionBindAction, ) if f_kwargs: act_kwargs.setdefault("kwargs", {}) act_kwargs["kwargs"].update(f_kwargs) f_args = act_kwargs.get("args", ()) f_kwargs = act_kwargs.get("kwargs", {}) partial = functools.partial(func, *f_args, **f_kwargs) sig = inspect.signature(partial, follow_wrapped=False) params = sig.parameters kwargs["action"] = FunctionBindAction kwargs["func"] = func else: params = {} nargs_min = 0 nargs_max = 0 var_args = None var_kwargs = None first_arg = next(iter(params.keys())).upper() if params else None for name, param in params.items(): if param.kind == inspect.Parameter.VAR_POSITIONAL: var_args = param continue elif param.kind == inspect.Parameter.VAR_KEYWORD: var_kwargs = param continue elif param.kind == inspect.Parameter.KEYWORD_ONLY: # partial() objs accept kwargs that're already bound if name in f_kwargs: continue if param.default == inspect.Parameter.empty: nargs_min += 1 nargs_max += 1 else: nargs_max += 1 option_strings = self._args kwargs["dest"] = self.__name__ doc = inspect.getdoc(self) if doc is not None: kwargs["help"] = doc.split("\n\n")[0] # attr = Argument() if self.__func__ is None and not option_strings: kwargs["nargs"] = 1 # attr = Argument("--arg") elif self.__func__ is None: kwargs["nargs"] = "?" # func(a, *b) elif (var_args or var_kwargs) and nargs_min > 0: kwargs["nargs"] = "+" kwargs["metavar"] = (var_args or var_kwargs).name # func(*a) elif (var_args or var_kwargs) and nargs_min == 0: kwargs["nargs"] = "*" kwargs["metavar"] = (var_args or var_kwargs).name # func() elif (nargs_min, nargs_max) == (0, 0): kwargs["nargs"] = 0 # func(a=None) elif (nargs_min, nargs_max) == (0, 1): kwargs["nargs"] = "?" 
kwargs["metavar"] = first_arg # func(a=None, b=None) elif nargs_min == 0: kwargs["nargs"] = "*" kwargs["metavar"] = first_arg # func(a, b=None) elif nargs_min != nargs_max: kwargs["nargs"] = "+" kwargs["metavar"] = first_arg # func(a, b) else: kwargs["nargs"] = nargs_min if option_strings: kwargs["metavar"] = tuple(params.keys()) else: kwargs["metavar"] = first_arg def format_metavar(s): return s.replace("-","_").strip(" -_").upper() if "metavar" in kwargs: metavar = kwargs["metavar"] if isinstance(metavar, tuple): metavar = tuple(format_metavar(m) for m in metavar) else: metavar = format_metavar(metavar) kwargs["metavar"] = metavar return kwargs @property def __kwargs(self): kwargs = self.__auto_kwargs kwargs.update(self._kwargs) act_kwargs, f_kwargs = filter_action_kwargs(kwargs) act = act_kwargs.get("action", None) if isinstance(act, type) and issubclass(act, FunctionBindAction): if act_kwargs.get("count", False): act_kwargs["nargs"] = 0 if f_kwargs: act_kwargs.setdefault("kwargs", {}) act_kwargs["kwargs"].update(f_kwargs) return act_kwargs def build(self, parent): option_strings = self._args kwargs = self.__kwargs return parent.add_argument(*option_strings, **kwargs) def __property_from_kwargs(name): @property def prop(self): try: return self.__kwargs[name] except KeyError: raise AttributeError( "Argument '{}' does not pass '{}' to add_argument" .format(self.__name__, name) ) from None @prop.setter def prop(self, value): self._kwargs[name] = value @prop.deleter def prop(self): del self._kwargs[name] return prop @property def name_or_flags(self): return self._args or (self.__name__,) action = __property_from_kwargs("action") nargs = __property_from_kwargs("nargs") default = __property_from_kwargs("default") type = __property_from_kwargs("type") choices = __property_from_kwargs("choices") required = __property_from_kwargs("required") help = __property_from_kwargs("help") metavar = __property_from_kwargs("metavar") dest = __property_from_kwargs("dest") append = __property_from_kwargs("append") count = __property_from_kwargs("count") version = __property_from_kwargs("version") del __property_from_kwargs class Group(_MethodDecorator): def __init__(self, *args, **kwargs): self._arguments = [] self.group = None super().__init__(*args, **kwargs) def wrap(self, wrapped): if isinstance(wrapped, Argument): self.wrap(wrapped.__func__) self.add(wrapped) # Don't duplicate help message wrapped.__doc__ = None return self if isinstance(wrapped, Group): old_doc = self.__doc__ self.wrap(wrapped.__func__) self.add(wrapped) # Don't override help message if old_doc is not None: wrapped.__doc__ = self.__doc__ self.__doc__ = old_doc return self return super().wrap(wrapped) def copy(self, *args, **kwargs): grp = super().copy(*args, **kwargs) for arg in self._arguments: grp.add(arg) return grp def __get__(self, instance, owner): grp = super().__get__(instance, owner) if isinstance(grp, inspect.BoundArguments): inputs = instance.__dict__.pop(self.__name__) func = super().__get__(instance, owner) instance.__dict__[self.__name__] = inputs if callable(func): try: outputs = func(*inputs.args, **inputs.kwargs) except AttributeError as err: raise RuntimeError( "Group method raised AttributeError" ) from err if inspect.isgenerator(outputs): try: while True: instance.__dict__[self.__name__] = next(outputs) except StopIteration as err: outputs = err.value instance.__dict__[self.__name__] = outputs return outputs return inputs return grp @property def __auto_kwargs(self): kwargs = {} doc = inspect.getdoc(self) if doc: 
blocks = doc.split("\n\n") kwargs["title"] = blocks[0].replace("\n", " ") if len(blocks) > 1: kwargs["description"] = "\n\n".join(blocks[1:]) else: kwargs["title"] = self.__name__ return kwargs @property def __kwargs(self): kwargs = self.__auto_kwargs kwargs.update(self._kwargs) return kwargs def build(self, parent): parser = parent.add_argument_group(*self._args, **self.__kwargs) items = list(self._arguments) while items: item = items.pop(0) item.__get__(self.__self__, type(self.__self__)) # Argparse doesn't print help message for nested groups, # so we flatten them here. if isinstance(item, Group): items = list(item._arguments) + items continue item.build(parser) return parser def add(self, arg): self._arguments.append(arg) arg.group = self return arg def __property_from_kwargs(name): @property def prop(self): try: return self.__kwargs[name] except KeyError: raise AttributeError( "Group '{}' does not pass '{}' to add_argument_group" .format(self.__name__, name) ) from None @prop.setter def prop(self, value): self._kwargs[name] = value @prop.deleter def prop(self): del self._kwargs[name] return prop title = __property_from_kwargs("title") description = __property_from_kwargs("description") del __property_from_kwargs class Subparsers(_MethodDecorator): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._commands = [] def wrap(self, func): if isinstance(func, CommandMeta): self.add(func) return self return super().wrap(func) @property def __auto_kwargs(self): kwargs = {} doc = inspect.getdoc(self) if doc: blocks = doc.split("\n\n") kwargs["title"] = blocks[0].replace("\n", " ") kwargs["description"] = "\n\n".join(blocks[1:]) else: kwargs["title"] = self.__name__ kwargs["dest"] = self.__name__ return kwargs @property def __kwargs(self): kwargs = self.__auto_kwargs kwargs.update(self._kwargs) return kwargs def build(self, parent): subparsers = parent.add_subparsers(*self._args, **self.__kwargs) for cmd in self._commands: cmd.build(subparsers) return subparsers def add(self, cmd): self._commands.append(cmd) return cmd def __property_from_kwargs(name): @property def prop(self): try: return self.__kwargs[name] except KeyError: raise AttributeError( "Subparsers '{}' does not pass '{}' to add_subparsers" .format(self.__name__, name) ) from None @prop.setter def prop(self, value): self._kwargs[name] = value @prop.deleter def prop(self): del self._kwargs[name] return prop title = __property_from_kwargs("title") description = __property_from_kwargs("description") dest = __property_from_kwargs("dest") del __property_from_kwargs def command_call(call): def __call__(self, **kwargs): for kwarg, value in kwargs.items(): func = getattr(self, kwarg) if not callable(func): setattr(self, kwarg, value) continue if isinstance(value, inspect.BoundArguments): setattr(self, kwarg, value) continue sig = inspect.signature(func) var_args = None var_kwargs = None for name, param in sig.parameters.items(): if param.kind == inspect.Parameter.VAR_POSITIONAL: var_args = param elif param.kind == inspect.Parameter.VAR_KEYWORD: var_kwargs = param raise NotImplementedError elif param.kind == inspect.Parameter.KEYWORD_ONLY: raise NotImplementedError # func(*a) # func(a, *b) if var_args and isinstance(value, (list, set, tuple)): value = sig.bind(*value) # func() elif len(sig.parameters) == 0: value = sig.bind() if value else None # func(a) # func(a=None) elif len(sig.parameters) == 1: value = sig.bind(value) # func(a, b) # func(a, b=None) # func(a=None, b=None) else: value = sig.bind(*value) 
setattr(self, kwarg, value) for cmd in reversed(type(self).__mro__): if not isinstance(cmd, CommandMeta): continue for arg_name, arg in cmd.arguments(): arg.__self__ = None try: func = arg.__func__.__get__(object(), object) sig = inspect.signature(func) self.__dict__.setdefault(arg_name, sig.bind()) except: self.__dict__.setdefault(arg_name, None) for grp_name, grp in cmd.groups(): grp.__self__ = None for arg in grp._arguments: arg.__self__ = None if not hasattr(arg, "dest") or arg.dest == argparse.SUPPRESS: continue try: func = arg.__func__.__get__(object(), object) sig = inspect.signature(func) self.__dict__.setdefault(arg.dest, sig.bind()) except: self.__dict__.setdefault(arg.dest, None) if grp_name not in ( arg.dest for arg in grp._arguments if hasattr(arg, "dest") ): try: # grp.__get__(self, type(self)) would mutate self func = grp.__func__.__get__(object(), object) sig = inspect.signature(func) self.__dict__.setdefault(grp_name, sig.bind()) except: self.__dict__.setdefault(grp_name, None) for cmd in reversed(type(self).__mro__): if not isinstance(cmd, CommandMeta): continue for arg_name, arg in cmd.arguments(): getattr(self, arg_name, None) for grp_name, grp in cmd.groups(): getattr(self, grp_name, None) return call(self) functools.update_wrapper(__call__, call) return __call__ class CommandExit(Exception): def __init__( self, message=None, output=None, returncode=1, errno=None, ): if errno is not None: if message: errmsg = "[Errno {}] {}".format(errno, message) else: errmsg = "[Errno {}]".format(errno) else: errmsg = message self.returncode = returncode self.output = output self.message = message self.errno = errno super().__init__(errmsg) def __repr__(self): return "{}(output={!r}, returncode={!r}, message={!r})".format( type(self).__qualname__, self.output, self.returncode, self.message, ) class CommandMeta(type): def __new__(mcls, name, bases, attrs, **kwargs): call = attrs.get("__call__", None) if call is not None: attrs["__call__"] = command_call(call) cls = super().__new__(mcls, name, bases, attrs) cls.__custom_kwargs = kwargs return cls @property def __call__(cls): cls_call = cls.__dict__.get("__call__", None) cls_call = getattr(cls_call, "__func__", cls_call) if inspect.isgeneratorfunction(cls_call): return cls.__generator_call else: return cls.__normal_call def __normal_call(cls, *args, **kwargs): instance = super().__call__() raise_exit = kwargs.pop("__raise_CommandExit", False) if hasattr(instance, "__enter__"): with instance as inst: retval = inst(*args, **kwargs) else: retval = instance(*args, **kwargs) if isinstance(retval, CommandExit): if raise_exit: raise retval elif retval.returncode != 0 and hasattr(instance, "_logger"): instance._logger.warning(retval.message) retval = retval.output else: retval = retval.output return retval def __generator_call(cls, *args, **kwargs): instance = super().__call__() raise_exit = kwargs.pop("__raise_CommandExit", False) if hasattr(instance, "__enter__"): with instance as inst: retval = yield from inst(*args, **kwargs) else: retval = yield from instance(*args, **kwargs) if isinstance(retval, CommandExit): if raise_exit: raise retval elif retval.returncode != 0 and hasattr(instance, "_logger"): instance._logger.warning(retval.message) retval = retval.output else: retval = retval.output return retval def main(cls, *argv): if len(argv) == 0: prog, *argv = sys.argv parser = cls.parser args = parser.parse_args(argv) command = getattr(args, "__command") kwargs = { k: v for k, v in vars(args).items() if k != "__command" } root_logger = 
logging.getLogger() root_logger.addHandler(logging.StreamHandler()) root_logger.setLevel(logging.NOTSET) if hasattr(command, "_logger"): logger = command._logger else: logging.getLogger(cls.__module__) def log_error(err): is_debug_level = logger.isEnabledFor(logging.DEBUG) if not is_debug_level and err.__cause__ is not None: log_error(err.__cause__) logger.error(err, exc_info=is_debug_level) def print_out(output): if inspect.isgenerator(output): try: while True: print(next(output)) except StopIteration as err: output = err.value elif isinstance(output, list): print(*output, sep="\n") elif isinstance(output, tuple): print(*output, sep="\n") elif output is not None: print(output) try: output = command(__raise_CommandExit=True, **kwargs) print_out(output) except CommandExit as exit: if exit.returncode != 0: log_error(exit) else: logger.warning(exit) print_out(exit.output) sys.exit(exit.returncode) except Exception as err: log_error(err) sys.exit(1) def items(cls): def order(tup): attr, value = tup return ( isinstance(value, Group), isinstance(value, Argument), isinstance(value, Subparsers), isinstance(value, CommandMeta), ) pairs = ( (k, v) for k, v in vars(cls).items() if not k.startswith("_") ) return sorted(pairs, key=order, reverse=True) def groups(cls): yield from ( (k, v) for k, v in cls.items() if isinstance(v, Group) ) def arguments(cls): yield from ( (k, v) for k, v in cls.items() if isinstance(v, Argument) ) def subparsers(cls): yield from ( (k, v) for k, v in cls.items() if isinstance(v, Subparsers) ) def subcommands(cls): yield from ( (k, v) for k, v in cls.items() if isinstance(v, CommandMeta) ) def subcommand(cls, arg): if isinstance(arg, CommandMeta): name = arg.__name__ while hasattr(cls, name): name = "{}_".format(name) setattr(cls, name, arg) return arg def add_subcommand(cmd): name = arg.replace("-", "_") while hasattr(cls, name): name = "{}_".format(name) setattr(cls, name, cmd) cmd.__name__ = arg return cmd return add_subcommand @property def __auto_kwargs(cls): kwargs = {} doc = inspect.getdoc(cls) if doc is not None: blocks = doc.split("\n\n") for i, block in enumerate(blocks): if block.strip("- ") == "": kwargs["help"] = blocks[0] kwargs["description"] = "\n\n".join(blocks[:i]) kwargs["epilog"] = "\n\n".join(blocks[i+1:]) break else: kwargs["help"] = blocks[0] kwargs["description"] = doc kwargs["argument_default"] = argparse.SUPPRESS kwargs["formatter_class"] = argparse.RawDescriptionHelpFormatter kwargs["conflict_handler"] = "resolve" return kwargs @property def __kwargs(cls): kwargs = cls.__auto_kwargs kwargs.update(cls.__custom_kwargs) return kwargs @property def parser(cls): return cls.__build() def __build(cls, parent=None): kwargs = cls.__kwargs if parent is None: kwargs.pop("help", None) parser = argparse.ArgumentParser(**kwargs) else: parser = parent.add_parser(cls.__name__, **kwargs) subparsers = None for attr, value in cls.items(): if isinstance(value, Group): group = getattr(cls, attr) if group.group is None: group.build(parser) elif isinstance(value, Argument): arg = getattr(cls, attr) if arg.group is None: arg.build(parser) elif isinstance(value, Subparsers): obj = getattr(cls, attr) subparsers = obj.build(parser) elif isinstance(value, CommandMeta): cmd = getattr(cls, attr) cmd.__self__ = cls if subparsers is None: subparsers = parser.add_subparsers() cmd.__build(subparsers) parser.set_defaults(__command=cls) return parser build = __build def __property_from_kwargs(name): @property def prop(cls): if name in cls.__dict__: return cls.__dict__[name] try: 
return cls.__kwargs[name] except KeyError: raise AttributeError( "Command '{}' does not pass '{}' to ArgumentParser" .format(cls.__name__, name) ) from None @prop.setter def prop(cls, value): if name in cls.__dict__: prop = getattr(type(cls), name) delattr(type(cls), name) super().__setattr__(name, value) setattr(type(cls), name, prop) else: cls.__custom_kwargs[name] = value @prop.deleter def prop(cls): if name in cls.__dict__: prop = getattr(type(cls), name) delattr(type(cls), name) super().__delattr__(name) setattr(type(cls), name, prop) else: del cls.__custom_kwargs[name] return prop prog = __property_from_kwargs("prog") usage = __property_from_kwargs("usage") description = __property_from_kwargs("description") epilog = __property_from_kwargs("epilog") parents = __property_from_kwargs("parents") formatter_class = __property_from_kwargs("formatter_class") prefix_chars = __property_from_kwargs("prefix_chars") fromfile_prefix_chars = __property_from_kwargs("fromfile_prefix_chars") argument_default = __property_from_kwargs("argument_default") conflict_handler = __property_from_kwargs("conflict_handler") add_help = __property_from_kwargs("add_help") allow_abbrev = __property_from_kwargs("allow_abbrev") help = __property_from_kwargs("help") del __property_from_kwargs class Command(metaclass=CommandMeta): def __init__(self): self.exitstack = contextlib.ExitStack() def __enter__(self): self.exitstack.__enter__() return self def __exit__(self, exc_type, exc_value, traceback): return self.exitstack.__exit__(exc_type, exc_value, traceback) depthcharge-tools-0.6.2/depthcharge_tools/utils/collections.py000066400000000000000000000142011444761253100246660ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools collections utilities # Copyright (C) 2021-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. import collections # Inheritance for config sections class ConfigDict(collections.OrderedDict): def __getitem__(self, key): super_ = super() if not isinstance(key, str) or "/" not in key: return super_.__getitem__(key) def getitem(key): try: return super_.__getitem__(key) except KeyError: return KeyError def parents(leaf): idx = leaf.find("/") while idx != -1: yield leaf[:idx] idx = leaf.find("/", idx + 1) yield leaf items = list( item for item in reversed([ getitem(p) for p in parents(key) ]) if item != KeyError ) if all(isinstance(i, dict) for i in items): return collections.ChainMap(*items) if items: return items[0] raise KeyError(key) # To write config sections in sort order def SortedDict(key=None): if not callable(key): raise TypeError( "SortedDict argument must be a callable, not {}" .format(type(key).__name__) ) class SortedDict(collections.UserDict): __key = key def __iter__(self): yield from sorted(super().__iter__(), key=type(self).__key) return SortedDict def TypedList(T): if not isinstance(T, type): raise TypeError( "TypedList argument must be a type, not {}" .format(type(T).__name__) ) name = "TypedList.{}List".format(str.title(T.__name__)) class TypedList(collections.UserList): __type = T __name__ = name __qualname__ = name def __init__(self, initlist=None): if initlist is not None: self.__typecheck(*initlist) super().__init__(initlist) def __typecheck(self, *values): if not all(isinstance(value, self.__type) for value in values): raise TypeError( "{} items must be of type {}." 
.format(type(self).__name__, self.__type.__name__) ) def __setitem__(self, idx, value): self.__typecheck(value) return super().__setitem__(idx, value) def __iadd__(self, other): self.__typecheck(*other) return super().__iadd__(other) def append(self, value): self.__typecheck(value) return super().append(value) def insert(self, idx, value): self.__typecheck(value) return super().insert(idx, value) def extend(self, other): self.__typecheck(*other) return super().extend(other) return TypedList class DirectedGraph: def __init__(self): self.__edges = {} def add_edge(self, node, child): self.add_node(node) self.add_node(child) self.__edges[node].add(child) def add_node(self, node): if node not in self.__edges: self.__edges[node] = set() def remove_edge(self, node, child): if node in self.__edges: self.__edges[node].discard(child) def remove_node(self, node): self.__edges.pop(node, None) for k, v in self.__edges.items(): v.discard(node) def replace_node(self, node, replacement, merge=False): if replacement in self.__edges and not merge: raise ValueError( "Replacement node '{}' already in graph." .format(replacement) ) parents = self.parents(node) children = self.children(node) self.remove_node(node) self.add_node(replacement) for p in parents: self.add_edge(p, replacement) for c in children: self.add_edge(replacement, c) def edges(self): return set( (n, c) for n, cs in self.__edges.items() for c in cs ) def nodes(self): return set(self.__edges.keys()) def children(self, *nodes): node_children = set() for node in nodes: node_children.update(self.__edges.get(node, set())) return node_children def parents(self, *nodes): node_parents = set() for parent, children in self.__edges.items(): if children.intersection(nodes): node_parents.add(parent) return node_parents def ancestors(self, *nodes): nodes = set(nodes) ancestors = self.parents(*nodes) tmp = self.parents(*ancestors) while tmp - ancestors: ancestors.update(tmp) tmp = self.parents(*ancestors) return ancestors def descendants(self, *nodes): nodes = set(nodes) descendants = self.children(*nodes) tmp = self.children(*descendants) while tmp - descendants: descendants.update(tmp) tmp = self.children(*descendants) return descendants def leaves(self, *nodes): nodes = set(nodes) leaves = set() if len(nodes) == 0: leaves.update(k for k, v in self.__edges.items() if not v) return leaves leaves = self.leaves() node_leaves = set() while nodes: node_leaves.update(nodes.intersection(leaves)) nodes.difference_update(node_leaves) nodes = self.children(*nodes) return node_leaves def roots(self, *nodes): nodes = set(nodes) roots = set() if len(nodes) == 0: roots.update(self.__edges.keys()) roots.difference_update(*self.__edges.values()) return roots roots = self.roots() node_roots = set() while nodes: node_roots.update(nodes.intersection(roots)) nodes.difference_update(node_roots) nodes = self.parents(*nodes) return node_roots depthcharge-tools-0.6.2/depthcharge_tools/utils/os.py000066400000000000000000000445741444761253100230110ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools os utilities # Copyright (C) 2020-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. 
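# NOTE (editorial illustration, not part of the original sources): a
# minimal usage sketch for the ConfigDict class defined in
# depthcharge_tools/utils/collections.py above. The section names and
# values are hypothetical.
#
# >>> from depthcharge_tools.utils.collections import ConfigDict
# >>> config = ConfigDict()
# >>> config["depthchargectl"] = {"images-dir": "/boot/depthcharge"}
# >>> config["depthchargectl/build"] = {"compress": "lz4"}
# >>> section = config["depthchargectl/build"]  # ChainMap of child and parent
# >>> section["compress"]
# 'lz4'
# >>> section["images-dir"]  # inherited from the "depthchargectl" section
# '/boot/depthcharge'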
import collections import re import shlex from pathlib import Path from depthcharge_tools.utils.collections import ( DirectedGraph, ) from depthcharge_tools.utils.pathlib import ( iterdir, read_lines, ) from depthcharge_tools.utils.platform import ( proc_cmdline, ) from depthcharge_tools.utils.subprocess import ( cgpt, ) class Disks(DirectedGraph): def __init__( self, sys="/sys", dev="/dev", fstab="/etc/fstab", mtab="/etc/mtab", procmounts="/proc/self/mounts", mountinfo="/proc/self/mountinfo", crypttab="/etc/crypttab", ): super().__init__() self._sys = sys = Path(sys) self._dev = dev = Path(dev) self._fstab = fstab = Path(fstab) self._procmounts = procmounts = Path(procmounts) self._mtab = mtab = Path(mtab) self._mountinfo = mountinfo = Path(mountinfo) self._crypttab = crypttab = Path(crypttab) for sysdir in iterdir(sys / "class" / "block"): for device in read_lines(sysdir / "dm" / "name"): self.add_edge(dev / sysdir.name, dev / "mapper" / device) for device in iterdir(sysdir / "slaves"): self.add_edge(dev / device.name, dev / sysdir.name) for device in iterdir(sysdir / "holders"): self.add_edge(dev / sysdir.name, dev / device.name) for device in iterdir(sysdir): if device.name.startswith(sysdir.name): self.add_edge(dev / sysdir.name, dev / device.name) for line in read_lines(crypttab): if line and not line.startswith("#"): fields = shlex.split(line) cryptdev, device = fields[0], fields[1] if device != 'none': cryptdev = dev / "mapper" / cryptdev self.add_edge(device, cryptdev) fstab_mounts = {} for line in read_lines(fstab): if line and not line.startswith("#"): fields = shlex.split(line) device, mount = fields[0], fields[1] if mount != 'none': fstab_mounts[mount] = device procmounts_mounts = {} for line in read_lines(procmounts): if line and not line.startswith("#"): fields = shlex.split(line) device, mount = fields[0], fields[1] device = self.evaluate(device) if device is not None: procmounts_mounts[mount] = device mtab_mounts = {} for line in read_lines(mtab): if line and not line.startswith("#"): fields = shlex.split(line) device, mount = fields[0], fields[1] device = self.evaluate(device) if device is not None: mtab_mounts[mount] = device mountinfo_mounts = {} for line in read_lines(mountinfo): if line and not line.startswith("#"): fields = shlex.split(line) device, fsroot, mount = fields[2], fields[3], fields[4] if fsroot != "/": mountinfo_mounts[mount] = None continue device = self.evaluate(device) if device is not None: mountinfo_mounts[mount] = device mounts = collections.ChainMap( fstab_mounts, mountinfo_mounts, procmounts_mounts, mtab_mounts, ) self._fstab_mounts = fstab_mounts self._procmounts_mounts = procmounts_mounts self._mtab_mounts = mtab_mounts self._mountinfo_mounts = mountinfo_mounts self._mounts = mounts def __getitem__(self, key): return self.evaluate(key) def evaluate(self, device): dev = self._dev sys = self._sys if device is None: return None elif isinstance(device, Path): device = str(device) elif isinstance(device, (Disk, Partition)): device = str(device.path) if device.startswith("ID="): id_ = device[len("ID="):] if not id_: return None device = dev / "disk" / "by-id" / id_ elif device.startswith("LABEL="): label = device[len("LABEL="):] if not label: return None device = dev / "disk" / "by-label" / label elif device.startswith("PARTLABEL="): partlabel = device[len("PARTLABEL="):] if not partlabel: return None device = dev / "disk" / "by-partlabel" / partlabel elif device.startswith("UUID="): uuid = device[len("UUID="):] if not uuid: return None device = dev / 
"disk" / "by-uuid" / uuid if not device.exists(): device = dev / "disk" / "by-uuid" / uuid.lower() elif device.startswith("PARTUUID="): partuuid, _, partnroff = ( device[len("PARTUUID="):].partition("/PARTNROFF=") ) if not partuuid: return None device = dev / "disk" / "by-partuuid" / partuuid if not device.exists(): device = dev / "disk" / "by-partuuid" / partuuid.lower() if partnroff: device = device.resolve() match = re.match("(.*[^0-9])([0-9]+)$", device.name) if not match: return None prefix, partno = match.groups() partno = str(int(partno) + int(partnroff)) device = device.with_name("{}{}".format(prefix, partno)) elif re.match("[0-9]+:[0-9]+", device): device = dev / "block" / device # Encrypted devices may currently be set up with names different # than in the crypttab file, so check that as well. elif device.startswith(str(dev / "mapper")): if not Path(device).resolve().exists(): for line in read_lines(self._crypttab): if not line or line.startswith("#"): continue fields = shlex.split(line) parentdev, cryptdev = fields[1], fields[0] if cryptdev != device.split("/")[-1]: continue parentdev = self.evaluate(parentdev) siblings = self.children(parentdev) if len(siblings) == 1: device = str(siblings.pop()) # This is actually wrong, but we can't really decide # which to use. The parent's good enough for us since # we usually only care about going up the tree. else: device = str(parentdev) device = Path(device).resolve() if not device.exists() or dev not in device.parents: return None try: return Partition(device, dev=dev, sys=sys) except: pass try: return Disk(device, dev=dev, sys=sys) except: pass def by_mountpoint(self, mountpoint, fstab_only=False): if not Path(mountpoint).exists(): return None if fstab_only: # We want the form in the fstab, e.g. 
PARTUUID=* device = self._fstab_mounts.get(mountpoint) return device else: device = self._mounts.get(str(mountpoint)) return self.evaluate(device) def mountpoints(self, device, include_fstab=False): device = self.evaluate(device) if device is None: return set() # Exclude fstab whose entries are not necessarily mounted if not include_fstab: mounts = collections.ChainMap( self._mountinfo_mounts, self._procmounts_mounts, self._mtab_mounts, ) else: mounts = self._mounts mountpoints = set() for mnt, dev in mounts.items(): dev = self.evaluate(dev) if dev == device: mnt = Path(mnt).resolve() if mnt.exists() or include_fstab: mountpoints.add(mnt) return mountpoints def by_id(self, id_): return self.evaluate("ID={}".format(id_)) def by_label(self, label): return self.evaluate("LABEL={}".format(label)) def by_partlabel(self, partlabel): return self.evaluate("PARTLABEL={}".format(partlabel)) def by_uuid(self, uuid): return self.evaluate("UUID={}".format(uuid)) def by_partuuid(self, partuuid): return self.evaluate("PARTUUID={}".format(partuuid)) def _get_dev_disk_info(self, device, prop): device = self.evaluate(device) for path in self._dev.glob("disk/by-{}/*".format(prop)): dev = self.evaluate(path) if dev == device: return path.name def get_id(self, device): return self._get_dev_disk_info(device, "id") def get_label(self, device): return self._get_dev_disk_info(device, "label") def get_partlabel(self, device): return self._get_dev_disk_info(device, "partlabel") def get_uuid(self, device): return self._get_dev_disk_info(device, "uuid") def get_partuuid(self, device): return self._get_dev_disk_info(device, "partuuid") def by_kern_guid(self): for arg in proc_cmdline(): lhs, _, rhs = arg.partition("=") if lhs == "kern_guid": return self.by_partuuid(rhs) def add_edge(self, node, child): node = self.evaluate(node) child = self.evaluate(child) if node is not None and child is not None and node != child: return super().add_edge(node, child) def children(self, *nodes): return super().children(*map(self.evaluate, nodes)) def parents(self, *nodes): return super().parents(*map(self.evaluate, nodes)) def leaves(self, *nodes): return super().leaves(*map(self.evaluate, nodes)) def roots(self, *nodes): return super().roots(*map(self.evaluate, nodes)) class Disk: def __init__(self, path, dev="/dev", sys="/sys"): self._sys = sys = Path(sys) self._dev = dev = Path(dev) if isinstance(path, Disk): path = path.path else: path = Path(path).resolve() if not (path.is_file() or path.is_block_device()): fmt = "Disk '{}' is not a file or block device." 
msg = fmt.format(str(path)) raise ValueError(msg) self.path = path def partition(self, partno): return Partition(self, partno, dev=self._dev, sys=self._sys) def partitions(self): return [ Partition(self, n, dev=self._dev, sys=self._sys) for n in cgpt.find_partitions(self.path) ] def cros_partitions(self): return [ CrosPartition(self, n, dev=self._dev, sys=self._sys) for n in cgpt.find_partitions(self.path, type="kernel") ] @property def size(self): if self.path.is_file(): return self.path.stat().st_size if self.path.is_block_device(): sysdir = self._sys / "class" / "block" / self.path.name size_f = sysdir / "size" if size_f.exists(): blocks = int(size_f.read_text()) return blocks * 512 def __hash__(self): return hash((self.path,)) def __eq__(self, other): if isinstance(other, Disk): return self.path == other.path return False def __str__(self): return str(self.path) def __repr__(self): cls = self.__class__.__name__ return "{}('{}')".format(cls, self.path) class Partition: def __init__(self, path, partno=None, dev="/dev", sys="/sys"): self._dev = dev = Path(dev) self._sys = sys = Path(sys) if isinstance(path, Disk): disk = path path = None elif isinstance(path, Partition): disk = path.disk partno = path.partno path = path.path else: disk = None path = Path(path).resolve() if ( disk is None and partno is None and path.parent == dev and path.is_block_device() ): match = ( re.fullmatch("(.*[0-9])p([0-9]+)", path.name) or re.fullmatch("(.*[^0-9])([0-9]+)", path.name) ) if match: diskname, partno = match.groups() partno = int(partno) disk = Disk(path.with_name(diskname), dev=dev, sys=sys) if disk is None: disk = Disk(path, dev=dev, sys=sys) path = None if partno is None: fmt = "Partition number not given for disk '{}'." msg = fmt.format(str(disk)) raise ValueError(msg) elif not (isinstance(partno, int) and partno > 0): fmt = "Partition number '{}' must be a positive integer." msg = fmt.format(partno) raise ValueError(msg) elif ( path is None and disk.path.parent == dev and disk.path.is_block_device() ): fmt = "{}p{}" if disk.path.name[-1].isnumeric() else "{}{}" name = fmt.format(disk.path.name, partno) path = disk.path.with_name(name) if path is not None: if not (path.is_file() or path.is_block_device()): path = None self.disk = disk self.path = path self.partno = partno @property def size(self): if self.path is None: return cgpt.get_size(self.disk.path, self.partno) if self.path.is_file(): return self.path.stat().st_size if self.path.is_block_device(): sysdir = self._sys / "class" / "block" / self.path.name size_f = sysdir / "size" if size_f.exists(): blocks = int(size_f.read_text()) return blocks * 512 def write_bytes(self, data): data = bytes(data) if len(data) >= self.size: raise ValueError( "Data to be written ('{}' bytes) is bigger than " "partition '{}' ('{}' bytes)." .format(len(data), self, self.size) ) if self.path is None: start = cgpt.get_start(self.disk.path, self.partno) with self.disk.path.open("r+b") as disk: seek = disk.seek(start) if seek != start: raise IOError( "Couldn't seek disk to start of partition '{}'." .format(self) ) written = disk.write(data) if written != len(data): raise IOError( "Couldn't write data to partition '{}' " "(wrote '{}' out of '{}' bytes)." 
.format(self, written, len(data)) ) else: self.path.write_bytes(data) def __hash__(self): return hash((self.path, self.disk, self.partno)) def __eq__(self, other): if isinstance(other, Partition): return ( self.path == other.path and self.disk == other.disk and self.partno == other.partno ) return False def __str__(self): if self.path is not None: return str(self.path) else: return "{}#{}".format(self.disk.path, self.partno) def __repr__(self): cls = self.__class__.__name__ if self.path is not None: return "{}('{}')".format(cls, self.path) else: return "{}('{}', {})".format(cls, self.disk.path, self.partno) class CrosPartition(Partition): @property def attribute(self): return cgpt.get_raw_attribute(self.disk.path, self.partno) @attribute.setter def attribute(self, attr): return cgpt.set_raw_attribute(self.disk.path, self.partno, attr) @property def flags(self): flags = cgpt.get_flags(self.disk.path, self.partno) return { "attribute": flags["A"], "successful": flags["S"], "priority": flags["P"], "tries": flags["T"], } @flags.setter def flags(self, value): if isinstance(value, dict): A = value.get("attribute", None) S = value.get("successful", None) P = value.get("priority", None) T = value.get("tries", None) else: A = getattr(value, "attribute", None) S = getattr(value, "successful", None) P = getattr(value, "priority", None) T = getattr(value, "tries", None) cgpt.set_flags(self.disk.path, self.partno, A=A, S=S, P=P, T=T) @property def successful(self): return self.flags["successful"] @successful.setter def successful(self, value): self.flags = {"successful": value} @property def tries(self): return self.flags["tries"] @tries.setter def tries(self, value): self.flags = {"tries": value} @property def priority(self): return self.flags["priority"] @priority.setter def priority(self, value): self.flags = {"priority": value} def prioritize(self): return cgpt.prioritize(self.disk.path, self.partno) def _comparable_parts(self): flags = self.flags size = self.size return ( flags["successful"], flags["priority"], flags["tries"], self.size, ) def __lt__(self, other): if not isinstance(other, CrosPartition): return NotImplemented return self._comparable_parts() < other._comparable_parts() def __gt__(self, other): if not isinstance(other, CrosPartition): return NotImplemented return self._comparable_parts() > other._comparable_parts() depthcharge-tools-0.6.2/depthcharge_tools/utils/pathlib.py000066400000000000000000000026261444761253100240030ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools pathlib utilities # Copyright (C) 2020-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. 
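# NOTE (editorial illustration, not part of the original sources): a rough
# sketch of the Disks / CrosPartition API defined in
# depthcharge_tools/utils/os.py above. The output depends on the machine;
# it assumes a ChromeOS-style GPT layout and needs the cgpt tool and
# sufficient privileges to inspect the disks.
#
# >>> from depthcharge_tools.utils.os import Disks
# >>> disks = Disks()
# >>> root_dev = disks.by_mountpoint("/")      # partition backing "/"
# >>> for disk in disks.roots(root_dev):       # physical disk(s) behind it
# ...     for part in disk.cros_partitions():  # ChromeOS kernel partitions
# ...         print(part, part.flags)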
import shutil import subprocess from pathlib import Path from depthcharge_tools.utils.subprocess import ( gzip, lz4, lzma, lzop, bzip2, xz, zstd, ) def copy(src, dest): dest = shutil.copy2(src, dest) return Path(dest) def decompress(src, dest=None, partial=False): if dest is not None: dest = Path(dest) for runner in (gzip, zstd, xz, lz4, lzma, bzip2, lzop): try: return runner.decompress(src, dest) except FileNotFoundError: if dest: dest.unlink() except subprocess.CalledProcessError as err: if dest is None and err.output and partial: return err.output elif dest and dest.stat().st_size > 0 and partial: return dest elif dest: dest.unlink() def iterdir(path): try: if path.is_dir(): return path.iterdir() else: return [] except: return [] def read_lines(path): try: if path.is_file(): return path.read_text().splitlines() else: return [] except: return [] depthcharge-tools-0.6.2/depthcharge_tools/utils/platform.py000066400000000000000000000347331444761253100242100ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools platform utilities # Copyright (C) 2020-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. import collections import glob import platform import re import shlex from pathlib import Path from depthcharge_tools.utils.pathlib import ( decompress, ) from depthcharge_tools.utils.subprocess import ( crossystem, ) def dt_compatibles(): dt_model = Path("/proc/device-tree/compatible") if dt_model.exists(): return dt_model.read_text().strip("\x00").split("\x00") def dt_model(): dt_model = Path("/proc/device-tree/model") if dt_model.exists(): return dt_model.read_text().strip("\x00") def cros_hwid(): hwid_file = Path("/proc/device-tree/firmware/chromeos/hardware-id") if hwid_file.exists(): return hwid_file.read_text().strip("\x00") for hwid_file in Path("/sys/bus/platform/devices").glob("GGL0001:*/HWID"): if hwid_file.exists(): return hwid_file.read_text().strip() # Try crossystem as a last resort try: return crossystem.hwid() except: pass def cros_fwid(): fwid_file = Path("/proc/device-tree/firmware/chromeos/firmware-version") if fwid_file.exists(): return fwid_file.read_text().strip("\x00") for fwid_file in Path("/sys/bus/platform/devices").glob("GGL0001:*/FWID"): if fwid_file.exists(): return fwid_file.read_text().strip() # Try crossystem as a last resort try: return crossystem.fwid() except: pass def os_release(root=None): os_release = {} if root is None: root = "/" root = Path(root).resolve() os_release_f = root / "etc" / "os-release" if not os_release_f.exists(): os_release_f = root / "usr" / "lib" / "os-release" if os_release_f.exists(): for line in os_release_f.read_text().splitlines(): lhs, _, rhs = line.partition("=") os_release[lhs] = rhs.strip('\'"') return os_release def kernel_cmdline(root=None): cmdline = "" if root is None: root = "/" root = Path(root).resolve() cmdline_f = root / "etc" / "kernel" / "cmdline" if not cmdline_f.exists(): cmdline_f = root / "usr" / "lib" / "kernel" / "cmdline" if cmdline_f.exists(): cmdline = cmdline_f.read_text().rstrip("\n") return shlex.split(cmdline) def proc_cmdline(): cmdline = "" cmdline_f = Path("/proc/cmdline") if cmdline_f.exists(): cmdline = cmdline_f.read_text().rstrip("\n") return shlex.split(cmdline) def is_cros_boot(): dt_cros_firmware = Path("/proc/device-tree/firmware/chromeos") if dt_cros_firmware.is_dir(): return True # Chrome OS firmware injects this into the kernel cmdline. 
if "cros_secure" in proc_cmdline(): return True return False def is_cros_libreboot(): fwid = cros_fwid() if fwid is None: return False return fwid.lower().startswith("libreboot") def root_requires_initramfs(root): x = "[0-9a-fA-F]" uuid = "{x}{{8}}-{x}{{4}}-{x}{{4}}-{x}{{4}}-{x}{{12}}".format(x=x) ntsig = "{x}{{8}}-{x}{{2}}".format(x=x) # Depthcharge replaces %U with an uuid, so we can use that as well. uuid = "({}|%U)".format(uuid) # Tries to validate the root=* kernel cmdline parameter. # See init/do_mounts.c in Linux tree. for pat in ( "[0-9a-fA-F]{4}", "/dev/nfs", "/dev/[0-9a-zA-Z]+", "/dev/[0-9a-zA-Z]+[0-9]+", "/dev/[0-9a-zA-Z]+p[0-9]+", "PARTUUID=({uuid}|{ntsig})".format(uuid=uuid, ntsig=ntsig), "PARTUUID=({uuid}|{ntsig})/PARTNROFF=[0-9]+".format( uuid=uuid, ntsig=ntsig, ), "[0-9]+:[0-9]+", "PARTLABEL=.+", "/dev/cifs", ): if re.fullmatch(pat, root): return False return True def vboot_keys(*keydirs, system=True, root=None): if len(keydirs) == 0 or system: if root is None: root = "/" root = Path(root).resolve() keydirs = ( *keydirs, root / "etc" / "depthcharge-tools", root / "usr" / "share" / "vboot" / "devkeys", root / "usr" / "local" / "share" / "vboot" / "devkeys", ) for keydir in keydirs: keydir = Path(keydir) if not keydir.is_dir(): continue keyblock = keydir / "kernel.keyblock" signprivate = keydir / "kernel_data_key.vbprivk" signpubkey = keydir / "kernel_subkey.vbpubk" if not keyblock.exists(): keyblock = None if not signprivate.exists(): signprivate = None if not signpubkey.exists(): signpubkey = None if keyblock or signprivate or signpubkey: return keydir, keyblock, signprivate, signpubkey return None, None, None, None def cpu_microcode(boot=None): microcode = [] for f in ( *boot.glob("amd-ucode.img"), *boot.glob("amd-uc.img"), ): if f.is_file(): microcode.append(f) break for f in ( *boot.glob("intel-ucode.img"), *boot.glob("intel-uc.img"), ): if f.is_file(): microcode.append(f) break if not microcode: for f in ( *boot.glob("early_ucode.cpio"), *boot.glob("microcode.cpio"), ): if f.is_file(): microcode.append(f) break return microcode def installed_kernels(root=None, boot=None): kernels = {} initrds = {} fdtdirs = {} if root is None: root = "/" root = Path(root).resolve() if boot is None: boot = root / "boot" boot = Path(boot).resolve() for f in ( *root.glob("lib/modules/*/vmlinuz"), *root.glob("lib/modules/*/vmlinux"), *root.glob("lib/modules/*/Image"), *root.glob("lib/modules/*/zImage"), *root.glob("lib/modules/*/bzImage"), *root.glob("usr/lib/modules/*/vmlinuz"), *root.glob("usr/lib/modules/*/vmlinux"), *root.glob("usr/lib/modules/*/Image"), *root.glob("usr/lib/modules/*/zImage"), *root.glob("usr/lib/modules/*/bzImage"), ): if not f.is_file(): continue release = f.parent.name kernels[release] = f.resolve() for f in ( *boot.glob("vmlinuz-*"), *boot.glob("vmlinux-*"), ): if not f.is_file(): continue _, _, release = f.name.partition("-") kernels[release] = f.resolve() for f in ( *boot.glob("vmlinuz"), *boot.glob("vmlinux"), *root.glob("vmlinuz"), *root.glob("vmlinux"), *boot.glob("Image"), *boot.glob("zImage"), *boot.glob("bzImage"), ): if not f.is_file(): continue kernels[None] = f.resolve() break for f in ( *root.glob("lib/modules/*/initrd"), *root.glob("lib/modules/*/initramfs"), *root.glob("lib/modules/*/initrd.img"), *root.glob("lib/modules/*/initramfs.img"), *root.glob("usr/lib/modules/*/initrd"), *root.glob("usr/lib/modules/*/initramfs"), *root.glob("usr/lib/modules/*/initrd.img"), *root.glob("usr/lib/modules/*/initramfs.img"), ): if not f.is_file(): continue release = 
f.parent.name initrds[release] = f.resolve() for f in ( *boot.glob("initrd-*.img"), *boot.glob("initramfs-*.img"), ): if not f.is_file(): continue _, _, release = f.name.partition("-") release = release[:-4] initrds[release] = f.resolve() for f in ( *boot.glob("initrd-*"), *boot.glob("initrd.img-*"), *boot.glob("initramfs-*"), *boot.glob("initramfs.img-*"), ): if not f.is_file(): continue _, _, release = f.name.partition("-") initrds[release] = f.resolve() for f in ( *boot.glob("initrd.img"), *boot.glob("initrd"), *boot.glob("initramfs-linux.img"), *boot.glob("initramfs-vanilla"), *boot.glob("initramfs"), *root.glob("initrd.img"), *root.glob("initrd"), *root.glob("initramfs"), ): if not f.is_file(): continue initrds[None] = f.resolve() break for d in ( *root.glob("usr/lib/linux-image-*"), ): if not d.is_dir(): continue _, _, release = d.name.partition("linux-image-") fdtdirs[release] = d.resolve() for d in ( *root.glob("lib/modules/*/dtb"), *root.glob("lib/modules/*/dtbs"), *root.glob("usr/lib/modules/*/dtb"), *root.glob("usr/lib/modules/*/dtbs"), ): if not d.is_dir(): continue release = d.parent.name fdtdirs[release] = d.resolve() for d in ( *boot.glob("dtb-*"), *boot.glob("dtbs-*"), ): if not d.is_dir(): continue _, _, release = d.name.partition("-") fdtdirs[release] = d.resolve() for d in ( *boot.glob("dtb/*"), *boot.glob("dtbs/*"), ): if not d.is_dir(): continue if d.name in kernels: fdtdirs[d.name] = d.resolve() for d in ( *boot.glob("dtbs"), *boot.glob("dtb"), *root.glob("usr/share/dtbs"), *root.glob("usr/share/dtb"), ): if not d.is_dir(): continue # Duplicate dtb files means that the directory is split by # kernel release and we can't use it for a single release. dtbs = d.glob("**/*.dtb") counts = collections.Counter(dtb.name for dtb in dtbs) if all(c <= 1 for c in counts.values()): fdtdirs[None] = d.resolve() break if None in kernels: kernel, release = kernels[None], None for r, k in kernels.items(): if k == kernel and r is not None: release = r break if release is not None: del kernels[None] if None in initrds: initrds.setdefault(release, initrds[None]) del initrds[None] if None in fdtdirs: fdtdirs.setdefault(release, fdtdirs[None]) del fdtdirs[None] return [ KernelEntry( release, kernel=kernels[release], initrd=initrds.get(release, None), fdtdir=fdtdirs.get(release, None), os_name=os_release(root=root).get("NAME", None), ) for release in kernels.keys() ] class KernelEntry: def __init__(self, release, kernel, initrd=None, fdtdir=None, os_name=None): self.release = release self.kernel = kernel self.initrd = initrd self.fdtdir = fdtdir self.os_name = os_name @property def description(self): if self.os_name is None: return "Linux {}".format(self.release) else: return "{}, with Linux {}".format(self.os_name, self.release) @property def arch(self): kernel = Path(self.kernel) decomp = decompress(kernel) if decomp: head = decomp[:4096] else: with kernel.open("rb") as f: head = f.read(4096) if head[0x202:0x206] == b"HdrS": return Architecture("x86") elif head[0x38:0x3c] == b"ARM\x64": return Architecture("arm64") elif head[0x34:0x38] == b"\x45\x45\x45\x45": return Architecture("arm") def _comparable_parts(self): pattern = "([^a-zA-Z0-9]?)([a-zA-Z]*)([0-9]*)" if self.release is None: return () parts = [] for sep, text, num in re.findall(pattern, self.release): # x.y.z > x.y-* == x.y* > x.y~* sep = { "~": -1, ".": 1, }.get(sep, 0) # x.y-* == x.y* > x.y > x.y-rc* == x.y-trunk* text = ({ "rc": -1, "trunk": -1, }.get(text, 0), text) # Compare numbers as numbers num = int(num) if num else 0 
parts.append((sep, text, num)) return tuple(parts) def __lt__(self, other): if not isinstance(other, KernelEntry): return NotImplemented return self._comparable_parts() < other._comparable_parts() def __gt__(self, other): if not isinstance(other, KernelEntry): return NotImplemented return self._comparable_parts() > other._comparable_parts() def __str__(self): return self.description def __repr__(self): return ( "KernelEntry(release={!r}, kernel={!r}, initrd={!r}, fdtdir={!r}, os_name={!r})" .format(self.release, self.kernel, self.initrd, self.fdtdir, self.os_name) ) class Architecture(str): arm_32 = ["arm", "ARM", "armv7", "ARMv7", ] arm_64 = ["arm64", "ARM64", "aarch64", "AArch64"] arm = arm_32 + arm_64 x86_32 = ["i386", "x86"] x86_64 = ["x86_64", "amd64", "AMD64"] x86 = x86_32 + x86_64 all = arm + x86 groups = (arm_32, arm_64, x86_32, x86_64) def __eq__(self, other): if isinstance(other, Architecture): for group in self.groups: if self in group and other in group: return True return str(self) == str(other) def __ne__(self, other): if isinstance(other, Architecture): for group in self.groups: if self in group and other not in group: return True return str(self) != str(other) @property def mkimage(self): if self in self.arm_32: return "arm" if self in self.arm_64: return "arm64" if self in self.x86_32: return "x86" if self in self.x86_64: return "x86_64" @property def vboot(self): if self in self.arm_32: return "arm" if self in self.arm_64: return "aarch64" if self in self.x86_32: return "x86" if self in self.x86_64: return "amd64" @property def kernel_arches(self): if self in self.arm_32: return self.arm_32 if self in self.arm_64: return self.arm if self in self.x86_32: return self.x86_32 if self in self.x86_64: return self.x86 depthcharge-tools-0.6.2/depthcharge_tools/utils/string.py000066400000000000000000000041051444761253100236600ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools string utilities # Copyright (C) 2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. 
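# NOTE (editorial illustration, not part of the original sources): how the
# Architecture class defined in depthcharge_tools/utils/platform.py above
# normalizes architecture names for the various tools.
#
# >>> from depthcharge_tools.utils.platform import Architecture
# >>> Architecture("aarch64") == Architecture("arm64")  # same group compares equal
# True
# >>> Architecture("aarch64").mkimage  # name the mkimage tool expects
# 'arm64'
# >>> Architecture("aarch64").vboot    # name the vboot tools expect
# 'aarch64'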
import ast import re def bytesize_suffixes(): def long_forms(x): formats = ("{}", "{}byte", "{} byte", "{}bytes", "{} bytes") cases = (str.upper, str.lower, str.title) for f in formats: for c in cases: yield c(f.format(x)) for size, suffixes in { 1: ("B", "byte", "bytes", ""), 1e3: ("kB", "KB", *long_forms("kilo")), 1e6: ("MB", *long_forms("mega")), 1e9: ("GB", *long_forms("giga")), 1e12: ("TB", *long_forms("tera")), 1e15: ("PB", *long_forms("peta")), 1e18: ("EB", *long_forms("exa")), 1e21: ("ZB", *long_forms("zetta")), 1e24: ("YB", *long_forms("yotta")), 2 ** 10: ("kiB", "KiB", "K", *long_forms("kibi")), 2 ** 20: ("MiB", "M", *long_forms("mebi")), 2 ** 30: ("GiB", "G", *long_forms("gibi")), 2 ** 40: ("TiB", "T", *long_forms("tebi")), 2 ** 50: ("PiB", "P", *long_forms("pebi")), 2 ** 60: ("EiB", "E", *long_forms("exbi")), 2 ** 70: ("ZiB", "Z", *long_forms("zebi")), 2 ** 80: ("YiB", "Y", *long_forms("yobi")), }.items(): for suffix in suffixes: yield (suffix.strip(), int(size)) bytesize_suffixes = dict(bytesize_suffixes()) def parse_bytesize(val): if val is None: return None try: return int(val) except: pass try: return int(ast.literal_eval(val)) except: pass try: s = str(val) suffix = re.search("[a-zA-Z\s]*\Z", s)[0].strip() number = s.rpartition(suffix)[0].strip() multiplier = bytesize_suffixes[suffix] return int(ast.literal_eval(number)) * multiplier except Exception as err: raise ValueError( "Cannot convert '{}' to a byte-size." .format(val) ) depthcharge-tools-0.6.2/depthcharge_tools/utils/subprocess.py000066400000000000000000000335441444761253100245530ustar00rootroot00000000000000#! /usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools subprocess utilities # Copyright (C) 2020-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. 
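# NOTE (editorial illustration, not part of the original sources): expected
# behaviour of parse_bytesize() from depthcharge_tools/utils/string.py
# above.
#
# >>> from depthcharge_tools.utils.string import parse_bytesize
# >>> parse_bytesize(512)       # plain integers pass through unchanged
# 512
# >>> parse_bytesize("32 MiB")  # binary (IEC) suffixes, 32 * 2**20
# 33554432
# >>> parse_bytesize("64kB")    # decimal (SI) suffixes, 64 * 10**3
# 64000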
import contextlib import logging import re import subprocess import shlex from pathlib import Path logger = logging.getLogger(__name__) class ProcessRunner: def __init__(self, *args_prefix, **kwargs_defaults): self.args_prefix = args_prefix self.kwargs_defaults = { 'encoding': "utf-8", 'check': True, } self.kwargs_defaults.update(kwargs_defaults) def __call__(self, *args_suffix, **kwargs_overrides): args = (*self.args_prefix, *args_suffix) kwargs = {**self.kwargs_defaults, **kwargs_overrides} with contextlib.ExitStack() as ctx: stdin = kwargs.get("stdin", None) if isinstance(stdin, str): stdin = Path(stdin) if isinstance(stdin, bytes): kwargs["stdin"] = None kwargs["encoding"] = None kwargs["input"] = stdin if isinstance(stdin, Path): kwargs["stdin"] = ctx.enter_context(stdin.open("r")) if stdin is None: kwargs["stdin"] = subprocess.PIPE stdout = kwargs.get("stdout", None) if isinstance(stdout, str): stdout = Path(stdout) if isinstance(stdout, Path): kwargs["stdout"] = ctx.enter_context(stdout.open("x")) if stdout is None: kwargs["stdout"] = subprocess.PIPE stderr = kwargs.get("stderr", None) if isinstance(stderr, str): stderr = Path(stderr) if isinstance(stderr, Path): kwargs["stderr"] = ctx.enter_context(stderr.open("x")) if stderr is None: kwargs["stderr"] = subprocess.PIPE try: return subprocess.run(args, **kwargs) except subprocess.CalledProcessError as err: our_err = self._parse_subprocess_error(err) if our_err is None: return subprocess.CompletedProcess( args=err.cmd, returncode=err.returncode, stdout=err.stdout, stderr=err.stderr, ) if our_err is not err: raise our_err raise def _parse_subprocess_error(self, err): return err class GzipRunner(ProcessRunner): def __init__(self): super().__init__("gzip", encoding=None) def compress(self, src, dest=None): proc = self("-c", "-6", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def decompress(self, src, dest=None): proc = self("-c", "-d", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def test(self, path): proc = self("-t", stdin=path, check=False) return proc.returncode == 0 class Lz4Runner(ProcessRunner): def __init__(self): super().__init__("lz4", encoding=None) def compress(self, src, dest=None): proc = self("-z", "-9", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def decompress(self, src, dest=None): proc = self("-d", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def test(self, path): proc = self("-t", stdin=path, check=False) return proc.returncode == 0 class LzmaRunner(ProcessRunner): def __init__(self): super().__init__("lzma", encoding=None) def compress(self, src, dest=None): proc = self("-z", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def decompress(self, src, dest=None): proc = self("-d", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def test(self, path): proc = self("-t", stdin=path, check=False) return proc.returncode == 0 class LzopRunner(ProcessRunner): def __init__(self): super().__init__("lzop", encoding=None) def compress(self, src, dest=None): proc = self("-c", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def decompress(self, src, dest=None): proc = self("-c", "-d", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def test(self, path): proc = self("-t", stdin=path, check=False) return proc.returncode == 0 class 
Bzip2Runner(ProcessRunner): def __init__(self): super().__init__("bzip2", encoding=None) def compress(self, src, dest=None): proc = self("-c", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def decompress(self, src, dest=None): proc = self("-c", "-d", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def test(self, path): proc = self("-t", stdin=path, check=False) return proc.returncode == 0 class XzRunner(ProcessRunner): def __init__(self): super().__init__("xz", encoding=None) def compress(self, src, dest=None): proc = self("-z", "--check=crc32", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def decompress(self, src, dest=None): proc = self("-d", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def test(self, path): proc = self("-t", stdin=path, check=False) return proc.returncode == 0 class ZstdRunner(ProcessRunner): def __init__(self): super().__init__("zstd", encoding=None) def compress(self, src, dest=None): proc = self("-z", "-9", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def decompress(self, src, dest=None): proc = self("-d", stdin=src, stdout=dest) if dest is None: return proc.stdout else: return Path(dest) def test(self, path): proc = self("-t", stdin=path, check=False) return proc.returncode == 0 class MkimageRunner(ProcessRunner): def __init__(self): super().__init__("mkimage") class VbutilKernelRunner(ProcessRunner): def __init__(self): super().__init__("futility", "vbutil_kernel") class CgptRunner(ProcessRunner): def __init__(self): super().__init__("cgpt") def __call__(self, *args, **kwargs): proc = super().__call__(*args, **kwargs) lines = proc.stdout.splitlines() # Sometimes cgpt prints duplicate output. # https://bugs.chromium.org/p/chromium/issues/detail?id=463414 mid = len(lines) // 2 if lines[:mid] == lines[mid:]: proc.stdout = "\n".join(lines[:mid]) return proc def _parse_subprocess_error(self, err): # Exits with nonzero status if it finds no partitions of # given type even if the disk has a valid partition table if not err.stderr: return None m = re.fullmatch( "ERROR: Can't open (.*): Permission denied\n", err.stderr, ) if m: return PermissionError( "Couldn't open '{}', permission denied." 
.format(m.groups()[0]) ) return err def get_raw_attribute(self, disk, partno): proc = self("show", "-A", "-i", str(partno), str(disk)) attribute = int(proc.stdout, 16) return attribute def set_raw_attribute(self, disk, partno, attribute): self("add", "-A", hex(attribute), "-i", str(partno), str(disk)) def get_flags(self, disk, partno): attribute = self.get_raw_attribute(disk, partno) successful = (attribute >> 8) & 0x1 tries = (attribute >> 4) & 0xF priority = (attribute >> 0) & 0xF return { "A": attribute, "S": successful, "P": priority, "T": tries, } def set_flags(self, disk, partno, A=None, S=None, P=None, T=None): flag_args = [] if A is not None: flag_args += ["-A", str(int(A))] if S is not None: flag_args += ["-S", str(int(S))] if P is not None: flag_args += ["-P", str(int(P))] if T is not None: flag_args += ["-T", str(int(T))] self("add", *flag_args, "-i", str(partno), str(disk)) def get_size(self, disk, partno): proc = self("show", "-s", "-i", str(partno), str(disk)) blocks = int(proc.stdout) return blocks * 512 def get_start(self, disk, partno): proc = self("show", "-b", "-i", str(partno), str(disk)) blocks = int(proc.stdout) return blocks * 512 def find_partitions(self, disk, type=None): if type is None: # cgpt find needs at least one of -t, -u, -l proc = self("show", "-q", "-n", disk) lines = proc.stdout.splitlines() partnos = [int(shlex.split(line)[2]) for line in lines] else: proc = self("find", "-n", "-t", type, disk) partnos = [int(n) for n in proc.stdout.splitlines()] return partnos def prioritize(self, disk, partno): self("prioritize", "-i", str(partno), str(disk)) class CrossystemRunner(ProcessRunner): def __init__(self): super().__init__("crossystem") def hwid(self): proc = self("hwid", check=False) if proc.returncode == 0: return proc.stdout else: return None def fwid(self): proc = self("fwid", check=False) if proc.returncode == 0: return proc.stdout else: return None class FdtgetRunner(ProcessRunner): def __init__(self): super().__init__("fdtget") def get(self, dt_file, node='/', prop='', default=None, type=None): options = [] if default is not None: options += ["--default", str(default)] if type == str: options += ["--type", "s"] elif type == int: options += ["--type", "i"] elif type == bytes: options += ["--type", 'bx'] elif type is not None: options += ["--type", str(type)] proc = self(*options, str(dt_file), str(node), str(prop)) # str.split takes too much memory def split(s): for m in re.finditer("(\S*)\s", s): yield m.group() if type in (None, int): try: data = [int(i) for i in split(proc.stdout)] return data[0] if len(data) == 1 else data except: pass if type in (None, bytes): try: # bytes.fromhex("0") doesn't work data = bytes(int(x, 16) for x in split(proc.stdout)) return data except: pass data = str(proc.stdout).strip("\n") return data def properties(self, dt_file, node='/'): proc = self("--properties", str(dt_file), str(node), check=False) if proc.returncode == 0: return proc.stdout.splitlines() else: return [] def subnodes(self, dt_file, node='/'): proc = self("--list", str(dt_file), str(node), check=False) nodes = proc.stdout.splitlines() if proc.returncode == 0: return proc.stdout.splitlines() else: return [] class FdtputRunner(ProcessRunner): def __init__(self): super().__init__("fdtput") def put(self, dt_file, node='/', prop='', value=None, type=None): if isinstance(value, list): values = value else: values = [value] value_args = [] for value in values: if isinstance(value, str): value_args.append(value) if type is None: type = str elif isinstance(value, 
bytes): value_args.extend(hex(c) for c in value) if type is None: type = bytes elif isinstance(value, int): value_args.append(str(value)) if type is None: type = int else: value_args.append(str(value)) options = [] if type == str: options += ["--type", "s"] elif type == int: options += ["--type", "i"] elif type == bytes: options += ["--type", 'bx'] elif type is not None: options += ["--type", str(type)] self(*options, str(dt_file), str(node), str(prop), *value_args) class FileRunner(ProcessRunner): def __init__(self): super().__init__("file") def brief(self, path): proc = self("-b", path, check=False) if proc.returncode == 0: return proc.stdout.strip("\n") else: return None gzip = GzipRunner() lz4 = Lz4Runner() lzma = LzmaRunner() lzop = LzopRunner() bzip2 = Bzip2Runner() xz = XzRunner() zstd = ZstdRunner() mkimage = MkimageRunner() vbutil_kernel = VbutilKernelRunner() cgpt = CgptRunner() crossystem = CrossystemRunner() fdtget = FdtgetRunner() fdtput = FdtputRunner() file = FileRunner() depthcharge-tools-0.6.2/depthchargectl.rst000066400000000000000000000457771444761253100207010ustar00rootroot00000000000000.. SPDX-License-Identifier: GPL-2.0-or-later .. depthcharge-tools depthchargectl(1) manual page .. Copyright (C) 2019-2022 Alper Nebi Yasak .. See COPYRIGHT and LICENSE files for full copyright information. ============== depthchargectl ============== -------------------------------------------------- Manage the ChromeOS bootloader and its boot images -------------------------------------------------- :date: 2023-06-30 :version: v0.6.2 :manual_section: 1 :manual_group: depthcharge-tools .. |mkdepthcharge| replace:: *mkdepthcharge*\ (1) .. |cgpt| replace:: *cgpt*\ (1) .. |vbutil_kernel| replace:: *vbutil_kernel*\ (1) .. |CONFIG_DIR| replace:: **/etc/depthcharge-tools** .. |CONFIG_FILE| replace:: **/etc/depthcharge-tools/config** .. |CONFIGD_DIR| replace:: **/etc/depthcharge-tools/config.d** .. |IMAGES_DIR| replace:: **/boot/depthcharge** .. |INITD_DIR| replace:: **/etc/init.d** .. |SYSTEMD_DIR| replace:: **/usr/lib/systemd/system** .. |USR_CMDLINE_FILE| replace:: **/usr/lib/kernel/cmdline** .. |ETC_CMDLINE_FILE| replace:: **/etc/kernel/cmdline** .. |PROC_CMDLINE_FILE| replace:: **/proc/cmdline** .. |VBOOT_DEVKEYS| replace:: **/usr/share/vboot/devkeys** .. |USR_KI_DIR| replace:: **/usr/lib/kernel/install.d** .. |ETC_KI_CONF| replace:: **/etc/kernel/install.conf** SYNOPSIS ======== **depthchargectl** [options] *COMMAND* ... **depthchargectl bless** [options] [*PARTITION* | *DISK*] **depthchargectl build** [options] [*KERNEL_VERSION*] **depthchargectl check** [options] *IMAGE* **depthchargectl config** [options] *KEY* **depthchargectl list** [options] [*DISK* ...] **depthchargectl remove** [options] (*KERNEL_VERSION* | *IMAGE*) **depthchargectl target** [options] [*PARTITION* | *DISK* ...] **depthchargectl write** [options] [*KERNEL_VERSION* | *IMAGE*] DESCRIPTION =========== **depthchargectl** automatically manages the ChromeOS bootloader by building images for the current board and system, writing them to appropriate ChromeOS kernel partitions, prioritizing those partitions accordingly, and setting them as successful on boot. When you have more than one ChromeOS kernel partition, they will be utilized in rotation so that an unsuccessful boot attempt can revert to the last good version. The *KERNEL_VERSION* argument is a distro-specific representation of a kernel and usually is the latter part of **/boot/vmlinuz-**\ *VERSION*. 
The *IMAGE* argument is a boot image for the ChromeOS bootloader, or a
file suspected to be one. *DISK* should be a physical disk containing a
GPT partition table (e.g. **/dev/mmcblk0**, **/dev/sda**), but virtual
disks (e.g. **/dev/dm-0**) are resolved to such physical disks if
possible. *PARTITION* must be one of the partition devices of a physical
disk (e.g. **/dev/mmcblk0p1**, **/dev/sda2**). The *vmlinuz*,
*initramfs* and *dtb* files are as explained in |mkdepthcharge|.

The program's functionality is divided into subcommands:

depthchargectl bless
--------------------
Sets bootloader-specific flags for a given partition or the currently
booted partition as detected from the **kern_guid=**\ *PARTUUID*
parameter |mkdepthcharge| adds to the kernel command line. By default,
this marks the partition as successfully booted and the most preferred
one, but it can also disable the partition or make it boot only on the
next attempt.

depthchargectl build
--------------------
Builds a bootable image from the running system for this board, using
the latest or a specific kernel version.

**depthchargectl** keeps a database of known ChromeOS boards and how to
build bootable images for them. For example, it keeps track of which
device-tree file needs to be included for each ARM board. It also
figures out distro-specific information about where the *vmlinuz*,
*initramfs* and *dtb* files are located. It uses this information and
|mkdepthcharge| to build the image. It automatically adds an appropriate
**root=**\ *ROOT* kernel command line parameter deduced from
**/etc/fstab**. Higher compression levels for the kernel are
automatically tried as necessary, when the firmware supports them.

depthchargectl config
---------------------
Retrieves the configured value for a given configuration key, primarily
for use in scripts that integrate **depthchargectl** with the system
upgrade process. Can also query information about boards.

depthchargectl check
--------------------
Checks if a file is a depthcharge image that can be booted on this
board.

**depthchargectl** also keeps track of restrictions on images for each
board. For example, earlier ChromeOS boards can only boot images up to a
specific size, e.g. 32MiB. It checks if its input is in a format the
ChromeOS bootloader expects and satisfies these restrictions.

depthchargectl list
-------------------
Prints a table of ChromeOS kernel partitions and their
bootloader-specific GPT flags (i.e. Successful, Priority, Tries). By
default, it only searches the physical disks on which the boot and root
partitions reside.

depthchargectl remove
---------------------
Disables partitions that contain a specific image or a specific kernel
version. This is most useful when you are removing a kernel version and
its modules from your system, and know that images built with this
kernel will fail to boot from that point on.

depthchargectl target
---------------------
Chooses and prints the lowest priority, preferably unsuccessful ChromeOS
kernel partition to write a boot image to. By default, searches the same
disks as the **list** subcommand. If a partition is given, checks whether
it is appropriate for a boot image. Tries to avoid the currently booted
kernel.

depthchargectl write
--------------------
Writes a specific image or builds and writes a *kernel-version* image to
a partition the **target** subcommand returns, and marks it as bootable
once on the next boot. The **bless** subcommand must be run after a
successful boot to make the partition permanently bootable, but that can
be done automatically with the service files provided with this package.

OPTIONS
=======

Global options
--------------
-h, --help
    Show a help message and exit.

-v, --verbose
    Print info messages and |mkdepthcharge| output to stderr.

-V, --version
    Print program version and exit.

--root ROOT
    Root device or mountpoint of the system to work on. If a mounted
    device is given, its mountpoint is used. Defaults to the currently
    booted system's root.

--root-mountpoint DIR, --boot-mountpoint DIR
    Root and boot mountpoints of the system to work on. If not given,
    deduced from the **--root** argument. These are helpful because the
    **--root** argument is overloaded by the **build** subcommand, which
    adds it as a kernel command line argument, and it can be desirable
    to avoid that while building an image for a chroot.

--tmpdir DIR
    Directory to keep temporary files. Normally **depthchargectl**
    creates a temporary directory by itself and removes it when it
    quits. However, if a temporary directory is specified with this
    option, any temporary files will be created under it and will not
    be deleted.

Configuration options
---------------------
In addition to its built-in configuration, **depthchargectl** reads
|CONFIG_FILE| and |CONFIGD_DIR|/*\ ** as configuration files to make it
adaptable to different boards and systems. The following options allow
this configuration to be overridden temporarily.

--config FILE
    Additional configuration file to read. This can include changing
    board properties or adding new boards, which mostly isn't possible
    to do with command-line options.

--board CODENAME
    Assume **depthchargectl** is running on the specified board.
    Normally, it tries to detect which board it's running on, primarily
    based on the HWID of the board set by the vendor, among other
    things.

--images-dir DIR
    Directory to store and look for built depthcharge images. By
    default, set to |IMAGES_DIR|.

--vboot-keyblock KEYBLOCK
    The kernel keyblock file required to sign and verify images. By
    default, **depthchargectl** searches for these keys in |CONFIG_DIR|
    and |VBOOT_DEVKEYS| directories.

--vboot-public-key SIGNPUBKEY
    The public key required to verify images, in .vbpubk format. By
    default, **depthchargectl** searches for these keys in |CONFIG_DIR|
    and |VBOOT_DEVKEYS| directories.

--vboot-private-key SIGNPRIVATE
    The private key necessary to sign images, in .vbprivk format. By
    default, **depthchargectl** searches for these keys in |CONFIG_DIR|
    and |VBOOT_DEVKEYS| directories.

--kernel-cmdline *CMD* [*CMD* ...]
    Command-line parameters for the kernel. By default, these are read
    from |ETC_CMDLINE_FILE|, |USR_CMDLINE_FILE| or |PROC_CMDLINE_FILE|.
    **depthchargectl** and |mkdepthcharge| may append some other values
    to this: an appropriate **root=**\ *ROOT*, the **kern_guid=%U**
    parameter required for the **bless** subcommand, and **noinitrd** if
    **--ignore-initramfs** is given.

--ignore-initramfs
    Do not include *initramfs* in the built images, ignore the initramfs
    checks for the **root=**\ *ROOT* argument, and add **noinitrd** to
    the kernel cmdline. If you know that your OS kernel can boot on this
    board without an initramfs (perhaps because it has a built-in one),
    you can specify this option to build an initramfs-less image.

--zimage-initramfs-hack
    Choose which initramfs support hack will be used for the zimage
    format.
Either **set-init-size** (the default), **pad-vmlinuz** for kernels without **KASLR**, or **none** if depthcharge ever gets native support for safely loading zimage initramfs. depthchargectl bless options ---------------------------- --bad Set the specified partition as unbootable. This sets all three of the *Successful*, *Priority*, *Tries* flags to 0. --oneshot Set the specified partition to be tried once in the next boot. This sets the *Successful* flag to 0, *Tries* flag to 1, and makes sure the *Priority* flag is the highest one among all the partitions of the disk the specified one is in. -i NUM, --partno NUM Partition number in the given disk image, for when the positional argument is a disk image instead of a partition block device. depthchargectl build options ---------------------------- --description DESC Human-readable description for the image. By default, a string that describes your system with the specified kernel release name, like "Debian GNU/Linux, with Linux 5.10.0-6-arm64". --root ROOT Root device to add to kernel cmdline. By default, this is acquired from **/etc/fstab** or a filesystem UUID is derived from the mounted root. If **none** is passed, no root parameter is added. --compress *TYPE* [*TYPE* ...] Compression types to attempt. By default, all compression types that the board supports based on **depthchargectl** configuration are attempted from lowest to highest compression. --timestamp SECONDS Build timestamp for the image. By default, **SOURCE_DATE_EPOCH** is used if it's set. If not, the modification date of either the *initramfs* or *vmlinuz* is used as an attempt to keep images somewhat reproducible. -o PATH, --output PATH Output image to path instead of storing it in the images-dir. The following options allow one to specify the exact files to be used in building the image, instead of letting **depthchargectl** deduce them: --kernel-release NAME Release name for the kernel to be used in image filename under the images-dir (unless **--output** is specified). --kernel FILE Kernel executable. Usually **/boot/vmlinuz-**\ *VERSION* by default, but depends on your OS. --initramfs *FILE* [*FILE* ...] Ramdisk image. Usually **/boot/initrd.img-**\ *VERSION* by default, but depends on your OS. If **none** is passed, no initramfs is added. --fdtdir DIR Directory to search device-tree binaries for the board. Usually **/boot/dtbs** or a directory like **/usr/lib/linux-image-**\ *VERSION*, depends on your OS. *dtb* files in this dir are searched to find ones matching your board's device-tree compatible string set in configuration. --dtbs *FILE* [*FILE* ...] Device-tree binary files to use instead of searching *fdtdir*. depthchargectl config options ----------------------------- --section SECTION Config section to retrieve configured values from. By default, this is the globally default section: **depthcharge-tools**. --default DEFAULT A default value to return if the given config key doesn't exist in the given config section. If a default value is not given, this subcommand prints an error message and exits with nonzero status when the key is missing. depthchargectl check options ---------------------------- This subcommand takes no specific options. depthchargectl list options --------------------------- -a, --all-disks List partitions on all disks. -c, --count Print only the count of partitions. -n, --noheadings Don't print column headings. -o COLUMNS, --output COLUMNS Comma separated list of columns to output. 
    Supported columns are **ATTRIBUTE** (or **A**), **SUCCESSFUL** (or
    **S**), **TRIES** (or **T**), **PRIORITY** (or **P**) for ChromeOS
    GPT flags, **PATH** for the partition device (if it exists),
    **DISKPATH** (or **DISK**) for the disk device/image the partition
    is in, **PARTNO** for the partition number, and **SIZE** for the
    partition size in bytes.

depthchargectl remove options
-----------------------------
-f, --force
    Allow disabling the currently booted partition.

depthchargectl target options
-----------------------------
-a, --all-disks
    Consider all available disks, instead of considering only disks
    containing the root and boot partitions.

--allow-current
    Allow targeting the currently booted partition.

-s BYTES, --min-size BYTES
    Only consider partitions larger than this size in bytes. Defaults to
    **64 KiB** to ignore unused partitions in ChromeOS installations.

depthchargectl write options
----------------------------
--allow-current
    Allow overwriting the currently booted partition.

-f, --force
    Write image to disk even if it cannot be verified by the **check**
    subcommand.

--no-prioritize
    Don't modify ChromeOS GPT flags on the partition. Normally, the
    flags would be set to make the system boot from the newly written
    partition on the next boot.

-t DEVICE, --target DEVICE
    Specify a disk or partition device to write to. This device is
    passed to the **target** subcommand to determine where exactly to
    write to.

EXIT STATUS
===========
In general, exits with zero on success and non-zero on failure. Some
subcommands return more specific exit statuses:

depthchargectl build exit status
--------------------------------
0
    Image built and stored successfully.
1
    An error occurred before or during building the image.
3
    Can build an image with an *initramfs*, but it is too big for the
    board despite using maximum allowed kernel compression. This might
    be solvable by reducing the *initramfs* size.
4
    Like **3**, but without an *initramfs*, or reducing the *initramfs*
    size wouldn't make things fit. This might be solvable by reducing
    the *vmlinuz* size, perhaps by building a custom kernel.

depthchargectl check exit status
--------------------------------
0
    The *image* passes all checks.
1
    Errors unrelated to image checks.
2
    The *image* isn't a readable file.
3
    Size of the *image* is too big for the board.
4
    The *image* cannot be interpreted by |vbutil_kernel|.
5
    The *image* fails the |vbutil_kernel| signature checks.
6
    The *image* is built with a wrong format for the board.
7
    The *image* is missing device-tree files compatible with the board.

depthchargectl target exit status
---------------------------------
0
    A usable *partition* is given, or a usable partition was chosen from
    *disk*\ s. The partition passes the checks and is printed to output.
1
    Errors unrelated to partition checks.
2
    The *partition* is not a writable block device.
3
    The disk containing the *partition* is not a writable block device.
4
    Cannot parse a partition number from the *partition*.
5
    The *partition* is not a ChromeOS kernel partition.
6
    The *partition* is the currently booted partition.
7
    The *partition* is smaller than the **--min-size** argument.

FILES
=====
|CONFIG_DIR|
    Configuration directory. **depthchargectl** searches this directory
    for configuration files and ChromiumOS verified boot keys.

|CONFIG_FILE|
    System configuration file. The "Configuration options" explained
    above can be set here to have them as long-term defaults. It's also
    possible to modify board properties or add new boards here.

|CONFIGD_DIR|/*\ **
    These files are considered appended to the **config** file.

|ETC_CMDLINE_FILE|, |USR_CMDLINE_FILE|, |PROC_CMDLINE_FILE|
    Files from which **depthchargectl** may deduce a default kernel
    command line.

|SYSTEMD_DIR|\ **/depthchargectl-bless.service**
    A systemd service that runs **depthchargectl bless** on successful
    boots.

|USR_KI_DIR|\ **/90-depthcharge-tools.install**
    A systemd kernel-install plugin that can automatically manage your
    system if **layout=depthcharge-tools** is set in |ETC_KI_CONF|.

|INITD_DIR|\ **/depthchargectl-bless**
    An init service that runs **depthchargectl bless** on successful
    boots.

|VBOOT_DEVKEYS|
    A directory containing test keys which should have been installed by
    |vbutil_kernel|. **depthchargectl** also searches this directory if
    no verified boot keys are set in configuration or found in config
    directories.

|IMAGES_DIR|/*\ **\ **.img**
    The most recently built images for each kernel version.

EXAMPLES
========
**depthchargectl** **list** **-n** **-o** *PATH*
    Get a list of partitions **depthchargectl** will act on by default.

**depthchargectl** **build** **--board** *kevin* **--root** */mnt* **--output** *depthcharge.img*
    Build an image for the Samsung Chromebook Plus (v1), using files
    from and intended to boot with the chroot system mounted at */mnt*.

**depthchargectl** **config** *board*
    Print the board codename for the detected board.

**depthchargectl** **config** **--default** *False* *enable-system-hooks*
    Print the *enable-system-hooks* config if it's set, *False* if not.
    This specific config key is meant to be a common mechanism which
    distro packagers can use to let users disable system upgrade hooks
    that use depthchargectl.

**depthchargectl** **write** **--allow-current**
    Build, check and write an image for the latest *kernel-version* of
    this system to disk, while allowing overwriting the currently booted
    partition. You might use this if you only have a single ChromeOS
    kernel partition, but broken kernels might make your system
    unbootable.

**depthchargectl** **write** *vmlinux.kpart* **-t** */dev/mmcblk1p1*
    Write the **vmlinux.kpart** file to **/dev/mmcblk1p1**, only if both
    the image and the partition are valid. Something of this form would
    be used for writing images to a secondary or external disk.

SEE ALSO
========
|mkdepthcharge|, |cgpt|, |vbutil_kernel|

depthcharge-tools-0.6.2/init.d/000077500000000000000000000000001444761253100163275ustar00rootroot00000000000000
depthcharge-tools-0.6.2/init.d/depthchargectl-bless000066400000000000000000000016201444761253100223400ustar00rootroot00000000000000
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later

# depthcharge-tools depthchargectl-bless sysvinit service
# Copyright (C) 2020-2021 Alper Nebi Yasak
# See COPYRIGHT and LICENSE files for full copyright information.

### BEGIN INIT INFO
# Provides:          depthchargectl-bless
# Required-Start:    $remote_fs
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:
# Short-Description: Mark the current depthcharge partition as successful
### END INIT INFO

if ! command -v depthchargectl >/dev/null 2>/dev/null; then
    exit 0
fi

if ! grep "cros_secure" /proc/cmdline >/dev/null 2>/dev/null; then
    # Not booted by depthcharge.
    exit 0
fi

if [ -f /lib/lsb/init-functions ]; then
    . /lib/lsb/init-functions
fi

case "$1" in
    start|restart|reload|force-reload)
        depthchargectl bless
        ;;
    stop|status)
        # Not a daemon.
        ;;
esac

depthcharge-tools-0.6.2/mkdepthcharge.rst000066400000000000000000000322351444761253100205070ustar00rootroot00000000000000..
SPDX-License-Identifier: GPL-2.0-or-later .. depthcharge-tools mkdepthcharge(1) manual page .. Copyright (C) 2019-2022 Alper Nebi Yasak .. See COPYRIGHT and LICENSE files for full copyright information. ============= mkdepthcharge ============= --------------------------------------------- Build boot images for the ChromeOS bootloader --------------------------------------------- :date: 2023-06-30 :version: v0.6.2 :manual_section: 1 :manual_group: depthcharge-tools .. |depthchargectl| replace:: *depthchargectl*\ (1) .. |mkimage| replace:: *mkimage*\ (1) .. |vbutil_kernel| replace:: *vbutil_kernel*\ (1) .. |futility| replace:: *futility*\ (1) .. |CONFIG_DIR| replace:: **/etc/depthcharge-tools** .. |CONFIG_FILE| replace:: **/etc/depthcharge-tools/config** .. |CONFIGD_DIR| replace:: **/etc/depthcharge-tools/config.d** .. |VBOOT_DEVKEYS| replace:: **/usr/share/vboot/devkeys** .. |VBOOT_KEYBLOCK| replace:: **kernel.keyblock** .. |VBOOT_SIGNPUBKEY| replace:: **kernel_subkey.vbpubk** .. |VBOOT_SIGNPRIVATE| replace:: **kernel_data_key.vbprivk** SYNOPSIS ======== **mkdepthcharge** **-o** *FILE* [options] [*VMLINUZ*] [*INITRAMFS* ...] [*DTB* ...] DESCRIPTION =========== **mkdepthcharge** wraps the |mkimage| and |vbutil_kernel| programs with reasonable defaults to package its inputs into the format the ChromeOS bootloader expects. It also automates preprocessing steps and initramfs support hacks that a user would have to do manually or write a script for. The *VMLINUZ* should be a kernel executable, *INITRAMFS* should be a ramdisk image that the kernel should be able to use on its own, and *DTB* files should be device-tree binary files appropriate for the kernel. **mkdepthcharge** tries to determine the type of each input file by some heuristics on their contents, but failing that it assumes a file is whatever is missing in the *VMLINUZ*, *INITRAMFS*, *DTB* order. Alternatively, these files can be specified as options instead of positional arguments. OPTIONS ======= Input files ----------- -d VMLINUZ, --vmlinuz VMLINUZ Kernel executable. If a compressed file is given here, it is decompressed and its contents are used in its place. -i *INITRAMFS* [*INITRAMFS* ...], --initramfs *INITRAMFS* [*INITRAMFS* ...] Ramdisk image. If multiple files are given (e.g. for CPU microcode updates), they are concatenated and used as a single file. -b *DTB* [*DTB* ...], --dtbs *DTB* [*DTB* ...] Device-tree binary files. Global options -------------- -A ARCH, --arch ARCH Architecture to build the images for. The following architectures are understood: **arm**, **arm64**, **aarch64** for ARM boards; **x86**, **x86_64**, **amd64** for x86 boards. If not given, the build architecture of the *VMLINUZ* file is used. --format FORMAT Kernel image format to use, either **fit** or **zimage**. If not given, architecture-specific defaults are used. fit This is the default on ARM boards. The *VMLINUZ* and the optional *INITRAMFS*, *DTB* files are packaged into the Flattened Image Tree (FIT) format using |mkimage| and that is passed to |vbutil_kernel|. zimage This is the default for x86 boards. The *VMLINUZ* is passed mostly unmodified to |vbutil_kernel|, except for decompression and padding for self-decompression. The *INITRAMFS* file is passed as the **--bootloader** argument and the kernel header is modified to point to where it will be in memory. It does not support packaging *DTB* files. -h, --help Show a help message and exit. --kernel-start ADDR Start of the Depthcharge kernel buffer in memory. 
Depthcharge loads the packed data to a fixed physical address in memory, and some initramfs support hacks require this value to be known. This is exactly the board-specific **CONFIG_KERNEL_START** value in the Depthcharge source code and defaults to **0x100000** for the x86 architecture. -o FILE, --output FILE Write the image to *FILE*. The image isn't generated at the output, but copied to it from a temporary working directory. This option is mandatory. --pad-vmlinuz, --no-pad-vmlinuz Pad the *VMLINUZ* file so that the kernel's self-decompression has enough space to avoid overwriting the *INITRAMFS* file during boot. This has different defaults and behaviour depending on the image format, see explanations in their respective sections. --tmpdir DIR Create and keep temporary files in *DIR*. If not given, a temporary **mkdepthcharge-\*** directory is created in **/tmp** and removed at exit. -v, --verbose Print info messages, |mkimage| output and |vbutil_kernel| output to stderr. -V, --version Print program version and exit. FIT image options ----------------- -C TYPE, --compress TYPE Compress the *VMLINUZ* before packaging it into a FIT image, either with **lz4** or **lzma**. **none** is also accepted, but does nothing. -n DESC, --name DESC Description of the *VMLINUZ* to put in the FIT image. --pad-vmlinuz, --no-pad-vmlinuz Pad the *VMLINUZ* file so that the kernel's self-decompression has enough space to avoid overwriting the *INITRAMFS* file during boot. The necessary padding is calculated based on compressed and decompressed kernel sizes and the **--kernel-start** argument. On earliest boards U-Boot moves the *INITRAMFS* away to a safe place before running the *VMLINUZ*, and on ARM64 boards Depthcharge itself decompresses the *VMLINUZ* to a safe place. But 32-bit ARM boards with Depthcharge lack FIT ramdisk support and run the *VMLINUZ* in-place, so this initramfs support hack is necessary on those. This option is enabled by default when **--patch-dtbs** is given, use the **--no-pad-vmlinuz** argument to disable it. --patch-dtbs, --no-patch-dtbs Add **linux,initrd-start** and **linux,initrd-end** properties to the *DTB* files' **/chosen** nodes. Their values are based on the **--kernel-start** or the **--ramdisk-load-address** argument, one of which is required if this argument is given. These properties are normally added by Depthcharge, but 32-bit ARM Chromebooks were released with versions before FIT ramdisk support was introduced, so this initramfs support hack is necessary on those. --ramdisk-load-address ADDR Add a **load** property to the FIT ramdisk subimage section. The oldest ARM Chromebooks use an old custom U-Boot that implements the same verified boot flow as Depthcharge. Its FIT ramdisk support requires an explicit load address for the ramdisk, which can be provided with this argument. zImage image options -------------------- --pad-vmlinuz, --no-pad-vmlinuz Pad the *VMLINUZ* file so that the kernel's self-decompression has enough space to avoid overwriting the *INITRAMFS* file during boot. The necessary padding is calculated based on values in the zImage header and the **--kernel-start** argument. If the *VMLINUZ* and *INITRAMFS* are small enough (about 16 MiB in total) they may fit between **--kernel-start** and the start of the decompression buffer. In this case the padding is unnecessary and not added. The padding is usually larger than the decompressed version of the kernel, so it results in unbootable images for older boards with small image size limits. 
    For these, it is usually necessary to use **--set-init-size**, or
    custom kernels to make the parts fit as described above.

    This is disabled by default in favour of **--set-init-size**, use
    the **--pad-vmlinuz** argument to enable it.

--set-init-size, --no-set-init-size
    Increase the **init_size** kernel boot parameter so that the
    kernel's self-decompression does not overwrite the *INITRAMFS* file
    during boot. The modified value is calculated based on values in the
    zImage header and the **--kernel-start** argument.

    This only works if the kernel has **KASLR** enabled (as is the
    default), because then the kernel itself tries to avoid overwriting
    the *INITRAMFS* during decompression. However, it does not do this
    when first copying the *VMLINUZ* to the end of the decompression
    buffer. Increasing **init_size** shifts this copy upwards to avoid
    it overlapping the *INITRAMFS*.

    If the *VMLINUZ* and *INITRAMFS* are small enough, they may fit
    before the first compressed copy's start. In this case changing the
    value is unnecessary and skipped.

    This is enabled by default, use the **--no-set-init-size** argument
    to disable it.

Depthcharge image options
-------------------------
--bootloader FILE
    Bootloader stub for the very first Chromebooks that use H2C as their
    firmware. Beyond those, this field is ignored on the firmware side
    except as a ramdisk for the **multiboot** and **zbi** formats.

    If an *INITRAMFS* is given for the **zimage** format, it is placed
    here as part of an initramfs support hack for x86 boards. Otherwise,
    an empty file is used.

-c *CMD* [*CMD* ...], --cmdline *CMD* [*CMD* ...]
    Command-line parameters for the kernel. Can be used multiple times
    to append new values. If not given, **--** is used.

    The ChromeOS bootloader expands any instance of **%U** in the kernel
    command line with the PARTUUID of the ChromeOS kernel partition it
    has chosen to boot, e.g. **root=PARTUUID=%U/PARTNROFF=1** will set
    the root partition to the one after the booted partition. As knowing
    the currently booted partition is generally useful,
    **mkdepthcharge** prepends **kern_guid=%U** to the given kernel
    command line parameters to capture it. Use **--no-kern-guid** to
    disable this.

--kern-guid, --no-kern-guid
    Prepend **kern_guid=%U** to kernel command-line parameters. This is
    enabled by default, use the **--no-kern-guid** argument to disable
    it.

--keydir KEYDIR
    Directory containing verified boot keys to use. Equivalent to using
    **--keyblock** *KEYDIR*\ /|VBOOT_KEYBLOCK|, **--signprivate**
    *KEYDIR*\ /|VBOOT_SIGNPRIVATE|, and **--signpubkey**
    *KEYDIR*\ /|VBOOT_SIGNPUBKEY|.

--keyblock FILE, --signprivate FILE, --signpubkey FILE
    ChromiumOS verified boot keys. More specifically: kernel key block,
    private keys in .vbprivk format, and public keys in .vbpubk format.
    If not given, defaults to files set in **depthcharge-tools**
    configuration. If those are not set, **mkdepthcharge** searches for
    these keys in |CONFIG_DIR| and |VBOOT_DEVKEYS| directories, the
    latter being test keys that may be distributed with |vbutil_kernel|.

    You can set these in **depthcharge-tools** configuration by the
    **vboot-keyblock**, **vboot-private-key** and **vboot-public-key**
    options under a **depthcharge-tools** config section.

EXIT STATUS
===========
In general, exits with zero on success and non-zero on failure.

FILES
=====
|CONFIG_FILE|, |CONFIGD_DIR|/*\ **
    The **depthcharge-tools** configuration files. These might be used
    to specify locations of the ChromiumOS verified boot keys as system
    configuration.

|CONFIG_DIR|
    The **depthcharge-tools** configuration directory. **mkdepthcharge**
    searches this directory for verified boot keys.

|VBOOT_DEVKEYS|
    A directory containing test keys which should have been installed by
    |vbutil_kernel|.

*KEYDIR*/|VBOOT_KEYBLOCK|
    Default kernel key block file used for signing the image.

*KEYDIR*/|VBOOT_SIGNPUBKEY|
    Default public key used to verify signed images.

*KEYDIR*/|VBOOT_SIGNPRIVATE|
    Default private key used for signing the image.

EXAMPLES
========
**mkdepthcharge** **-o** *depthcharge.img* */boot/vmlinuz*
    The simplest invocation possible. If tried on an ARM board, the
    firmware might refuse to boot the output image since it doesn't have
    a dtb for the board. Otherwise, even if the firmware runs the
    */boot/vmlinuz* binary, it might not correctly boot due to
    non-firmware causes (e.g. kernel panic due to not having a root).

**mkdepthcharge** **-o** *system.img* **--cmdline** *"root=/dev/mmcblk0p2"* **--compress** *lz4* **--** */boot/vmlinuz.gz* */boot/initrd.img* *rk3399-gru-kevin.dtb*
    A command someone using a Samsung Chromebook Plus (v1) might run on
    their board to create a bootable image for their running system.

**mkdepthcharge** **-o** *veyron.img* **-c** *"root=LABEL=ROOT gpt"* **--kernel-start** *0x2000000* **--patch-dtbs** **--** */boot/vmlinuz* */boot/initramfs-linux.img* */boot/dtbs/rk3288-veyron-\*.dtb*
    Build an image intended to work on veyron boards like the ASUS
    Chromebook C201PA and Chromebook Flip C100PA. The stock Depthcharge
    on these boards doesn't process the FIT ramdisk, so the dtbs need to
    be patched to boot with initramfs.

**mkdepthcharge** **-o** *peach-pit.img* **-c** *"console=null"* **--ramdisk-load-address** *0x44000000* **--** *vmlinuz* *initramfs* *exynos5420-peach-pit.dtb* *exynos5420-peach-pit.dtb*
    Build an image intended to work on a Samsung Chromebook 2 (11").
    This board uses a custom U-Boot, so needs an explicit ramdisk load
    address. Its firmware has a bug with loading the device-tree file,
    so needs the file twice for the result to be actually bootable.

SEE ALSO
========
|depthchargectl|, |mkimage|, |vbutil_kernel|, |futility|

depthcharge-tools-0.6.2/setup.py000066400000000000000000000031261444761253100166560ustar00rootroot00000000000000
# SPDX-License-Identifier: GPL-2.0-or-later

# depthcharge-tools python package setup script
# Copyright (C) 2020-2023 Alper Nebi Yasak
# See COPYRIGHT and LICENSE files for full copyright information.

#!
/usr/bin/env python3 import pathlib import setuptools root = pathlib.Path(__file__).resolve().parent readme = (root / 'README.rst').read_text() setuptools.setup( name='depthcharge-tools', version='0.6.2', description='Tools to manage the Chrome OS bootloader', long_description=readme, long_description_content_type="text/x-rst", url='https://github.com/alpernebbi/depthcharge-tools', author='Alper Nebi Yasak', author_email='alpernebiyasak@gmail.com', license='GPL2+', license_files=["LICENSE", "COPYRIGHT"], classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python :: 3', 'Topic :: System :: Boot', ], entry_points={ 'console_scripts': [ 'mkdepthcharge=depthcharge_tools.mkdepthcharge:mkdepthcharge.main', 'depthchargectl=depthcharge_tools.depthchargectl:depthchargectl.main', ], }, keywords='ChromeOS ChromiumOS depthcharge vboot vbutil_kernel', packages=setuptools.find_packages(), package_data={ "depthcharge_tools": ["config.ini", "boards.ini"], }, install_requires=[ 'setuptools', ], ) depthcharge-tools-0.6.2/systemd/000077500000000000000000000000001444761253100166325ustar00rootroot00000000000000depthcharge-tools-0.6.2/systemd/90-depthcharge-tools.install000077500000000000000000000203541444761253100240730ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later # depthcharge-tools kernel-install plugin # Copyright (C) 2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. # This is a modified copy of 90-loaderentry.install from systemd. # # systemd is free software; you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # # systemd is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with systemd; If not, see . set -e COMMAND="${1:?}" KERNEL_VERSION="${2:?}" ENTRY_DIR_ABS="${3:?}" KERNEL_IMAGE="$4" INITRD_OPTIONS_SHIFT=4 [ "$KERNEL_INSTALL_LAYOUT" = "depthcharge-tools" ] || exit 0 log() { if [ "$KERNEL_INSTALL_VERBOSE" -gt 0 ]; then echo "$@" fi } >&2 maybe_error() { # Ignore errors if we're not booted with depthcharge if grep "cros_secure" /proc/cmdline >/dev/null 2>&1; then if [ -n "$1" ]; then echo "Error: $1" fi echo "Error: Failed to update depthcharge partitions, system may be unbootable." exit 1 else if [ -n "$1" ]; then log "Error: $1" fi log "Not booted with depthcharge, so ignoring that." exit 0 fi } >&2 # Disable if our package is not installed. if ! command -v depthchargectl >/dev/null 2>&1; then log "Not running depthcharge plugin, depthchargectl is missing." 
exit 0 fi run_depthchargectl() { if [ "$KERNEL_INSTALL_VERBOSE" -gt 0 ]; then log "Running depthchargectl $@:" depthchargectl --verbose "$@" else depthchargectl "$@" 2>/dev/null fi } MACHINE_ID="$KERNEL_INSTALL_MACHINE_ID" ENTRY_TOKEN="$KERNEL_INSTALL_ENTRY_TOKEN" BOOT_ROOT="$KERNEL_INSTALL_BOOT_ROOT" BOOT_MNT="$(stat -c %m "$BOOT_ROOT")" if [ "$BOOT_MNT" = '/' ]; then BOOT_DIR="$ENTRY_DIR_ABS" else BOOT_DIR="${ENTRY_DIR_ABS#"$BOOT_MNT"}" fi case "$COMMAND" in remove) ENABLED="$( depthchargectl config \ --section depthchargectl/remove \ --default False \ enable-system-hooks 2>/dev/null )" || maybe_error # Disable based on package configuration if [ "$ENABLED" != "True" ]; then log "Not removing depthcharge image, disabled by config." exit 0 fi IMAGES_DIR="$( depthchargectl config \ --section depthchargectl/remove \ images-dir 2>/dev/null )" || maybe_error if [ -f "$IMAGES_DIR/$KERNEL_VERSION.img" ]; then # Assuming kernel-install handles warnings about removing the running kernel run_depthchargectl remove --force "$KERNEL_VERSION" >/dev/null \ || maybe_error else log "Not removing depthcharge image, already doesn't exist." fi exit 0 ;; add) ;; *) exit 0 ;; esac ENABLED="$( depthchargectl config \ --section depthchargectl/write \ --default False \ enable-system-hooks )" || maybe_error if [ "$ENABLED" != "True" ]; then log "Not writing depthcharge image, disabled by config." exit 0 fi IMAGES_DIR="$( depthchargectl config \ --section depthchargectl/write \ images-dir )" || maybe_error BOARD="$(depthchargectl config board)" || maybe_error if [ "$BOARD" = "none" ]; then maybe_error "Cannot build depthcharge images when no board is specified." fi KERNEL_CMDLINE="$( depthchargectl config \ --section depthchargectl/write \ kernel-cmdline 2>/dev/null )" || KERNEL_CMDLINE="" if [ -n "$KERNEL_INSTALL_CONF_ROOT" ]; then if [ -f "$KERNEL_INSTALL_CONF_ROOT/cmdline" ]; then BOOT_OPTIONS="$(tr -s "$IFS" ' ' <"$KERNEL_INSTALL_CONF_ROOT/cmdline")" fi elif [ -f /etc/kernel/cmdline ]; then BOOT_OPTIONS="$(tr -s "$IFS" ' ' "$KERNEL_INSTALL_STAGING_AREA/merged-initrd.img" \ || maybe_error "Could not merge initrd files for depthchargectl." INITRD="$KERNEL_INSTALL_STAGING_AREA/merged-initrd.img" set -- fi # Check possible dtbs paths FDTDIR="" for fdtdir in \ "$BOOT_ROOT/dtbs/$KERNEL_VERSION" \ "$BOOT_ROOT/dtb/$KERNEL_VERSION" \ "$BOOT_ROOT/dtbs-$KERNEL_VERSION" \ "$BOOT_ROOT/dtb-$KERNEL_VERSION" \ "/usr/lib/linux-image-$KERNEL_VERSION" \ "/usr/lib/modules/$KERNEL_VERSION/dtbs" \ "/usr/lib/modules/$KERNEL_VERSION/dtb" \ "/lib/modules/$KERNEL_VERSION/dtbs" \ "/lib/modules/$KERNEL_VERSION/dtb" \ "$BOOT_ROOT/dtbs" \ "$BOOT_ROOT/dtb" \ "/usr/share/dtbs" \ "/usr/share/dtb" \ ; do if [ -d "$fdtdir" ]; then FDTDIR="$fdtdir" break fi done # Depthchargectl write doesn't take custom files, so build image first IMAGE="$( run_depthchargectl build \ --kernel "$KERNEL_IMAGE" \ ${INITRD:+--initramfs "$INITRD"} \ ${FDTDIR:+--fdtdir "$FDTDIR"} \ --kernel-cmdline "$BOOT_OPTIONS" \ --kernel-release "$KERNEL_VERSION" \ )" || maybe_error PART_COUNT="$(depthchargectl list -c 2>/dev/null)" || maybe_error if [ "$PART_COUNT" -gt 1 ]; then run_depthchargectl write "$IMAGE" >/dev/null \ || maybe_error elif [ "$PART_COUNT" -eq 1 ]; then run_depthchargectl write --allow-current "$IMAGE" >/dev/null \ || maybe_error else maybe_error "No usable Chrome OS Kernel partition found." 
fi exit 0 depthcharge-tools-0.6.2/systemd/depthchargectl-bless.service000066400000000000000000000020051444761253100243000ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1+ # depthcharge-tools depthchargectl-bless systemd service # Copyright (C) 2019-2021 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. # This is a modified copy of systemd-bless-boot.service from systemd. # # systemd is free software; you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. [Unit] Description=Mark the current depthcharge partition as successful Documentation=man:depthchargectl(8) DefaultDependencies=no Requires=boot-complete.target After=local-fs.target boot-complete.target Conflicts=shutdown.target Before=shutdown.target ConditionKernelCommandLine=cros_secure [Service] Type=oneshot RemainAfterExit=yes ExecStart=depthchargectl bless # systemd-bless-boot-generator symlinks its file to basic.target.wants. [Install] WantedBy=basic.target depthcharge-tools-0.6.2/update_config.py000066400000000000000000001203021444761253100203210ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0-or-later # depthcharge-tools boards.ini update script # Copyright (C) 2021-2022 Alper Nebi Yasak # See COPYRIGHT and LICENSE files for full copyright information. #! /usr/bin/env python3 import configparser import collections import json import logging import re from functools import lru_cache from pathlib import Path from depthcharge_tools import __version__ from depthcharge_tools.utils.argparse import ( Command, Argument, Group, ) from depthcharge_tools.utils.collections import ( DirectedGraph, SortedDict, ) from depthcharge_tools.utils.pathlib import ( iterdir, read_lines, ) class update_config( Command, prog="update_config.py", add_help=False, ): """ Maintainer tool to help update depthcharge-tools config.ini --- If you're packaging depthcharge-tools, don't use this as a build step. Results from this are intended to be checked and modified manually before they go into the final config.ini, the file committed to the repository is the canonical one. 
""" logger = logging.getLogger(__name__) @Group def options(self): """Options""" @options.add @Argument("-h", "--help", action="help") def print_help(self): """Show this help message.""" # type(self).parser.print_help() @options.add @Argument( "-V", "--version", action="version", version="depthcharge-tools %(prog)s {}".format(__version__), ) def version(self): """Print program version.""" return type(self).version.version % {"prog": type(self).prog} @options.add @Argument("-v", "--verbose", count=True) def verbosity(self, verbosity=0): """Print more detailed output.""" level = logging.WARNING - int(verbosity) * 10 self.logger.setLevel(level) return verbosity def parse_recovery_conf_block(self, block): values = {} for line in block.splitlines(): if line.startswith("#"): continue key, sep, value = line.partition("=") if sep != "=": raise ValueError( "No equals sign in line: '{}'" .format(line) ) if key not in values: values[key] = value elif isinstance(values[key], list): values[key].append(value) else: values[key] = [values[key], value] if "filesize" in values: values["filesize"] = int(values["filesize"] or 0) if "zipfilesize" in values: values["zipfilesize"] = int(values["zipfilesize"] or 0) return values @options.add @Argument("-r", "--recovery-conf") def recovery_conf(self, path=None): """\ Chrome OS recovery.conf file for their Linux recovery tool https://dl.google.com/dl/edgedl/chromeos/recovery/recovery.conf """ return Path(path) if path else None @property @lru_cache def recovery_conf_boards(self): if self.recovery_conf is None: return {} header, *blocks = [ self.parse_recovery_conf_block(block) for block in re.split("\n\n+", self.recovery_conf.read_text()) ] version = header.get( "recovery_tool_linux_version", header.get("recovery_tool_version"), ) if version != "0.9.2": raise TypeError( "Unsupported recovery.conf version: {}" .format(header.get("recovery_tool_update", version)) ) boards = collections.defaultdict(list) for block in blocks: hwidmatch = block.get("hwidmatch") # This might be a parent board, but the best fallback we have codename = block.get("file").split("_")[2] if hwidmatch == "duplicate of rabbid": codename = "rabbid" block["hwidmatch"] = None elif hwidmatch == "duplicate of C433": codename = "shyvana" block["hwidmatch"] = None elif hwidmatch == "Duplicate of BARLA": codename = "barla" block["hwidmatch"] = None elif hwidmatch.strip("^(").startswith("ACER ZGB"): pass # x86-zgb, x86-zgb-he elif hwidmatch.strip("^(").startswith("IEC MARIO"): pass # x86-mario elif hwidmatch.strip("^(").startswith("SAMS ALEX"): pass # x86-alex, x86-alex-he elif hwidmatch in ( "DOES NOT MATCH ANYTHING", "NO MATCH JUST FOR ENTRY", ): codename = block.get("file").split("_")[2] block["hwidmatch"] = None else: m = re.match("^\^?\(?([0-9A-Z]+)[^0-9A-Za-z]", hwidmatch) if m: codename = m.group(1).lower() else: self.logger.warning( "Could not parse codename for hwidmatch '{}'." .format(hwidmatch) ) if codename: boards[codename].append(block) return dict(boards) def read_profiles_repo_name(self, d): # A single-line file, so return the first line for line in read_lines(d / "profiles" / "repo_name"): return line.strip() def parse_layout_conf(self, d): values = {} for line in read_lines(d / "metadata" / "layout.conf"): key, eq, value = line.partition("=") if eq == "=" and "#" not in key: values[key.strip()] = value.strip() return values def get_profiles_base_parent_boards(self, d): parents = [] for line in read_lines(d / "profiles" / "base" / "parent"): # Most end with :base, but there were e.g. 
freon:base/amd64 lhs, sep, rhs = line.partition(":") if sep == ":" and rhs.startswith("base"): parents.append(lhs) # Very old scheme, e.g. firmware-snow-2695.B tegra variants prefix = "../../../" suffix = "/profiles/base" if line.startswith(prefix) and line.endswith(suffix): parents.append(line[len(prefix):-len(suffix)]) return parents def get_model_yaml_boards(self, d): children = set() # chromeos-config-bsp directories can have inconsistent names. for config_d in d.glob("chromeos-base/chromeos-config-bsp*"): for line in read_lines(config_d / "files" / "model.yaml"): # A giant hack that lets me avoid parsing yaml keyname = "- $device-name:" space, sep, child = line.partition(keyname) if sep == keyname and "#" not in space: children.add(child.strip().strip('\'"')) return children @options.add @Argument("-b", "--board-overlays-repo") def board_overlays_repo(self, path=None): """\ Chromium OS board-overlays git repository https://chromium.googlesource.com/chromiumos/overlays/board-overlays """ return Path(path) if path else None def get_project_config_boards(self, d): children = set() project_config = ( d / "sw_build_config" / "platform" / "chromeos-config" / "generated" / "project-config.json" ) if project_config.is_file(): config = json.loads(project_config.read_text()) for section in config["chromeos"]["configs"]: if section["name"]: children.add(section["name"]) return children @options.add @Argument("-p", "--chromiumos-project-repo") def chromiumos_project_repo(self, path=None): """\ Chromium OS's chromiumos/project git repository https://chromium.googlesource.com/chromiumos/project """ return Path(path) if path else None def parse_defconfig(self, text): values = dict() for line in text.splitlines(): if line.startswith("#"): continue lhs, sep, rhs = line.partition("=") if sep != "=" or not lhs.startswith("CONFIG_"): continue if rhs == "y": value = True elif rhs == "n": value = False elif rhs.startswith("0x"): value = int(rhs, 16) else: value = rhs.strip().strip("'\"") key = lhs[len("CONFIG_"):] values[key] = value # Not going to parse Kconfig for this if values.get("ARCH_ARM") and not values.get("ARCH_ARM_V8"): values["ARCH_ARM_V7"] = True return values @options.add @Argument("-d", "--depthcharge-repo") def depthcharge_repo(self, path=None): """\ Chromium OS depthcharge firmware git repository https://chromium.googlesource.com/chromiumos/platform/depthcharge """ return Path(path) if path else None def parse_kconfig_defaults(self, text): defaults = {} clean_text, _ = re.subn("#.*\n", "\n", text) blocks = re.split("\n\n+", clean_text) for block in blocks: config = None for line in block.splitlines(): line = line.strip() if not line or line.startswith("help"): if config is None: continue else: config = None break m = re.match("config ([0-9A-Z_]+)", line) if m: config = m.group(1) type_ = lambda s: str.strip(s, "'\"") defaults[config] = {} if config is None: continue if line.startswith("hex"): type_ = lambda x: int(x, 16) elif line.startswith("int"): type_ = int elif line.startswith("bool"): type_ = lambda b: b in ("y", "Y") elif line.startswith("string"): type_ = lambda s: str.strip(s, "'\"") m = re.match("default (\S+|\".+\")$", line) try: value = type_(m.group(1).strip("'\"")) except ValueError: value = m.group(1) except AttributeError: value = None finally: if value is not None: defaults[config][None] = value value = None m = re.match("default (\S+|\".+\") if ([0-9A-Z_]+)", line) try: value = type_(m.group(1)) cond = m.group(2) except ValueError: value = m.group(1) cond = m.group(2) 
except AttributeError: value = None cond = None finally: if value is not None and cond is not None: defaults[config][cond] = value value = None return defaults def parse_kconfig_selects(self, text): selects = {} clean_text, _ = re.subn("#.*\n", "\n", text) blocks = re.split("\n\n+", clean_text) for block in blocks: config = None for line in block.splitlines(): line = line.strip() if not line or line.startswith("help"): if config is None: continue else: config = None break m = re.match("config ([0-9A-Z_]+)", line) if m: config = m.group(1) type_ = lambda s: str.strip(s, "'\"") selects[config] = {} selects[config][None] = [] if config is None: continue m = re.match("select (\S+|\".+\")$", line) if m: value = m.group(1).strip("'\"") selects[config][None].append(value) m = re.match("select (\S+|\".+\") if ([0-9A-Z_]+)", line) if m: value = m.group(1) cond = m.group(2) if cond not in selects[config]: selects[config][cond] = [] selects[config][cond].append(value) return selects @property @lru_cache def depthcharge_boards(self): boards = {} defaults = collections.defaultdict(dict) if self.depthcharge_repo is None: return boards # Provide a limited set of default values to avoid having to # parse all Kconfig files or something image_f = self.depthcharge_repo / "src/image/Kconfig" image_d = self.parse_kconfig_defaults(image_f.read_text()) for cond, default in image_d.get("KERNEL_SIZE", {}).items(): defaults[cond]["KERNEL_SIZE"] = default for defconfig_f in self.depthcharge_repo.glob("board/*/defconfig"): defconfig = self.parse_defconfig(defconfig_f.read_text()) # CONFIG_BOARD is removed in master board = defconfig.get("BOARD", defconfig_f.parent.name) # kevin, kevin-tpm2 both have BOARD="kevin", prefer former if board in boards and board != defconfig_f.parent.name: continue board_d = {} board_d.update(defaults.get(None, {})) for cond, config in defaults.items(): if cond and defconfig.get(cond, None): board_d.update(config) board_d.update(defconfig) boards[board] = board_d return boards @options.add @Argument("-c", "--coreboot-repo") def coreboot_repo(self, path=None): """\ Chromium OS coreboot firmware git repository https://chromium.googlesource.com/chromiumos/third_party/coreboot """ return Path(path) if path else None @property @lru_cache def coreboot_boards(self): boards = {} if self.coreboot_repo is None: return boards def get_board_name(config): parts = config.split("_") if len(parts) < 2 or parts[0] != "BOARD": return None vendor = parts[1].lower() if not (self.coreboot_repo / "src/mainboard" / vendor).is_dir(): return None board = "_".join(config.split("_")[2:]).lower() return board for kconfig_f in self.coreboot_repo.glob("src/mainboard/*/*/Kconfig"): kconfig_name = kconfig_f.with_name("Kconfig.name") kconfig = kconfig_f.read_text() if kconfig_name.is_file(): kconfig_name = kconfig_name.read_text() else: kconfig_name = "" defaults = self.parse_kconfig_defaults(kconfig) selects = self.parse_kconfig_selects(kconfig) selects.update(self.parse_kconfig_selects(kconfig_name)) def add_board(config): board = get_board_name(config) if board in boards: return boards[board] boards[board] = {} for cond, selectlist in selects.get(config, {}).items(): if cond is None or cond in boards[board]: for select in selectlist: if get_board_name(select): boards[board].update(add_board(select)) boards[board][select] = True for key, values in defaults.items(): if get_board_name(key): continue value = values.get(config, values.get(None)) if value is not None: boards[board][key] = value board_opts = 
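    # A sketch of the shapes these Kconfig parsers return (the symbol
    # names here are made up for illustration): given a block like
    #
    #   config BOARD_GOOGLE_FOO
    #           bool "Foo"
    #           select BAR
    #           select BAZ if QUX
    #           default y if BOARD_GOOGLE_BASEBOARD_FOO
    #
    # parse_kconfig_defaults() above yields
    #   {"BOARD_GOOGLE_FOO": {"BOARD_GOOGLE_BASEBOARD_FOO": True}}
    # and parse_kconfig_selects() below yields
    #   {"BOARD_GOOGLE_FOO": {None: ["BAR"], "QUX": ["BAZ"]}}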
selects.get("BOARD_SPECIFIC_OPTIONS", {}) for cond, selectlist in board_opts.items(): if cond is None or cond in boards[board]: for select in selectlist: if get_board_name(select): boards[board].update(add_board(select)) boards[board][select] = True boards[board][config] = True return boards[board] for config, _ in defaults.items(): if get_board_name(config): add_board(config) for select, _ in selects.items(): if get_board_name(select): add_board(select) for board, block in list(boards.items()): suffix = "_common" if board.endswith(suffix): actual = board[:-len(suffix)] boards.setdefault(actual, boards.pop(board)) board = actual prefix = "baseboard_" if board.startswith(prefix): actual = "baseboard-{}".format(board[len(prefix):]) boards.setdefault(actual, boards.pop(board)) board = actual if not block.get("MAINBOARD_HAS_CHROMEOS", False): if board in boards: boards.pop(board) continue return boards @property @lru_cache def board_relations(self): board_relations = DirectedGraph() repo_names = {} # Find canonical names for each board for board_d in iterdir(self.board_overlays_repo): if not board_d.is_dir() or board_d.name.startswith("."): continue layout_conf = self.parse_layout_conf(board_d) repo_name = layout_conf.get("repo-name") # e.g. overlay-amd64-host doesn't have layout.conf if repo_name is None: repo_name = self.read_profiles_repo_name(board_d) if repo_name is None: self.logger.warning( "Couldn't find a canonical name for board dir '{}'." .format(board_d.name) ) repo_name = board_d.name repo_names[board_d.name] = repo_name board_relations.add_node(repo_name) for overlay, repo_name in repo_names.items(): board_d = self.board_overlays_repo / overlay for parent in self.get_profiles_base_parent_boards(board_d): if parent != repo_name: board_relations.add_edge(parent, repo_name) # Various model/skus of recent boards don't have explicit overlay # dirs, but are specified in model.yaml in the base overlay for child in self.get_model_yaml_boards(board_d): if repo_name != child: board_relations.add_edge(repo_name, child) # Some relations only exists in layout.conf, e.g. 
# - x86-generic -> x86-generic_embedded # - project-* dirs and their children # - peach -> peach_pit in firmware-gru-8785.B layout_conf = self.parse_layout_conf(board_d) for parent in layout_conf.get("masters", "").split(): if parent != repo_name and parent not in ( "chromiumos", "portage-stable", "eclass-overlay", ): board_relations.add_edge(parent, repo_name) # "snow" is the default, implicit "daisy" if board_relations.nodes().intersection(("snow", "daisy")): board_relations.add_edge("daisy", "snow") # Some newer board variants are only in this project repo for board in iterdir(self.chromiumos_project_repo): if not board.is_dir() or board.name.startswith("."): continue board_relations.add_node(board.name) for profile in iterdir(board): if not profile.is_dir() or profile.name.startswith("."): continue # puff/puff exists if profile.name != board.name: board_relations.add_edge(board.name, profile.name) for child in self.get_project_config_boards(profile): # shadowkeep/shadowkeep/shadowkeep exists if child == profile.name == board.name: continue # galaxy/{andromeda,sombrero} has galaxy # make them {andromeda,sombrero}_galaxy elif child == board.name: child = "{}_{}".format(profile.name, child) if child != profile.name: board_relations.add_edge(profile.name, child) # Project repo lists all "veyron" boards under "veyron-pinky" if "veyron-pinky" in board_relations.nodes(): board_relations.add_edge("veyron", "veyron-pinky") for child in board_relations.children("veyron-pinky"): board_relations.add_edge("veyron", child) board_relations.remove_edge("veyron-pinky", child) # Weird stuff from depthcharge for board, config in self.depthcharge_boards.items(): parent = config.get("BOARD", None) parent = config.get("BOARD_DIR", parent) if parent is None: continue board_relations.add_node(board) # src/board/ and BOARD_DIR has gru (baseboard) and veyron_* # (variants), we can't just always add "baseboard-". 
if "baseboard-{}".format(parent) in board_relations.nodes(): parent = "baseboard-{}".format(parent) # This looks incorrect for a few boards, so only add the # relation if we don't know anything about the board if not board_relations.parents(board): if parent != board: board_relations.add_edge(parent, board) nodes = { node.replace("-", "_"): node for node in board_relations.nodes() } def coreboot_board_name(config): if config is None or not config.startswith("BOARD_"): return None board = "_".join(config.split("_")[2:]).lower() if board.startswith("baseboard_"): board = "baseboard-{}".format(board[len("baseboard_"):]) if board not in self.coreboot_boards: return None return board def add_coreboot_parents(board): if board is None: return None board = nodes.get(board.replace("-", "_"), board) board_relations.add_node(board) block = self.coreboot_boards.get(board, {}) parents = set( coreboot_board_name(config) for config, value in block.items() if value ) for parent in parents - {board, None}: add_coreboot_parents(parent) parent = nodes.get(parent.replace("-", "_"), parent) # This also has conflicts with board-overlays if not board_relations.parents(board): board_relations.add_edge(parent, board) for board, block in self.coreboot_boards.items(): add_coreboot_parents(board) nodes = { node.replace("_", "-"): node for node in board_relations.nodes() } # Recovery.conf heuristics, doesn't have actual parent board info for board, blocks in self.recovery_conf_boards.items(): parents = set([b.get("file").split("_")[2] for b in blocks]) parents.discard(board) if len(parents) > 1: continue elif len(parents) == 0: parent = None else: parent = parents.pop() # This is really inaccurate with underscores replaced with # hyphens, so only use it if we don't know anything else if board in nodes: continue # Don't duplicate veyron_speedy as speedy if parent in nodes: parent = nodes[parent] if parent and parent.endswith(board): parent = parent[:-len(board)-1] board_relations.add_node(board) if parent: board_relations.add_edge(parent, board) # Add board architectures as root parent for board, config in self.depthcharge_boards.items(): if config.get("ARCH_X86"): arch = "amd64" elif config.get("ARCH_ARM_V8"): arch = "arm64" elif config.get("ARCH_ARM"): arch = "arm" else: continue roots = board_relations.roots(board) for root in roots - {"x86", "amd64", "arm64", "arm"}: board_relations.add_edge(arch, root) # Baseboards, chipsets shouldn't depend on others in their class for board in board_relations.nodes(): if board.startswith("chipset-"): for child in board_relations.children(board): if child.startswith("chipset-"): board_relations.remove_edge(board, child) for parent in board_relations.parents(board): board_relations.add_edge(parent, child) elif board.startswith("baseboard-"): for child in board_relations.children(board): if child.startswith("baseboard-"): board_relations.remove_edge(board, child) for parent in board_relations.parents(board): board_relations.add_edge(parent, child) # Relations from older versions no longer in main branches, # coreboot Kconfigs I'm too lazy to parse, etc. 
for parent, children in [ ("amd64", ["chipset-pinetrail", "chipset-snb", "chipset-ivb", "chipset-hsw", "chipset-cnl", "chipset-icl", "chipset-rpl", "reven"]), ("chipset-adl", ["adlrvp", "shadowmountain"]), ("chipset-bdw", ["baseboard-auron"]), ("chipset-cml", ["cmlrvp"]), ("chipset-cnl", ["cnlrvp"]), ("chipset-glk", ["glkrvp"]), ("chipset-hsw", ["baseboard-slippy", "baseboard-beltino"]), ("chipset-icl", ["iclrvp", "dragonegg"]), ("chipset-ivb", ["stout"]), ("chipset-kbl", ["kblrvp"]), ("chipset-pinetrail", ["x86-alex-he", "x86-mario", "x86-zgb-he"]), ("chipset-snb", ["butterfly", "lumpy", "stumpy"]), ("chipset-tgl", ["tglrvp", "deltaur", "deltan", ]), ("chipset-whl", ["whlrvp"]), ("chipset-mendocino", ["chausie"]), ("baseboard-auron", ["auron"]), ("baseboard-slippy", ["slippy"]), ("baseboard-beltino", ["beltino"]), ("arm", ["chipset-tegra124", "chipset-exynos5", "chipset-cygnus", "chipset-ipq4019", "chipset-ipq8064", "chipset-rk3288"]), ("chipset-cygnus", ["purin"]), ("chipset-exynos5", ["daisy", "peach"]), ("chipset-ipq4019", ["gale"]), ("chipset-ipq8064", ["storm"]), ("chipset-rk3288", ["veyron"]), ("chipset-tegra124", ["nyan"]), ("storm", ["arkham", "whirlwind"]), ("veyron", ["veyron_mickey", "veyron_rialto"]), ("arm64", ["chipset-tegra210", "chipset-qcs404", "chipset-mt8188g"]), ("chipset-qcs404", ["mistral"]), ("chipset-tegra210", ["foster", "smaug"]), ]: for child in children: board_relations.add_edge(parent, child) return board_relations @options.add @Argument("-o", "--output", required=True) def output(self, path): """Write updated config to PATH.""" if path is None: raise ValueError( "Output argument is required." ) return Path(path).resolve() @property @lru_cache def board_config_sections(self): board_relations = self.board_relations # "project-*" overlays don't really look like boards. if self.board_overlays_repo is not None: projects = set( overlay.name.partition("-")[2] for overlay in self.board_overlays_repo.glob("project-*") ) else: projects = set() nonboards = set(( *projects, "unprovisioned", "signed", "embedded", "legacy", "npcx796", "npcx796fc", "ext_ec", "extec", "alc1015_amp", )) chipsets = { "chipset-adl": "alderlake", "chipset-adln": "alderlake-n", "chipset-apl": "apollolake", "chipset-bdw": "broadwell", "chipset-bsw": "braswell", "chipset-byt": "baytrail", "chipset-cml": "cometlake", "chipset-cnl": "cannonlake", "chipset-glk": "geminilake", "chipset-hsw": "haswell", "chipset-icl": "icelake", "chipset-ivb": "ivybridge", "chipset-jsl": "jasperlake", "chipset-kbl": "kabylake", "chipset-mtl": "meteorlake", "chipset-rpl": "raptorlake", "chipset-skl": "skylake", "chipset-snb": "sandybridge", "chipset-tgl": "tigerlake", "chipset-whl": "whiskeylake", "chipset-stnyridge": "stoneyridge", } def get_parent(board): # Projects can be the sole parent of actual boards (e.g. # freon was to a lot of boards) so don't use them as parents # at all, despite breaking e.g. termina/tael parentage. 
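            # When several candidate parents remain, warn about the
            # ambiguity and pick the one with the longest ancestor
            # chain, so deeper chipset/baseboard paths win.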
parents = board_relations.parents(board) - nonboards if len(parents) > 1: self.logger.warning( "Board '{}' has multiple parents: '{}'" .format(board, parents) ) elif len(parents) == 0: return None # Prefer longer chains return max( parents, key=lambda p: len(board_relations.ancestors(p)), ) aliases = {} def add_alias(alias, board): if alias in aliases: aliases[alias] = None else: aliases[alias] = board # Do not alias nonboards to anything for nonboard in nonboards: aliases[nonboard] = None # Convert the nodes to the path-to-node format we want paths = {} for board in board_relations.nodes(): parts = [board] parent = get_parent(board) # Prefer full names for chipsets if board.startswith("chipset-"): chipset = chipsets.get(board, board[len("chipset-"):]) parts = [chipset] # Don't keep baseboard prefix if board.startswith("baseboard-"): baseboard = board[len("baseboard-"):] parts = [baseboard] if parent is not None: lhs, sep, rhs = board.partition("_") if sep != "_": pass # Fixup left-duplication e.g. veyron/veyron_speedy elif lhs == parent: parts = [rhs] add_alias(rhs, board) # Fixup right-duplication e.g. hatch/unprovisioned_hatch elif rhs == parent: parts = [lhs] add_alias(lhs, board) # Split e.g. unprovisioned_kohaku -> kohaku/unprovisioned elif lhs in nonboards: parts = [lhs, rhs] # e.g. arcada_signed, volteer2_ti50, helios_diskswap etc. else: parts = [rhs, lhs] while parent is not None: # Prefer full names for chipsets if parent.startswith("chipset-"): chipset = chipsets.get(parent, parent[len("chipset-"):]) parts.append(chipset) # Normalize boards with the same name as baseboard elif parent.startswith("baseboard-"): baseboard = parent[len("baseboard-"):] if parts[-1] != baseboard: parts.append(baseboard) else: parts.append(parent) parent = get_parent(parent) paths[board] = "boards/{}".format("/".join(reversed(parts))) for alias, board in aliases.items(): if board is not None: paths.setdefault(alias, paths[board]) return paths def __call__(self): config = configparser.ConfigParser( dict_type=SortedDict(lambda s: s.split('/')), ) for arch in ("x86", "amd64", "arm", "arm64"): name = self.board_config_sections.get(arch, None) if name is None: continue config.add_section(name) config[name]["arch"] = arch config[name]["codename"] = "{}-generic".format(arch) for board, name in self.board_config_sections.items(): if board.startswith("chipset-"): config.add_section(name) config[name]["codename"] = board for codename, blocks in self.recovery_conf_boards.items(): name = self.board_config_sections.get(codename, None) if name is None: continue config.add_section(name) board = config[name] board["codename"] = codename for i, block in enumerate(blocks): if len(blocks) > 1: name_i = "{}/{}".format(name, i) config.add_section(name_i) board = config[name_i] if block.get("hwidmatch", None): board["hwid-match"] = block["hwidmatch"] if block.get("name", None): board["name"] = block["name"] # Some heuristics for kernel compression if self.depthcharge_repo is not None: arm64_boot_c = (self.depthcharge_repo / "src/arch/arm/boot64.c") if arm64_boot_c.is_file(): arm64_boot_c = arm64_boot_c.read_text() else: arm64_boot_c = "" fit_c = (self.depthcharge_repo / "src/boot/fit.c") if fit_c.is_file(): fit_c = fit_c.read_text() else: fit_c = "" fit_h = (self.depthcharge_repo / "src/boot/fit.h") if fit_h.is_file(): fit_h = fit_h.read_text() else: fit_h = "" else: arm64_boot_c = "" fit_c = "" fit_h = "" if "fit_decompress(kernel" in arm64_boot_c: arm64_lz4_kernel = "CompressionLz4" in fit_h + fit_c arm64_lzma_kernel = 
"CompressionLzma" in fit_h + fit_c elif "switch(kernel->compression)" in arm64_boot_c: arm64_lz4_kernel = "case CompressionLz4" in arm64_boot_c arm64_lzma_kernel = "case CompressionLzma" in arm64_boot_c else: arm64_lz4_kernel = False arm64_lzma_kernel = False for codename, block in self.depthcharge_boards.items(): name = self.board_config_sections.get(codename, None) if name is None: name = codename if name not in config: config.add_section(name) board = config[name] board["codename"] = codename if block.get("KERNEL_SIZE", None): board["image-max-size"] = str(block["KERNEL_SIZE"]) if block.get("KERNEL_FIT", False): board["image-format"] = "fit" elif block.get("KERNEL_ZIMAGE", False): board["image-format"] = "zimage" if block.get("ARCH_ARM_V8", False): board["boots-lz4-kernel"] = str(arm64_lz4_kernel) board["boots-lzma-kernel"] = str(arm64_lzma_kernel) # compatible string board_c = ( self.depthcharge_repo / "src/board" / codename / "board.c" ) board_c = board_c.read_text() if board_c.is_file() else "" if block.get("KERNEL_FIT", False): m = re.search( 'fit_(?:add|set)_compat(?:_by_rev)?\(' '"([^"]+?)(?:-rev[^-]+|-sku[^-]+)*"', board_c, ) if m: board["dt-compatible"] = m.group(1) elif "sprintf(compat, pattern, CONFIG_BOARD," in fit_c: board["dt-compatible"] = "google,{}".format( block.get("BOARD", codename).lower() .replace("_", "-").replace(" ", "-") ) elif '"google,%s", mb_part_string' in fit_c: block = self.coreboot_boards.get(codename, {}) mb_part_string = block.get( "MAINBOARD_PART_NUMBER", codename, ) board["dt-compatible"] = "google,{}".format( mb_part_string.lower() .replace("_", "-").replace(" ", "-") ) def graph(config): graph = DirectedGraph() for section in config.sections(): parts = section.split('/') for i, part in enumerate(parts): parent = str.join("/", parts[:i]) child = str.join("/", (*parts[:i], part)) graph.add_node(child) if parent: graph.add_edge(parent, child) return graph # Propagate config values upwards def propagate_configs(graph, config, section, keys): commons = {} for child in graph.children(section): propagate_configs(graph, config, child, keys) if section not in config: config.add_section(section) for k in keys: values = set( config[child].get(k, None) for child in graph.children(section) ) if len(values) == 1: value = values.pop() if value is not None: config[section].setdefault(k, value) for child in graph.children(section): for k in keys: if k in config[section] and k in config[child]: if config[section][k] == config[child][k]: config[child].pop(k) # Don't propagate up to the root, only up to baseboard nodes. g = graph(config) for arch in g.children("boards"): for chipset in g.children(arch): for baseboard in g.children(chipset): propagate_configs(g, config, baseboard, { "image-format", "image-max-size", "boots-lz4-kernel", "boots-lzma-kernel", }) # Trim unreleased boards that don't have names, hwid-matches max_depth = max(b.count('/') for b in config.sections()) for _ in range(0, max_depth): deleted = False for section in graph(config).leaves(): c = config[section] if "hwid-match" not in c and "name" not in c: self.logger.warning( "Skipping unreleased board '{}'." .format(section) ) del config[section] deleted = True if not deleted: break with self.output.open("w") as output_f: config.write(output_f) if __name__ == "__main__": update_config.main()