pax_global_header00006660000000000000000000000064145621671350014524gustar00rootroot0000000000000052 comment=d638decbcf945621a49f0caa145d04e4e5a87786 distrobuilder-3.0/000077500000000000000000000000001456216713500142415ustar00rootroot00000000000000distrobuilder-3.0/.github/000077500000000000000000000000001456216713500156015ustar00rootroot00000000000000distrobuilder-3.0/.github/workflows/000077500000000000000000000000001456216713500176365ustar00rootroot00000000000000distrobuilder-3.0/.github/workflows/builds.yml000066400000000000000000000010441456216713500216420ustar00rootroot00000000000000name: Builds on: - push - pull_request permissions: contents: read jobs: doc: name: Documentation (Sphinx) runs-on: ubuntu-20.04 steps: - name: Checkout code uses: actions/checkout@v3 - name: Build docs run: make doc - name: Print warnings run: if [ -s .sphinx/warnings.txt ]; then cat .sphinx/warnings.txt; exit 1; fi - name: Upload artifacts if: always() uses: actions/upload-artifact@v3 with: name: documentation path: doc/html distrobuilder-3.0/.github/workflows/commits.yml000066400000000000000000000016441456216713500220410ustar00rootroot00000000000000name: Commits on: - pull_request permissions: contents: read jobs: dco-check: permissions: pull-requests: read # for tim-actions/get-pr-commits to get list of commits from the PR name: Signed-off-by (DCO) runs-on: ubuntu-20.04 steps: - name: Get PR Commits id: 'get-pr-commits' uses: tim-actions/get-pr-commits@master with: token: ${{ secrets.GITHUB_TOKEN }} - name: Check that all commits are signed-off uses: tim-actions/dco@master with: commits: ${{ steps.get-pr-commits.outputs.commits }} target-branch: permissions: contents: none name: Branch target runs-on: ubuntu-20.04 steps: - name: Check branch target env: TARGET: ${{ github.event.pull_request.base.ref }} run: | set -x [ "${TARGET}" = "main" ] && exit 0 echo "Invalid branch target: ${TARGET}" exit 1 distrobuilder-3.0/.github/workflows/tests.yml000066400000000000000000000035601456216713500215270ustar00rootroot00000000000000name: Tests on: - push - pull_request permissions: contents: read concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: code-tests: name: Code tests strategy: fail-fast: false matrix: go: - 1.20.x - 1.21.x os: - ubuntu-20.04 - ubuntu-22.04 runs-on: ${{ matrix.os }} steps: - name: Checkout code uses: actions/checkout@v3 - name: Dependency Review uses: actions/dependency-review-action@v3 if: github.event_name == 'pull_request' - name: Install Go uses: actions/setup-go@v4 with: go-version: ${{ matrix.go }} - name: Install dependencies run: | sudo apt-get -qq update sudo apt-get install -y squashfs-tools - name: Update Go modules run: make update-gomod - name: Run static analysis run: make static-analysis - name: Unit tests (all) run: make check documentation: name: Documentation tests runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v3 - name: Install dependencies run: | sudo apt-get install aspell aspell-en sudo snap install mdl - name: Run markdown linter run: | make doc-lint - name: Run spell checker run: | make doc-spellcheck - name: Run inclusive naming checker uses: get-woke/woke-action@v0 with: fail-on-error: true woke-args: "*.md **/*.md -c https://github.com/canonical-web-and-design/Inclusive-naming/raw/main/config.yml" - name: Run link checker # This can fail intermittently due to external resources being unavailable. 
continue-on-error: true run: | make doc-linkcheck distrobuilder-3.0/.gitignore000066400000000000000000000002031456216713500162240ustar00rootroot00000000000000# Sphinx doc/html/ .sphinx/deps/ .sphinx/themes/ .sphinx/venv/ .sphinx/warnings.txt .sphinx/.wordlist.dic .sphinx/_static/download distrobuilder-3.0/.golangci.yml000066400000000000000000000003321456216713500166230ustar00rootroot00000000000000linters: enable: - gofmt - misspell - godot - whitespace - gci - errorlint linters-settings: gci: sections: - standard - default - prefix(github.com/lxc/distrobuilder) distrobuilder-3.0/.sphinx/000077500000000000000000000000001456216713500156305ustar00rootroot00000000000000distrobuilder-3.0/.sphinx/.markdownlint/000077500000000000000000000000001456216713500204175ustar00rootroot00000000000000distrobuilder-3.0/.sphinx/.markdownlint/doc-lint.sh000077500000000000000000000013541456216713500224720ustar00rootroot00000000000000#!/bin/sh -eu if ! command -v mdl >/dev/null; then echo "Install mdl with 'snap install mdl' first." exit 1 fi trap "rm -rf .tmp/" EXIT ## Preprocessing for fn in $(find doc/ -name '*.md'); do mkdir -p $(dirname ".tmp/$fn"); sed -E "s/(\(.+\)=)/\1\n/" $fn > .tmp/$fn; done mdl .tmp/doc -s.sphinx/.markdownlint/style.rb -u.sphinx/.markdownlint/rules.rb --ignore-front-matter > .tmp/errors.txt || true if [ ! -s ".tmp/errors.txt" ]; then echo "Passed!" exit 0 fi ## Postprocessing filtered_errors="$(grep -vxFf .sphinx/.markdownlint/exceptions.txt .tmp/errors.txt)" if [ "$(echo "$filtered_errors" | wc -l)" = "2" ]; then echo "Passed!" exit 0 else echo "Failed!" echo "$filtered_errors" exit 1 fi distrobuilder-3.0/.sphinx/.markdownlint/exceptions.txt000066400000000000000000000000001456216713500233270ustar00rootroot00000000000000distrobuilder-3.0/.sphinx/.markdownlint/rules.rb000066400000000000000000000024401456216713500220760ustar00rootroot00000000000000rule 'Myst-MD031', 'Fenced code blocks should be surrounded by blank lines' do tags :code, :blank_lines aliases 'blanks-around-fences' check do |doc| errors = [] # Some parsers (including kramdown) have trouble detecting fenced code # blocks without surrounding whitespace, so examine the lines directly. in_code = false fence = nil lines = [''] + doc.lines + [''] lines.each_with_index do |line, linenum| line.strip.match(/^(`{3,}|~{3,})/) unless Regexp.last_match(1) && ( !in_code || (Regexp.last_match(1).slice(0, fence.length) == fence) ) next end fence = in_code ? nil : Regexp.last_match(1) in_code = !in_code if (in_code && !(lines[linenum - 1].empty? || lines[linenum - 1].match(/^[:\-\*]*\s*\% /))) || (!in_code && !(lines[linenum + 1].empty? || lines[linenum + 1].match(/^\s*:/))) errors << linenum end end errors end end rule 'Myst-IDs', 'MyST IDs should be preceded by a blank line' do check do |doc| errors = [] ids = doc.matching_text_element_lines(/^\(.+\)=\s*$/) ids.each do |linenum| if (linenum > 1) && !doc.lines[linenum - 2].empty? errors << linenum end end errors.sort end end distrobuilder-3.0/.sphinx/.markdownlint/style.rb000066400000000000000000000004121456216713500221010ustar00rootroot00000000000000all exclude_rule 'MD013' exclude_rule 'MD046' exclude_rule 'MD041' exclude_rule 'MD040' exclude_rule 'MD024' exclude_rule 'MD033' exclude_rule 'MD022' exclude_rule 'MD031' rule 'MD026', :punctuation => '.,;:!' 
rule 'MD003', :style => :atx rule 'MD007', :indent => 3 distrobuilder-3.0/.sphinx/.spellcheck.yaml000066400000000000000000000006351456216713500207130ustar00rootroot00000000000000matrix: - name: Markdown files aspell: lang: en d: en_US dictionary: wordlists: - .sphinx/wordlist.txt output: .sphinx/.wordlist.dic sources: - doc/html/**/*.html pipeline: - pyspelling.filters.html: comments: false attributes: - title - alt ignores: - code - pre - spellexception - link - title - div.relatedlinks distrobuilder-3.0/.sphinx/_extra/000077500000000000000000000000001456216713500171125ustar00rootroot00000000000000distrobuilder-3.0/.sphinx/_extra/versions.json000066400000000000000000000001041456216713500216500ustar00rootroot00000000000000[ { "version": "latest", "id": "latest" } ] distrobuilder-3.0/.sphinx/_static/000077500000000000000000000000001456216713500172565ustar00rootroot00000000000000distrobuilder-3.0/.sphinx/_static/custom.css000066400000000000000000000077761456216713500213230ustar00rootroot00000000000000/** Fix the font weight (300 for normal, 400 for slightly bold) **/ div.page, h1, h2, h3, h4, h5, h6, .sidebar-tree .current-page>.reference, button, input, optgroup, select, textarea, th.head { font-weight: 300 } .toc-tree li.scroll-current>.reference, dl.glossary dt, dl.simple dt, dl:not([class]) dt { font-weight: 400; } /** Table styling **/ th.head { text-transform: uppercase; font-size: var(--font-size--small); } table.docutils { border: 0; box-shadow: none; width:100%; } table.docutils td, table.docutils th, table.docutils td:last-child, table.docutils th:last-child, table.docutils td:first-child, table.docutils th:first-child { border-right: none; border-left: none; } /* center align table cells with ":-:" */ td.text-center { text-align: center; } /** No rounded corners **/ .admonition, code.literal, .sphinx-tabs-tab, .sphinx-tabs-panel, .highlight { border-radius: 0; } /** Admonition styling **/ .admonition { border-top: 1px solid #d9d9d9; border-right: 1px solid #d9d9d9; border-bottom: 1px solid #d9d9d9; } /** Color for the "copy link" symbol next to headings **/ a.headerlink { color: var(--color-brand-primary); } /** Line to the left of the current navigation entry **/ .sidebar-tree li.current-page { border-left: 2px solid var(--color-brand-primary); } /** Some tweaks for issue #16 **/ [role="tablist"] { border-bottom: 1px solid var(--color-sidebar-item-background--hover); } .sphinx-tabs-tab[aria-selected="true"] { border: 0; border-bottom: 2px solid var(--color-brand-primary); background-color: var(--color-sidebar-item-background--current); font-weight:300; } .sphinx-tabs-tab{ color: var(--color-brand-primary); font-weight:300; } .sphinx-tabs-panel { border: 0; border-bottom: 1px solid var(--color-sidebar-item-background--hover); background: var(--color-background-primary); } button.sphinx-tabs-tab:hover { background-color: var(--color-sidebar-item-background--hover); } /** Custom classes to fix scrolling in tables by decreasing the font size or breaking certain columns. 
Specify the classes in the Markdown file with, for example: ```{rst-class} break-col-4 min-width-4-8 ``` **/ table.dec-font-size { font-size: smaller; } table.break-col-1 td.text-left:first-child { word-break: break-word; } table.break-col-4 td.text-left:nth-child(4) { word-break: break-word; } table.min-width-1-15 td.text-left:first-child { min-width: 15em; } table.min-width-4-8 td.text-left:nth-child(4) { min-width: 8em; } /** Underline for abbreviations **/ abbr[title] { text-decoration: underline solid #cdcdcd; } /** Use the same style for right-details as for left-details **/ .bottom-of-page .right-details { font-size: var(--font-size--small); display: block; } /** Version switcher */ button.version_select { color: var(--color-foreground-primary); background-color: var(--color-toc-background); padding: 5px 10px; border: none; } .version_select:hover, .version_select:focus { background-color: var(--color-sidebar-item-background--hover); } .version_dropdown { position: relative; display: inline-block; text-align: right; font-size: var(--sidebar-item-font-size); } .available_versions { display: none; position: absolute; right: 0px; background-color: var(--color-toc-background); box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2); z-index: 11; } .available_versions a { color: var(--color-foreground-primary); padding: 12px 16px; text-decoration: none; display: block; } .available_versions a:hover {background-color: var(--color-sidebar-item-background--current)} .show {display:block;} /** Fix for nested numbered list - the nested list is lettered **/ ol.arabic ol.arabic { list-style: lower-alpha; } /** Make expandable sections look like links **/ details summary { color: var(--color-link); } /** Hide the line above the search (looks strange without the version switcher) **/ .sidebar-search { border-top: 0 } distrobuilder-3.0/.sphinx/_static/header-nav.js000066400000000000000000000004051456216713500216250ustar00rootroot00000000000000$(document).ready(function() { $(document).on("click", function () { $(".more-links-dropdown").hide(); }); $('.nav-more-links').click(function(event) { $('.more-links-dropdown').toggle(); event.stopPropagation(); }); }) distrobuilder-3.0/.sphinx/_static/version-switcher.js000066400000000000000000000072461456216713500231400ustar00rootroot00000000000000/* JavaScript for the _templates/variant-selector.html file, implementing * the version switcher for the documentation. * * The script gets available versions from the versions.json file on the * master branch (because the master branch contains the current information * on which versions we want to display). * It then links to other versions of the documentation - to the same page * if the page is available or to the index otherwise. */ // Link to the versions.json file on the master branch. var versionURL = "https://linuxcontainers.org/distrobuilder/docs/master/versions.json"; // URL prefix that is common for the different documentation sets. var URLprefix = "https://linuxcontainers.org/distrobuilder/docs/" $(document).ready(function() { // Read the versions.json file and call the listVersions function. 
var xhr = new XMLHttpRequest(); xhr.onreadystatechange = function () { if (xhr.readyState === 4) { if (xhr.status === 200) { listVersions(JSON.parse(xhr.responseText)); } else { console.log("URL "+versionURL+" cannot be loaded."); } } }; xhr.open('GET', versionURL, true); xhr.send(); }); // Retrieve the name of the current documentation set (for example, // 'master' or 'stable-5.0') and the path to the page (for example, // 'howto/pagename/'). function getPaths() { var paths = {}; var prefix = new URL(URLprefix); var url = window.location.pathname; if (url.startsWith(prefix.pathname)) { path = url.substr(prefix.pathname.length).split("/"); paths['current'] = path.shift(); if (paths['current'] == "master") { paths['current'] = "latest"; }; paths['page'] = path.join("/"); } else { console.log("Unexpected hosting URL!"); } return paths; } // Populate the version dropdown. function listVersions(data) { paths = getPaths(); var all_versions = document.getElementById("all-versions"); var current = document.getElementById("current"); for( var i = 0; i < data.length; i++ ) { var one = data[i]; if (one.id === paths['current']) { // Put the current version at the top without link. current.innerText = one.version+" ⌄"; } else { // Put other versions into the dropdown and link them to the // suitable URL. var version = document.createElement("a"); version.appendChild(document.createTextNode(one.version)); version.href = findNewURL(paths,one.id); all_versions.appendChild(version); } } } // Check if the same page exists in the other documentation set. // If yes, return the new link. Otherwise, link to the index page of // the other documentation set. function findNewURL(paths,newset) { var newURL = URLprefix.concat(newset,"/",paths['page']); var xhr = new XMLHttpRequest(); xhr.open('HEAD', newURL, false); xhr.send(); if (xhr.status == "404") { return URLprefix.concat(newset,"/"); } else { return newURL; } } // Toggle the version dropdown. function dropdown() { document.getElementById("all-versions").classList.toggle("show"); } // Close the dropdown menu if the user clicks outside of it. window.onclick = function(event) { if (!event.target.matches('.version_select')) { var dropdowns = document.getElementsByClassName("available_versions"); var i; for (i = 0; i < dropdowns.length; i++) { var openDropdown = dropdowns[i]; if (openDropdown.classList.contains('show')) { openDropdown.classList.remove('show'); } } } } distrobuilder-3.0/.sphinx/_templates/000077500000000000000000000000001456216713500177655ustar00rootroot00000000000000distrobuilder-3.0/.sphinx/_templates/footer.html000066400000000000000000000044411456216713500221540ustar00rootroot00000000000000{# ru-fu: copied from Furo, with modifications as stated below #}
{%- if show_copyright %} {%- endif %} {# ru-fu: removed "Made with" #} {%- if last_updated -%}
{% trans last_updated=last_updated|e -%} Last updated on {{ last_updated }} {%- endtrans -%}
{%- endif %}
{# ru-fu: replaced RTD icons with our links #} {%- if show_source and has_source and sourcename %} {%- endif %} {% if github_url and github_version and github_folder and github_filetype and has_source and sourcename %} {% endif %}
distrobuilder-3.0/.sphinx/_templates/header.html000066400000000000000000000120331456216713500221020ustar00rootroot00000000000000 distrobuilder-3.0/.sphinx/_templates/page.html000066400000000000000000000024241456216713500215710ustar00rootroot00000000000000{% extends "furo/page.html" %} {% block footer %} {% include "footer.html" %} {% endblock footer %} {% block body -%} {% include "header.html" %} {{ super() }} {%- endblock body %} {% if meta and ((meta.discourse and discourse_prefix) or meta.relatedlinks) %} {% set furo_hide_toc_orig = furo_hide_toc %} {% set furo_hide_toc=false %} {% endif %} {% block right_sidebar %}
{% if not furo_hide_toc_orig %}
{{ _("Contents") }}
{{ toc }}
{% endif %} {% if meta and ((meta.discourse and discourse_prefix) or meta.relatedlinks) %} {% endif %}
{% endblock right_sidebar %} distrobuilder-3.0/.sphinx/_templates/sidebar/000077500000000000000000000000001456216713500213765ustar00rootroot00000000000000distrobuilder-3.0/.sphinx/_templates/sidebar/variant-selector.html000066400000000000000000000002731456216713500255500ustar00rootroot00000000000000
Doc version:
distrobuilder-3.0/.sphinx/conf.py000066400000000000000000000125301456216713500171300ustar00rootroot00000000000000import datetime import os import sys import yaml # Project config. project = "distrobuilder" author = "distrobuilder contributors" copyright = "2018-%s %s" % (datetime.date.today().year, author) with open("../shared/version/version.go") as fd: version = fd.read().split("\n")[-2].split()[-1].strip("\"") # Extensions. extensions = [ "myst_parser", "sphinx_tabs.tabs", "sphinx_reredirects", "sphinxext.opengraph", "youtube-links", "related-links", "custom-rst-roles", "sphinxcontrib.jquery", "sphinx_design", "sphinx.ext.intersphinx" ] myst_enable_extensions = [ "substitution", "deflist", "linkify" ] myst_linkify_fuzzy_links=False myst_heading_anchors = 7 intersphinx_mapping = { 'incus': ('https://linuxcontainers.org/incus/docs/main/', None) } if os.path.exists("../doc/substitutions.yaml"): with open("../doc/substitutions.yaml", "r") as fd: myst_substitutions = yaml.safe_load(fd.read()) # Setup theme. templates_path = ["_templates"] html_theme = "furo" html_show_sphinx = False html_last_updated_fmt = "" html_favicon = "_static/download/favicon.ico" html_static_path = ['_static'] html_css_files = ['custom.css'] html_js_files = ['header-nav.js','version-switcher.js'] html_extra_path = ['_extra'] html_theme_options = { "sidebar_hide_name": True, "light_css_variables": { "font-stack": "Ubuntu, -apple-system, Segoe UI, Roboto, Oxygen, Cantarell, Fira Sans, Droid Sans, Helvetica Neue, sans-serif", "font-stack--monospace": "Ubuntu Mono, Consolas, Monaco, Courier, monospace", "color-foreground-primary": "#111", "color-foreground-secondary": "var(--color-foreground-primary)", "color-foreground-muted": "#333", "color-background-secondary": "#FFF", "color-background-hover": "#f2f2f2", "color-brand-primary": "#111", "color-brand-content": "#06C", "color-api-background": "#cdcdcd", "color-inline-code-background": "rgba(0,0,0,.03)", "color-sidebar-link-text": "#111", "color-sidebar-item-background--current": "#ebebeb", "color-sidebar-item-background--hover": "#f2f2f2", "toc-font-size": "var(--font-size--small)", "color-admonition-title-background--note": "var(--color-background-primary)", "color-admonition-title-background--tip": "var(--color-background-primary)", "color-admonition-title-background--important": "var(--color-background-primary)", "color-admonition-title-background--caution": "var(--color-background-primary)", "color-admonition-title--note": "#24598F", "color-admonition-title--tip": "#24598F", "color-admonition-title--important": "#C7162B", "color-admonition-title--caution": "#F99B11", "color-highlighted-background": "#EbEbEb", "color-link-underline": "var(--color-background-primary)", "color-link-underline--hover": "var(--color-background-primary)", }, "dark_css_variables": { "color-foreground-secondary": "var(--color-foreground-primary)", "color-foreground-muted": "#CDCDCD", "color-background-secondary": "var(--color-background-primary)", "color-background-hover": "#666", "color-brand-primary": "#fff", "color-brand-content": "#06C", "color-sidebar-link-text": "#f7f7f7", "color-sidebar-item-background--current": "#666", "color-sidebar-item-background--hover": "#333", "color-admonition-background": "transparent", "color-admonition-title-background--note": "var(--color-background-primary)", "color-admonition-title-background--tip": "var(--color-background-primary)", "color-admonition-title-background--important": "var(--color-background-primary)", 
"color-admonition-title-background--caution": "var(--color-background-primary)", "color-admonition-title--note": "#24598F", "color-admonition-title--tip": "#24598F", "color-admonition-title--important": "#C7162B", "color-admonition-title--caution": "#F99B11", "color-highlighted-background": "#666", "color-link-underline": "var(--color-background-primary)", "color-link-underline--hover": "var(--color-background-primary)", }, } html_context = { "github_url": "https://github.com/lxc/distrobuilder", "github_version": "master", "github_folder": "/doc/", "github_filetype": "md", "discourse_prefix": "https://discuss.linuxcontainers.org/t/" } html_sidebars = { "**": [ # "sidebar/variant-selector.html", "sidebar/search.html", "sidebar/scroll-start.html", "sidebar/navigation.html", "sidebar/scroll-end.html", ] } source_suffix = ".md" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['html', 'README.md'] # Open Graph configuration ogp_site_url = "https://linuxcontainers.org/distrobuilder/docs/latest/" ogp_site_name = "distrobuilder documentation" ogp_image = "https://linuxcontainers.org/static/img/containers.png" # Links to ignore when checking links linkcheck_ignore = [ 'https://web.libera.chat/#lxc' ] # Setup redirects (https://documatt.gitlab.io/sphinx-reredirects/usage.html) redirects = { "index/index": "../index.html" } distrobuilder-3.0/.sphinx/requirements.txt000066400000000000000000000007721456216713500211220ustar00rootroot00000000000000alabaster Babel certifi charset-normalizer colorama docutils idna imagesize Jinja2 livereload MarkupSafe packaging Pygments pyparsing pytz requests six snowballstemmer Sphinx sphinx-autobuild sphinxcontrib-applehelp sphinxcontrib-devhelp sphinxcontrib-htmlhelp sphinxcontrib-jsmath sphinxcontrib-qthelp sphinxcontrib-serializinghtml sphinxcontrib-jquery tornado urllib3 myst-parser sphinx-tabs sphinx-reredirects linkify-it-py furo sphinxext-opengraph>=0.6.1 lxd-sphinx-extensions pyspelling sphinx-design distrobuilder-3.0/.sphinx/wordlist.txt000066400000000000000000000035451456216713500202470ustar00rootroot00000000000000AAAA ABI ACL ACLs AIO APIs AppArmor ArchLinux ARMv ARP ASN AXFR backend backends balancer balancers benchmarking BGP bibi bool bootable Btrfs bugfix bugfixes Centos Ceph CephFS Ceph's CFS cgroup cgroupfs cgroups checksum checksums Chocolatey CIDR CLI CPUs CRIU CRL cron CSV CUDA customizable dataset DCO dereferenced devtmpfs DHCP DHCPv distrobuilder DNS DNSSEC DoS downloader downloaders Dqlite DRM EB Ebit eBPF ECDHE ECDSA EiB Eibit endian ESA ETag failover FQDNs gapped GARP GbE Gbit Geneve GiB Gibit GID GIDs Golang goroutines GPG GPUs Grafana HAProxy hardcoded Homebrew hotplug hotplugged hotplugging HTTPS ICMP idmap idmapped idmaps incrementing Incus InfiniBand InfluxDB init initramfs integrations IOPS IOV IPs IPv IPVLAN jq JSON kB kbit KiB kibi Kibit lookups LTS LV LVM LXC LXCFS LXD LXD's MAAS macOS macvlan Makefile manpages Mbit MiB Mibit MicroCeph MicroCloud MII MITM MTU multicast MyST namespace namespaced namespaces NATed natively NDP netmask NFS NIC NICs NUMA NVRAM OData OpenMetrics OpenSSL OSD overcommit overcommitting overlayfs OVMF OVN OVS Pbit PCI PCIe peerings Permalink PFs PiB Pibit PID PKI PNG Pongo POSIX pre preseed proxied proxying PTS QEMU qgroup qgroups RADOS RBAC RBD reconfiguring requestor RESTful RHEL rootfs RSA rST runtime SATA scalable SDN Seccomp SFTP SHA shiftfs 
SIGHUP SIGTERM simplestreams SLAAC SMTP Solaris SPAs SPL SquashFS SSDs SSL stateful stderr stdin stdout STP struct structs subcommands subitem subnet subnets subpage substep subtree subtrees subvolume subvolumes superset SVG symlink symlinks syscall syscalls sysfs Tbit TCP Telegraf TiB Tibit TLS tmpfs toolchain topologies TPM TSIG TTL TTYs UDP UEFI UFW UI UID UIDs unconfigured unmanaged unmount unmounting uplink uptime userspace UUID vCPU vCPUs VFs VFS VirtIO virtualize virtualized VLAN VLANs VM VMs VPD VPS vSwitch VXLAN WebSocket WebSockets XFS XHR YAML Zettabyte ZFS zpool zpools distrobuilder-3.0/AUTHORS000066400000000000000000000003631456216713500153130ustar00rootroot00000000000000Unless mentioned otherwise in a specific file's header, all code in this project is released under the Apache 2.0 license. The list of authors and contributors can be retrieved from the git commit history and in some cases, the file headers. distrobuilder-3.0/CODE_OF_CONDUCT.md000066400000000000000000000125701456216713500170450ustar00rootroot00000000000000# Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 
## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at maintainers@linuxcontainers.org. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations]. [homepage]: https://www.contributor-covenant.org [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html [Mozilla CoC]: https://github.com/mozilla/diversity [FAQ]: https://www.contributor-covenant.org/faq [translations]: https://www.contributor-covenant.org/translations distrobuilder-3.0/CONTRIBUTING.md000066400000000000000000000051501456216713500164730ustar00rootroot00000000000000# Contributing ## Pull requests: Changes to this project should be proposed as pull requests on Github at: Proposed changes will then go through code review there and once acked, be merged in the main branch. ## License and copyright: By default, any contribution to this project is made under the Apache 2.0 license. The author of a change remains the copyright holder of their code (no copyright assignment). 
## Developer Certificate of Origin: To improve tracking of contributions to this project we use the DCO 1.1 and use a "sign-off" procedure for all changes going into the branch. The sign-off is a simple line at the end of the explanation for the commit which certifies that you wrote it or otherwise have the right to pass it on as an open-source contribution. > Developer Certificate of Origin > Version 1.1 > > Copyright (C) 2004, 2006 The Linux Foundation and its contributors. > 660 York Street, Suite 102, > San Francisco, CA 94110 USA > > Everyone is permitted to copy and distribute verbatim copies of this > license document, but changing it is not allowed. > > Developer's Certificate of Origin 1.1 > > By making a contribution to this project, I certify that: > > (a) The contribution was created in whole or in part by me and I > have the right to submit it under the open source license > indicated in the file; or > > (b) The contribution is based upon previous work that, to the best > of my knowledge, is covered under an appropriate open source > license and I have the right under that license to submit that > work with modifications, whether created in whole or in part > by me, under the same open source license (unless I am > permitted to submit under a different license), as indicated > in the file; or > > (c) The contribution was provided directly to me by some other > person who certified (a), (b) or (c) and I have not modified > it. > > (d) I understand and agree that this project and the contribution > are public and that a record of the contribution (including all > personal information I submit with it, including my sign-off) is > maintained indefinitely and may be redistributed consistent with > this project or the open source license(s) involved. An example of a valid sign-off line is: ``` Signed-off-by: Random J Developer ``` Use your real name and a valid e-mail address. Sorry, no pseudonyms or anonymous contributions are allowed. We also require each commit be individually signed-off by their author, even when part of a larger set. You may find `git commit -s` useful. distrobuilder-3.0/COPYING000066400000000000000000000261361456216713500153040ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. distrobuilder-3.0/Makefile000066400000000000000000000042061456216713500157030ustar00rootroot00000000000000VERSION=$(shell grep "var Version" shared/version/version.go | cut -d'"' -f2) ARCHIVE=distrobuilder-$(VERSION).tar GO111MODULE=on SPHINXENV=.sphinx/venv/bin/activate .PHONY: default default: gofmt -s -w . go install -v ./... @echo "distrobuilder built successfully" .PHONY: update-gomod update-gomod: go get -t -v -d -u ./... go mod tidy .PHONY: check check: default go test -v ./... .PHONY: dist dist: # Cleanup rm -Rf $(ARCHIVE).gz # Create build dir $(eval TMP := $(shell mktemp -d)) git archive --prefix=distrobuilder-$(VERSION)/ HEAD | tar -x -C $(TMP) mkdir -p $(TMP)/_dist/src/github.com/lxc ln -s ../../../../distrobuilder-$(VERSION) $(TMP)/_dist/src/github.com/lxc/distrobuilder # Download dependencies cd $(TMP)/distrobuilder-$(VERSION) && go mod vendor # Assemble tarball tar --exclude-vcs -C $(TMP) -zcf $(ARCHIVE).gz distrobuilder-$(VERSION)/ # Cleanup rm -Rf $(TMP) .PHONY: doc-setup doc-setup: @echo "Setting up documentation build environment" python3 -m venv .sphinx/venv . 
$(SPHINXENV) ; pip install --upgrade -r .sphinx/requirements.txt mkdir -p .sphinx/deps/ .sphinx/themes/ wget -N -P .sphinx/_static/download https://linuxcontainers.org/static/img/favicon.ico https://linuxcontainers.org/static/img/containers.png https://linuxcontainers.org/static/img/containers.small.png rm -Rf doc/html .PHONY: doc doc: doc-setup doc-incremental .PHONY: doc-incremental doc-incremental: @echo "Build the documentation" . $(SPHINXENV) ; sphinx-build -c .sphinx/ -b dirhtml doc/ doc/html/ -w .sphinx/warnings.txt .PHONY: doc-serve doc-serve: cd doc/html; python3 -m http.server 8001 .PHONY: doc-spellcheck doc-spellcheck: doc . $(SPHINXENV) ; python3 -m pyspelling -c .sphinx/.spellcheck.yaml .PHONY: doc-linkcheck doc-linkcheck: doc-setup . $(SPHINXENV) ; sphinx-build -c .sphinx/ -b linkcheck doc/ doc/html/ .PHONY: doc-lint doc-lint: .sphinx/.markdownlint/doc-lint.sh .PHONY: static-analysis static-analysis: ifeq ($(shell command -v golangci-lint 2> /dev/null),) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.2 endif golangci-lint run --timeout 5m run-parts --exit-on-error --regex '.sh' test/lint distrobuilder-3.0/README.md000066400000000000000000000062351456216713500155260ustar00rootroot00000000000000# distrobuilder System container and VM image builder for Incus and LXC. Incus images may also be compatible with Canonical's LXD. ## Status Type | Service | Status --- | --- | --- CI | GitHub | [![Build Status](https://github.com/lxc/distrobuilder/workflows/CI%20tests/badge.svg)](https://github.com/lxc/distrobuilder/actions) Project status | CII Best Practices | [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1728/badge)](https://bestpractices.coreinfrastructure.org/projects/1728) ## Command line options The following are the command line options of `distrobuilder`. You can use `distrobuilder` to create container images for both Incus and LXC. ```bash $ distrobuilder System container and VM image builder for Incus and LXC Usage: distrobuilder [command] Available Commands: build-dir Build plain rootfs build-incus Build Incus image from scratch build-lxc Build LXC image from scratch help Help about any command pack-incus Create Incus image from existing rootfs pack-lxc Create LXC image from existing rootfs repack-windows Repack Windows ISO with drivers included Flags: --cache-dir Cache directory --cleanup Clean up cache directory (default true) --debug Enable debug output --disable-overlay Disable the use of filesystem overlays -h, --help help for distrobuilder -o, --options Override options (list of key=value) -t, --timeout Timeout in seconds --version Print version number Use "distrobuilder [command] --help" for more information about a command. ``` ## Installing from package `distrobuilder` is available from the [Snap Store](https://snapcraft.io/distrobuilder). ``` sudo snap install distrobuilder --classic ``` ## Installing from source To compile `distrobuilder` from source, first install the Go programming language, and some other dependencies. - Debian-based: ``` sudo apt update sudo apt install -y golang-go debootstrap rsync gpg squashfs-tools git ``` - ArchLinux-based: ``` sudo pacman -Syu sudo pacman -S go debootstrap rsync gnupg squashfs-tools git --needed ``` Second, download the source code of the `distrobuilder` repository (this repository). ``` git clone https://github.com/lxc/distrobuilder ``` Third, enter the directory with the source code of `distrobuilder` and run `make` to compile the source code. 
This will generate the executable program `distrobuilder`, and it will be located at `$HOME/go/bin/distrobuilder`. ``` cd ./distrobuilder make ``` Finally, you can run `distrobuilder` as follows. You may also add to your $PATH the directory `$HOME/go/bin/` so that you do not need to run the command with the full path. ``` $HOME/go/bin/distrobuilder ``` ## How to use See [How to use `distrobuilder`](doc/howto/build.md) for instructions. ## Troubleshooting See [Troubleshoot `distrobuilder`](doc/howto/troubleshoot.md). distrobuilder-3.0/distrobuilder/000077500000000000000000000000001456216713500171145ustar00rootroot00000000000000distrobuilder-3.0/distrobuilder/chroot.go000066400000000000000000000041011456216713500207350ustar00rootroot00000000000000package main import ( "errors" "fmt" "os" "path/filepath" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) func getOverlay(logger *logrus.Logger, cacheDir, sourceDir string) (func(), string, error) { var stat unix.Statfs_t // Skip overlay on xfs and zfs for _, dir := range []string{cacheDir, sourceDir} { err := unix.Statfs(dir, &stat) if err != nil { return nil, "", err } switch stat.Type { case unix.XFS_SUPER_MAGIC: return nil, "", errors.New("overlay not supported on xfs") case 0x2fc12fc1: return nil, "", errors.New("overlay not supported on zfs") } } upperDir := filepath.Join(cacheDir, "upper") overlayDir := filepath.Join(cacheDir, "overlay") workDir := filepath.Join(cacheDir, "work") err := os.Mkdir(upperDir, 0755) if err != nil { return nil, "", fmt.Errorf("Failed to create directory %q: %w", upperDir, err) } err = os.Mkdir(overlayDir, 0755) if err != nil { return nil, "", fmt.Errorf("Failed to create directory %q: %w", overlayDir, err) } err = os.Mkdir(workDir, 0755) if err != nil { return nil, "", fmt.Errorf("Failed to create directory %q: %w", workDir, err) } opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", sourceDir, upperDir, workDir) err = unix.Mount("overlay", overlayDir, "overlay", 0, opts) if err != nil { return nil, "", fmt.Errorf("Failed to mount overlay: %w", err) } cleanup := func() { unix.Sync() err := unix.Unmount(overlayDir, 0) if err != nil { logger.WithFields(logrus.Fields{"err": err, "dir": overlayDir}).Warn("Failed to unmount overlay directory") } err = os.RemoveAll(upperDir) if err != nil { logger.WithFields(logrus.Fields{"err": err, "dir": upperDir}).Warn("Failed to remove upper directory") } err = os.RemoveAll(workDir) if err != nil { logger.WithFields(logrus.Fields{"err": err, "dir": workDir}).Warn("Failed to remove work directory") } err = os.Remove(overlayDir) if err != nil { logger.WithFields(logrus.Fields{"err": err, "dir": overlayDir}).Warn("Failed to remove overlay directory") } } return cleanup, overlayDir, nil } distrobuilder-3.0/distrobuilder/lxc.generator000066400000000000000000000163271456216713500216230ustar00rootroot00000000000000#!/bin/sh # NOTE: systemctl is not available for systemd-generators set -eu # disable localisation (faster grep) export LC_ALL=C ## Helper functions # is_lxc_container succeeds if we're running inside a LXC container is_lxc_container() { grep -qa container=lxc /proc/1/environ } is_lxc_privileged_container() { grep -qw 4294967295$ /proc/self/uid_map } # is_lxd_vm succeeds if we're running inside a LXD VM is_lxd_vm() { [ -e /dev/virtio-ports/org.linuxcontainers.lxd ] } # is_incus_vm succeeds if we're running inside an Incus VM is_incus_vm() { [ -e /dev/virtio-ports/org.linuxcontainers.incus ] } # is_in_path succeeds if the given file exists in on of the paths 
is_in_path() { # Don't use $PATH as that may not include all relevant paths for path in /bin /sbin /usr/bin /usr/sbin /usr/local/bin /usr/local/sbin; do [ -e "${path}/$1" ] && return 0 done return 1 } ## Fix functions # fix_ro_paths avoids udevd issues with /sys and /proc being writable fix_ro_paths() { mkdir -p "/run/systemd/system/$1.d" cat <<-EOF > "/run/systemd/system/$1.d/zzz-lxc-ropath.conf" [Service] BindReadOnlyPaths=/sys /proc EOF } # fix_nm_link_state forces the network interface to a DOWN state ahead of NetworkManager starting up fix_nm_link_state() { [ -e "/sys/class/net/$1" ] || return 0 ip_path= if [ -f /sbin/ip ]; then ip_path=/sbin/ip elif [ -f /bin/ip ]; then ip_path=/bin/ip else return 0 fi cat <<-EOF > /run/systemd/system/network-device-down.service [Unit] Description=Turn off network device Before=NetworkManager.service Before=systemd-networkd.service [Service] # do not turn off if there is a default route to 169.254.0.1, i.e. the device is a routed nic ExecCondition=/bin/sh -c '! /usr/bin/grep -qs 00000000.0100FEA9 /proc/net/route' ExecStart=-${ip_path} link set $1 down Type=oneshot RemainAfterExit=true [Install] WantedBy=default.target EOF mkdir -p /run/systemd/system/default.target.wants ln -sf /run/systemd/system/network-device-down.service /run/systemd/system/default.target.wants/network-device-down.service } # fix_systemd_override_unit generates a unit specific override fix_systemd_override_unit() { dropin_dir="/run/systemd/${1}.d" mkdir -p "${dropin_dir}" { echo "[Service]"; [ "${systemd_version}" -ge 247 ] && echo "ProcSubset=all"; [ "${systemd_version}" -ge 247 ] && echo "ProtectProc=default"; [ "${systemd_version}" -ge 232 ] && echo "ProtectControlGroups=no"; [ "${systemd_version}" -ge 232 ] && echo "ProtectKernelTunables=no"; [ "${systemd_version}" -ge 239 ] && echo "NoNewPrivileges=no"; [ "${systemd_version}" -ge 249 ] && echo "LoadCredential="; [ "${systemd_version}" -ge 254 ] && echo "PrivateNetwork=no"; # Additional settings for privileged containers if is_lxc_privileged_container; then echo "ProtectHome=no"; echo "ProtectSystem=no"; echo "PrivateDevices=no"; echo "PrivateTmp=no"; [ "${systemd_version}" -ge 244 ] && echo "ProtectKernelLogs=no"; [ "${systemd_version}" -ge 232 ] && echo "ProtectKernelModules=no"; [ "${systemd_version}" -ge 231 ] && echo "ReadWritePaths="; [ "${systemd_version}" -ge 254 ] && echo "ImportCredential="; fi true; } > "${dropin_dir}/zzz-lxc-service.conf" } # fix_systemd_mask masks the systemd unit fix_systemd_mask() { ln -sf /dev/null "/run/systemd/system/$1" } # fix_systemd_udev_trigger overrides the systemd-udev-trigger.service to match the latest version # of the file which uses "ExecStart=-" instead of "ExecStart=". fix_systemd_udev_trigger() { cmd= if [ -f /usr/bin/udevadm ]; then cmd=/usr/bin/udevadm elif [ -f /sbin/udevadm ]; then cmd=/sbin/udevadm elif [ -f /bin/udevadm ]; then cmd=/bin/udevadm else return 0 fi mkdir -p /run/systemd/system/systemd-udev-trigger.service.d cat <<-EOF > /run/systemd/system/systemd-udev-trigger.service.d/zzz-lxc-override.conf [Service] ExecStart= ExecStart=-${cmd} trigger --type=subsystems --action=add ExecStart=-${cmd} trigger --type=devices --action=add EOF } # fix_systemd_sysctl overrides the systemd-sysctl.service to use "ExecStart=-" instead of "ExecStart=". fix_systemd_sysctl() { cmd=/usr/lib/systemd/systemd-sysctl ! 
[ -e "${cmd}" ] && cmd=/lib/systemd/systemd-sysctl mkdir -p /run/systemd/system/systemd-sysctl.service.d cat <<-EOF > /run/systemd/system/systemd-sysctl.service.d/zzz-lxc-override.conf [Service] ExecStart= ExecStart=-${cmd} EOF } ## Main logic # Nothing to do in Incus VM but deployed in case it is later converted to a container is_incus_vm || is_lxd_vm && exit 0 # Exit immediately if not an Incus/LXC container is_lxc_container || exit 0 # Check for NetworkManager nm_exists=0 is_in_path NetworkManager && nm_exists=1 # Determine systemd version for path in /usr/lib/systemd/systemd /lib/systemd/systemd; do [ -x "${path}" ] || continue systemd_version="$("${path}" --version | head -n1 | cut -d' ' -f2)" break done # Determine distro name and release ID="" if [ -e /etc/os-release ]; then . /etc/os-release fi # Overriding some systemd features is only needed if security.nesting=false # in which case, /dev/.lxc will be missing if [ ! -d /dev/.lxc ]; then # Apply systemd overrides if [ "${systemd_version}" -ge 244 ]; then fix_systemd_override_unit system/service else # Setup per-unit overrides find /lib/systemd /etc/systemd /run/systemd /usr/lib/systemd -name "*.service" -type f | sed 's#/\(lib\|etc\|run\|usr/lib\)/systemd/##g'| while read -r service_file; do fix_systemd_override_unit "${service_file}" done fi # Workarounds for privileged containers. if { [ "${ID}" = "altlinux" ] || [ "${ID}" = "arch" ] || [ "${ID}" = "fedora" ]; } && ! is_lxc_privileged_container; then fix_ro_paths systemd-networkd.service fix_ro_paths systemd-resolved.service fi fi # Ignore failures on some units. fix_systemd_udev_trigger fix_systemd_sysctl # Mask some units. fix_systemd_mask dev-hugepages.mount fix_systemd_mask run-ribchester-general.mount fix_systemd_mask systemd-hwdb-update.service fix_systemd_mask systemd-journald-audit.socket fix_systemd_mask systemd-modules-load.service fix_systemd_mask systemd-pstore.service fix_systemd_mask ua-messaging.service fix_systemd_mask systemd-firstboot.service fix_systemd_mask systemd-binfmt.service if [ ! -e /dev/tty1 ]; then fix_systemd_mask vconsole-setup-kludge@tty1.service fi if [ -d /etc/udev ]; then mkdir -p /run/udev/rules.d cat <<-EOF > /run/udev/rules.d/90-lxc-net.rules # This file was created by distrobuilder. # # Its purpose is to convince NetworkManager to treat the eth0 veth # interface like a regular Ethernet. NetworkManager ordinarily doesn't # like to manage the veth interfaces, because they are typically configured # by container management tooling for specialized purposes. ACTION=="add|change|move", ENV{ID_NET_DRIVER}=="veth", ENV{INTERFACE}=="eth[0-9]*", ENV{NM_UNMANAGED}="0" EOF fi # Workarounds for NetworkManager in containers if [ "${nm_exists}" -eq 1 ]; then fix_nm_link_state eth0 fi # Allow masking units created by the lxc system-generator. for d in /etc/systemd/system /usr/lib/systemd/system /lib/systemd/system; do if ! 
[ -d "${d}" ]; then continue fi find "${d}" -maxdepth 1 -type l | while read -r f; do unit="$(basename "${f}")" if [ "${unit}" = "network-device-down.service" ] && [ "$(readlink "${f}")" = "/dev/null" ]; then fix_systemd_mask "${unit}" fi done done distrobuilder-3.0/distrobuilder/main.go000066400000000000000000000406351456216713500203770ustar00rootroot00000000000000package main /* #define _GNU_SOURCE #include #include #include #include #include #include #include #include __attribute__((constructor)) void init(void) { pid_t pid; int ret; if (geteuid() != 0) { return; } // Unshare a new mntns so our mounts don't leak if (unshare(CLONE_NEWNS | CLONE_NEWPID | CLONE_NEWUTS) < 0) { fprintf(stderr, "Failed to unshare namespaces: %s\n", strerror(errno)); _exit(1); } // Hardcode the hostname to "distrobuilder" if (sethostname("distrobuilder", 13) < 0) { fprintf(stderr, "Failed to set hostname: %s\n", strerror(errno)); _exit(1); } // Prevent mount propagation back to initial namespace if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL) < 0) { fprintf(stderr, "Failed to mark / private: %s\n", strerror(errno)); _exit(1); } pid = fork(); if (pid < 0) { fprintf(stderr, "Failed to fork: %s\n", strerror(errno)); _exit(1); } else if (pid > 0) { // parent waitpid(pid, &ret, 0); _exit(WEXITSTATUS(ret)); } // We're done, jump back to Go } */ import "C" import ( "bufio" "bytes" "context" "embed" "errors" "fmt" "io" "os" "os/signal" "path/filepath" "strings" "time" incus "github.com/lxc/incus/shared/util" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v2" "github.com/lxc/distrobuilder/managers" "github.com/lxc/distrobuilder/shared" "github.com/lxc/distrobuilder/shared/version" "github.com/lxc/distrobuilder/sources" ) //go:embed lxc.generator var lxcGenerator embed.FS var typeDescription = `Depending on the type, it either outputs a unified (single tarball) or split image (tarball + squashfs or qcow2 image). The --type flag can take one of the following values: - split (default) - unified ` var compressionDescription = `The compression can be set with the --compression flag. I can take one of the following values: - bzip2 - gzip - lzip - lzma - lzo - lzop - xz (default) - zstd For supported compression methods, a compression level can be specified with method-N, where N is an integer, e.g. gzip-9. ` type cmdGlobal struct { flagCleanup bool flagCacheDir string flagDebug bool flagOptions []string flagTimeout uint flagVersion bool flagDisableOverlay bool flagSourcesDir string flagKeepSources bool definition *shared.Definition sourceDir string targetDir string interrupt chan os.Signal logger *logrus.Logger overlayCleanup func() ctx context.Context cancel context.CancelFunc subCommand *cobra.Command } func main() { // Global flags globalCmd := cmdGlobal{} app := &cobra.Command{ Use: "distrobuilder", Short: "System container and VM image builder for LXC and Incus", PersistentPreRun: func(cmd *cobra.Command, args []string) { // Quick checks if os.Geteuid() != 0 { fmt.Fprintf(os.Stderr, "You must be root to run this tool\n") os.Exit(1) } // Keep track of the current subcommand. When app.Execute() fails, there's no way of // knowing which command failed. However, we want to know this as we call postRun in // case of an error, and handle the "validate" subcommand differently in that function. 
globalCmd.subCommand = cmd var err error globalCmd.logger, err = shared.GetLogger(globalCmd.flagDebug) if err != nil { fmt.Fprintf(os.Stderr, "Failed to get logger: %s\n", err) os.Exit(1) } if globalCmd.flagTimeout == 0 { globalCmd.ctx, globalCmd.cancel = context.WithCancel(context.Background()) } else { globalCmd.ctx, globalCmd.cancel = context.WithTimeout(context.Background(), time.Duration(globalCmd.flagTimeout)*time.Second) } go func() { for { select { case <-globalCmd.interrupt: globalCmd.cancel() globalCmd.logger.Info("Interrupted") return case <-globalCmd.ctx.Done(): if globalCmd.flagTimeout > 0 { globalCmd.logger.Info("Timed out") } return } } }() // No need to create cache directory if we're only validating. if cmd.CalledAs() == "validate" { return } // Create temp directory if the cache directory isn't explicitly set if globalCmd.flagCacheDir == "" { dir, err := os.MkdirTemp("/var/cache", "distrobuilder.") if err != nil { fmt.Fprintf(os.Stderr, "Failed to create cache directory: %s\n", err) os.Exit(1) } globalCmd.flagCacheDir = dir } }, PersistentPostRunE: globalCmd.postRun, CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true}, } app.PersistentFlags().BoolVar(&globalCmd.flagCleanup, "cleanup", true, "Clean up cache directory") app.PersistentFlags().StringVar(&globalCmd.flagCacheDir, "cache-dir", "", "Cache directory"+"``") app.PersistentFlags().StringSliceVarP(&globalCmd.flagOptions, "options", "o", []string{}, "Override options (list of key=value)"+"``") app.PersistentFlags().UintVarP(&globalCmd.flagTimeout, "timeout", "t", 0, "Timeout in seconds"+"``") app.PersistentFlags().BoolVar(&globalCmd.flagVersion, "version", false, "Print version number") app.PersistentFlags().BoolVar(&globalCmd.flagDebug, "debug", false, "Enable debug output") app.PersistentFlags().BoolVar(&globalCmd.flagDisableOverlay, "disable-overlay", false, "Disable the use of filesystem overlays") // Version handling app.SetVersionTemplate("{{.Version}}\n") app.Version = version.Version // LXC sub-commands LXCCmd := cmdLXC{global: &globalCmd} app.AddCommand(LXCCmd.commandBuild()) app.AddCommand(LXCCmd.commandPack()) // Incus sub-commands IncusCmd := cmdIncus{global: &globalCmd} app.AddCommand(IncusCmd.commandBuild()) app.AddCommand(IncusCmd.commandPack()) // build-dir sub-command buildDirCmd := cmdBuildDir{global: &globalCmd} app.AddCommand(buildDirCmd.command()) // repack-windows sub-command repackWindowsCmd := cmdRepackWindows{global: &globalCmd} app.AddCommand(repackWindowsCmd.command()) validateCmd := cmdValidate{global: &globalCmd} app.AddCommand(validateCmd.command()) globalCmd.interrupt = make(chan os.Signal, 1) signal.Notify(globalCmd.interrupt, os.Interrupt) // Run the main command and handle errors err := app.Execute() if err != nil { if globalCmd.logger != nil { globalCmd.logger.WithFields(logrus.Fields{"err": err}).Error("Failed running distrobuilder") } else { fmt.Fprintf(os.Stderr, "Failed running distrobuilder: %s\n", err.Error()) } _ = globalCmd.postRun(globalCmd.subCommand, nil) os.Exit(1) } } func (c *cmdGlobal) cleanupCacheDirectory() { // Try removing the entire cache directory. err := os.RemoveAll(c.flagCacheDir) if err == nil { return } // Try removing the content of the cache directory if the directory itself cannot be removed. 
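// Each first-level entry is removed with os.RemoveAll while the cache directory itself is kept in place; this covers the case where the directory cannot be removed directly (for example, when it may be a mount point).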
err = filepath.Walk(c.flagCacheDir, func(path string, info os.FileInfo, err error) error { if path == c.flagCacheDir { return nil } return os.RemoveAll(path) }) if err != nil { c.logger.WithField("err", err).Warn("Failed cleaning up cache directory") } } func (c *cmdGlobal) preRunBuild(cmd *cobra.Command, args []string) error { // if an error is returned, disable the usage message cmd.SilenceUsage = true isRunningBuildDir := cmd.CalledAs() == "build-dir" // Clean up cache directory before doing anything c.cleanupCacheDirectory() err := os.MkdirAll(c.flagCacheDir, 0755) if err != nil { return fmt.Errorf("Failed creating cache directory: %w", err) } if len(args) > 1 { // Create and set target directory if provided err := os.MkdirAll(args[1], 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", args[1], err) } c.targetDir = args[1] } else { // Use current working directory as target var err error c.targetDir, err = os.Getwd() if err != nil { return fmt.Errorf("Failed to get working directory: %w", err) } } if isRunningBuildDir { c.sourceDir = c.targetDir } else { c.sourceDir = filepath.Join(c.flagCacheDir, "rootfs") } // Create source directory if it doesn't exist err = os.MkdirAll(c.sourceDir, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", c.sourceDir, err) } // Get the image definition c.definition, err = getDefinition(args[0], c.flagOptions) if err != nil { return fmt.Errorf("Failed to get definition: %w", err) } // Create cache directory if we also plan on creating LXC or Incus images if !isRunningBuildDir { err = os.MkdirAll(c.flagCacheDir, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", c.flagCacheDir, err) } } // Run template on source keys for i, key := range c.definition.Source.Keys { c.definition.Source.Keys[i], err = shared.RenderTemplate(key, c.definition) if err != nil { return fmt.Errorf("Failed to render source keys: %w", err) } } // Run template on source URL c.definition.Source.URL, err = shared.RenderTemplate(c.definition.Source.URL, c.definition) if err != nil { return fmt.Errorf("Failed to render source URL: %w", err) } // Load and run downloader downloader, err := sources.Load(c.ctx, c.definition.Source.Downloader, c.logger, *c.definition, c.sourceDir, c.flagCacheDir, c.flagSourcesDir) if err != nil { return fmt.Errorf("Failed to load downloader %q: %w", c.definition.Source.Downloader, err) } c.logger.Info("Downloading source") err = downloader.Run() if err != nil { return fmt.Errorf("Error while downloading source: %w", err) } // Setup the mounts and chroot into the rootfs exitChroot, err := shared.SetupChroot(c.sourceDir, *c.definition, nil) if err != nil { return fmt.Errorf("Failed to setup chroot: %w", err) } // Unmount everything and exit the chroot defer func() { _ = exitChroot() }() // Always include sections which have no type filter. If running build-dir, // only these sections will be processed. imageTargets := shared.ImageTargetUndefined // If we're running either build-lxc or build-incus, include types which are // meant for all. if !isRunningBuildDir { imageTargets |= shared.ImageTargetAll } switch cmd.CalledAs() { case "build-lxc": // If we're running build-lxc, also process container-only sections. imageTargets |= shared.ImageTargetContainer case "build-incus": // Include either container-specific or vm-specific sections when // running build-incus. 
ok, err := cmd.Flags().GetBool("vm") if err != nil { return fmt.Errorf(`Failed to get bool value of "vm": %w`, err) } if ok { imageTargets |= shared.ImageTargetVM c.definition.Targets.Type = shared.DefinitionFilterTypeVM } else { imageTargets |= shared.ImageTargetContainer } } manager, err := managers.Load(c.ctx, c.definition.Packages.Manager, c.logger, *c.definition) if err != nil { return fmt.Errorf("Failed to load manager %q: %w", c.definition.Packages.Manager, err) } c.logger.Info("Managing repositories") err = manager.ManageRepositories(imageTargets) if err != nil { return fmt.Errorf("Failed to manage repositories: %w", err) } c.logger.WithField("trigger", "post-unpack").Info("Running hooks") // Run post unpack hook for _, hook := range c.definition.GetRunnableActions("post-unpack", imageTargets) { if hook.Pongo { hook.Action, err = shared.RenderTemplate(hook.Action, c.definition) if err != nil { return fmt.Errorf("Failed to render action: %w", err) } } err := shared.RunScript(c.ctx, hook.Action) if err != nil { return fmt.Errorf("Failed to run post-unpack: %w", err) } } c.logger.Info("Managing packages") // Install/remove/update packages err = manager.ManagePackages(imageTargets) if err != nil { return fmt.Errorf("Failed to manage packages: %w", err) } c.logger.WithField("trigger", "post-packages").Info("Running hooks") // Run post packages hook for _, hook := range c.definition.GetRunnableActions("post-packages", imageTargets) { if hook.Pongo { hook.Action, err = shared.RenderTemplate(hook.Action, c.definition) if err != nil { return fmt.Errorf("Failed to render action: %w", err) } } err := shared.RunScript(c.ctx, hook.Action) if err != nil { return fmt.Errorf("Failed to run post-packages: %w", err) } } return nil } func (c *cmdGlobal) preRunPack(cmd *cobra.Command, args []string) error { // if an error is returned, disable the usage message cmd.SilenceUsage = true // Clean up cache directory before doing anything c.cleanupCacheDirectory() err := os.MkdirAll(c.flagCacheDir, 0755) if err != nil { return fmt.Errorf("Failed creating cache directory: %w", err) } // resolve path c.sourceDir, err = filepath.Abs(args[1]) if err != nil { return fmt.Errorf("Failed to get absolute path of %q: %w", args[1], err) } c.targetDir = "." if len(args) == 3 { c.targetDir = args[2] } // Get the image definition c.definition, err = getDefinition(args[0], c.flagOptions) if err != nil { return fmt.Errorf("Failed to get definition: %w", err) } return nil } func (c *cmdGlobal) postRun(cmd *cobra.Command, args []string) error { // If we're only validating, there's nothing to clean up. 
if cmd != nil && cmd.CalledAs() == "validate" { return nil } hasLogger := c.logger != nil // exit all chroots otherwise we cannot remove the cache directory for _, exit := range shared.ActiveChroots { if exit != nil { err := exit() if err != nil && hasLogger { c.logger.WithField("err", err).Warn("Failed exiting chroot") } } } // Clean up overlay if c.overlayCleanup != nil { if hasLogger { c.logger.Info("Cleaning up overlay") } c.overlayCleanup() } // Clean up cache directory if c.flagCleanup { if hasLogger { c.logger.Info("Removing cache directory") } c.cleanupCacheDirectory() } // Clean up sources directory if !c.flagKeepSources { if hasLogger { c.logger.Info("Removing sources directory") } _ = os.RemoveAll(c.flagSourcesDir) } return nil } func (c *cmdGlobal) getOverlayDir() (string, func(), error) { var ( cleanup func() overlayDir string err error ) if c.flagDisableOverlay { overlayDir = filepath.Join(c.flagCacheDir, "overlay") // Use rsync if overlay doesn't work err = shared.RsyncLocal(c.ctx, c.sourceDir+"/", overlayDir) if err != nil { return "", nil, fmt.Errorf("Failed to copy image content: %w", err) } } else { cleanup, overlayDir, err = getOverlay(c.logger, c.flagCacheDir, c.sourceDir) if err != nil { c.logger.WithField("err", err).Warn("Failed to create overlay") overlayDir = filepath.Join(c.flagCacheDir, "overlay") // Use rsync if overlay doesn't work err = shared.RsyncLocal(c.ctx, c.sourceDir+"/", overlayDir) if err != nil { return "", nil, fmt.Errorf("Failed to copy image content: %w", err) } } } return overlayDir, cleanup, nil } func getDefinition(fname string, options []string) (*shared.Definition, error) { // Read the provided file, or if none was given, read from stdin var buf bytes.Buffer if fname == "" || fname == "-" { scanner := bufio.NewScanner(os.Stdin) for scanner.Scan() { buf.WriteString(scanner.Text()) } } else { f, err := os.Open(fname) if err != nil { return nil, err } defer f.Close() _, err = io.Copy(&buf, f) if err != nil { return nil, err } } // Parse the yaml input var def shared.Definition err := yaml.UnmarshalStrict(buf.Bytes(), &def) if err != nil { return nil, err } // Set options from the command line for _, o := range options { parts := strings.Split(o, "=") if len(parts) != 2 { return nil, errors.New("Options need to be of type key=value") } err := def.SetValue(parts[0], parts[1]) if err != nil { return nil, fmt.Errorf("Failed to set option %s: %w", o, err) } } // Apply some defaults on top of the provided configuration def.SetDefaults() // Validate the result err = def.Validate() if err != nil { return nil, err } return &def, nil } // addSystemdGenerator creates a systemd-generator which runs on boot, and does some configuration around the system itself and networking. 
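// The generator is the embedded lxc.generator script; it is installed as /etc/systemd/system-generators/lxc so that systemd executes it early during boot inside the container.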
func addSystemdGenerator() error { // Check if container has systemd if !incus.PathExists("/etc/systemd") { return nil } err := os.MkdirAll("/etc/systemd/system-generators", 0755) if err != nil { return fmt.Errorf("Failed creating directory: %w", err) } content, err := lxcGenerator.ReadFile("lxc.generator") if err != nil { return fmt.Errorf("Failed reading lxc.generator: %w", err) } err = os.WriteFile("/etc/systemd/system-generators/lxc", content, 0755) if err != nil { return fmt.Errorf("Failed creating system generator: %w", err) } return nil } distrobuilder-3.0/distrobuilder/main_build-dir.go000066400000000000000000000052541456216713500223300ustar00rootroot00000000000000package main import ( "fmt" "os" "path/filepath" "github.com/spf13/cobra" "github.com/lxc/distrobuilder/generators" "github.com/lxc/distrobuilder/shared" ) type cmdBuildDir struct { cmdBuild *cobra.Command global *cmdGlobal flagWithPostFiles bool } func (c *cmdBuildDir) command() *cobra.Command { c.cmdBuild = &cobra.Command{ Use: "build-dir ", Short: "Build plain rootfs", Args: cobra.ExactArgs(2), RunE: c.global.preRunBuild, PostRunE: func(cmd *cobra.Command, args []string) error { // Run global generators for _, file := range c.global.definition.Files { if !shared.ApplyFilter(&file, c.global.definition.Image.Release, c.global.definition.Image.ArchitectureMapped, c.global.definition.Image.Variant, c.global.definition.Targets.Type, 0) { continue } generator, err := generators.Load(file.Generator, c.global.logger, c.global.flagCacheDir, c.global.targetDir, file, *c.global.definition) if err != nil { return fmt.Errorf("Failed to load generator %q: %w", file.Generator, err) } c.global.logger.WithField("generator", file.Generator).Info("Running generator") err = generator.Run() if err != nil { continue } } if !c.flagWithPostFiles { return nil } exitChroot, err := shared.SetupChroot(c.global.targetDir, *c.global.definition, nil) if err != nil { return fmt.Errorf("Failed to setup chroot in %q: %w", c.global.targetDir, err) } c.global.logger.WithField("trigger", "post-files").Info("Running hooks") // Run post files hook for _, action := range c.global.definition.GetRunnableActions("post-files", shared.ImageTargetUndefined) { if action.Pongo { action.Action, err = shared.RenderTemplate(action.Action, c.global.definition) if err != nil { return fmt.Errorf("Failed to render action: %w", err) } } err := shared.RunScript(c.global.ctx, action.Action) if err != nil { { err := exitChroot() if err != nil { c.global.logger.WithField("err", err).Warn("Failed exiting chroot") } } return fmt.Errorf("Failed to run post-files: %w", err) } } err = exitChroot() if err != nil { return fmt.Errorf("Failed exiting chroot: %w", err) } return nil }, } c.cmdBuild.Flags().StringVar(&c.global.flagSourcesDir, "sources-dir", filepath.Join(os.TempDir(), "distrobuilder"), "Sources directory for distribution tarballs"+"``") c.cmdBuild.Flags().BoolVar(&c.global.flagKeepSources, "keep-sources", true, "Keep sources after build"+"``") c.cmdBuild.Flags().BoolVar(&c.flagWithPostFiles, "with-post-files", false, "Run post-files actions"+"``") return c.cmdBuild } distrobuilder-3.0/distrobuilder/main_incus.go000066400000000000000000000353431456216713500216000ustar00rootroot00000000000000package main import ( "errors" "fmt" "io" "os" "os/exec" "path/filepath" client "github.com/lxc/incus/client" "github.com/lxc/incus/shared/api" incus "github.com/lxc/incus/shared/util" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "golang.org/x/sys/unix" 
"github.com/lxc/distrobuilder/generators" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/managers" "github.com/lxc/distrobuilder/shared" ) type cmdIncus struct { cmdBuild *cobra.Command cmdPack *cobra.Command global *cmdGlobal flagType string flagCompression string flagVM bool flagImportIntoIncus string } func (c *cmdIncus) commandBuild() *cobra.Command { c.cmdBuild = &cobra.Command{ Use: "build-incus [target dir] [--type=TYPE] [--compression=COMPRESSION] [--import-into-incus]", Aliases: []string{"build-lxd"}, Short: "Build Incus image from scratch", Long: fmt.Sprintf(`Build Incus image from scratch %s %s `, typeDescription, compressionDescription), Args: cobra.RangeArgs(1, 2), PreRunE: func(cmd *cobra.Command, args []string) error { if !incus.ValueInSlice(c.flagType, []string{"split", "unified"}) { return errors.New("--type needs to be one of ['split', 'unified']") } // Check compression arguments _, _, err := shared.ParseCompression(c.flagCompression) if err != nil { return fmt.Errorf("Failed to parse compression level: %w", err) } if c.flagType == "split" { _, _, err := shared.ParseSquashfsCompression(c.flagCompression) if err != nil { return fmt.Errorf("Failed to parse compression level: %w", err) } } // Check dependencies if c.flagVM { err := c.checkVMDependencies() if err != nil { return fmt.Errorf("Failed to check VM dependencies: %w", err) } } return c.global.preRunBuild(cmd, args) }, RunE: func(cmd *cobra.Command, args []string) error { overlayDir, cleanup, err := c.global.getOverlayDir() if err != nil { return fmt.Errorf("Failed to get overlay directory: %w", err) } if cleanup != nil { c.global.overlayCleanup = cleanup defer func() { cleanup() c.global.overlayCleanup = nil }() } return c.run(cmd, args, overlayDir) }, } c.cmdBuild.Flags().StringVar(&c.flagType, "type", "split", "Type of tarball to create"+"``") c.cmdBuild.Flags().StringVar(&c.flagCompression, "compression", "xz", "Type of compression to use"+"``") c.cmdBuild.Flags().BoolVar(&c.flagVM, "vm", false, "Create a qcow2 image for VMs"+"``") c.cmdBuild.Flags().StringVar(&c.flagImportIntoIncus, "import-into-incus", "", "Import built image into Incus"+"``") c.cmdBuild.Flags().BoolVar(&c.global.flagKeepSources, "keep-sources", true, "Keep sources after build"+"``") return c.cmdBuild } func (c *cmdIncus) commandPack() *cobra.Command { c.cmdPack = &cobra.Command{ Use: "pack-incus [target dir] [--type=TYPE] [--compression=COMPRESSION] [--import-into-incus]", Aliases: []string{"pack-lxd"}, Short: "Create Incus image from existing rootfs", Long: fmt.Sprintf(`Create Incus image from existing rootfs %s %s `, typeDescription, compressionDescription), Args: cobra.RangeArgs(2, 3), PreRunE: func(cmd *cobra.Command, args []string) error { if !incus.ValueInSlice(c.flagType, []string{"split", "unified"}) { return errors.New("--type needs to be one of ['split', 'unified']") } // Check compression arguments _, _, err := shared.ParseCompression(c.flagCompression) if err != nil { return fmt.Errorf("Failed to parse compression level: %w", err) } if c.flagType == "split" { _, _, err := shared.ParseSquashfsCompression(c.flagCompression) if err != nil { return fmt.Errorf("Failed to parse compression level: %w", err) } } // Check dependencies if c.flagVM { err := c.checkVMDependencies() if err != nil { return fmt.Errorf("Failed to check VM dependencies: %w", err) } } return c.global.preRunPack(cmd, args) }, RunE: func(cmd *cobra.Command, args []string) error { overlayDir, cleanup, err := c.global.getOverlayDir() if err != nil { 
return fmt.Errorf("Failed to get overlay directory: %w", err) } if cleanup != nil { c.global.overlayCleanup = cleanup defer func() { cleanup() c.global.overlayCleanup = nil }() } if c.flagVM { c.global.definition.Targets.Type = "vm" } err = c.runPack(cmd, args, overlayDir) if err != nil { return fmt.Errorf("Failed to pack image: %w", err) } return c.run(cmd, args, overlayDir) }, } c.cmdPack.Flags().StringVar(&c.flagType, "type", "split", "Type of tarball to create") c.cmdPack.Flags().StringVar(&c.flagCompression, "compression", "xz", "Type of compression to use") c.cmdPack.Flags().BoolVar(&c.flagVM, "vm", false, "Create a qcow2 image for VMs"+"``") c.cmdPack.Flags().StringVar(&c.flagImportIntoIncus, "import-into-incus", "", "Import built image into Incus"+"``") c.cmdPack.Flags().Lookup("import-into-incus").NoOptDefVal = "-" return c.cmdPack } func (c *cmdIncus) runPack(cmd *cobra.Command, args []string, overlayDir string) error { // Setup the mounts and chroot into the rootfs exitChroot, err := shared.SetupChroot(overlayDir, *c.global.definition, nil) if err != nil { return fmt.Errorf("Failed to setup chroot: %w", err) } // Unmount everything and exit the chroot defer func() { _ = exitChroot() }() imageTargets := shared.ImageTargetAll if c.flagVM { imageTargets |= shared.ImageTargetVM } else { imageTargets |= shared.ImageTargetContainer } manager, err := managers.Load(c.global.ctx, c.global.definition.Packages.Manager, c.global.logger, *c.global.definition) if err != nil { return fmt.Errorf("Failed to load manager %q: %w", c.global.definition.Packages.Manager, err) } c.global.logger.Info("Managing repositories") err = manager.ManageRepositories(imageTargets) if err != nil { return fmt.Errorf("Failed to manage repositories: %w", err) } c.global.logger.WithField("trigger", "post-unpack").Info("Running hooks") // Run post unpack hook for _, hook := range c.global.definition.GetRunnableActions("post-unpack", imageTargets) { if hook.Pongo { hook.Action, err = shared.RenderTemplate(hook.Action, c.global.definition) if err != nil { return fmt.Errorf("Failed to render action: %w", err) } } err := shared.RunScript(c.global.ctx, hook.Action) if err != nil { return fmt.Errorf("Failed to run post-unpack: %w", err) } } c.global.logger.Info("Managing packages") // Install/remove/update packages err = manager.ManagePackages(imageTargets) if err != nil { return fmt.Errorf("Failed to manage packages: %w", err) } c.global.logger.Info("Running hooks", "trigger", "post-packages") // Run post packages hook for _, hook := range c.global.definition.GetRunnableActions("post-packages", imageTargets) { if hook.Pongo { hook.Action, err = shared.RenderTemplate(hook.Action, c.global.definition) if err != nil { return fmt.Errorf("Failed to render action: %w", err) } } err := shared.RunScript(c.global.ctx, hook.Action) if err != nil { return fmt.Errorf("Failed to run post-packages: %w", err) } } return nil } func (c *cmdIncus) run(cmd *cobra.Command, args []string, overlayDir string) error { img := image.NewIncusImage(c.global.ctx, overlayDir, c.global.targetDir, c.global.flagCacheDir, *c.global.definition) imageTargets := shared.ImageTargetUndefined | shared.ImageTargetAll if c.flagVM { imageTargets |= shared.ImageTargetVM } else { imageTargets |= shared.ImageTargetContainer } for _, file := range c.global.definition.Files { if !shared.ApplyFilter(&file, c.global.definition.Image.Release, c.global.definition.Image.ArchitectureMapped, c.global.definition.Image.Variant, c.global.definition.Targets.Type, imageTargets) { 
continue } generator, err := generators.Load(file.Generator, c.global.logger, c.global.flagCacheDir, overlayDir, file, *c.global.definition) if err != nil { return fmt.Errorf("Failed to load generator %q: %w", file.Generator, err) } c.global.logger.WithField("generator", file.Generator).Info("Running generator") err = generator.RunIncus(img, c.global.definition.Targets.Incus) if err != nil { return fmt.Errorf("Failed to create Incus data: %w", err) } } rootfsDir := overlayDir var mounts []shared.ChrootMount var vmDir string var vm *vm if c.flagVM { vmDir = filepath.Join(c.global.flagCacheDir, "vm") err := os.Mkdir(vmDir, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", vmDir, err) } imgFilename, err := shared.RenderTemplate(fmt.Sprintf("%s.raw", c.global.definition.Image.Name), c.global.definition) if err != nil { return fmt.Errorf("Failed to render template: %w", err) } imgFile := filepath.Join(c.global.flagCacheDir, imgFilename) vm, err = newVM(c.global.ctx, imgFile, vmDir, c.global.definition.Targets.Incus.VM.Filesystem, c.global.definition.Targets.Incus.VM.Size) if err != nil { return fmt.Errorf("Failed to instantiate VM: %w", err) } err = vm.createEmptyDiskImage() if err != nil { return fmt.Errorf("Failed to create disk image: %w", err) } err = vm.createPartitions() if err != nil { return fmt.Errorf("Failed to create partitions: %w", err) } err = vm.mountImage() if err != nil { return fmt.Errorf("Failed to mount image: %w", err) } defer func() { _ = vm.umountImage() }() err = vm.createRootFS() if err != nil { return fmt.Errorf("Failed to create root filesystem: %w", err) } err = vm.mountRootPartition() if err != nil { return fmt.Errorf("failed to mount root partion: %w", err) } defer func() { _ = shared.RunCommand(vm.ctx, nil, nil, "umount", "-R", vmDir) }() err = vm.createUEFIFS() if err != nil { return fmt.Errorf("Failed to create UEFI filesystem: %w", err) } err = vm.mountUEFIPartition() if err != nil { return fmt.Errorf("Failed to mount UEFI partition: %w", err) } // We cannot use Incus' rsync package as that uses the --delete flag which // causes an issue due to the boot/efi directory being present. 
err = shared.RsyncLocal(c.global.ctx, overlayDir+"/", vmDir) if err != nil { return fmt.Errorf("Failed to copy rootfs: %w", err) } rootfsDir = vmDir mounts = []shared.ChrootMount{ { Source: vm.getLoopDev(), Target: filepath.Join("/", "dev", filepath.Base(vm.getLoopDev())), Flags: unix.MS_BIND, }, { Source: vm.getRootfsDevFile(), Target: filepath.Join("/", "dev", filepath.Base(vm.getRootfsDevFile())), Flags: unix.MS_BIND, }, { Source: vm.getUEFIDevFile(), Target: filepath.Join("/", "dev", filepath.Base(vm.getUEFIDevFile())), Flags: unix.MS_BIND, }, { Source: vm.getUEFIDevFile(), Target: "/boot/efi", FSType: "vfat", Flags: 0, Data: "", IsDir: true, }, } } exitChroot, err := shared.SetupChroot(rootfsDir, *c.global.definition, mounts) if err != nil { return fmt.Errorf("Failed to chroot: %w", err) } err = addSystemdGenerator() if err != nil { return fmt.Errorf("Failed adding systemd generator: %w", err) } c.global.logger.WithField("trigger", "post-files").Info("Running hooks") // Run post files hook for _, action := range c.global.definition.GetRunnableActions("post-files", imageTargets) { if action.Pongo { action.Action, err = shared.RenderTemplate(action.Action, c.global.definition) if err != nil { return fmt.Errorf("Failed to render action: %w", err) } } err := shared.RunScript(c.global.ctx, action.Action) if err != nil { { err := exitChroot() if err != nil { c.global.logger.WithField("err", err).Warn("Failed exiting chroot") } } return fmt.Errorf("Failed to run post-files: %w", err) } } err = exitChroot() if err != nil { return fmt.Errorf("Failed exiting chroot: %w", err) } // Unmount VM directory and loop device before creating the image. if c.flagVM { err := shared.RunCommand(vm.ctx, nil, nil, "umount", "-R", vmDir) if err != nil { return fmt.Errorf("Failed to unmount %q: %w", vmDir, err) } err = vm.umountImage() if err != nil { return fmt.Errorf("Failed to unmount image: %w", err) } } c.global.logger.WithFields(logrus.Fields{"type": c.flagType, "vm": c.flagVM, "compression": c.flagCompression}).Info("Creating Incus image") imageFile, rootfsFile, err := img.Build(c.flagType == "unified", c.flagCompression, c.flagVM) if err != nil { return fmt.Errorf("Failed to create Incus image: %w", err) } importFlag := cmd.Flags().Lookup("import-into-incus") if importFlag.Changed { path := "" server, err := client.ConnectIncusUnix(path, nil) if err != nil { return fmt.Errorf("Failed to connect to Incus: %w", err) } image := api.ImagesPost{ Filename: imageFile, } imageType := "container" var meta io.ReadCloser var rootfs io.ReadCloser // Open meta meta, err = os.Open(imageFile) if err != nil { return err } defer meta.Close() // Open rootfs if rootfsFile != "" { rootfs, err = os.Open(rootfsFile) if err != nil { return err } defer rootfs.Close() if filepath.Ext(rootfsFile) == ".qcow2" { imageType = "virtual-machine" } } createArgs := &client.ImageCreateArgs{ MetaFile: meta, MetaName: filepath.Base(imageFile), RootfsFile: rootfs, RootfsName: filepath.Base(rootfsFile), Type: imageType, } op, err := server.CreateImage(image, createArgs) if err != nil { return fmt.Errorf("Failed to create image: %w", err) } err = op.Wait() if err != nil { return fmt.Errorf("Failed to create image: %w", err) } // Don't create alias if the flag value is equal to the NoOptDefVal (the default value if --import-into-incus flag is set without any value). 
if importFlag.Value.String() == importFlag.NoOptDefVal { return nil } opAPI := op.Get() alias := api.ImageAliasesPost{} alias.Target = opAPI.Metadata["fingerprint"].(string) alias.Name, err = shared.RenderTemplate(importFlag.Value.String(), c.global.definition) if err != nil { return fmt.Errorf("Failed to render %q: %w", importFlag.Value.String(), err) } alias.Description, err = shared.RenderTemplate(c.global.definition.Image.Description, c.global.definition) if err != nil { return fmt.Errorf("Failed to render %q: %w", c.global.definition.Image.Description, err) } err = server.CreateImageAlias(alias) if err != nil { return fmt.Errorf("Failed to create image alias: %w", err) } } return nil } func (c *cmdIncus) checkVMDependencies() error { dependencies := []string{"btrfs", "mkfs.ext4", "mkfs.vfat", "qemu-img", "rsync", "sgdisk"} for _, dep := range dependencies { _, err := exec.LookPath(dep) if err != nil { return fmt.Errorf("Required tool %q is missing", dep) } } return nil } distrobuilder-3.0/distrobuilder/main_lxc.go000066400000000000000000000166311456216713500212440ustar00rootroot00000000000000package main import ( "fmt" "os" "path/filepath" "github.com/spf13/cobra" "github.com/lxc/distrobuilder/generators" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/managers" "github.com/lxc/distrobuilder/shared" ) type cmdLXC struct { cmdBuild *cobra.Command cmdPack *cobra.Command global *cmdGlobal flagCompression string } func (c *cmdLXC) commandBuild() *cobra.Command { c.cmdBuild = &cobra.Command{ Use: "build-lxc [target dir] [--compression=COMPRESSION]", Short: "Build LXC image from scratch", Long: fmt.Sprintf(`Build LXC image from scratch %s `, compressionDescription), Args: cobra.RangeArgs(1, 2), PreRunE: func(cmd *cobra.Command, args []string) error { // Check compression arguments _, _, err := shared.ParseCompression(c.flagCompression) if err != nil { return fmt.Errorf("Failed to parse compression level: %w", err) } return c.global.preRunBuild(cmd, args) }, RunE: func(cmd *cobra.Command, args []string) error { overlayDir, cleanup, err := c.global.getOverlayDir() if err != nil { return fmt.Errorf("Failed to get overlay directory: %w", err) } if cleanup != nil { c.global.overlayCleanup = cleanup defer func() { cleanup() c.global.overlayCleanup = nil }() } return c.run(cmd, args, overlayDir) }, } c.cmdBuild.Flags().StringVar(&c.flagCompression, "compression", "xz", "Type of compression to use"+"``") c.cmdBuild.Flags().StringVar(&c.global.flagSourcesDir, "sources-dir", filepath.Join(os.TempDir(), "distrobuilder"), "Sources directory for distribution tarballs"+"``") c.cmdBuild.Flags().BoolVar(&c.global.flagKeepSources, "keep-sources", true, "Keep sources after build"+"``") return c.cmdBuild } func (c *cmdLXC) commandPack() *cobra.Command { c.cmdPack = &cobra.Command{ Use: "pack-lxc [target dir] [--compression=COMPRESSION]", Short: "Create LXC image from existing rootfs", Long: fmt.Sprintf(`Create LXC image from existing rootfs %s `, compressionDescription), Args: cobra.RangeArgs(2, 3), PreRunE: func(cmd *cobra.Command, args []string) error { // Check compression arguments _, _, err := shared.ParseCompression(c.flagCompression) if err != nil { return fmt.Errorf("Failed to parse compression level: %w", err) } return c.global.preRunPack(cmd, args) }, RunE: func(cmd *cobra.Command, args []string) error { overlayDir, cleanup, err := c.global.getOverlayDir() if err != nil { return fmt.Errorf("Failed to get overlay directory: %w", err) } if cleanup != nil { c.global.overlayCleanup = 
cleanup defer func() { cleanup() c.global.overlayCleanup = nil }() } err = c.runPack(cmd, args, overlayDir) if err != nil { return fmt.Errorf("Failed to pack image: %w", err) } return c.run(cmd, args, overlayDir) }, } c.cmdPack.Flags().StringVar(&c.flagCompression, "compression", "xz", "Type of compression to use"+"``") return c.cmdPack } func (c *cmdLXC) runPack(cmd *cobra.Command, args []string, overlayDir string) error { // Setup the mounts and chroot into the rootfs exitChroot, err := shared.SetupChroot(overlayDir, *c.global.definition, nil) if err != nil { return fmt.Errorf("Failed to setup chroot: %w", err) } // Unmount everything and exit the chroot defer func() { _ = exitChroot() }() imageTargets := shared.ImageTargetAll | shared.ImageTargetContainer manager, err := managers.Load(c.global.ctx, c.global.definition.Packages.Manager, c.global.logger, *c.global.definition) if err != nil { return fmt.Errorf("Failed to load manager %q: %w", c.global.definition.Packages.Manager, err) } c.global.logger.Info("Managing repositories") err = manager.ManageRepositories(imageTargets) if err != nil { return fmt.Errorf("Failed to manage repositories: %w", err) } c.global.logger.WithField("trigger", "post-unpack").Info("Running hooks") // Run post unpack hook for _, hook := range c.global.definition.GetRunnableActions("post-unpack", imageTargets) { if hook.Pongo { hook.Action, err = shared.RenderTemplate(hook.Action, c.global.definition) if err != nil { return fmt.Errorf("Failed to render action: %w", err) } } err := shared.RunScript(c.global.ctx, hook.Action) if err != nil { return fmt.Errorf("Failed to run post-unpack: %w", err) } } c.global.logger.Info("Managing packages") // Install/remove/update packages err = manager.ManagePackages(imageTargets) if err != nil { return fmt.Errorf("Failed to manage packages: %w", err) } c.global.logger.WithField("trigger", "post-packages").Info("Running hooks") // Run post packages hook for _, hook := range c.global.definition.GetRunnableActions("post-packages", imageTargets) { if hook.Pongo { hook.Action, err = shared.RenderTemplate(hook.Action, c.global.definition) if err != nil { return fmt.Errorf("Failed to render action: %w", err) } } err := shared.RunScript(c.global.ctx, hook.Action) if err != nil { return fmt.Errorf("Failed to run post-packages: %w", err) } } return nil } func (c *cmdLXC) run(cmd *cobra.Command, args []string, overlayDir string) error { img := image.NewLXCImage(c.global.ctx, overlayDir, c.global.targetDir, c.global.flagCacheDir, *c.global.definition) for _, file := range c.global.definition.Files { if !shared.ApplyFilter(&file, c.global.definition.Image.Release, c.global.definition.Image.ArchitectureMapped, c.global.definition.Image.Variant, c.global.definition.Targets.Type, shared.ImageTargetUndefined|shared.ImageTargetAll|shared.ImageTargetContainer) { c.global.logger.WithField("generator", file.Generator).Info("Skipping generator") continue } generator, err := generators.Load(file.Generator, c.global.logger, c.global.flagCacheDir, overlayDir, file, *c.global.definition) if err != nil { return fmt.Errorf("Failed to load generator %q: %w", file.Generator, err) } c.global.logger.WithField("generator", file.Generator).Info("Running generator") err = generator.RunLXC(img, c.global.definition.Targets.LXC) if err != nil { return fmt.Errorf("Failed to run generator %q: %w", file.Generator, err) } } exitChroot, err := shared.SetupChroot(overlayDir, *c.global.definition, nil) if err != nil { return fmt.Errorf("Failed to setup chroot in %q: %w", 
overlayDir, err) } err = addSystemdGenerator() if err != nil { return fmt.Errorf("Failed adding systemd generator: %w", err) } c.global.logger.WithField("trigger", "post-files").Info("Running hooks") // Run post files hook for _, action := range c.global.definition.GetRunnableActions("post-files", shared.ImageTargetUndefined|shared.ImageTargetAll|shared.ImageTargetContainer) { if action.Pongo { action.Action, err = shared.RenderTemplate(action.Action, c.global.definition) if err != nil { return fmt.Errorf("Failed to render action: %w", err) } } err := shared.RunScript(c.global.ctx, action.Action) if err != nil { { err := exitChroot() if err != nil { c.global.logger.WithField("err", err).Warn("Failed exiting chroot") } } return fmt.Errorf("Failed to run post-files: %w", err) } } err = exitChroot() if err != nil { return fmt.Errorf("Failed exiting chroot: %w", err) } c.global.logger.WithField("compression", c.flagCompression).Info("Creating LXC image") err = img.Build(c.flagCompression) if err != nil { return fmt.Errorf("Failed to create LXC image: %w", err) } return nil } distrobuilder-3.0/distrobuilder/main_repack-windows.go000066400000000000000000000503301456216713500234050ustar00rootroot00000000000000package main import ( "bufio" "bytes" "encoding/hex" "errors" "fmt" "net/http" "os" "os/exec" "path/filepath" "regexp" "strconv" "strings" "github.com/flosch/pongo2" incus "github.com/lxc/incus/shared/util" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "golang.org/x/sys/unix" "github.com/lxc/distrobuilder/shared" "github.com/lxc/distrobuilder/windows" ) type cmdRepackWindows struct { global *cmdGlobal flagDrivers string flagWindowsVersion string flagWindowsArchitecture string } func init() { // Filters should be registered in the init() function _ = pongo2.RegisterFilter("toHex", toHex) } func (c *cmdRepackWindows) command() *cobra.Command { cmd := &cobra.Command{ Use: "repack-windows [--drivers=DRIVERS]", Short: "Repack Windows ISO with drivers included", Args: cobra.ExactArgs(2), PreRunE: c.preRun, RunE: func(cmd *cobra.Command, args []string) error { defer func() { _ = unix.Unmount(c.global.sourceDir, 0) }() sourceDir := filepath.Dir(args[0]) targetDir := filepath.Dir(args[1]) // If either the source or the target are located on a FUSE filesystem, disable overlay // as it doesn't play well with wimlib-imagex. for _, dir := range []string{sourceDir, targetDir} { var stat unix.Statfs_t err := unix.Statfs(dir, &stat) if err != nil { c.global.logger.WithFields(logrus.Fields{"dir": dir, "err": err}).Warn("Failed to get directory information") continue } // Since there's no magic number for virtiofs, we need to check FUSE_SUPER_MAGIC (which is not defined in the unix package). if stat.Type == 0x65735546 { c.global.logger.Warn("FUSE filesystem detected, disabling overlay") c.global.flagDisableOverlay = true break } } overlayDir, cleanup, err := c.global.getOverlayDir() if err != nil { return fmt.Errorf("Failed to get overlay directory: %w", err) } if cleanup != nil { c.global.overlayCleanup = cleanup defer func() { cleanup() c.global.overlayCleanup = nil }() } return c.run(cmd, args, overlayDir) }, } cmd.Flags().StringVar(&c.flagDrivers, "drivers", "", "Path to drivers ISO"+"``") cmd.Flags().StringVar(&c.flagWindowsVersion, "windows-version", "", "Windows version to repack"+"``") cmd.Flags().StringVar(&c.flagWindowsArchitecture, "windows-arch", "", "Windows architecture to repack"+"``") return cmd } // Create rw rootfs in preRun. Point global.sourceDir to the rw rootfs. 
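// Note that the ISO mounted here is not modified in place; the actual repacking happens in the overlay directory (or rsync copy) obtained later from getOverlayDir.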
func (c *cmdRepackWindows) preRun(cmd *cobra.Command, args []string) error { logger := c.global.logger if c.flagWindowsVersion == "" { detectedVersion := detectWindowsVersion(filepath.Base(args[0])) if detectedVersion == "" { return errors.New("Failed to detect Windows version. Please provide the version using the --windows-version flag") } c.flagWindowsVersion = detectedVersion } else { supportedVersions := []string{"w11", "w10", "2k19", "2k12", "2k16", "2k22"} if !incus.ValueInSlice(c.flagWindowsVersion, supportedVersions) { return fmt.Errorf("Version must be one of %v", supportedVersions) } } if c.flagWindowsArchitecture == "" { detectedArchitecture := detectWindowsArchitecture(filepath.Base(args[0])) if detectedArchitecture == "" { return errors.New("Failed to detect Windows architecture. Please provide the architecture using the --windows-arch flag") } c.flagWindowsArchitecture = detectedArchitecture } else { supportedArchitectures := []string{"amd64", "ARM64"} if !incus.ValueInSlice(c.flagWindowsArchitecture, supportedArchitectures) { return fmt.Errorf("Architecture must be one of %v", supportedArchitectures) } } // Check dependencies err := c.checkDependencies() if err != nil { return fmt.Errorf("Failed to check dependencies: %w", err) } // if an error is returned, disable the usage message cmd.SilenceUsage = true // Clean up cache directory before doing anything err = os.RemoveAll(c.global.flagCacheDir) if err != nil { return fmt.Errorf("Failed to remove directory %q: %w", c.global.flagCacheDir, err) } success := false err = os.Mkdir(c.global.flagCacheDir, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", c.global.flagCacheDir, err) } defer func() { if c.global.flagCleanup && !success { os.RemoveAll(c.global.flagCacheDir) } }() c.global.sourceDir = filepath.Join(c.global.flagCacheDir, "source") // Create source path err = os.MkdirAll(c.global.sourceDir, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", c.global.sourceDir, err) } logger.Info("Mounting Windows ISO") // Mount ISO err = shared.RunCommand(c.global.ctx, nil, nil, "mount", "-t", "udf", "-o", "loop", args[0], c.global.sourceDir) if err != nil { return fmt.Errorf("Failed to mount %q at %q: %w", args[0], c.global.sourceDir, err) } success = true return nil } func (c *cmdRepackWindows) run(cmd *cobra.Command, args []string, overlayDir string) error { logger := c.global.logger driverPath := filepath.Join(c.global.flagCacheDir, "drivers") virtioISOPath := c.flagDrivers if virtioISOPath == "" { // Download vioscsi driver virtioURL := "https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/latest-virtio/virtio-win.iso" virtioISOPath = filepath.Join(c.global.flagSourcesDir, "windows", "virtio-win.iso") if !incus.PathExists(virtioISOPath) { err := os.MkdirAll(filepath.Dir(virtioISOPath), 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", filepath.Dir(virtioISOPath), err) } f, err := os.Create(virtioISOPath) if err != nil { return fmt.Errorf("Failed to create file %q: %w", virtioISOPath, err) } defer f.Close() var client http.Client logger.Info("Downloading drivers ISO") _, err = incus.DownloadFileHash(c.global.ctx, &client, "", nil, nil, "virtio-win.iso", virtioURL, "", nil, f) if err != nil { f.Close() os.Remove(virtioISOPath) return fmt.Errorf("Failed to download %q: %w", virtioURL, err) } f.Close() } } if !incus.PathExists(driverPath) { err := os.MkdirAll(driverPath, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: 
%w", driverPath, err) } } logger.Info("Mounting driver ISO") // Mount driver ISO err := shared.RunCommand(c.global.ctx, nil, nil, "mount", "-t", "iso9660", "-o", "loop", virtioISOPath, driverPath) if err != nil { return fmt.Errorf("Failed to mount %q at %q: %w", virtioISOPath, driverPath, err) } defer func() { _ = unix.Unmount(driverPath, 0) }() var sourcesDir string entries, err := os.ReadDir(overlayDir) if err != nil { return fmt.Errorf("Failed to read directory %q: %w", overlayDir, err) } for _, entry := range entries { if strings.ToLower(entry.Name()) == "sources" { sourcesDir = filepath.Join(overlayDir, entry.Name()) break } } entries, err = os.ReadDir(sourcesDir) if err != nil { return fmt.Errorf("Failed to read directory %q: %w", sourcesDir, err) } var bootWim string var installWim string // Find boot.wim and install.wim but consider their case. for _, entry := range entries { if bootWim != "" && installWim != "" { break } if strings.ToLower(entry.Name()) == "boot.wim" { bootWim = filepath.Join(sourcesDir, entry.Name()) continue } if strings.ToLower(entry.Name()) == "install.wim" { installWim = filepath.Join(sourcesDir, entry.Name()) continue } } if bootWim == "" { return errors.New("Unable to find boot.wim") } if installWim == "" { return errors.New("Unable to find install.wim") } var buf bytes.Buffer err = shared.RunCommand(c.global.ctx, nil, &buf, "wimlib-imagex", "info", installWim) if err != nil { return fmt.Errorf("Failed to retrieve wim file information: %w", err) } indexes := []int{} scanner := bufio.NewScanner(&buf) for scanner.Scan() { text := scanner.Text() if strings.HasPrefix(text, "Index") { fields := strings.Split(text, " ") index, err := strconv.Atoi(fields[len(fields)-1]) if err != nil { return fmt.Errorf("Failed to determine wim file indexes: %w", err) } indexes = append(indexes, index) } } // This injects the drivers into the installation process err = c.modifyWim(bootWim, 2) if err != nil { return fmt.Errorf("Failed to modify index 2 of %q: %w", filepath.Base(bootWim), err) } // This injects the drivers into the final OS for _, idx := range indexes { err = c.modifyWim(installWim, idx) if err != nil { return fmt.Errorf("Failed to modify index %d of %q: %w", idx, filepath.Base(installWim), err) } } logger.Info("Generating new ISO") var stdout strings.Builder err = shared.RunCommand(c.global.ctx, nil, &stdout, "genisoimage", "--version") if err != nil { return fmt.Errorf("Failed to determine version of genisoimage: %w", err) } version := strings.Split(stdout.String(), "\n")[0] if strings.HasPrefix(version, "mkisofs") { err = shared.RunCommand(c.global.ctx, nil, nil, "genisoimage", "-iso-level", "3", "-l", "-no-emul-boot", "-b", "efi/microsoft/boot/efisys.bin", "-o", args[1], overlayDir) } else { err = shared.RunCommand(c.global.ctx, nil, nil, "genisoimage", "--allow-limited-size", "-l", "-no-emul-boot", "-b", "efi/microsoft/boot/efisys.bin", "-o", args[1], overlayDir) } if err != nil { return fmt.Errorf("Failed to generate ISO: %w", err) } return nil } func (c *cmdRepackWindows) modifyWim(path string, index int) error { logger := c.global.logger // Mount VIM file wimFile := filepath.Join(path) wimPath := filepath.Join(c.global.flagCacheDir, "wim") if !incus.PathExists(wimPath) { err := os.MkdirAll(wimPath, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", wimPath, err) } } success := false err := shared.RunCommand(c.global.ctx, nil, nil, "wimlib-imagex", "mountrw", wimFile, strconv.Itoa(index), wimPath, "--allow-other") if err != nil { 
return fmt.Errorf("Failed to mount %q: %w", filepath.Base(wimFile), err) } defer func() { if !success { _ = shared.RunCommand(c.global.ctx, nil, nil, "wimlib-imagex", "unmount", wimPath) } }() dirs, err := c.getWindowsDirectories(wimPath) if err != nil { return fmt.Errorf("Failed to get required windows directories: %w", err) } if dirs["filerepository"] == "" { return errors.New("Failed to determine windows/system32/driverstore/filerepository path") } if dirs["inf"] == "" { return errors.New("Failed to determine windows/inf path") } if dirs["config"] == "" { return errors.New("Failed to determine windows/system32/config path") } if dirs["drivers"] == "" { return errors.New("Failed to determine windows/system32/drivers path") } logger.WithFields(logrus.Fields{"file": filepath.Base(path), "index": index}).Info("Modifying WIM file") // Create registry entries and copy files err = c.injectDrivers(dirs) if err != nil { return fmt.Errorf("Failed to inject drivers: %w", err) } err = shared.RunCommand(c.global.ctx, nil, nil, "wimlib-imagex", "unmount", wimPath, "--commit") if err != nil { return fmt.Errorf("Failed to unmount WIM image: %w", err) } success = true return nil } func (c *cmdRepackWindows) checkDependencies() error { dependencies := []string{"genisoimage", "hivexregedit", "rsync", "wimlib-imagex"} for _, dep := range dependencies { _, err := exec.LookPath(dep) if err != nil { return fmt.Errorf("Required tool %q is missing", dep) } } return nil } func (c *cmdRepackWindows) getWindowsDirectories(wimPath string) (map[string]string, error) { windowsPath := "" system32Path := "" driverStorePath := "" dirs := make(map[string]string) entries, err := os.ReadDir(wimPath) if err != nil { return nil, err } // Get windows directory for _, entry := range entries { if !entry.IsDir() { continue } if regexp.MustCompile(`^(?i)windows$`).MatchString(entry.Name()) { windowsPath = filepath.Join(wimPath, entry.Name()) break } } entries, err = os.ReadDir(windowsPath) if err != nil { return nil, err } for _, entry := range entries { if dirs["inf"] != "" && system32Path != "" { break } if !entry.IsDir() { continue } if regexp.MustCompile(`^(?i)inf$`).MatchString(entry.Name()) { dirs["inf"] = filepath.Join(windowsPath, entry.Name()) continue } if regexp.MustCompile(`^(?i)system32$`).MatchString(entry.Name()) { system32Path = filepath.Join(windowsPath, entry.Name()) } } entries, err = os.ReadDir(system32Path) if err != nil { return nil, err } for _, entry := range entries { if dirs["config"] != "" && dirs["drivers"] != "" && driverStorePath != "" { break } if !entry.IsDir() { continue } if regexp.MustCompile(`^(?i)config$`).MatchString(entry.Name()) { dirs["config"] = filepath.Join(system32Path, entry.Name()) continue } if regexp.MustCompile(`^(?i)drivers$`).MatchString(entry.Name()) { dirs["drivers"] = filepath.Join(system32Path, entry.Name()) continue } if regexp.MustCompile(`^(?i)driverstore$`).MatchString(entry.Name()) { driverStorePath = filepath.Join(system32Path, entry.Name()) } } entries, err = os.ReadDir(driverStorePath) if err != nil { return nil, err } for _, entry := range entries { if !entry.IsDir() { continue } if regexp.MustCompile(`^(?i)filerepository$`).MatchString(entry.Name()) { dirs["filerepository"] = filepath.Join(driverStorePath, entry.Name()) break } } return dirs, nil } func (c *cmdRepackWindows) injectDrivers(dirs map[string]string) error { logger := c.global.logger driverPath := filepath.Join(c.global.flagCacheDir, "drivers") i := 0 driversRegistry := "Windows Registry Editor Version 
5.00" systemRegistry := "Windows Registry Editor Version 5.00" softwareRegistry := "Windows Registry Editor Version 5.00" for driver, info := range windows.Drivers { logger.WithField("driver", driver).Debug("Injecting driver") ctx := pongo2.Context{ "infFile": fmt.Sprintf("oem%d.inf", i), "packageName": info.PackageName, "driverName": driver, } sourceDir := filepath.Join(driverPath, driver, c.flagWindowsVersion, c.flagWindowsArchitecture) targetBasePath := filepath.Join(dirs["filerepository"], info.PackageName) if !incus.PathExists(targetBasePath) { err := os.MkdirAll(targetBasePath, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", targetBasePath, err) } } err := filepath.Walk(sourceDir, func(path string, info os.FileInfo, err error) error { ext := filepath.Ext(path) targetPath := filepath.Join(targetBasePath, filepath.Base(path)) // Copy driver files if incus.ValueInSlice(ext, []string{".cat", ".dll", ".inf", ".sys"}) { logger.WithFields(logrus.Fields{"src": path, "dest": targetPath}).Debug("Copying file") err := shared.Copy(path, targetPath) if err != nil { return fmt.Errorf("Failed to copy %q to %q: %w", filepath.Base(path), targetPath, err) } } // Copy .inf file if ext == ".inf" { target := filepath.Join(dirs["inf"], ctx["infFile"].(string)) logger.WithFields(logrus.Fields{"src": path, "dest": target}).Debug("Copying file") err := shared.Copy(path, target) if err != nil { return fmt.Errorf("Failed to copy %q to %q: %w", filepath.Base(path), target, err) } // Retrieve the ClassGuid which is needed for the Windows registry entries. file, err := os.Open(path) if err != nil { return fmt.Errorf("Failed to open %s: %w", path, err) } re := regexp.MustCompile(`(?i)^ClassGuid[ ]*=[ ]*(.+)$`) scanner := bufio.NewScanner(file) for scanner.Scan() { matches := re.FindStringSubmatch(scanner.Text()) if len(matches) > 0 { ctx["classGuid"] = strings.TrimSpace(matches[1]) } } file.Close() _, ok := ctx["classGuid"] if !ok { return fmt.Errorf("Failed to determine classGUID for driver %q", driver) } } // Copy .sys and .dll files if ext == ".dll" || ext == ".sys" { target := filepath.Join(dirs["drivers"], filepath.Base(path)) logger.WithFields(logrus.Fields{"src": path, "dest": target}).Debug("Copying file") err := shared.Copy(path, target) if err != nil { return fmt.Errorf("Failed to copy %q to %q: %w", filepath.Base(path), target, err) } } return nil }) if err != nil { return fmt.Errorf("Failed to copy driver files: %w", err) } // Update Windows DRIVERS registry if info.DriversRegistry != "" { tpl, err := pongo2.FromString(info.DriversRegistry) if err != nil { return fmt.Errorf("Failed to parse template for driver %q: %w", driver, err) } out, err := tpl.Execute(ctx) if err != nil { return fmt.Errorf("Failed to render template for driver %q: %w", driver, err) } driversRegistry = fmt.Sprintf("%s\n\n%s", driversRegistry, out) } // Update Windows SYSTEM registry if info.SystemRegistry != "" { tpl, err := pongo2.FromString(info.SystemRegistry) if err != nil { return fmt.Errorf("Failed to parse template for driver %q: %w", driver, err) } out, err := tpl.Execute(ctx) if err != nil { return fmt.Errorf("Failed to render template for driver %q: %w", driver, err) } systemRegistry = fmt.Sprintf("%s\n\n%s", systemRegistry, out) } // Update Windows SOFTWARE registry if info.SoftwareRegistry != "" { tpl, err := pongo2.FromString(info.SoftwareRegistry) if err != nil { return fmt.Errorf("Failed to parse template for driver %q: %w", driver, err) } out, err := tpl.Execute(ctx) if err != nil { 
return fmt.Errorf("Failed to render template for driver %q: %w", driver, err) } softwareRegistry = fmt.Sprintf("%s\n\n%s", softwareRegistry, out) } i++ } logger.WithField("hivefile", "DRIVERS").Debug("Updating Windows registry") err := shared.RunCommand(c.global.ctx, strings.NewReader(driversRegistry), nil, "hivexregedit", "--merge", "--prefix='HKEY_LOCAL_MACHINE\\DRIVERS'", filepath.Join(dirs["config"], "DRIVERS")) if err != nil { return fmt.Errorf("Failed to edit Windows DRIVERS registry: %w", err) } logger.WithField("hivefile", "SYSTEM").Debug("Updating Windows registry") err = shared.RunCommand(c.global.ctx, strings.NewReader(systemRegistry), nil, "hivexregedit", "--merge", "--prefix='HKEY_LOCAL_MACHINE\\SYSTEM'", filepath.Join(dirs["config"], "SYSTEM")) if err != nil { return fmt.Errorf("Failed to edit Windows SYSTEM registry: %w", err) } logger.WithField("hivefile", "SOFTWARE").Debug("Updating Windows registry") err = shared.RunCommand(c.global.ctx, strings.NewReader(softwareRegistry), nil, "hivexregedit", "--merge", "--prefix='HKEY_LOCAL_MACHINE\\SOFTWARE'", filepath.Join(dirs["config"], "SOFTWARE")) if err != nil { return fmt.Errorf("Failed to edit Windows SOFTWARE registry: %w", err) } return nil } func detectWindowsVersion(fileName string) string { aliases := map[string][]string{ "w11": {"w11", "win11", "windows.?11"}, "w10": {"w10", "win10", "windows.?10"}, "2k19": {"2k19", "w2k19", "win2k19", "windows.?server.?2019"}, "2k12": {"2k12", "w2k12", "win2k12", "windows.?server.?2012"}, "2k16": {"2k16", "w2k16", "win2k16", "windows.?server.?2016"}, "2k22": {"2k22", "w2k22", "win2k22", "windows.?server.?2022"}, } for k, v := range aliases { for _, alias := range v { if regexp.MustCompile(fmt.Sprintf("(?i)%s", alias)).MatchString(fileName) { return k } } } return "" } func detectWindowsArchitecture(fileName string) string { aliases := map[string][]string{ "amd64": {"amd64", "x64"}, "ARM64": {"arm64"}, } for k, v := range aliases { for _, alias := range v { if regexp.MustCompile(fmt.Sprintf("(?i)%s", alias)).MatchString(fileName) { return k } } } return "" } // toHex is a pongo2 filter which converts the provided value to a hex value understood by the Windows registry. 
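// For illustration (example input, not taken from the sources): the string "ABC" becomes "41,00,42,00,43,00", i.e. each character's hex value followed by ",00" with the trailing comma trimmed, which for ASCII input matches the little-endian UTF-16 byte layout used in registry hex strings. A registry template could then use it as, for example, {{ driverName|toHex }}.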
func toHex(in *pongo2.Value, param *pongo2.Value) (out *pongo2.Value, err *pongo2.Error) { dst := make([]byte, hex.EncodedLen(len(in.String()))) hex.Encode(dst, []byte(in.String())) var builder strings.Builder for i := 0; i < len(dst); i += 2 { _, err := builder.Write(dst[i : i+2]) if err != nil { return &pongo2.Value{}, &pongo2.Error{Sender: "filter:toHex", OrigError: err} } _, err = builder.WriteString(",00,") if err != nil { return &pongo2.Value{}, &pongo2.Error{Sender: "filter:toHex", OrigError: err} } } return pongo2.AsValue(strings.TrimSuffix(builder.String(), ",")), nil } distrobuilder-3.0/distrobuilder/main_repack-windows_test.go000066400000000000000000000040531456216713500244450ustar00rootroot00000000000000package main import ( "testing" "github.com/stretchr/testify/assert" ) func Test_detectWindowsVersion(t *testing.T) { type args struct { fileName string } tests := []struct { name string args args want string }{ { "Windows 11 (1)", args{"Windows 11.iso"}, "w11", }, { "Windows 11 (2)", args{"Windows11.iso"}, "w11", }, { "Windows 11 (3)", args{"Win11.iso"}, "w11", }, { "Windows 11 (4)", args{"Windows_11.iso"}, "w11", }, { "Windows 10 (1)", args{"Windows 10.iso"}, "w10", }, { "Windows 10 (2)", args{"Windows10.iso"}, "w10", }, { "Windows 10 (3)", args{"Win10.iso"}, "w10", }, { "Windows 10 (4)", args{"Windows_10.iso"}, "w10", }, { "Windows Server 2019 (1)", args{"Windows_Server_2019.iso"}, "2k19", }, { "Windows Server 2019 (2)", args{"Windows Server 2019.iso"}, "2k19", }, { "Windows Server 2019 (3)", args{"WindowsServer2019.iso"}, "2k19", }, { "Windows Server 2019 (4)", args{"Windows_Server_2k19.iso"}, "2k19", }, { "Windows Server 2012 (1)", args{"Windows_Server_2012.iso"}, "2k12", }, { "Windows Server 2012 (2)", args{"Windows Server 2012.iso"}, "2k12", }, { "Windows Server 2012 (3)", args{"WindowsServer2012.iso"}, "2k12", }, { "Windows Server 2012 (4)", args{"Windows_Server_2k12.iso"}, "2k12", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := detectWindowsVersion(tt.args.fileName) assert.Equal(t, tt.want, got) }) } } func Test_detectWindowsArchitecture(t *testing.T) { type args struct { fileName string } tests := []struct { name string args args want string }{ { "Windows 11 (1)", args{"Win10_22H2_English_x64.iso"}, "amd64", }, { "Windows 11 (2)", args{"Win10_22H2_English_arm64.iso"}, "ARM64", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := detectWindowsArchitecture(tt.args.fileName) assert.Equal(t, tt.want, got) }) } } distrobuilder-3.0/distrobuilder/main_validate.go000066400000000000000000000014171456216713500222430ustar00rootroot00000000000000package main import ( "fmt" "github.com/spf13/cobra" ) type cmdValidate struct { cmdValidate *cobra.Command global *cmdGlobal } func (c *cmdValidate) command() *cobra.Command { c.cmdValidate = &cobra.Command{ Use: "validate ", Short: "Validate definition file", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { // Get the image definition _, err := getDefinition(args[0], c.global.flagOptions) if err != nil { return fmt.Errorf("Failed to get definition: %w", err) } return nil }, SilenceUsage: true, SilenceErrors: true, } c.cmdValidate.Flags().StringSliceVarP(&c.global.flagOptions, "options", "o", []string{}, "Override options (list of key=value)"+"``") return c.cmdValidate } distrobuilder-3.0/distrobuilder/vm.go000066400000000000000000000150721456216713500200720ustar00rootroot00000000000000package main import ( "context" "errors" "fmt" "os" "path/filepath" 
"strconv" "strings" incus "github.com/lxc/incus/shared/util" "golang.org/x/sys/unix" "github.com/lxc/distrobuilder/shared" ) type vm struct { imageFile string loopDevice string rootFS string rootfsDir string size uint64 ctx context.Context } func newVM(ctx context.Context, imageFile, rootfsDir, fs string, size uint64) (*vm, error) { if fs == "" { fs = "ext4" } if !incus.ValueInSlice(fs, []string{"btrfs", "ext4"}) { return nil, fmt.Errorf("Unsupported fs: %s", fs) } if size == 0 { size = 4294967296 } return &vm{ctx: ctx, imageFile: imageFile, rootfsDir: rootfsDir, rootFS: fs, size: size}, nil } func (v *vm) getLoopDev() string { return v.loopDevice } func (v *vm) getRootfsDevFile() string { if v.loopDevice == "" { return "" } return fmt.Sprintf("%sp2", v.loopDevice) } func (v *vm) getUEFIDevFile() string { if v.loopDevice == "" { return "" } return fmt.Sprintf("%sp1", v.loopDevice) } func (v *vm) createEmptyDiskImage() error { f, err := os.Create(v.imageFile) if err != nil { return fmt.Errorf("Failed to open %s: %w", v.imageFile, err) } defer f.Close() err = f.Chmod(0600) if err != nil { return fmt.Errorf("Failed to chmod %s: %w", v.imageFile, err) } err = f.Truncate(int64(v.size)) if err != nil { return fmt.Errorf("Failed to create sparse file %s: %w", v.imageFile, err) } return nil } func (v *vm) createPartitions() error { args := [][]string{ {"--zap-all"}, {"--new=1::+100M", "-t 1:EF00"}, {"--new=2::", "-t 2:8300"}, } for _, cmd := range args { err := shared.RunCommand(v.ctx, nil, nil, "sgdisk", append([]string{v.imageFile}, cmd...)...) if err != nil { return fmt.Errorf("Failed to create partitions: %w", err) } } return nil } func (v *vm) mountImage() error { // If loopDevice is set, it probably is already mounted. if v.loopDevice != "" { return nil } var out strings.Builder err := shared.RunCommand(v.ctx, nil, &out, "losetup", "-P", "-f", "--show", v.imageFile) if err != nil { return fmt.Errorf("Failed to setup loop device: %w", err) } v.loopDevice = strings.TrimSpace(out.String()) out.Reset() // Ensure the partitions are accessible. This part is usually only needed // if building inside of a container. 
err = shared.RunCommand(v.ctx, nil, &out, "lsblk", "--raw", "--output", "MAJ:MIN", "--noheadings", v.loopDevice) if err != nil { return fmt.Errorf("Failed to list block devices: %w", err) } deviceNumbers := strings.Split(out.String(), "\n") if !incus.PathExists(v.getUEFIDevFile()) { fields := strings.Split(deviceNumbers[1], ":") major, err := strconv.Atoi(fields[0]) if err != nil { return fmt.Errorf("Failed to parse %q: %w", fields[0], err) } minor, err := strconv.Atoi(fields[1]) if err != nil { return fmt.Errorf("Failed to parse %q: %w", fields[1], err) } dev := unix.Mkdev(uint32(major), uint32(minor)) err = unix.Mknod(v.getUEFIDevFile(), unix.S_IFBLK|0644, int(dev)) if err != nil { return fmt.Errorf("Failed to create block device %q: %w", v.getUEFIDevFile(), err) } } if !incus.PathExists(v.getRootfsDevFile()) { fields := strings.Split(deviceNumbers[2], ":") major, err := strconv.Atoi(fields[0]) if err != nil { return fmt.Errorf("Failed to parse %q: %w", fields[0], err) } minor, err := strconv.Atoi(fields[1]) if err != nil { return fmt.Errorf("Failed to parse %q: %w", fields[1], err) } dev := unix.Mkdev(uint32(major), uint32(minor)) err = unix.Mknod(v.getRootfsDevFile(), unix.S_IFBLK|0644, int(dev)) if err != nil { return fmt.Errorf("Failed to create block device %q: %w", v.getRootfsDevFile(), err) } } return nil } func (v *vm) umountImage() error { // If loopDevice is empty, the image probably isn't mounted. if v.loopDevice == "" || !incus.PathExists(v.loopDevice) { return nil } err := shared.RunCommand(v.ctx, nil, nil, "losetup", "-d", v.loopDevice) if err != nil { return fmt.Errorf("Failed to detach loop device: %w", err) } // Make sure that p1 and p2 are also removed. if incus.PathExists(v.getUEFIDevFile()) { err := os.Remove(v.getUEFIDevFile()) if err != nil { return fmt.Errorf("Failed to remove file %q: %w", v.getUEFIDevFile(), err) } } if incus.PathExists(v.getRootfsDevFile()) { err := os.Remove(v.getRootfsDevFile()) if err != nil { return fmt.Errorf("Failed to remove file %q: %w", v.getRootfsDevFile(), err) } } v.loopDevice = "" return nil } func (v *vm) createRootFS() error { if v.loopDevice == "" { return errors.New("Disk image not mounted") } switch v.rootFS { case "btrfs": err := shared.RunCommand(v.ctx, nil, nil, "mkfs.btrfs", "-f", "-L", "rootfs", v.getRootfsDevFile()) if err != nil { return fmt.Errorf("Failed to create btrfs filesystem: %w", err) } // Create the root subvolume as well err = shared.RunCommand(v.ctx, nil, nil, "mount", "-t", v.rootFS, v.getRootfsDevFile(), v.rootfsDir) if err != nil { return fmt.Errorf("Failed to mount %q at %q: %w", v.getRootfsDevFile(), v.rootfsDir, err) } defer func() { _ = shared.RunCommand(v.ctx, nil, nil, "umount", v.rootfsDir) }() return shared.RunCommand(v.ctx, nil, nil, "btrfs", "subvolume", "create", fmt.Sprintf("%s/@", v.rootfsDir)) case "ext4": return shared.RunCommand(v.ctx, nil, nil, "mkfs.ext4", "-F", "-b", "4096", "-i 8192", "-m", "0", "-L", "rootfs", "-E", "resize=536870912", v.getRootfsDevFile()) } return nil } func (v *vm) createUEFIFS() error { if v.loopDevice == "" { return errors.New("Disk image not mounted") } return shared.RunCommand(v.ctx, nil, nil, "mkfs.vfat", "-F", "32", "-n", "UEFI", v.getUEFIDevFile()) } func (v *vm) mountRootPartition() error { if v.loopDevice == "" { return errors.New("Disk image not mounted") } switch v.rootFS { case "btrfs": return shared.RunCommand(v.ctx, nil, nil, "mount", v.getRootfsDevFile(), v.rootfsDir, "-t", v.rootFS, "-o", "defaults,discard,nobarrier,commit=300,noatime,subvol=/@") 
case "ext4": return shared.RunCommand(v.ctx, nil, nil, "mount", v.getRootfsDevFile(), v.rootfsDir, "-t", v.rootFS, "-o", "discard,nobarrier,commit=300,noatime,data=writeback") } return nil } func (v *vm) mountUEFIPartition() error { if v.loopDevice == "" { return errors.New("Disk image not mounted") } mountpoint := filepath.Join(v.rootfsDir, "boot", "efi") err := os.MkdirAll(mountpoint, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", mountpoint, err) } return shared.RunCommand(v.ctx, nil, nil, "mount", "-t", "vfat", v.getUEFIDevFile(), mountpoint, "-o", "discard") } distrobuilder-3.0/doc/000077500000000000000000000000001456216713500150065ustar00rootroot00000000000000distrobuilder-3.0/doc/doc-cheat-sheet.md000066400000000000000000000337601456216713500202760ustar00rootroot00000000000000--- orphan: true myst: substitutions: reuse_key: "This is **included** text." advanced_reuse_key: "This is a substitution that includes a code block: ``` code block ```" --- # Documentation cheat sheet The documentation files use a mixture of [Markdown](https://commonmark.org/) and [MyST](https://myst-parser.readthedocs.io/) syntax. See the following sections for syntax help and conventions. ## Headings ```{list-table} :header-rows: 1 * - Input - Description * - `# Title` - Page title and H1 heading * - `## Heading` - H2 heading * - `### Heading` - H3 heading * - `#### Heading` - H4 heading * - ... - Further headings ``` Adhere to the following conventions: - Do not use consecutive headings without intervening text. - Use sentence style for headings (capitalize only the first word). - Do not skip levels (for example, always follow an H2 with an H3, not an H4). ## Inline formatting ```{list-table} :header-rows: 1 * - Input - Output * - `*Italic*` - *Italic* * - `**Bold**` - **Bold** * - `` `code` `` - `code` ``` Adhere to the following conventions: - Use italics sparingly. Common uses for italics are titles and names (for example, when referring to a section title that you cannot link to, or when introducing the name for a concept). - Use bold sparingly. A common use for bold is UI elements ("Click **OK**"). Avoid using bold for emphasis and rather rewrite the sentence to get your point across. ## Code blocks Start and end a code block with three back ticks: ``` You can specify the code language after the back ticks to enforce a specific lexer, but in many cases, the default lexer works just fine. ```{list-table} :header-rows: 1 * - Input - Output * - ```` ``` # Demonstrate a code block code: - example: true ``` ```` - ``` # Demonstrate a code block code: - example: true ``` * - ```` ```yaml # Demonstrate a code block code: - example: true ``` ```` - ```yaml # Demonstrate a code block code: - example: true ``` ``` To include back ticks in a code block, increase the number of surrounding back ticks: ```{list-table} :header-rows: 1 * - Input - Output * - ````` ```` ``` ```` ````` - ```` ``` ```` ``` ## Links How to link depends on if you are linking to an external URL or to another page in the documentation. ### External links For external links, use only the URL, or Markdown syntax if you want to override the link text. 
```{list-table} :header-rows: 1 * - Input - Output * - `https://linuxcontainers.org` - [{spellexception}`https://linuxcontainers.org`](https://linuxcontainers.org) * - `[Linux Containers](https://linuxcontainers.org)` - [Linux Containers](https://linuxcontainers.org) ``` To display a URL as text and prevent it from being linked, add a ``: ```{list-table} :header-rows: 1 * - Input - Output * - `https://linuxcontainers.org` - {spellexception}`https://linuxcontainers.org` ``` ### Internal references For internal references, both Markdown and MyST syntax are supported. In most cases, you should use MyST syntax though, because it resolves the link text automatically and gives an indication of the link in GitHub rendering. #### Referencing a page To reference a documentation page, use MyST syntax to automatically extract the link text. When overriding the link text, use Markdown syntax. ```{list-table} :header-rows: 1 * - Input - Output - Output on GitHub - Status * - `` {doc}`index` `` - {doc}`index` - {doc}`index` - Preferred. * - `[](index)` - [](index) - - Do not use. * - `[Distrobuilder documentation](index)` - [Distrobuilder documentation](index) - [Distrobuilder documentation](index) - Preferred when overriding the link text. * - `` {doc}`Distrobuiledr documentation ` `` - {doc}`Distrobuilder documentation ` - {doc}`Distrobuilder documentation ` - Alternative when overriding the link text. ``` Adhere to the following conventions: - Override the link text only when it is necessary. If you can use the document title as link text, do so, because the text will then update automatically if the title changes. - Never "override" the link text with the same text that would be generated automatically. (a_section_target)= #### Referencing a section To reference a section within the documentation (on the same page or on another page), you can either add a target to it and reference that target, or you can use an automatically generated anchor in combination with the file name. Adhere to the following conventions: - Add targets for sections that are central and a "typical" place to link to, so you expect they will be linked frequently. For "one-off" links, you can use the automatically generated anchors. - Override the link text only when it is necessary. If you can use the section title as link text, do so, because the text will then update automatically if the title changes. - Never "override" the link text with the same text that would be generated automatically. ##### Using a target You can add targets at any place in the documentation. However, if there is no heading or title for the targeted element, you must specify a link text. (a_random_target)= ```{list-table} :header-rows: 1 * - Input - Output - Output on GitHub - Description * - `(target_ID)=` - - \(target_ID\)= - Adds the target ``target_ID``. * - `` {ref}`a_section_target` `` - {ref}`a_section_target` - \{ref\}`a_section_target` - References a target that has a title. * - `` {ref}`link text ` `` - {ref}`link text ` - \{ref\}`link text ` - References a target and specifies a title. * - ``[`option name\](a_random_target)`` - [`option name`](a_random_target) - [`option name`](a_random_target) (link is broken) - Use Markdown syntax if you need markup on the link text. ``` ##### Using an automatically generated anchor You must use Markdown syntax to use automatically generated anchors. You can leave out the file name when linking within the same file. 
```{list-table} :header-rows: 1 * - Input - Output - Output on GitHub - Description * - `[](#referencing-a-section)` - [](#referencing-a-section) - - Do not use. * - `[link text](#referencing-a-section)` - [link text](#referencing-a-section) - [link text](#referencing-a-section) - Preferred when overriding the link text. ``` ## Navigation Every documentation page must be included as a subpage to another page in the navigation. This is achieved with the [`toctree`](https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html#directive-toctree) directive in the parent page: ```` ```{toctree} :hidden: subpage1 subpage2 ``` ```` If a page should not be included in the navigation, you can suppress the resulting build warning by putting the following instruction at the top of the file: ``` --- orphan: true --- ``` Use orphan pages sparingly and only if there is a clear reason for it. ## Lists ```{list-table} :header-rows: 1 * - Input - Output * - ``` - Item 1 - Item 2 - Item 3 ``` - - Item 1 - Item 2 - Item 3 * - ``` 1. Step 1 1. Step 2 1. Step 3 ``` - 1. Step 1 1. Step 2 1. Step 3 * - ``` 1. Step 1 - Item 1 * Subitem - Item 2 1. Step 2 1. Substep 1 1. Substep 2 ``` - 1. Step 1 - Item 1 * Subitem - Item 2 1. Step 2 1. Substep 1 1. Substep 2 ``` Adhere to the following conventions: - In numbered lists, use ``1.`` for all items to generate the step numbers automatically. - Use `-` for unordered lists. When using nested lists, you can use `*` for the nested level. ### Definition lists ```{list-table} :header-rows: 1 * - Input - Output * - ``` Term 1 : Definition Term 2 : Definition ``` - Term 1 : Definition Term 2 : Definition ``` ## Tables You can use standard Markdown tables. However, using the rST [list table](https://docutils.sourceforge.io/docs/ref/rst/directives.html#list-table) syntax is usually much easier. Both markups result in the following output: ```{list-table} :header-rows: 1 * - Header 1 - Header 2 * - Cell 1 Second paragraph cell 1 - Cell 2 * - Cell 3 - Cell 4 ``` ### Markdown tables ``` | Header 1 | Header 2 | |------------------------------------|----------| | Cell 1

2nd paragraph cell 1 | Cell 2 | | Cell 3 | Cell 4 | ``` ### List tables ```` ```{list-table} :header-rows: 1 * - Header 1 - Header 2 * - Cell 1 2nd paragraph cell 1 - Cell 2 * - Cell 3 - Cell 4 ``` ```` ## Notes ```{list-table} :header-rows: 1 * - Input - Output * - ```` ```{note} A note. ``` ```` - ```{note} A note. ``` * - ```` ```{tip} A tip. ``` ```` - ```{tip} A tip. ``` * - ```` ```{important} Important information ``` ```` - ```{important} Important information. ``` * - ```` ```{caution} This might damage your hardware! ``` ```` - ```{caution} This might damage your hardware! ``` ``` Adhere to the following conventions: - Use notes sparingly. - Only use the following note types: `note`, `tip`, `important`, `caution` - Only use a caution if there is a clear hazard of hardware damage or data loss. ## Images ```{list-table} :header-rows: 1 * - Input - Output * - ``` ![Alt text](https://linuxcontainers.org/static/img/containers.png) ``` - ![Alt text](https://linuxcontainers.org/static/img/containers.png) * - ```` ```{figure} https://linuxcontainers.org/static/img/containers.png :width: 100px :alt: Alt text Figure caption ``` ```` - ```{figure} https://linuxcontainers.org/static/img/containers.png :width: 100px :alt: Alt text Figure caption ``` ``` Adhere to the following conventions: - For pictures in the `doc` folder, start the path with `/` (for example, `/images/image.png`). - Use PNG format for screenshots and SVG format for graphics. ## Reuse A big advantage of MyST in comparison to plain Markdown is that it allows to reuse content. ### Substitution To reuse sentences or paragraphs without too much markup and special formatting, use substitutions. Substitutions can be defined in the following locations: - In the `substitutions.yaml` file. Substitutions defined in this file are available in all documentation pages. - At the top of a single file in the following format: ```` --- myst: substitutions: reuse_key: "This is **included** text." advanced_reuse_key: "This is a substitution that includes a code block: ``` code block ```" --- ```` You can combine both options by defining a default substitution in `reuse/substitutions.py` and overriding it at the top of a file. ```{list-table} :header-rows: 1 * - Input - Output * - `{{reuse_key}}` - {{reuse_key}} * - `{{advanced_reuse_key}}` - {{advanced_reuse_key}} ``` Adhere to the following convention: - Substitutions do not work on GitHub. Therefore, use key names that indicate the included text (for example, `note_not_supported` instead of `reuse_note`). ### File inclusion To reuse longer sections or text with more advanced markup, you can put the content in a separate file and include the file or parts of the file in several locations. You cannot put any targets into the content that is being reused (because references to this target would be ambiguous then). You can, however, put a target right before including the file. By combining file inclusion and substitutions, you can even replace parts of the included text. `````{list-table} :header-rows: 1 * - Input - Output * - ```` % Include parts of the content from file [../README.md](../README.md) ```{include} ../README.md :start-after: Installing from source :end-before: Second, download the source code ``` ```` - % Include parts of the content from file [../README.md](../README.md) ```{include} ../README.md :start-after: Installing from source :end-before: Second, download the source code ``` ````` Adhere to the following convention: - File inclusion does not work on GitHub. 
Therefore, always add a comment linking to the included file. - To select parts of the text, add HTML comments for the start and end points and use `:start-after:` and `:end-before:`, if possible. You can combine `:start-after:` and `:end-before:` with `:start-line:` and `:end-line:` if required. Using only `:start-line:` and `:end-line:` is error-prone though. ## Tabs ``````{list-table} :header-rows: 1 * - Input - Output * - ````` ````{tabs} ```{group-tab} Tab 1 Content Tab 1 ``` ```{group-tab} Tab 2 Content Tab 2 ``` ```` ````` - ````{tabs} ```{group-tab} Tab 1 Content Tab 1 ``` ```{group-tab} Tab 2 Content Tab 2 ``` ```` `````` ## Collapsible sections There is no support for details sections in rST, but you can insert HTML to create them. ```{list-table} :header-rows: 1 * - Input - Output * - ```
<details>
<summary>Details</summary>

Content
</details>
``` -
<details>
<summary>Details</summary>

Content
</details>
``` ## Glossary You can define glossary terms in any file. Ideally, all terms should be collected in one glossary file though, and they can then be referenced from any file. `````{list-table} :header-rows: 1 * - Input - Output * - ```` ```{glossary} example term Definition of the example term. ``` ```` - ```{glossary} example term Definition of the example term. ``` * - ``{term}`example term` `` - {term}`example term` ````` distrobuilder-3.0/doc/examples/000077500000000000000000000000001456216713500166245ustar00rootroot00000000000000distrobuilder-3.0/doc/examples/sabayon.yaml000066400000000000000000000065021456216713500211470ustar00rootroot00000000000000image: distribution: sabayon description: Sabayon Builder expiry: 30d variant: builder architecture: amd64 source: downloader: docker-http url: sabayon/builder-amd64 environment: clear_defaults: true variables: - key: "SHELL" value: "/bin/bash" - key: "ACCEPT_LICENSE" value: "*" - key: "ETP_NONINTERACTIVE" value: "1" targets: lxc: create_message: | You just created a Sabayon container (arch={{ image.architecture }}) config: - type: all before: 5 content: |- lxc.include = LXC_TEMPLATE_CONFIG/sabayon.common.conf - type: user before: 5 content: |- lxc.include = LXC_TEMPLATE_CONFIG/sabayon.userns.conf - type: all after: 4 content: |- lxc.include = LXC_TEMPLATE_CONFIG/common.conf - type: user after: 4 content: |- lxc.include = LXC_TEMPLATE_CONFIG/userns.conf - type: all content: |- lxc.arch = {{ image.architecture_kernel }} files: - path: /etc/hostname generator: hostname - path: /etc/hosts generator: hosts packages: manager: equo # repositories: # - name: "community" # type: "enman" # Enable main repository # - name: "sabayonlinux.org" # type: "equo" update: true cleanup: true sets: - packages: - sabayon-live action: install actions: # Spinbase image doesn't include enman tool # for external repositories. This is not needed # if external repository aren't used or it's used equ # as repo type. #- trigger: post-unpack # action: |- # #!/bin/sh # equo up && equo i enman - trigger: post-packages action: |- #!/bin/sh echo -5 | equo conf update # Disable systemd-remount-fs.service because # on unprivileged container systemd can't # remount filesystem. - trigger: post-packages action: |- #!/bin/sh cd /etc/systemd/system ln -s /dev/null systemd-remount-fs.service # Disable mount of hugepages - trigger: post-packages action: |- #!/bin/bash cd /etc/systemd/system ln -s /dev/null dev-hugepages.mount # Disable systemd-journald-audit service - trigger: post-packages action: |- #!/bin/bash cd /etc/systemd/system ln -s /dev/null systemd-journald-audit.socket # Disable sabayon-anti-fork-bomb limits # (already apply to host) - trigger: post-packages action: |- #!/bin/bash sed -i -e 's/^*/#*/g' /etc/security/limits.d/00-sabayon-anti-fork-bomb.conf sed -i -e 's/^root/#root/g' /etc/security/limits.d/00-sabayon-anti-fork-bomb.conf # Configure DHCP for interface eth0 by default. # Avoid to use DHCP for any interface to avoid reset of docker # interfaces or others custom interfaces. - trigger: post-packages action: |- #!/bin/bash cat > /etc/systemd/network/default_dhcp.network << "EOF" [Network] DHCP=ipv4 [Match] Name=eth0 [DHCP] UseDomains=true EOF # Enable systemd-networkd service by default. 
- trigger: post-packages action: |- #!/bin/bash systemctl enable systemd-networkd # Clean journal directory (to avoid permission errors) - trigger: post-packages action: |- rm -rf /var/log/journal/ mappings: architecture_map: debian distrobuilder-3.0/doc/examples/scheme.yaml000066400000000000000000000074041456216713500207610ustar00rootroot00000000000000# This example contains every possible key image: description: |- here goes the image description distribution: distro release: release architecture: x86_64 expiry: 30d variant: default name: distro-release-x86_64 serial: some-random-string source: downloader: ubuntu-http url: http://archive.ubuntu.com keys: - 0xdeadbeaf keyserver: http://keyserver.ubuntu.com variant: default suite: suite same_as: bionic skip_verification: false components: - main targets: lxc: create_message: |- You just created an {{ image.description }} container. To enable SSH, run: apt install openssh-server No default root or user password are set by LXC. config: - type: all before: 5 content: |- lxc.include = LXC_TEMPLATE_CONFIG/ubuntu.common.conf - type: user before: 5 content: |- lxc.include = LXC_TEMPLATE_CONFIG/ubuntu.userns.conf - type: all after: 4 content: |- lxc.include = LXC_TEMPLATE_CONFIG/common.conf - type: user after: 4 content: |- lxc.include = LXC_TEMPLATE_CONFIG/userns.conf - type: all content: |- lxc.arch = {{ image.architecture_personality }} incus: vm: size: 2147483648 filesystem: ext4 files: - generator: dump path: /some/path content: |- here goes the content name: name mode: 0644 gid: 1000 uid: 1000 template: properties: key: value when: - always templated: true releases: - a - b architectures: - x86_64 variants: - default - generator: hostname path: /etc/hostname - generator: hosts path: /etc/hosts - generator: remove path: /root/file - generator: template name: foo content: |- Here goes the content template: properties: key: value when: - create - copy packages: manager: apt custom_manager: clean: cmd: mgr flags: - clean install: cmd: mgr flags: - install remove: cmd: mgr flags: - remove refresh: cmd: mgr flags: - refresh update: cmd: mgr flags: - update flags: - --yes update: true cleanup: false sets: - packages: - gnupg action: install early: true - packages: - vim action: install releases: - a - b architectures: - x86_64 variants: - default - packages: - lightdm action: install flags: - --no-install-recommends - packages: - grub action: remove repositories: - name: reponame url: |- deb http://archive.ubuntu.com/ubuntu {{ image.release }}-updates main restricted universe multiverse type: type key: 0xdeadbeaf releases: - a - b architectures: - x86_64 variants: - default actions: - trigger: post-unpack action: |- #!/bin/sh do something after the rootfs has been unpacked - trigger: post-files action: |- #!/bin/sh do something after the files section has been processed - trigger: post-update action: |- #!/bin/sh do something after packages have been processed - trigger: post-packages action: |- #!/bin/sh do something after the packages section has been processed releases: - a - b architectures: - x86_64 variants: - default mappings: architectures: a: b c: d architecture_map: debian environment: clear_defaults: true variables: - key: FOO value: bar distrobuilder-3.0/doc/examples/ubuntu.yaml000066400000000000000000000136111456216713500210340ustar00rootroot00000000000000image: name: ubuntu-disco-x86_64 distribution: ubuntu release: focal description: |- Ubuntu {{ image.release }} architecture: x86_64 source: downloader: debootstrap same_as: gutsy 
url: http://archive.ubuntu.com/ubuntu keyserver: keyserver.ubuntu.com keys: - 0x790BC7277767219C42C86F933B4FE6ACC0B21F32 - 0xf6ecb3762474eda9d21b7022871920d1991bc93c targets: lxc: create_message: |- You just created an {{ image.description }} container. To enable SSH, run: apt install openssh-server No default root or user password are set by LXC. config: - type: all before: 5 content: |- lxc.include = LXC_TEMPLATE_CONFIG/ubuntu.common.conf - type: user before: 5 content: |- lxc.include = LXC_TEMPLATE_CONFIG/ubuntu.userns.conf - type: all after: 4 content: |- lxc.include = LXC_TEMPLATE_CONFIG/common.conf - type: user after: 4 content: |- lxc.include = LXC_TEMPLATE_CONFIG/userns.conf - type: all content: |- lxc.arch = {{ image.architecture_personality }} files: - path: /etc/hostname generator: hostname - path: /etc/hosts generator: hosts - path: /etc/resolvconf/resolv.conf.d/original generator: remove - path: /etc/resolvconf/resolv.conf.d/tail generator: remove - path: /etc/machine-id generator: dump - path: /etc/user/profile generator: copy source: /etc/profile - path: /var/lib/dbus/machine-id generator: remove - path: /etc/netplan/10-lxc.yaml generator: dump content: |- network: version: 2 ethernets: eth0: dhcp4: true dhcp-identifier: mac releases: - bionic - eoan - focal - groovy - hirsute - impish - jammy types: - container variants: - default - path: /etc/netplan/10-lxc.yaml generator: dump content: |- network: version: 2 ethernets: enp5s0: dhcp4: true dhcp-identifier: mac releases: - bionic - eoan - focal - groovy - hirsute - impish - jammy types: - vm variants: - default - name: meta-data generator: cloud-init variants: - cloud - name: network-config generator: cloud-init variants: - cloud - name: user-data generator: cloud-init variants: - cloud - name: vendor-data generator: cloud-init variants: - cloud - name: ext4 generator: fstab types: - vm - name: incus-agent generator: incus-agent types: - vm - path: /etc/default/grub.d/50-incus.cfg generator: dump content: |- GRUB_RECORDFAIL_TIMEOUT=0 GRUB_TIMEOUT=0 GRUB_CMDLINE_LINUX_DEFAULT="${GRUB_CMDLINE_LINUX_DEFAULT} console=tty1 console=ttyS0" GRUB_TERMINAL=console types: - vm - path: /etc/sudoers.d/90-incus generator: dump mode: 0440 content: |- # User rules for ubuntu ubuntu ALL=(ALL) NOPASSWD:ALL variants: - default packages: manager: apt update: true cleanup: true sets: - packages: - fuse - language-pack-en - openssh-client - vim action: install - packages: - cloud-init action: install variants: - cloud - packages: - acpid action: install architectures: - amd64 - arm64 types: - vm - packages: - grub-efi-amd64-signed - shim-signed action: install architectures: - amd64 types: - vm - packages: - grub-efi-arm64-signed action: install architectures: - arm64 types: - vm - packages: - shim-signed action: install architectures: - arm64 releases: - disco - eoan - focal - groovy - hirsute - impish - jammy types: - vm - packages: - linux-virtual action: install releases: - bionic - eoan - focal - groovy - hirsute - impish - jammy types: - vm - packages: - os-prober action: remove types: - vm repositories: - name: sources.list url: |- deb http://archive.ubuntu.com/ubuntu {{ image.release }} main restricted universe multiverse deb http://archive.ubuntu.com/ubuntu {{ image.release }}-updates main restricted universe multiverse deb http://security.ubuntu.com/ubuntu {{ image.release }}-security main restricted universe multiverse architectures: - amd64 - i386 - name: sources.list url: |- deb http://ports.ubuntu.com/ubuntu-ports {{ image.release 
}} main restricted universe multiverse deb http://ports.ubuntu.com/ubuntu-ports {{ image.release }}-updates main restricted universe multiverse deb http://ports.ubuntu.com/ubuntu-ports {{ image.release }}-security main restricted universe multiverse architectures: - armhf - arm64 - powerpc - powerpc64 - ppc64el - riscv64 actions: - trigger: post-update action: |- #!/bin/sh set -eux # Create the ubuntu user account getent group sudo >/dev/null 2>&1 || groupadd --system sudo useradd --create-home -s /bin/bash -G sudo -U ubuntu variants: - default - trigger: post-packages action: |- #!/bin/sh set -eux # Enable systemd-networkd systemctl enable systemd-networkd releases: - bionic - eoan - focal - groovy - hirsute - impish - jammy - trigger: post-packages action: |- #!/bin/sh set -eux # Make sure the locale is built and functional locale-gen en_US.UTF-8 update-locale LANG=en_US.UTF-8 # Cleanup underlying /run mount -o bind / /mnt rm -rf /mnt/run/* umount /mnt # Cleanup temporary shadow paths rm /etc/*- - trigger: post-files action: |- #!/bin/sh set -eux TARGET="x86_64" [ "$(uname -m)" = "aarch64" ] && TARGET="arm64" update-grub grub-install --uefi-secure-boot --target="${TARGET}-efi" --no-nvram --removable update-grub sed -i "s#root=[^ ]*#root=/dev/sda2#g" /boot/grub/grub.cfg types: - vm mappings: architecture_map: debian distrobuilder-3.0/doc/howto/000077500000000000000000000000001456216713500161465ustar00rootroot00000000000000distrobuilder-3.0/doc/howto/build.md000066400000000000000000000125371456216713500175770ustar00rootroot00000000000000# How to build images ## Plain rootfs ```shell $ distrobuilder build-dir --help Build plain rootfs Usage: distrobuilder build-dir [flags] Flags: -h, --help help for build-dir --keep-sources Keep sources after build (default true) --sources-dir Sources directory for distribution tarballs (default "/tmp/distrobuilder") --with-post-files Run post-files actions Global Flags: --cache-dir Cache directory --cleanup Clean up cache directory (default true) --debug Enable debug output --disable-overlay Disable the use of filesystem overlays -o, --options Override options (list of key=value) -t, --timeout Timeout in seconds --version Print version number ``` To build a plain rootfs, run `distrobuilder build-dir`. The command takes an image definition file and an output directory as positional arguments. Running `build-dir` is useful if one wants to build both LXC and Incus images. In that case one can simply run ```shell distrobuilder build-dir def.yaml /path/to/rootfs distrobuilder pack-lxc def.yaml /path/to/rootfs /path/to/output distrobuilder pack-incus def.yaml /path/to/rootfs /path/to/output ``` (howto-build-lxc)= ## LXC image ```shell $ distrobuilder build-lxc --help Build LXC image from scratch The compression can be set with the --compression flag. I can take one of the following values: - bzip2 - gzip - lzip - lzma - lzo - lzop - xz (default) - zstd For supported compression methods, a compression level can be specified with method-N, where N is an integer, e.g. gzip-9. 
Usage: distrobuilder build-lxc [target dir] [--compression=COMPRESSION] [flags] Flags: --compression Type of compression to use (default "xz") -h, --help help for build-lxc --keep-sources Keep sources after build (default true) --sources-dir Sources directory for distribution tarballs (default "/tmp/distrobuilder") Global Flags: --cache-dir Cache directory --cleanup Clean up cache directory (default true) --debug Enable debug output --disable-overlay Disable the use of filesystem overlays -o, --options Override options (list of key=value) -t, --timeout Timeout in seconds --version Print version number ``` Running the `build-lxc` sub-command creates a LXC image. It outputs two files `rootfs.tar.xz` and `meta.tar.xz`. After building the image, the rootfs will be destroyed. The `pack-lxc` sub-command can be used to create an image from an existing rootfs. The rootfs won't be deleted afterwards. (howto-build-incus)= ## Incus image ```shell $ distrobuilder build-incus --help Build Incus image from scratch Depending on the type, it either outputs a unified (single tarball) or split image (tarball + squashfs or qcow2 image). The --type flag can take one of the following values: - split (default) - unified The compression can be set with the --compression flag. I can take one of the following values: - bzip2 - gzip - lzip - lzma - lzo - lzop - xz (default) - zstd For supported compression methods, a compression level can be specified with method-N, where N is an integer, e.g. gzip-9. Usage: distrobuilder build-incus [target dir] [--type=TYPE] [--compression=COMPRESSION] [--import-into-incus] [flags] Flags: --compression Type of compression to use (default "xz") -h, --help help for build-incus --import-into-incus[="-"] Import built image into Incus --keep-sources Keep sources after build (default true) --sources-dir Sources directory for distribution tarballs (default "/tmp/distrobuilder") --type Type of tarball to create (default "split") --vm Create a qcow2 image for VMs Global Flags: --cache-dir Cache directory --cleanup Clean up cache directory (default true) --debug Enable debug output --disable-overlay Disable the use of filesystem overlays -o, --options Override options (list of key=value) -t, --timeout Timeout in seconds --version Print version number ``` Running the `build-incus` sub-command creates an Incus image. If `--type=split`, it outputs two files. The metadata tarball will always be named `incus.tar.xz`. When creating a container image, the second file will be `rootfs.squashfs`. When creating a VM image, the second file will be `disk.qcow2`. If `--type=unified`, a unified tarball named `.tar.xz` is created. See the [image section](../reference/image.md) for more on the image name. If `--compression` is set, the tarballs will use the provided compression instead of `xz`. Setting `--vm` will create a `qcow2` image which is used for virtual machines. If `--import-into-incus` is set, the resulting image is imported into Incus. It basically runs `lxc image import `. Per default, it doesn't create an alias. This can be changed by calling it as `--import-into-incus=`. After building the image, the rootfs will be destroyed. The `pack-incus` sub-command can be used to create an image from an existing rootfs. The rootfs won't be deleted afterwards. distrobuilder-3.0/doc/howto/index.md000066400000000000000000000002401456216713500175730ustar00rootroot00000000000000# How-to guides These how-to guides cover key operations and processes in `distrobuilder`. 
```{toctree} :titlesonly: install.md build.md troubleshoot.md ``` distrobuilder-3.0/doc/howto/install.md000066400000000000000000000003401456216713500201330ustar00rootroot00000000000000# How to install `distrobuilder` % Include content from [../../README.md](../../README.md) ```{include} ../../README.md :start-after: :end-before: ``` distrobuilder-3.0/doc/howto/troubleshoot.md000066400000000000000000000021001456216713500212120ustar00rootroot00000000000000# Troubleshoot `distrobuilder` This section covers some of the most commonly encountered problems and gives instructions for resolving them. ## Cannot install into target > Error `Cannot install into target '/var/cache/distrobuilder.123456789/rootfs' mounted with noexec or nodev` You have installed `distrobuilder` into an Incus container and you are trying to run it. `distrobuilder` does not run in an Incus container. Run `distrobuilder` on the host, or in a VM. ## Classic confinement > Error `error: This revision of snap "distrobuilder" was published using classic confinement` You are trying to install the `distrobuilder` snap package. The `distrobuilder` snap package has been configured to use the `classic` confinement. Therefore, when you install it, you have to add the flag `--classic` as shown above in the instructions. ## Must be root > Error `You must be root to run this tool` You must be _root_ in order to run the `distrobuilder` tool. The tool runs commands such as `mknod` that require administrative privileges. Use `sudo` when running `distrobuilder`. distrobuilder-3.0/doc/index.md000066400000000000000000000043251456216713500164430ustar00rootroot00000000000000# `distrobuilder` `distrobuilder` is an image building tool for LXC and Incus. Its modern design uses pre-built official images whenever available and supports a variety of modifications on the base image. `distrobuilder` creates LXC or Incus images, or just a plain root file system, from a declarative image definition (in YAML format) that defines the source of the image, its package manager, what packages to install or remove for specific image variants, OS releases and architectures, as well as additional files to generate and arbitrary actions to execute as part of the image build process. `distrobuilder` can be used to create custom images that can be used as the base for LXC containers or Incus instances. `distrobuilder` is used to build the images on the [Linux containers image server](https://images.linuxcontainers.org/). You can also use it to build images from ISO files that require licenses and therefore cannot be distributed. --- ## In this documentation ````{grid} 1 1 2 2 ```{grid-item} [](tutorials/index) **Start here**: a hands-on introduction to `distrobuilder` for new users ``` ```{grid-item} [](howto/index) **Step-by-step guides** covering key operations and common tasks ``` ```` ````{grid} 1 1 2 2 :reverse: ```{grid-item} [](reference/index) **Technical information** - specifications, APIs, architecture ``` ```{grid-item} Explanation (coming) **Discussion and clarification** of key topics ``` ```` --- ## Project and community `distrobuilder` is free software and developed under the [Apache 2 license](https://www.apache.org/licenses/LICENSE-2.0). It's an open source project that warmly welcomes community projects, contributions, suggestions, fixes and constructive feedback. 
- [Contribute to the project](https://github.com/lxc/distrobuilder/blob/master/CONTRIBUTING.md) - [Discuss on IRC](https://web.libera.chat/#lxc) (see [Getting started with IRC](https://discuss.linuxcontainers.org/t/getting-started-with-irc/11920) if needed) - [Ask and answer questions on the forum](https://discuss.linuxcontainers.org) - [Join the mailing lists](https://lists.linuxcontainers.org) ```{toctree} :hidden: :titlesonly: self tutorials/index howto/index reference/index ``` distrobuilder-3.0/doc/reference/000077500000000000000000000000001456216713500167445ustar00rootroot00000000000000distrobuilder-3.0/doc/reference/actions.md000066400000000000000000000023141456216713500207260ustar00rootroot00000000000000# Actions ```yaml actions: - trigger: # required action: |- #!/bin/bash echo "Run me" architectures: # filter releases: # filter variants: # filter ``` Actions are scripts that are to be run after certain steps during the building process. Each action has two fields, `trigger` and `action`, as well as some filters. The `trigger` field describes the step after which the `action` is to be run. Valid triggers are: * `post-unpack` * `post-update` * `post-packages` * `post-files` The above list also shows the order in which the actions are processed. After the root file system has been unpacked, all `post-unpack` actions are run. After the package manager has updated all packages, (given that `packages.update` is `true`), all `post-update` actions are run. After the package manager has installed the requested packages, all `post-packages` actions are run. For more on `packages`, see [packages](packages.md). And last, after the `files` section has been processed, all `post-files` actions are run. This action runs only for `build-lxc`, `build-incus`, `pack-lxc`, and `pack-incus`. For more on `files`, see [generators](generators.md). distrobuilder-3.0/doc/reference/command_line_options.md000066400000000000000000000003101456216713500234600ustar00rootroot00000000000000# Command line options % Include content from [../../README.md](../../README.md) ```{include} ../../README.md :start-after: :end-before: ``` distrobuilder-3.0/doc/reference/filters.md000066400000000000000000000010711456216713500207350ustar00rootroot00000000000000# Filters Filters can be used to restrict certain sections from being run or being applied. There are three filters, `releases`, `architectures`, and `variants`, and each filter takes a list. Here's an example: ```yaml releases: - v1 - v2 architectures: - x86_64 variants: - cloud ``` In the above case, the section will only be applied or run if the release is v1 or v2, the architecture is x86_64 _and_ the variant is cloud. Filters can be applied to each item individually in the lists of following sections: - files - sets (packages) - actions - repositories distrobuilder-3.0/doc/reference/generators.md000066400000000000000000000114541456216713500214440ustar00rootroot00000000000000# Generators Generators are used to create, modify or remove files inside the rootfs. Available generators are * [`cloud-init`](#cloud-init) * [`dump`](#dump) * [`copy`](#copy) * [`hostname`](#hostname) * [`hosts`](#hosts) * [`remove`](#remove) * [`template`](#template) * [`incus-agent`](#incus-agent) * [`fstab`](#fstab) In the image definition YAML, they are listed under `files`. 
```yaml files: - generator: # which generator to use (required) name: path: content: template: properties: when: templated: mode: gid: uid: pongo: source: architectures: # filter releases: # filter variants: # filter ``` Filters can be applied to each entry in `files`. Valid filters are `architecture`, `release` and `variant`. See filters for more information. If `pongo` is `true`, the values of `path`, `content`, and `source` are rendered using Pongo2. ## `cloud-init` For LXC images, the generator disables cloud-init by disabling any cloud-init services, and creates the file `cloud-init.disable` which is checked by `cloud-init` on startup. For Incus images, the generator creates templates depending on the provided name. Valid names are `user-data`, `meta-data`, `vendor-data` and `network-config`. The default `path` if not defined otherwise is `/var/lib/cloud/seed/nocloud-net/`. Setting `path`, `content` or `template.properties` will override the default values. ## `dump` The `dump` generator writes the provided `content` to a file set in `path`. If provided, it will set the `mode` (octal format), `gid` (integer) and/or `uid` (integer). ## `copy` The `copy` generator copies the file(s) from `source` to the destination `path`. `path` can be left empty and in that case the data will be placed in the same `source` path but inside the container. If provided, the destination `path` will set the `mode` (octal format), `gid` (integer) and/or `uid` (integer). Copying will be done according to the following rules: * If `source` is a directory, the entire contents of the directory are copied. Only symlinks and regular files are supported. * Note 1: The directory itself is not copied, just its contents. * Note 2: For files copied, only regular Unix permissions are kept. * If `source` is a symlink or a regular file, it is copied individually along with its metadata. In this case, if `path` ends with a trailing slash `/`, it will be considered a directory and the contents of `source` will be written at `path`/base(`source`). * If `path` does not end with a trailing slash, it will be considered a regular file and the contents of `source` will be written at `path`. * If `path` does not exist, it is created along with all missing directories in its path. * Multiple `source` resources can be specified using Golang `filepath.Match` regexps. For simplicity they are only allowed in the base name and not in the directory hierarchy. If more than one match is found, `path` will be automatically interpreted as a directory. ## `hostname` For LXC images, the host name generator writes the LXC specific string `LXC_NAME` to the `hostname` file set in `path`. If the path doesn't exist, the generator does nothing. For Incus images, the generator creates a template for `path`. If the path doesn't exist, the generator does nothing. ## `hosts` For LXC images, the generator adds the entry `127.0.0.1 LXC_NAME` to the hosts file set in `path`. For Incus images, the generator creates a template for the hosts file set in `path`, adding an entry for `127.0.0.1 {{ container.name }}`. ## `remove` The generator removes the file set in `path` from the container's root file system. ## `template` This generator creates a custom Incus template. The `name` field is used as the template's file name. The `path` defines the target file in the container's root file system. The `properties` key is a map of the template properties. 
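For illustration, a complete `template` entry could look like the following sketch, modeled on the `scheme.yaml` example shipped with this documentation; the file name, path, content and property values are placeholders, and the `when` field is described just below:

```yaml
files:
  - generator: template
    name: foo                 # placeholder template file name
    path: /path/to/target     # placeholder target file inside the rootfs
    content: |-
      Here goes the content
    template:
      properties:
        key: value
      when:
        - create
        - copy
```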
The `when` key can be one or more of: * create (run at the time a new container is created from the image) * copy (run when a container is created from an existing one) * start (run every time the container is started) See {ref}`incus:image-format` in the Incus documentation for more information. ## `incus-agent` This generator creates the `systemd` unit files which are needed to start the `incus-agent` in Incus VMs. ## `fstab` This generator creates an `/etc/fstab` file which is used for VMs. Its content is: ``` LABEL=rootfs / 0 0 LABEL=UEFI /boot/efi vfat defaults 0 0 ``` The file system is taken from the Incus target (see [targets](targets.md)) which defaults to `ext4`. The options are generated depending on the file system. You cannot override them. distrobuilder-3.0/doc/reference/image.md000066400000000000000000000021401456216713500203450ustar00rootroot00000000000000# Image The image section describes the image output. ```yaml image: distribution: # required architecture: description: expiry: name: release: serial: variant: ``` The fields `distribution`, `architecture`, `description` and `release` are self-explanatory. If `architecture` is not set, it defaults to the host's architecture. The `expiry` field describes the image expiry. The format is `\d+(s|m|h|d|w)` (seconds, minutes, hours, days, weeks), and defaults to 30 days (`30d`). It's also possible to define multiple such parts, e.g. `1h 30m 10s`. The `name` field is used in the Incus metadata as well as the output name for Incus unified tarballs. It defaults to `{{ image.distribution }}-{{ image.release }}-{{ image.architecture_mapped }}-{{ image.variant }}-{{ image.serial }}`. The `serial` field is the image's serial number. It can be anything and defaults to `YYYYmmdd_HHMM` (date format). The `variant` field can be anything and is used in the Incus metadata as well as for [filtering](filters.md). distrobuilder-3.0/doc/reference/index.md000066400000000000000000000003371456216713500204000ustar00rootroot00000000000000# Reference The reference material in this section provides technical descriptions of `distrobuilder`. ```{toctree} :titlesonly: actions command_line_options filters generators image mappings packages source targets ``` distrobuilder-3.0/doc/reference/mappings.md000066400000000000000000000017741456216713500211150ustar00rootroot00000000000000# Mappings `mappings` describes an architecture mapping between the architectures from those used in Incus and those used by the distribution. These mappings are useful if you for example want to build a `x86_64` image but the source tarball contains `amd64` as its architecture. ```yaml mappings: architectures: architecture_map: ``` It's possible to specify a custom map using the `architectures` field. Here's an example of a custom mapping: ```yaml mappings: architectures: i686: i386 x86_64: amd64 armv7l: armhf aarch64: arm64 ppc: powerpc ppc64: powerpc64 ppc64le: ppc64el ``` The mapped architecture can be accessed via `Image.ArchitectureMapped` in the code or `image.architecture_mapped` in the definition file. There are some preset mappings which can be used in the `architecture_map` field. Those are: * `alpinelinux` * `altlinux` * `archlinux` * `centos` * `debian` * `funtoo` * `gentoo` * `plamolinux` * `voidlinux` distrobuilder-3.0/doc/reference/packages.md000066400000000000000000000056551456216713500210570ustar00rootroot00000000000000# Package management Installing and removing packages can be done using the `packages` section. 
```yaml packages: manager: # required update: cleanup: sets: - packages: - - ... action: # required architectures: # filter releases: # filter variants: # filter flags: # install/remove flags for just this set - ... repositories: - name: url: type: key: architectures: # filter releases: # filter variants: # filter - ... ``` The `manager` keys specifies the package manager which is to be used. Valid package manager are: * `apk` * `apt` * `dnf` * `egoportage` (combination of `portage` and `ego`) * `equo` * `luet` * `opkg` * `pacman` * `portage` * `slackpkg` * `xbps` * `yum` * `zypper` It's also possible to specify a custom package manager. This is useful if the desired package manager is not supported by distrobuilder. ```yaml packages: custom_manager: # required clean: # required cmd: flags: install: # required cmd: flags: remove: # required cmd: flags: refresh: # required cmd: flags: update: # required cmd: flags: flags: # global flags for all commands ... ``` If `update` is true, the package manager will update all installed packages. If `cleanup` is true, the package manager will run a cleanup operation which usually cleans up cached files. This depends on the package manager though and is not supported by all. A set contains a list of `packages`, an `action`, and optional filters. Here, `packages` is a list of packages which are to be installed or removed. The value of `action` must be either `install` or `remove`. If `flags` is specified for a package set, they are appended to the command specific flags, along with any global flags, when calling the `install` or `remove` command. For example, you can define a package set that should be installed with `--no-install-recommends`. `repositories` contains a list of additional repositories which are to be added. The `type` field is only needed if the package manager supports more than one repository manager. The `key` field is a GPG armored key ring which might be needed for verification. Depending on the package manager, the `url` field can take the content of a repository file. The following is possible with `yum`: ```yaml packages: manager: yum update: false repositories: - name: myrepo url: |- [myrepo] baseurl=http://user:password@1.1.1.1 gpgcheck=0 ``` distrobuilder-3.0/doc/reference/source.md000066400000000000000000000051721456216713500205730ustar00rootroot00000000000000# Source In order to create an image, a source must be defined. The source section is defined as follows: ```yaml source: downloader: # required url: keys: keyserver: variant: suite: same_as: skip_verification: components: ``` The `downloader` field defines a downloader which pulls a rootfs image which will be used as a starting point. It needs to be one of * `alpinelinux-http` * `alt-http` * `apertis-http` * `archlinux-http` * `centos-http` * `debootstrap` * `docker-http` * `fedora-http` * `funtoo-http` * `gentoo-http` * `nixos-http` * `openeuler-http` * `opensuse-http` * `openwrt-http` * `oraclelinux-http` * `sabayon-http` * `rootfs-http` * `ubuntu-http` * `voidlinux-http` The `url` field defines the URL or mirror of the rootfs image. Although this field is not required, most downloaders will need it. The `rootfs-http` downloader also supports local image files when prefixed with `file://`, e.g. `url: file:///home/user/image.tar.gz` or `url: file:///home/user/image.squashfs`. The `keys` field is a list of GPG keys. These keys can be listed as fingerprints or armored keys. The latter has the advantage of not having to rely on a key server to download the key from. 
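For illustration, a minimal source section that lists a key by fingerprint might look like the sketch below; it reuses the downloader, URL, keyserver and key from the Ubuntu example elsewhere in this documentation, so substitute the values for your distribution:

```yaml
source:
  downloader: debootstrap
  url: http://archive.ubuntu.com/ubuntu
  keyserver: keyserver.ubuntu.com
  keys:
    # Fingerprint form; an armored key block can be used instead
    - 0x790BC7277767219C42C86F933B4FE6ACC0B21F32
```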
The keys are used to verify the downloaded rootfs tarball if downloaded from a insecure source (HTTP). The `keyserver` defaults to `hkps.pool.sks-keyservers.net` if none is provided. The `variant` field is only used in a few distributions and defaults to `default`. Here's a list downloaders and their possible variants: * `centos-http`: `minimal`, `netinstall`, `LiveDVD` * `debootstrap`: `default`, `minbase`, `buildd`, `fakechroot` * `ubuntu-http`: `default`, `core` * `voidlinux-http`: `default`, `musl` All other downloaders ignore this field. The `suite` field is only used by the `debootstrap` downloader. If set, `debootstrap` will use `suite` instead of `image.release` as its first positional argument. If the `same_as` field is set, distrobuilder creates a temporary symlink in `/usr/share/debootstrap/scripts` which points to the `same_as` file inside that directory. This can be used if you want to run `debootstrap foo` but `foo` is missing due to `debootstrap` not being up-to-date. If `skip_verification` is true, the source tarball is not verified. If the `components` field is set, `debootstrap` will use packages from the listed components. If a package set has the `early` flag enabled, that list of packages will be installed while the source is being downloaded. (Note that `early` packages are only supported by the `debootstrap` downloader.) distrobuilder-3.0/doc/reference/targets.md000066400000000000000000000022431456216713500207400ustar00rootroot00000000000000# Targets The target section is for target dependent files. ```yaml targets: lxc: create_message: config: - type: before: after: content: - ... incus: vm: size: filesystem: ``` ## LXC The `create_message` field is a string which is displayed after new LXC container has been created. This string is rendered using Pongo2 and can include various fields from the definition file, e.g. `{{ image.description }}`. `config` is a list of container configuration options. The `type` must be `all`, `system` or `user`. The keys `before` and `after` are used for compatibility. Currently, the maximum value for compatibility is 5. If your desired compatibility level is 3 for example, you would use `before: 4` and `after: 2`. `content` describes the configuration which is to be written to the configuration file. ## Incus Valid keys are `size` and `filesystem`. The former specifies the VM image size in bytes. The latter specifies the root partition file system. It currently supports `ext4` and `btrfs`. distrobuilder-3.0/doc/substitutions.yaml000066400000000000000000000001171456216713500206300ustar00rootroot00000000000000# Key/value substitutions to use within the Sphinx doc. {example_key: "Value"} distrobuilder-3.0/doc/tutorials/000077500000000000000000000000001456216713500170345ustar00rootroot00000000000000distrobuilder-3.0/doc/tutorials/index.md000066400000000000000000000001641456216713500204660ustar00rootroot00000000000000# Tutorials These tutorials guide you through the usage of `distrobuilder`. ```{toctree} :titlesonly: use.md ``` distrobuilder-3.0/doc/tutorials/use.md000066400000000000000000000201111456216713500201450ustar00rootroot00000000000000--- discourse: 7519 --- # Use `distrobuilder` to create images This guide shows you how to create an image for Incus or LXC. Before you start, you must install `distrobuilder`. See {doc}`../howto/install` for instructions. ## Create an image To create an image, first create a directory where you will be placing the images, and enter that directory. 
``` mkdir -p $HOME/Images/ubuntu/ cd $HOME/Images/ubuntu/ ``` Then, copy one of the example YAML configuration files for images into this directory. ```{note} The YAML configuration file contains an image template that gives instructions to distrobuilder. Distrobuilder provides examples of YAML files for various distributions in the [examples directory](https://github.com/lxc/distrobuilder/tree/master/doc/examples). [`scheme.yaml`](https://github.com/lxc/distrobuilder/blob/master/doc/examples/scheme.yaml) is a standard template that includes all available options. Official Incus templates for various distributions are available in the [`lxc-ci` repository](https://github.com/lxc/lxc-ci/tree/master/images). ``` In this example, we are creating an Ubuntu image. ``` cp $HOME/go/src/github.com/lxc/distrobuilder/doc/examples/ubuntu.yaml ubuntu.yaml ``` ### Edit the template file Optionally, you can edit the YAML configuration file. You can define the following keys: | Section | Description | Documentation | |------------|------------------------------------------------------------------------------------------|--------------------------------| | `image` | Defines distribution, architecture, release etc. | {doc}`../reference/image` | | `source` | Defines main package source, keys etc. | {doc}`../reference/source` | | `targets` | Defines configuration for specific targets (e.g. Incus, instances etc.) | {doc}`../reference/targets` | | `files` | Defines generators to modify files | {doc}`../reference/generators` | | `packages` | Defines packages for install or removal; adds repositories | {doc}`../reference/packages` | | `actions` | Defines scripts to be run after specific steps during image building | {doc}`../reference/actions` | | `mappings` | Maps different terms for architectures for specific distributions (e.g. `x86_64: amd64`) | {doc}`../reference/mappings` | ```{tip} When building a VM image, you should either build an image with cloud-init support (provides automatic size growth) or set a higher size in the template, because the standard size is relatively small (~4 GB). Alternatively, you can also grow it manually. ``` ## Build and launch the image The steps for building and launching the image depend on whether you want to use it with Incus or with LXC. ### Create an image for Incus To build an image for Incus, run `distrobuilder`. We are using the `build-incus` option to create an image for Incus. - To create a container image: ``` sudo $HOME/go/bin/distrobuilder build-incus ubuntu.yaml ``` - To create a VM image: ``` sudo $HOME/go/bin/distrobuilder build-incus ubuntu.yaml --vm ``` See {ref}`howto-build-incus` for more information about the `build-incus` command. If the command is successful, you will get an output similar to the following (for a container image). The `incus.tar.xz` file is the description of the container image. The `rootfs.squashfs` file is the root file system (rootfs) of the container image. The set of these two files is the _container image_. ```bash $ ls -l total 100960 -rw-r--r-- 1 root root 676 Oct 3 16:15 incus.tar.xz -rw-r--r-- 1 root root 103370752 Oct 3 16:15 rootfs.squashfs -rw-r--r-- 1 ubuntu ubuntu 7449 Oct 3 16:03 ubuntu.yaml $ ``` #### Add the image to Incus To add the image to an Incus installation, use the `incus image import` command as follows.
```bash $ incus image import incus.tar.xz rootfs.squashfs --alias mycontainerimage Image imported with fingerprint: 009349195858651a0f883de804e64eb82e0ac8c0bc51880 ``` See {ref}`incus:images-copy` for detailed information. Let's look at the image in Incus. The `ubuntu.yaml` had a setting to create an Ubuntu 20.04 (`focal`) image. The size is 98.58MB. ```bash $ incus image list mycontainerimage +------------------+--------------+--------+--------------+--------+---------+-----------------------------+ | ALIAS | FINGERPRINT | PUBLIC | DESCRIPTION | ARCH | SIZE | UPLOAD DATE | +------------------+--------------+--------+--------------+--------+---------+-----------------------------+ | mycontainerimage | 009349195858 | no | Ubuntu focal | x86_64 | 98.58MB | Oct 3, 2020 at 5:10pm (UTC) | +------------------+--------------+--------+--------------+--------+---------+-----------------------------+ ``` #### Launch an Incus container from the container image To launch a container from the freshly created image, use `incus launch` as follows. Note that you do not specify a repository for the image (like `ubuntu:` or `images:`) because the image is located locally. ```bash $ incus launch mycontainerimage c1 Creating c1 Starting c1 ``` ### Create an image for LXC Using LXC containers instead of Incus may require the installation of `lxc-utils`. Having both LXC and Incus installed on the same system will probably cause confusion. Use of raw LXC is generally discouraged due to the lack of automatic AppArmor protection. To create an image for LXC, use the following command: ```bash $ sudo $HOME/go/bin/distrobuilder build-lxc ubuntu.yaml $ ls -l total 87340 -rw-r--r-- 1 root root 740 Jan 19 03:15 meta.tar.xz -rw-r--r-- 1 root root 89421136 Jan 19 03:15 rootfs.tar.xz -rw-r--r-- 1 root root 4798 Jan 19 02:42 ubuntu.yaml ``` See {ref}`howto-build-lxc` for more information about the `build-lxc` command. #### Add the container image to LXC To add the container image to a LXC installation, use the `lxc-create` command as follows. ```bash lxc-create -n myContainerImage -t local -- --metadata meta.tar.xz --fstree rootfs.tar.xz ``` #### Launch a LXC container from the container image Then start the container with ```bash lxc-start -n myContainerImage ``` ## Repack Windows ISO ```{youtube} https://www.youtube.com/watch?v=3PDMGwbbk48 ``` With Incus it's possible to run Windows VMs. All you need is a Windows ISO and a bunch of drivers. To make the installation a bit easier, `distrobuilder` added the `repack-windows` command. It takes a Windows ISO, and repacks it together with the necessary drivers. Currently, `distrobuilder` supports Windows 10, Windows Server 2012, Windows Server 2016, Windows Server 2019 and Windows Server 2022. The Windows version will automatically be detected, but in case this fails you can use the `--windows-version` flag to set it manually. It supports the values `w10`, `2k12`, `2k16`, `2k19` and `2k22` for Windows 10, Windows Server 2012, Windows Server 2016, Windows Server 2019 and Windows Server 2022 respectively. Here's how to repack a Windows ISO: ```bash distrobuilder repack-windows path/to/Windows.iso path/to/Windows-repacked.iso ``` More information on `repack-windows` can be found by running ```bash distrobuilder repack-windows -h ``` ### Install Windows Run the following commands to initialize the VM, to configure (=increase) the allocated disk space and finally attach the full path of your prepared ISO file. 
Note that the installation of Windows 10 takes about 10GB (before updates), therefore a 30GB disk gives you about 20GB of free space. ```bash incus init win10 --empty --vm -c security.secureboot=false incus config device override win10 root size=30GiB incus config device add win10 iso disk source=/path/to/Windows-repacked.iso boot.priority=10 ``` Now, the VM win10 has been configured and it is ready to be started. The following command starts the virtual machine and opens up a VGA console so that we go through the graphical installation of Windows. ```bash incus start win10 --console=vga ``` distrobuilder-3.0/generators/000077500000000000000000000000001456216713500164125ustar00rootroot00000000000000distrobuilder-3.0/generators/cloud-init.go000066400000000000000000000111231456216713500210060ustar00rootroot00000000000000package generators import ( "fmt" "os" "path/filepath" "strings" "github.com/lxc/incus/shared/api" incus "github.com/lxc/incus/shared/util" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/shared" ) type cloudInit struct { common } // RunLXC disables cloud-init. func (g *cloudInit) RunLXC(img *image.LXCImage, target shared.DefinitionTargetLXC) error { // With OpenRC: // Remove all symlinks to /etc/init.d/cloud-{init-local,config,init,final} in /etc/runlevels/* fullPath := filepath.Join(g.sourceDir, "etc", "runlevels") if incus.PathExists(fullPath) { err := filepath.Walk(fullPath, func(path string, info os.FileInfo, err error) error { if info.IsDir() { return nil } if incus.ValueInSlice(info.Name(), []string{"cloud-init-local", "cloud-config", "cloud-init", "cloud-final"}) { err := os.Remove(path) if err != nil { return fmt.Errorf("Failed to remove file %q: %w", path, err) } } return nil }) if err != nil { return fmt.Errorf("Failed to walk file tree %q: %w", fullPath, err) } } // With systemd: path := filepath.Join(g.sourceDir, "/etc/cloud") if !incus.PathExists(path) { err := os.MkdirAll(path, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", path, err) } } // Create file /etc/cloud/cloud-init.disabled path = filepath.Join(g.sourceDir, "/etc/cloud/cloud-init.disabled") f, err := os.Create(path) if err != nil { return fmt.Errorf("Failed to create file %q: %w", path, err) } defer f.Close() return nil } // RunIncus creates cloud-init template files. 
func (g *cloudInit) RunIncus(img *image.IncusImage, target shared.DefinitionTargetIncus) error { templateDir := filepath.Join(g.cacheDir, "templates") err := os.MkdirAll(templateDir, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", templateDir, err) } var content string properties := make(map[string]string) switch g.defFile.Name { case "user-data": content = `{%- if config_get("cloud-init.user-data", properties.default) == properties.default -%} {{ config_get("user.user-data", properties.default) }} {%- else -%} {{- config_get("cloud-init.user-data", properties.default) }} {%- endif %} ` properties["default"] = `#cloud-config {}` case "meta-data": content = `instance-id: {{ container.name }} local-hostname: {{ container.name }} {{ config_get("user.meta-data", "") }} ` case "vendor-data": content = `{%- if config_get("cloud-init.vendor-data", properties.default) == properties.default -%} {{ config_get("user.vendor-data", properties.default) }} {%- else -%} {{- config_get("cloud-init.vendor-data", properties.default) }} {%- endif %} ` properties["default"] = `#cloud-config {}` case "network-config": defaultValue := `version: 1 config: - type: physical name: {% if instance.type == "virtual-machine" %}enp5s0{% else %}eth0{% endif %} subnets: - type: dhcp control: auto` // Use the provided content as the new default value if g.defFile.Content != "" { defaultValue = g.defFile.Content } content = fmt.Sprintf(`{%%- if config_get("cloud-init.network-config", "") == "" -%%} {%%- if config_get("user.network-config", "") == "" -%%} %s {%%- else -%%} {{- config_get("user.network-config", "") -}} {%%- endif -%%} {%%- else -%%} {{- config_get("cloud-init.network-config", "") -}} {%%- endif %%} `, defaultValue) default: return fmt.Errorf("Unknown cloud-init configuration: %s", g.defFile.Name) } template := fmt.Sprintf("cloud-init-%s.tpl", g.defFile.Name) path := filepath.Join(templateDir, template) file, err := os.Create(path) if err != nil { return fmt.Errorf("Failed to create file %q: %w", path, err) } defer file.Close() // Use the provided content as the new default value if g.defFile.Name != "network-config" && g.defFile.Content != "" { properties["default"] = g.defFile.Content } // Append final new line if missing if !strings.HasSuffix(content, "\n") { content += "\n" } _, err = file.WriteString(content) if err != nil { return fmt.Errorf("Failed to write to content to %s template: %w", g.defFile.Name, err) } if len(g.defFile.Template.Properties) > 0 { properties = g.defFile.Template.Properties } targetPath := filepath.Join("/var/lib/cloud/seed/nocloud-net", g.defFile.Name) if g.defFile.Path != "" { targetPath = g.defFile.Path } // Add to Incus templates img.Metadata.Templates[targetPath] = &api.ImageMetadataTemplate{ Template: template, Properties: properties, When: []string{"create", "copy"}, } return nil } // Run does nothing. 
func (g *cloudInit) Run() error { return nil } distrobuilder-3.0/generators/cloud-init_test.go000066400000000000000000000104311456216713500220460ustar00rootroot00000000000000package generators import ( "context" "fmt" "log" "os" "path/filepath" "testing" incus "github.com/lxc/incus/shared/util" "github.com/stretchr/testify/require" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/shared" ) func TestCloudInitGeneratorRunLXC(t *testing.T) { cacheDir := filepath.Join(os.TempDir(), "distrobuilder-test") rootfsDir := filepath.Join(cacheDir, "rootfs") setup(t, cacheDir) defer teardown(cacheDir) generator, err := Load("cloud-init", nil, cacheDir, rootfsDir, shared.DefinitionFile{}, shared.Definition{}) require.IsType(t, &cloudInit{}, generator) require.NoError(t, err) // Prepare rootfs err = os.MkdirAll(filepath.Join(rootfsDir, "etc", "runlevels"), 0755) require.NoError(t, err) err = os.MkdirAll(filepath.Join(rootfsDir, "etc", "cloud"), 0755) require.NoError(t, err) for _, f := range []string{"cloud-init-local", "cloud-config", "cloud-init", "cloud-final"} { fullPath := filepath.Join(rootfsDir, "etc", "runlevels", f) err = os.Symlink("/dev/null", fullPath) require.NoError(t, err) require.FileExists(t, fullPath) } // Disable cloud-init err = generator.RunLXC(nil, shared.DefinitionTargetLXC{}) require.NoError(t, err) // Check whether the generator has altered the rootfs for _, f := range []string{"cloud-init-local", "cloud-config", "cloud-init", "cloud-final"} { fullPath := filepath.Join(rootfsDir, "etc", "runlevels", f) require.Falsef(t, incus.PathExists(fullPath), "File '%s' exists but shouldn't", fullPath) } for i := 0; i <= 6; i++ { dir := filepath.Join(rootfsDir, "etc", "rc.d", fmt.Sprintf("rc%d.d", i)) for _, f := range []string{"cloud-init-local", "cloud-config", "cloud-init", "cloud-final"} { fullPath := filepath.Join(dir, fmt.Sprintf("S99%s", f)) require.Falsef(t, incus.PathExists(fullPath), "File '%s' exists but shouldn't", fullPath) } } require.FileExists(t, filepath.Join(rootfsDir, "etc", "cloud", "cloud-init.disabled")) } func TestCloudInitGeneratorRunIncus(t *testing.T) { cacheDir := filepath.Join(os.TempDir(), "distrobuilder-test") rootfsDir := filepath.Join(cacheDir, "rootfs") setup(t, cacheDir) defer teardown(cacheDir) definition := shared.Definition{ Image: shared.DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, } image := image.NewIncusImage(context.TODO(), cacheDir, "", cacheDir, definition) tests := []struct { name string expected string shouldFail bool }{ { "user-data", `{%- if config_get("cloud-init.user-data", properties.default) == properties.default -%} {{ config_get("user.user-data", properties.default) }} {%- else -%} {{- config_get("cloud-init.user-data", properties.default) }} {%- endif %} `, false, }, { "meta-data", `instance-id: {{ container.name }} local-hostname: {{ container.name }} {{ config_get("user.meta-data", "") }} `, false, }, { "vendor-data", `{%- if config_get("cloud-init.vendor-data", properties.default) == properties.default -%} {{ config_get("user.vendor-data", properties.default) }} {%- else -%} {{- config_get("cloud-init.vendor-data", properties.default) }} {%- endif %} `, false, }, { "network-config", `{%- if config_get("cloud-init.network-config", "") == "" -%} {%- if config_get("user.network-config", "") == "" -%} version: 1 config: - type: physical name: {% if instance.type == "virtual-machine" %}enp5s0{% else %}eth0{% endif %} subnets: - type: dhcp control: auto {%- else -%} {{- config_get("user.network-config", 
"") -}} {%- endif -%} {%- else -%} {{- config_get("cloud-init.network-config", "") -}} {%- endif %} `, false, }, { "foo", "Unknown cloud-init configuration: foo", true, }, } for i, tt := range tests { log.Printf("Running test #%d: %s", i, tt.name) generator, err := Load("cloud-init", nil, cacheDir, rootfsDir, shared.DefinitionFile{ Generator: "cloud-init", Name: tt.name, }, shared.Definition{}) require.IsType(t, &cloudInit{}, generator) require.NoError(t, err) err = generator.RunIncus(image, shared.DefinitionTargetIncus{}) if !tt.shouldFail { require.NoError(t, err) } else { require.Regexp(t, tt.expected, err) continue } validateTestFile(t, filepath.Join(cacheDir, "templates", fmt.Sprintf("cloud-init-%s.tpl", tt.name)), tt.expected) } } distrobuilder-3.0/generators/common.go000066400000000000000000000015231456216713500202320ustar00rootroot00000000000000package generators import ( "github.com/sirupsen/logrus" "github.com/lxc/distrobuilder/shared" ) type common struct { logger *logrus.Logger cacheDir string sourceDir string defFile shared.DefinitionFile } func (g *common) init(logger *logrus.Logger, cacheDir string, sourceDir string, defFile shared.DefinitionFile, def shared.Definition) { g.logger = logger g.cacheDir = cacheDir g.sourceDir = sourceDir g.defFile = defFile render := func(val string) string { if !defFile.Pongo { return val } out, err := shared.RenderTemplate(val, def) if err != nil { logger.WithField("err", err).Warn("Failed to render template") return val } return out } if defFile.Pongo { g.defFile.Content = render(g.defFile.Content) g.defFile.Path = render(g.defFile.Path) g.defFile.Source = render(g.defFile.Source) } } distrobuilder-3.0/generators/copy.go000066400000000000000000000125021456216713500177130ustar00rootroot00000000000000package generators import ( "fmt" "io" "os" "path/filepath" "strings" "github.com/lxc/incus/shared/util" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/shared" ) type copy struct { common } // RunLXC copies a file to the container. func (g *copy) RunLXC(img *image.LXCImage, target shared.DefinitionTargetLXC) error { return g.Run() } // RunIncus copies a file to the container. func (g *copy) RunIncus(img *image.IncusImage, target shared.DefinitionTargetIncus) error { return g.Run() } // Run copies a file to the container. func (g *copy) Run() error { // First check if the input is a file or a directory. // Then check whether the destination finishes in a "/" or not // Afterwards, the rules for copying can be applied. 
See doc/generators.md // Set the name of the destination file to the input file // relative to the root if destination file is missing var destPath, srcPath string var files []string srcPath = g.defFile.Source destPath = filepath.Join(g.sourceDir, g.defFile.Source) if g.defFile.Path != "" { destPath = filepath.Join(g.sourceDir, g.defFile.Path) } dirFiles, err := os.ReadDir(filepath.Dir(srcPath)) if err != nil { return fmt.Errorf("Failed to read directory %q: %w", filepath.Dir(srcPath), err) } for _, f := range dirFiles { match, err := filepath.Match(srcPath, filepath.Join(filepath.Dir(srcPath), f.Name())) if err != nil { return fmt.Errorf("Failed to match pattern: %w", err) } if match { files = append(files, filepath.Join(filepath.Dir(srcPath), f.Name())) } } switch len(files) { case 0: // Look for the literal file _, err = os.Stat(srcPath) if err != nil { return fmt.Errorf("Failed to stat file %q: %w", srcPath, err) } err = g.doCopy(srcPath, destPath, g.defFile) case 1: err = g.doCopy(srcPath, destPath, g.defFile) default: // Make sure that we are copying to a directory g.defFile.Path = g.defFile.Path + "/" for _, f := range files { err = g.doCopy(f, destPath, g.defFile) if err != nil { break } } } if err != nil { return fmt.Errorf("Failed to copy file(s): %w", err) } return nil } func (g *copy) doCopy(srcPath, destPath string, defFile shared.DefinitionFile) error { in, err := os.Stat(srcPath) if err != nil { return fmt.Errorf("Failed to stat file %q: %w", srcPath, err) } switch in.Mode() & os.ModeType { // Regular file case 0, os.ModeSymlink: if strings.HasSuffix(defFile.Path, "/") { destPath = filepath.Join(destPath, filepath.Base(srcPath)) } err := g.copyFile(srcPath, destPath, defFile) if err != nil { return fmt.Errorf("Failed to copy file %q to %q: %w", srcPath, destPath, err) } case os.ModeDir: err := g.copyDir(srcPath, destPath, defFile) if err != nil { return fmt.Errorf("Failed to copy file %q to %q: %w", srcPath, destPath, err) } default: return fmt.Errorf("File type of %q not supported", srcPath) } return nil } func (g *copy) copyDir(srcPath, destPath string, defFile shared.DefinitionFile) error { err := filepath.Walk(srcPath, func(src string, fi os.FileInfo, err error) error { if err != nil { return err } rel, err := filepath.Rel(srcPath, src) if err != nil { return fmt.Errorf("Failed to get relative path of %q: %w", srcPath, err) } dest := filepath.Join(destPath, rel) if err != nil { return fmt.Errorf("Failed to join path elements: %w", err) } switch fi.Mode() & os.ModeType { case 0, os.ModeSymlink: err = g.copyFile(src, dest, defFile) if err != nil { return fmt.Errorf("Failed to copy file %q to %q: %w", src, dest, err) } case os.ModeDir: err := os.MkdirAll(dest, os.ModePerm) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", dest, err) } default: fmt.Printf("File type of %q not supported, skipping", src) } return nil }) if err != nil { return fmt.Errorf("Failed to walk file tree of %q: %w", srcPath, err) } return nil } func (g *copy) copyFile(src, dest string, defFile shared.DefinitionFile) error { // Let's make sure that we can create the file dir := filepath.Dir(dest) _, err := os.Stat(dir) if os.IsNotExist(err) { err = os.MkdirAll(dir, os.ModePerm) } if err != nil { return fmt.Errorf("Failed to create directory %q: %w", dir, err) } fi, err := os.Lstat(src) if err != nil { return fmt.Errorf("Failed to access source path %q: %w", src, err) } if fi.Mode()&os.ModeSymlink != 0 { // Handle symlinks. 
target, err := os.Readlink(src) if err != nil { return err } if util.PathExists(dest) { err = os.Remove(dest) if err != nil { return err } } err = os.Symlink(target, dest) if err != nil { return err } return nil } in, err := os.Open(src) if err != nil { return fmt.Errorf("Failed to open file %q: %w", src, err) } defer in.Close() out, err := os.Create(dest) if err != nil { if os.IsExist(err) { out, err = os.OpenFile(dest, os.O_WRONLY, 0644) if err != nil { return fmt.Errorf("Failed to open file %q: %w", dest, err) } } else { return fmt.Errorf("Failed to open file %q: %w", dest, err) } } defer out.Close() _, err = io.Copy(out, in) if err != nil { return fmt.Errorf("Failed to copy file %q: %w", dest, err) } err = updateFileAccess(out, defFile) if err != nil { return fmt.Errorf("Failed to update file access of %q: %w", dest, err) } return nil } distrobuilder-3.0/generators/copy_test.go000066400000000000000000000126561456216713500207640ustar00rootroot00000000000000package generators import ( "bytes" "io" "os" "path/filepath" "testing" "github.com/stretchr/testify/require" "github.com/lxc/distrobuilder/shared" ) func TestCopyGeneratorRun(t *testing.T) { cacheDir := filepath.Join(os.TempDir(), "distrobuilder-test") rootfsDir := filepath.Join(cacheDir, "rootfs") setup(t, cacheDir) defer teardown(cacheDir) generator, err := Load("copy", nil, cacheDir, rootfsDir, shared.DefinitionFile{ Source: "copy_test", Path: "copy_test_dir", }, shared.Definition{}) require.IsType(t, ©{}, generator) require.NoError(t, err) defer os.RemoveAll("copy_test") err = os.Mkdir("copy_test", os.ModePerm) require.NoError(t, err) src1, err := os.Create(filepath.Join("copy_test", "src1")) require.NoError(t, err) defer src1.Close() _, err = src1.WriteString("src1\n") require.NoError(t, err) src2, err := os.Create(filepath.Join("copy_test", "src2")) require.NoError(t, err) defer src2.Close() _, err = src2.WriteString("src2\n") require.NoError(t, err) err = os.Symlink("src1", filepath.Join("copy_test", "srcLink")) require.NoError(t, err) // is a directory -> contents copied err = generator.Run() require.NoError(t, err) require.DirExists(t, filepath.Join(rootfsDir, "copy_test_dir")) require.FileExists(t, filepath.Join(rootfsDir, "copy_test_dir", "src1")) require.FileExists(t, filepath.Join(rootfsDir, "copy_test_dir", "src2")) require.FileExists(t, filepath.Join(rootfsDir, "copy_test_dir", "srcLink")) var destBuffer, srcBuffer bytes.Buffer dest, err := os.Open(filepath.Join(rootfsDir, "copy_test_dir", "src1")) require.NoError(t, err) defer dest.Close() _, err = io.Copy(&destBuffer, dest) require.NoError(t, err) _, err = src1.Seek(0, 0) require.NoError(t, err) _, err = io.Copy(&srcBuffer, src1) require.NoError(t, err) require.Equal(t, destBuffer.String(), srcBuffer.String()) dest, err = os.Open(filepath.Join(rootfsDir, "copy_test_dir", "src2")) require.NoError(t, err) defer dest.Close() destBuffer.Reset() _, err = io.Copy(&destBuffer, dest) require.NoError(t, err) _, err = src2.Seek(0, 0) require.NoError(t, err) srcBuffer.Reset() _, err = io.Copy(&srcBuffer, src2) require.NoError(t, err) require.Equal(t, destBuffer.String(), srcBuffer.String()) link, err := os.Readlink(filepath.Join(rootfsDir, "copy_test_dir", "srcLink")) require.NoError(t, err) require.Equal(t, "src1", link) // as wildcard _, err = src1.Seek(0, 0) require.NoError(t, err) _, err = src2.Seek(0, 0) require.NoError(t, err) generator, err = Load("copy", nil, cacheDir, rootfsDir, shared.DefinitionFile{ Source: "copy_test/src*", Path: "copy_test_wildcard", }, 
shared.Definition{}) require.IsType(t, ©{}, generator) require.NoError(t, err) err = generator.Run() require.NoError(t, err) require.DirExists(t, filepath.Join(rootfsDir, "copy_test_wildcard")) require.FileExists(t, filepath.Join(rootfsDir, "copy_test_wildcard", "src1")) require.FileExists(t, filepath.Join(rootfsDir, "copy_test_wildcard", "src2")) dest, err = os.Open(filepath.Join(rootfsDir, "copy_test_wildcard", "src1")) require.NoError(t, err) defer dest.Close() destBuffer.Reset() _, err = io.Copy(&destBuffer, dest) require.NoError(t, err) _, err = src1.Seek(0, 0) require.NoError(t, err) srcBuffer.Reset() _, err = io.Copy(&srcBuffer, src1) require.NoError(t, err) require.Equal(t, destBuffer.String(), srcBuffer.String()) dest, err = os.Open(filepath.Join(rootfsDir, "copy_test_wildcard", "src2")) require.NoError(t, err) defer dest.Close() destBuffer.Reset() _, err = io.Copy(&destBuffer, dest) require.NoError(t, err) _, err = src2.Seek(0, 0) require.NoError(t, err) srcBuffer.Reset() _, err = io.Copy(&srcBuffer, src2) require.NoError(t, err) require.Equal(t, destBuffer.String(), srcBuffer.String()) // is a file -> file copied to _, err = src1.Seek(0, 0) require.NoError(t, err) generator, err = Load("copy", nil, cacheDir, rootfsDir, shared.DefinitionFile{ Source: "copy_test/src1", }, shared.Definition{}) require.IsType(t, ©{}, generator) require.NoError(t, err) err = generator.Run() require.NoError(t, err) require.FileExists(t, filepath.Join(rootfsDir, "copy_test", "src1")) dest, err = os.Open(filepath.Join(rootfsDir, "copy_test", "src1")) require.NoError(t, err) defer dest.Close() destBuffer.Reset() _, err = io.Copy(&destBuffer, dest) require.NoError(t, err) _, err = src1.Seek(0, 0) require.NoError(t, err) srcBuffer.Reset() _, err = io.Copy(&srcBuffer, src1) require.NoError(t, err) require.Equal(t, destBuffer.String(), srcBuffer.String()) // is a file -> file copied to / _, err = src1.Seek(0, 0) require.NoError(t, err) generator, err = Load("copy", nil, cacheDir, rootfsDir, shared.DefinitionFile{ Source: "copy_test/src1", Path: "/hello/world/", }, shared.Definition{}) require.IsType(t, ©{}, generator) require.NoError(t, err) err = generator.Run() require.NoError(t, err) require.DirExists(t, filepath.Join(rootfsDir, "hello", "world")) require.FileExists(t, filepath.Join(rootfsDir, "hello", "world", "src1")) dest, err = os.Open(filepath.Join(rootfsDir, "hello", "world", "src1")) require.NoError(t, err) defer dest.Close() destBuffer.Reset() _, err = io.Copy(&destBuffer, dest) require.NoError(t, err) _, err = src1.Seek(0, 0) require.NoError(t, err) srcBuffer.Reset() _, err = io.Copy(&srcBuffer, src1) require.NoError(t, err) require.Equal(t, destBuffer.String(), srcBuffer.String()) } distrobuilder-3.0/generators/dump.go000066400000000000000000000033061456216713500177100ustar00rootroot00000000000000package generators import ( "fmt" "os" "path/filepath" "strings" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/shared" ) type dump struct { common } // RunLXC dumps content to a file. func (g *dump) RunLXC(img *image.LXCImage, target shared.DefinitionTargetLXC) error { content := g.defFile.Content err := g.run(content) if err != nil { return fmt.Errorf("Failed to dump content: %w", err) } if g.defFile.Templated { err = img.AddTemplate(g.defFile.Path) if err != nil { return fmt.Errorf("Failed to add template: %w", err) } } return nil } // RunIncus dumps content to a file. 
func (g *dump) RunIncus(img *image.IncusImage, target shared.DefinitionTargetIncus) error { content := g.defFile.Content return g.run(content) } // Run dumps content to a file. func (g *dump) Run() error { return g.run(g.defFile.Content) } func (g *dump) run(content string) error { path := filepath.Join(g.sourceDir, g.defFile.Path) // Create any missing directory err := os.MkdirAll(filepath.Dir(path), 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", filepath.Dir(path), err) } // Open the target file (create if needed) file, err := os.Create(path) if err != nil { return fmt.Errorf("Failed to create file %q: %w", path, err) } defer file.Close() // Append final new line if missing if !strings.HasSuffix(content, "\n") { content += "\n" } // Write the content _, err = file.WriteString(content) if err != nil { return fmt.Errorf("Failed to write string to file %q: %w", path, err) } err = updateFileAccess(file, g.defFile) if err != nil { return fmt.Errorf("Failed to update file access of %q: %w", path, err) } return nil } distrobuilder-3.0/generators/dump_test.go000066400000000000000000000072331456216713500207520ustar00rootroot00000000000000package generators import ( "bytes" "io" "os" "path/filepath" "testing" "github.com/stretchr/testify/require" "github.com/lxc/distrobuilder/shared" ) func TestDumpGeneratorRunLXC(t *testing.T) { cacheDir := filepath.Join(os.TempDir(), "distrobuilder-test") rootfsDir := filepath.Join(cacheDir, "rootfs") setup(t, cacheDir) defer teardown(cacheDir) def := shared.Definition{ Targets: shared.DefinitionTarget{ LXC: shared.DefinitionTargetLXC{ CreateMessage: "message", }, }, } generator, err := Load("dump", nil, cacheDir, rootfsDir, shared.DefinitionFile{ Path: "/hello/world", Content: "hello {{ targets.lxc.create_message }}", Pongo: true, }, def) require.IsType(t, &dump{}, generator) require.NoError(t, err) err = generator.RunLXC(nil, shared.DefinitionTargetLXC{ CreateMessage: "message", }) require.NoError(t, err) require.FileExists(t, filepath.Join(rootfsDir, "hello", "world")) var buffer bytes.Buffer file, err := os.Open(filepath.Join(rootfsDir, "hello", "world")) require.NoError(t, err) defer file.Close() _, err = io.Copy(&buffer, file) require.NoError(t, err) require.Equal(t, "hello message\n", buffer.String()) generator, err = Load("dump", nil, cacheDir, rootfsDir, shared.DefinitionFile{ Path: "/hello/world", Content: "hello {{ targets.lxc.create_message }}", }, def) require.IsType(t, &dump{}, generator) require.NoError(t, err) err = generator.RunLXC(nil, shared.DefinitionTargetLXC{ CreateMessage: "message", }) require.NoError(t, err) require.FileExists(t, filepath.Join(rootfsDir, "hello", "world")) file, err = os.Open(filepath.Join(rootfsDir, "hello", "world")) require.NoError(t, err) defer file.Close() buffer.Reset() _, err = io.Copy(&buffer, file) require.NoError(t, err) require.Equal(t, "hello {{ targets.lxc.create_message }}\n", buffer.String()) } func TestDumpGeneratorRunIncus(t *testing.T) { cacheDir := filepath.Join(os.TempDir(), "distrobuilder-test") rootfsDir := filepath.Join(cacheDir, "rootfs") setup(t, cacheDir) defer teardown(cacheDir) def := shared.Definition{ Targets: shared.DefinitionTarget{ Incus: shared.DefinitionTargetIncus{ VM: shared.DefinitionTargetIncusVM{ Filesystem: "ext4", }, }, }, } generator, err := Load("dump", nil, cacheDir, rootfsDir, shared.DefinitionFile{ Path: "/hello/world", Content: "hello {{ targets.incus.vm.filesystem }}", Pongo: true, }, def) require.IsType(t, &dump{}, generator) 
require.NoError(t, err) err = generator.RunIncus(nil, shared.DefinitionTargetIncus{ VM: shared.DefinitionTargetIncusVM{ Filesystem: "ext4", }}) require.NoError(t, err) require.FileExists(t, filepath.Join(rootfsDir, "hello", "world")) var buffer bytes.Buffer file, err := os.Open(filepath.Join(rootfsDir, "hello", "world")) require.NoError(t, err) defer file.Close() _, err = io.Copy(&buffer, file) require.NoError(t, err) require.Equal(t, "hello ext4\n", buffer.String()) file.Close() generator, err = Load("dump", nil, cacheDir, rootfsDir, shared.DefinitionFile{ Path: "/hello/world", Content: "hello {{ targets.incus.vm.filesystem }}", }, def) require.IsType(t, &dump{}, generator) require.NoError(t, err) err = generator.RunIncus(nil, shared.DefinitionTargetIncus{ VM: shared.DefinitionTargetIncusVM{ Filesystem: "ext4", }}) require.NoError(t, err) require.FileExists(t, filepath.Join(rootfsDir, "hello", "world")) file, err = os.Open(filepath.Join(rootfsDir, "hello", "world")) require.NoError(t, err) defer file.Close() buffer.Reset() _, err = io.Copy(&buffer, file) require.NoError(t, err) require.Equal(t, "hello {{ targets.incus.vm.filesystem }}\n", buffer.String()) } distrobuilder-3.0/generators/fstab.go000066400000000000000000000023111456216713500200350ustar00rootroot00000000000000package generators import ( "errors" "fmt" "os" "path/filepath" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/shared" ) type fstab struct { common } // RunLXC doesn't support the fstab generator. func (g *fstab) RunLXC(img *image.LXCImage, target shared.DefinitionTargetLXC) error { return errors.New("fstab generator not supported for LXC") } // RunIncus writes to /etc/fstab. func (g *fstab) RunIncus(img *image.IncusImage, target shared.DefinitionTargetIncus) error { f, err := os.Create(filepath.Join(g.sourceDir, "etc/fstab")) if err != nil { return fmt.Errorf("Failed to create file %q: %w", filepath.Join(g.sourceDir, "etc/fstab"), err) } defer f.Close() content := `LABEL=rootfs / %s %s 0 0 LABEL=UEFI /boot/efi vfat defaults 0 0 ` fs := target.VM.Filesystem if fs == "" { fs = "ext4" } options := "defaults" if fs == "btrfs" { options = fmt.Sprintf("%s,subvol=@", options) } _, err = f.WriteString(fmt.Sprintf(content, fs, options)) if err != nil { return fmt.Errorf("Failed to write string to file %q: %w", filepath.Join(g.sourceDir, "etc/fstab"), err) } return nil } // Run does nothing. func (g *fstab) Run() error { return nil } distrobuilder-3.0/generators/generators.go000066400000000000000000000032701456216713500211140ustar00rootroot00000000000000package generators import ( "errors" "github.com/sirupsen/logrus" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/shared" ) // ErrNotSupported returns a "Not supported" error. var ErrNotSupported = errors.New("Not supported") // ErrUnknownGenerator represents the unknown generator error. var ErrUnknownGenerator = errors.New("Unknown generator") type generator interface { init(logger *logrus.Logger, cacheDir string, sourceDir string, defFile shared.DefinitionFile, def shared.Definition) Generator } // Generator interface. 
type Generator interface { RunLXC(*image.LXCImage, shared.DefinitionTargetLXC) error RunIncus(*image.IncusImage, shared.DefinitionTargetIncus) error Run() error } var generators = map[string]func() generator{ "cloud-init": func() generator { return &cloudInit{} }, "copy": func() generator { return ©{} }, "dump": func() generator { return &dump{} }, "fstab": func() generator { return &fstab{} }, "hostname": func() generator { return &hostname{} }, "hosts": func() generator { return &hosts{} }, "incus-agent": func() generator { return &incusAgent{} }, "remove": func() generator { return &remove{} }, "template": func() generator { return &template{} }, // Legacy. "lxd-agent": func() generator { return &incusAgent{} }, } // Load loads and initializes a generator. func Load(generatorName string, logger *logrus.Logger, cacheDir string, sourceDir string, defFile shared.DefinitionFile, def shared.Definition) (Generator, error) { df, ok := generators[generatorName] if !ok { return nil, ErrUnknownGenerator } d := df() d.init(logger, cacheDir, sourceDir, defFile, def) return d, nil } distrobuilder-3.0/generators/generators_test.go000066400000000000000000000022301456216713500221460ustar00rootroot00000000000000package generators import ( "bytes" "io" "os" "path/filepath" "testing" "github.com/stretchr/testify/require" "github.com/lxc/distrobuilder/shared" ) func setup(t *testing.T, cacheDir string) { // Create rootfs directory err := os.MkdirAll(filepath.Join(cacheDir, "rootfs"), 0755) require.NoError(t, err) } func teardown(cacheDir string) { os.RemoveAll(cacheDir) } func TestGet(t *testing.T) { generator, err := Load("hostname", nil, "", "", shared.DefinitionFile{}, shared.Definition{}) require.IsType(t, &hostname{}, generator) require.NoError(t, err) generator, err = Load("", nil, "", "", shared.DefinitionFile{}, shared.Definition{}) require.Nil(t, generator) require.Error(t, err) } func createTestFile(t *testing.T, path, content string) { file, err := os.Create(path) require.NoError(t, err) defer file.Close() _, err = file.WriteString(content) require.NoError(t, err) } func validateTestFile(t *testing.T, path, content string) { file, err := os.Open(path) require.NoError(t, err) defer file.Close() var buffer bytes.Buffer _, err = io.Copy(&buffer, file) require.NoError(t, err) require.Equal(t, content, buffer.String()) } distrobuilder-3.0/generators/hostname.go000066400000000000000000000044211456216713500205600ustar00rootroot00000000000000package generators import ( "fmt" "os" "path/filepath" "github.com/lxc/incus/shared/api" incus "github.com/lxc/incus/shared/util" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/shared" ) type hostname struct { common } // RunLXC creates a hostname template. 
func (g *hostname) RunLXC(img *image.LXCImage, target shared.DefinitionTargetLXC) error { // Skip if the file doesn't exist if !incus.PathExists(filepath.Join(g.sourceDir, g.defFile.Path)) { return nil } // Create new hostname file file, err := os.Create(filepath.Join(g.sourceDir, g.defFile.Path)) if err != nil { return fmt.Errorf("Failed to create file %q: %w", filepath.Join(g.sourceDir, g.defFile.Path), err) } defer file.Close() // Write LXC specific string to the hostname file _, err = file.WriteString("LXC_NAME\n") if err != nil { return fmt.Errorf("Failed to write to hostname file: %w", err) } // Add hostname path to LXC's templates file err = img.AddTemplate(g.defFile.Path) if err != nil { return fmt.Errorf("Failed to add template: %w", err) } return nil } // RunIncus creates a hostname template. func (g *hostname) RunIncus(img *image.IncusImage, target shared.DefinitionTargetIncus) error { // Skip if the file doesn't exist if !incus.PathExists(filepath.Join(g.sourceDir, g.defFile.Path)) { return nil } templateDir := filepath.Join(g.cacheDir, "templates") err := os.MkdirAll(templateDir, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", templateDir, err) } file, err := os.Create(filepath.Join(templateDir, "hostname.tpl")) if err != nil { return fmt.Errorf("Failed to create file %q: %w", filepath.Join(templateDir, "hostname.tpl"), err) } defer file.Close() _, err = file.WriteString("{{ container.name }}\n") if err != nil { return fmt.Errorf("Failed to write to hostname file: %w", err) } // Add to Incus templates img.Metadata.Templates[g.defFile.Path] = &api.ImageMetadataTemplate{ Template: "hostname.tpl", Properties: g.defFile.Template.Properties, When: g.defFile.Template.When, } if len(g.defFile.Template.When) == 0 { img.Metadata.Templates[g.defFile.Path].When = []string{ "create", "copy", } } return nil } // Run does nothing. 
func (g *hostname) Run() error { return nil } distrobuilder-3.0/generators/hostname_test.go000066400000000000000000000041641456216713500216230ustar00rootroot00000000000000package generators import ( "context" "os" "path/filepath" "testing" "github.com/stretchr/testify/require" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/shared" ) func TestHostnameGeneratorRunLXC(t *testing.T) { cacheDir := filepath.Join(os.TempDir(), "distrobuilder-test") rootfsDir := filepath.Join(cacheDir, "rootfs") setup(t, cacheDir) defer teardown(cacheDir) generator, err := Load("hostname", nil, cacheDir, rootfsDir, shared.DefinitionFile{Path: "/etc/hostname"}, shared.Definition{}) require.IsType(t, &hostname{}, generator) require.NoError(t, err) definition := shared.Definition{ Image: shared.DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, } image := image.NewLXCImage(context.TODO(), cacheDir, "", cacheDir, definition) err = os.MkdirAll(filepath.Join(cacheDir, "rootfs", "etc"), 0755) require.NoError(t, err) createTestFile(t, filepath.Join(cacheDir, "rootfs", "etc", "hostname"), "hostname") err = generator.RunLXC(image, shared.DefinitionTargetLXC{}) require.NoError(t, err) validateTestFile(t, filepath.Join(cacheDir, "rootfs", "etc", "hostname"), "LXC_NAME\n") } func TestHostnameGeneratorRunIncus(t *testing.T) { cacheDir := filepath.Join(os.TempDir(), "distrobuilder-test") rootfsDir := filepath.Join(cacheDir, "rootfs") setup(t, cacheDir) defer teardown(cacheDir) generator, err := Load("hostname", nil, cacheDir, rootfsDir, shared.DefinitionFile{Path: "/etc/hostname"}, shared.Definition{}) require.IsType(t, &hostname{}, generator) require.NoError(t, err) definition := shared.Definition{ Image: shared.DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, } image := image.NewIncusImage(context.TODO(), cacheDir, "", cacheDir, definition) err = os.MkdirAll(filepath.Join(cacheDir, "rootfs", "etc"), 0755) require.NoError(t, err) createTestFile(t, filepath.Join(cacheDir, "rootfs", "etc", "hostname"), "hostname") err = generator.RunIncus(image, shared.DefinitionTargetIncus{}) require.NoError(t, err) validateTestFile(t, filepath.Join(cacheDir, "templates", "hostname.tpl"), "{{ container.name }}\n") } distrobuilder-3.0/generators/hosts.go000066400000000000000000000061641456216713500201100ustar00rootroot00000000000000package generators import ( "fmt" "os" "path/filepath" "strings" "github.com/lxc/incus/shared/api" incus "github.com/lxc/incus/shared/util" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/shared" ) type hosts struct { common } // RunLXC creates a LXC specific entry in the hosts file. func (g *hosts) RunLXC(img *image.LXCImage, target shared.DefinitionTargetLXC) error { // Skip if the file doesn't exist if !incus.PathExists(filepath.Join(g.sourceDir, g.defFile.Path)) { return nil } // Read the current content content, err := os.ReadFile(filepath.Join(g.sourceDir, g.defFile.Path)) if err != nil { return fmt.Errorf("Failed to read file %q: %w", filepath.Join(g.sourceDir, g.defFile.Path), err) } // Replace hostname with placeholder content = []byte(strings.Replace(string(content), "distrobuilder", "LXC_NAME", -1)) // Add a new line if needed if !strings.Contains(string(content), "LXC_NAME") { content = append([]byte("127.0.1.1\tLXC_NAME\n"), content...) 
} f, err := os.Create(filepath.Join(g.sourceDir, g.defFile.Path)) if err != nil { return fmt.Errorf("Failed to create file %q: %w", filepath.Join(g.sourceDir, g.defFile.Path), err) } defer f.Close() // Overwrite the file _, err = f.Write(content) if err != nil { return fmt.Errorf("Failed to write to file %q: %w", filepath.Join(g.sourceDir, g.defFile.Path), err) } // Add hostname path to LXC's templates file err = img.AddTemplate(g.defFile.Path) if err != nil { return fmt.Errorf("Failed to add template: %w", err) } return nil } // RunIncus creates a hosts template. func (g *hosts) RunIncus(img *image.IncusImage, target shared.DefinitionTargetIncus) error { // Skip if the file doesn't exist if !incus.PathExists(filepath.Join(g.sourceDir, g.defFile.Path)) { return nil } templateDir := filepath.Join(g.cacheDir, "templates") // Create templates path err := os.MkdirAll(templateDir, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", templateDir, err) } // Read the current content content, err := os.ReadFile(filepath.Join(g.sourceDir, g.defFile.Path)) if err != nil { return fmt.Errorf("Failed to read file %q: %w", filepath.Join(g.sourceDir, g.defFile.Path), err) } // Replace hostname with placeholder content = []byte(strings.Replace(string(content), "distrobuilder", "{{ container.name }}", -1)) // Add a new line if needed if !strings.Contains(string(content), "{{ container.name }}") { content = append([]byte("127.0.1.1\t{{ container.name }}\n"), content...) } // Write the template err = os.WriteFile(filepath.Join(templateDir, "hosts.tpl"), content, 0644) if err != nil { return fmt.Errorf("Failed to write file %q: %w", filepath.Join(templateDir, "hosts.tpl"), err) } img.Metadata.Templates[g.defFile.Path] = &api.ImageMetadataTemplate{ Template: "hosts.tpl", Properties: g.defFile.Template.Properties, When: g.defFile.Template.When, } if len(g.defFile.Template.When) == 0 { img.Metadata.Templates[g.defFile.Path].When = []string{ "create", "copy", } } return nil } // Run does nothing. 
func (g *hosts) Run() error { return nil } distrobuilder-3.0/generators/hosts_test.go000066400000000000000000000043521456216713500211440ustar00rootroot00000000000000package generators import ( "context" "os" "path/filepath" "testing" "github.com/stretchr/testify/require" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/shared" ) func TestHostsGeneratorRunLXC(t *testing.T) { cacheDir := filepath.Join(os.TempDir(), "distrobuilder-test") rootfsDir := filepath.Join(cacheDir, "rootfs") setup(t, cacheDir) defer teardown(cacheDir) generator, err := Load("hosts", nil, cacheDir, rootfsDir, shared.DefinitionFile{Path: "/etc/hosts"}, shared.Definition{}) require.IsType(t, &hosts{}, generator) require.NoError(t, err) definition := shared.Definition{ Image: shared.DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, } image := image.NewLXCImage(context.TODO(), cacheDir, "", cacheDir, definition) err = os.MkdirAll(filepath.Join(cacheDir, "rootfs", "etc"), 0755) require.NoError(t, err) createTestFile(t, filepath.Join(cacheDir, "rootfs", "etc", "hosts"), "127.0.0.1\tlocalhost\n127.0.0.1\tdistrobuilder\n") err = generator.RunLXC(image, shared.DefinitionTargetLXC{}) require.NoError(t, err) validateTestFile(t, filepath.Join(cacheDir, "rootfs", "etc", "hosts"), "127.0.0.1\tlocalhost\n127.0.0.1\tLXC_NAME\n") } func TestHostsGeneratorRunIncus(t *testing.T) { cacheDir := filepath.Join(os.TempDir(), "distrobuilder-test") rootfsDir := filepath.Join(cacheDir, "rootfs") setup(t, cacheDir) defer teardown(cacheDir) generator, err := Load("hosts", nil, cacheDir, rootfsDir, shared.DefinitionFile{Path: "/etc/hosts"}, shared.Definition{}) require.IsType(t, &hosts{}, generator) require.NoError(t, err) definition := shared.Definition{ Image: shared.DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, } image := image.NewIncusImage(context.TODO(), cacheDir, "", cacheDir, definition) err = os.MkdirAll(filepath.Join(cacheDir, "rootfs", "etc"), 0755) require.NoError(t, err) createTestFile(t, filepath.Join(cacheDir, "rootfs", "etc", "hosts"), "127.0.0.1\tlocalhost\n127.0.0.1\tdistrobuilder\n") err = generator.RunIncus(image, shared.DefinitionTargetIncus{}) require.NoError(t, err) validateTestFile(t, filepath.Join(cacheDir, "templates", "hosts.tpl"), "127.0.0.1\tlocalhost\n127.0.0.1\t{{ container.name }}\n") } distrobuilder-3.0/generators/incus-agent.go000066400000000000000000000156741456216713500211730ustar00rootroot00000000000000package generators import ( "bufio" "errors" "fmt" "os" "path/filepath" "strings" incus "github.com/lxc/incus/shared/util" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/shared" ) var incusAgentSetupScript = `#!/bin/sh set -eu PREFIX="/run/incus_agent" # Functions. mount_virtiofs() { mount -t virtiofs config "${PREFIX}/.mnt" >/dev/null 2>&1 } mount_9p() { /sbin/modprobe 9pnet_virtio >/dev/null 2>&1 || true /bin/mount -t 9p config "${PREFIX}/.mnt" -o access=0,trans=virtio,size=1048576 >/dev/null 2>&1 } fail() { umount -l "${PREFIX}" >/dev/null 2>&1 || true rmdir "${PREFIX}" >/dev/null 2>&1 || true echo "${1}" exit 1 } # Setup the mount target. umount -l "${PREFIX}" >/dev/null 2>&1 || true mkdir -p "${PREFIX}" mount -t tmpfs tmpfs "${PREFIX}" -o mode=0700,size=50M mkdir -p "${PREFIX}/.mnt" # Try virtiofs first. mount_virtiofs || mount_9p || fail "Couldn't mount virtiofs or 9p, failing." # Copy the data. cp -Ra "${PREFIX}/.mnt/"* "${PREFIX}" # Unmount the temporary mount. umount "${PREFIX}/.mnt" rmdir "${PREFIX}/.mnt" # Fix up permissions. 
chown -R root:root "${PREFIX}" # Legacy. if [ ! -e "${PREFIX}/incus-agent" ] && [ -e "${PREFIX}/lxd-agent" ]; then ln -s lxd-agent "${PREFIX}"/incus-agent fi exit 0 ` type incusAgent struct { common } // RunLXC is not supported. func (g *incusAgent) RunLXC(img *image.LXCImage, target shared.DefinitionTargetLXC) error { return ErrNotSupported } // RunIncus creates systemd unit files for the agent. func (g *incusAgent) RunIncus(img *image.IncusImage, target shared.DefinitionTargetIncus) error { initFile := filepath.Join(g.sourceDir, "sbin", "init") fi, err := os.Lstat(initFile) if err != nil { return fmt.Errorf("Failed to stat file %q: %w", initFile, err) } if fi.Mode()&os.ModeSymlink != 0 { linkTarget, err := os.Readlink(initFile) if err != nil { return fmt.Errorf("Failed to read link %q: %w", initFile, err) } if strings.Contains(linkTarget, "systemd") { return g.handleSystemd() } if strings.Contains(linkTarget, "busybox") { return g.getInitSystemFromInittab() } return nil } return g.getInitSystemFromInittab() } // Run does nothing. func (g *incusAgent) Run() error { return nil } func (g *incusAgent) handleSystemd() error { systemdPath := filepath.Join("/", "lib", "systemd") if !incus.PathExists(filepath.Join(g.sourceDir, systemdPath)) { systemdPath = filepath.Join("/", "usr", "lib", "systemd") } incusAgentServiceUnit := fmt.Sprintf(`[Unit] Description=Incus - agent Documentation=https://linuxcontainers.org/incus/docs/main/ ConditionPathExistsGlob=/dev/virtio-ports/org.linuxcontainers.* Before=cloud-init.target cloud-init.service cloud-init-local.service DefaultDependencies=no [Service] Type=notify WorkingDirectory=-/run/incus_agent ExecStartPre=%s/incus-agent-setup ExecStart=/run/incus_agent/incus-agent Restart=on-failure RestartSec=5s StartLimitInterval=60 StartLimitBurst=10 [Install] WantedBy=multi-user.target `, systemdPath) path := filepath.Join(g.sourceDir, systemdPath, "system", "incus-agent.service") err := os.WriteFile(path, []byte(incusAgentServiceUnit), 0644) if err != nil { return fmt.Errorf("Failed to write file %q: %w", path, err) } err = os.Symlink(path, filepath.Join(g.sourceDir, "/etc/systemd/system/multi-user.target.wants/incus-agent.service")) if err != nil { return fmt.Errorf("Failed to create symlink %q: %w", filepath.Join(g.sourceDir, "/etc/systemd/system/multi-user.target.wants/incus-agent.service"), err) } path = filepath.Join(g.sourceDir, systemdPath, "incus-agent-setup") err = os.WriteFile(path, []byte(incusAgentSetupScript), 0755) if err != nil { return fmt.Errorf("Failed to write file %q: %w", path, err) } udevPath := filepath.Join("/", "lib", "udev", "rules.d") stat, err := os.Lstat(filepath.Join(g.sourceDir, "lib", "udev")) if err == nil && stat.Mode()&os.ModeSymlink != 0 || !incus.PathExists(filepath.Dir(filepath.Join(g.sourceDir, udevPath))) { udevPath = filepath.Join("/", "usr", "lib", "udev", "rules.d") } incusAgentRules := `ACTION=="add", SYMLINK=="virtio-ports/org.linuxcontainers.incus", TAG+="systemd" SYMLINK=="virtio-ports/org.linuxcontainers.incus", RUN+="/bin/systemctl start incus-agent.service" # Legacy. 
ACTION=="add", SYMLINK=="virtio-ports/org.linuxcontainers.lxd", TAG+="systemd" SYMLINK=="virtio-ports/org.linuxcontainers.lxd", RUN+="/bin/systemctl start incus-agent.service" ` err = os.WriteFile(filepath.Join(g.sourceDir, udevPath, "99-incus-agent.rules"), []byte(incusAgentRules), 0400) if err != nil { return fmt.Errorf("Failed to write file %q: %w", filepath.Join(g.sourceDir, udevPath, "99-incus-agent.rules"), err) } return nil } func (g *incusAgent) handleOpenRC() error { incusAgentScript := `#!/sbin/openrc-run description="Incus - agent" command=/run/incus_agent/incus-agent command_background=true pidfile=/run/incus-agent.pid start_stop_daemon_args="--chdir /run/incus_agent" required_dirs=/run/incus_agent depend() { need incus-agent-setup after incus-agent-setup before cloud-init before cloud-init-local } ` err := os.WriteFile(filepath.Join(g.sourceDir, "/etc/init.d/incus-agent"), []byte(incusAgentScript), 0755) if err != nil { return fmt.Errorf("Failed to write file %q: %w", filepath.Join(g.sourceDir, "/etc/init.d/incus-agent"), err) } err = os.Symlink("/etc/init.d/incus-agent", filepath.Join(g.sourceDir, "/etc/runlevels/default/incus-agent")) if err != nil { return fmt.Errorf("Failed to create symlink %q: %w", filepath.Join(g.sourceDir, "/etc/runlevels/default/incus-agent"), err) } incusConfigShareMountScript := `#!/sbin/openrc-run description="Incus - agent - setup" command=/usr/local/bin/incus-agent-setup required_dirs=/dev/virtio-ports/ ` err = os.WriteFile(filepath.Join(g.sourceDir, "/etc/init.d/incus-agent-setup"), []byte(incusConfigShareMountScript), 0755) if err != nil { return fmt.Errorf("Failed to write file %q: %w", filepath.Join(g.sourceDir, "/etc/init.d/incus-agent-setup"), err) } err = os.Symlink("/etc/init.d/incus-agent-setup", filepath.Join(g.sourceDir, "/etc/runlevels/default/incus-agent-setup")) if err != nil { return fmt.Errorf("Failed to create symlink %q: %w", filepath.Join(g.sourceDir, "/etc/runlevels/default/incus-agent-setup"), err) } path := filepath.Join(g.sourceDir, "/usr/local/bin", "incus-agent-setup") err = os.WriteFile(path, []byte(incusAgentSetupScript), 0755) if err != nil { return fmt.Errorf("Failed to write file %q: %w", path, err) } return nil } func (g *incusAgent) getInitSystemFromInittab() error { f, err := os.Open(filepath.Join(g.sourceDir, "etc", "inittab")) if err != nil { return fmt.Errorf("Failed to open file %q: %w", filepath.Join(g.sourceDir, "etc", "inittab"), err) } defer f.Close() scanner := bufio.NewScanner(f) for scanner.Scan() { if strings.Contains(scanner.Text(), "sysinit") && strings.Contains(scanner.Text(), "openrc") { return g.handleOpenRC() } } return errors.New("Failed to determine init system") } distrobuilder-3.0/generators/remove.go000066400000000000000000000010601456216713500202330ustar00rootroot00000000000000package generators import ( "os" "path/filepath" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/shared" ) type remove struct { common } // RunLXC removes a path. func (g *remove) RunLXC(img *image.LXCImage, target shared.DefinitionTargetLXC) error { return g.Run() } // RunIncus removes a path. func (g *remove) RunIncus(img *image.IncusImage, target shared.DefinitionTargetIncus) error { return g.Run() } // Run removes a path. 
func (g *remove) Run() error { return os.RemoveAll(filepath.Join(g.sourceDir, g.defFile.Path)) } distrobuilder-3.0/generators/template.go000066400000000000000000000037271456216713500205650ustar00rootroot00000000000000package generators import ( "fmt" "os" "path/filepath" "strings" "github.com/flosch/pongo2" "github.com/lxc/incus/shared/api" "github.com/lxc/distrobuilder/image" "github.com/lxc/distrobuilder/shared" ) type template struct { common } // RunLXC dumps content to a file. func (g *template) RunLXC(img *image.LXCImage, target shared.DefinitionTargetLXC) error { // no template support for LXC, ignoring generator return nil } // RunIncus dumps content to a file. func (g *template) RunIncus(img *image.IncusImage, target shared.DefinitionTargetIncus) error { templateDir := filepath.Join(g.cacheDir, "templates") err := os.MkdirAll(templateDir, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", templateDir, err) } template := fmt.Sprintf("%s.tpl", g.defFile.Name) file, err := os.Create(filepath.Join(templateDir, template)) if err != nil { return fmt.Errorf("Failed to create file %q: %w", filepath.Join(templateDir, template), err) } defer file.Close() content := g.defFile.Content // Append final new line if missing if !strings.HasSuffix(content, "\n") { content += "\n" } if g.defFile.Pongo { tpl, err := pongo2.FromString(content) if err != nil { return fmt.Errorf("Failed to parse template: %w", err) } content, err = tpl.Execute(pongo2.Context{"incus": target}) if err != nil { return fmt.Errorf("Failed to execute template: %w", err) } } _, err = file.WriteString(content) if err != nil { return fmt.Errorf("Failed to write to content to %s template: %w", g.defFile.Name, err) } // Add to Incus templates img.Metadata.Templates[g.defFile.Path] = &api.ImageMetadataTemplate{ Template: template, Properties: g.defFile.Template.Properties, When: g.defFile.Template.When, } if len(g.defFile.Template.When) == 0 { img.Metadata.Templates[g.defFile.Path].When = []string{ "create", "copy", } } return nil } // Run does nothing. 
// Run does nothing.
func (g *template) Run() error {
	return nil
}

distrobuilder-3.0/generators/template_test.go

package generators

import (
	"context"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/lxc/distrobuilder/image"
	"github.com/lxc/distrobuilder/shared"
)

func TestTemplateGeneratorRunIncus(t *testing.T) {
	cacheDir := filepath.Join(os.TempDir(), "distrobuilder-test")
	rootfsDir := filepath.Join(cacheDir, "rootfs")

	setup(t, cacheDir)
	defer teardown(cacheDir)

	definition := shared.Definition{
		Image: shared.DefinitionImage{
			Distribution: "ubuntu",
			Release:      "artful",
		},
	}

	generator, err := Load("template", nil, cacheDir, rootfsDir, shared.DefinitionFile{
		Generator: "template",
		Name:      "template",
		Content:   "==test==",
		Path:      "/root/template",
	}, definition)
	require.IsType(t, &template{}, generator)
	require.NoError(t, err)

	image := image.NewIncusImage(context.TODO(), cacheDir, "", cacheDir, definition)

	err = os.MkdirAll(filepath.Join(cacheDir, "rootfs", "root"), 0755)
	require.NoError(t, err)

	createTestFile(t, filepath.Join(cacheDir, "rootfs", "root", "template"), "--test--")

	err = generator.RunIncus(image, shared.DefinitionTargetIncus{})
	require.NoError(t, err)

	validateTestFile(t, filepath.Join(cacheDir, "templates", "template.tpl"), "==test==\n")
	validateTestFile(t, filepath.Join(cacheDir, "rootfs", "root", "template"), "--test--")
}

func TestTemplateGeneratorRunIncusDefaultWhen(t *testing.T) {
	cacheDir := filepath.Join(os.TempDir(), "distrobuilder-test")
	rootfsDir := filepath.Join(cacheDir, "rootfs")

	setup(t, cacheDir)
	defer teardown(cacheDir)

	definition := shared.Definition{
		Image: shared.DefinitionImage{
			Distribution: "ubuntu",
			Release:      "artful",
		},
	}

	generator, err := Load("template", nil, cacheDir, rootfsDir, shared.DefinitionFile{
		Generator: "template",
		Name:      "test-default-when",
		Content:   "==test==",
		Path:      "test-default-when",
	}, definition)
	require.IsType(t, &template{}, generator)
	require.NoError(t, err)

	image := image.NewIncusImage(context.TODO(), cacheDir, "", cacheDir, definition)

	err = generator.RunIncus(image, shared.DefinitionTargetIncus{})
	require.NoError(t, err)

	generator, err = Load("template", nil, cacheDir, rootfsDir, shared.DefinitionFile{
		Generator: "template",
		Name:      "test-when",
		Content:   "==test==",
		Path:      "test-when",
		Template: shared.DefinitionFileTemplate{
			When: []string{"create"},
		},
	}, definition)
	require.IsType(t, &template{}, generator)
	require.NoError(t, err)

	err = generator.RunIncus(image, shared.DefinitionTargetIncus{})
	require.NoError(t, err)

	testvalue := []string{"create", "copy"}
	require.Equal(t, image.Metadata.Templates["test-default-when"].When, testvalue)

	testvalue = []string{"create"}
	require.Equal(t, image.Metadata.Templates["test-when"].When, testvalue)
}

distrobuilder-3.0/generators/utils.go

package generators

import (
	"fmt"
	"os"
	"strconv"

	"github.com/lxc/distrobuilder/shared"
)

func updateFileAccess(file *os.File, defFile shared.DefinitionFile) error {
	// Change file mode if needed
	if defFile.Mode != "" {
		mode, err := strconv.ParseUint(defFile.Mode, 8, 64)
		if err != nil {
			return fmt.Errorf("Failed to parse file mode: %w", err)
		}

		err = file.Chmod(os.FileMode(mode))
		if err != nil {
			return fmt.Errorf("Failed to change file mode: %w", err)
		}
	}

	// Change gid if needed
	if defFile.GID != "" {
		gid, err := strconv.Atoi(defFile.GID)
		if err != nil {
			return fmt.Errorf("Failed to parse GID: %w",
err) } err = file.Chown(-1, gid) if err != nil { return fmt.Errorf("Failed to change GID: %w", err) } } // Change uid if needed if defFile.UID != "" { uid, err := strconv.Atoi(defFile.UID) if err != nil { return fmt.Errorf("Failed to parse UID: %w", err) } err = file.Chown(uid, -1) if err != nil { return fmt.Errorf("Failed to change UID: %w", err) } } return nil } distrobuilder-3.0/go.mod000066400000000000000000000103301456216713500153440ustar00rootroot00000000000000module github.com/lxc/distrobuilder go 1.18 exclude ( github.com/rootless-containers/proto v0.1.0 github.com/rootless-containers/proto/go-proto v0.0.0-20221103010429-bc555ef10687 ) require ( github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 github.com/google/go-github/v56 v56.0.0 github.com/lxc/incus v0.0.0-20231030213510-385b6509cfce github.com/mudler/docker-companion v0.4.6-0.20211015133729-bd4704fad372 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.4 golang.org/x/sys v0.13.0 golang.org/x/text v0.13.0 gopkg.in/antchfx/htmlquery.v1 v1.2.2 gopkg.in/flosch/pongo2.v3 v3.0.0-20141028000813-5e81b817a0c4 gopkg.in/yaml.v2 v2.4.0 ) require ( github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/hcsshim v0.11.2 // indirect github.com/antchfx/xpath v1.2.5 // indirect github.com/apex/log v1.9.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/containerd v1.7.8 // indirect github.com/containerd/continuity v0.4.3 // indirect github.com/containerd/log v0.1.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.5.0 // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker v24.0.7+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect github.com/fsouza/go-dockerclient v1.10.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/uuid v1.4.0 // indirect github.com/gorilla/schema v1.2.0 // indirect github.com/gorilla/securecookie v1.1.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/heroku/docker-registry-client v0.0.0-20211012143308-9463674c8930 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/klauspost/compress v1.17.2 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/kr/fs v0.1.0 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/muhlemmer/gu v0.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc5 // indirect github.com/opencontainers/runc v1.1.9 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opencontainers/umoci v0.4.8-0.20211009121349-9c76304c034d // indirect github.com/pkg/errors v0.9.1 // indirect 
github.com/pkg/sftp v1.13.6 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/rootless-containers/proto/go-proto v0.0.0-20230421021042-4cd87ebadd67 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/urfave/cli v1.22.14 // indirect github.com/vbatts/go-mtree v0.5.3 // indirect github.com/zitadel/oidc/v2 v2.11.0 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/mod v0.13.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.13.0 // indirect golang.org/x/sync v0.4.0 // indirect golang.org/x/term v0.13.0 // indirect golang.org/x/tools v0.14.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect google.golang.org/grpc v1.59.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.0 // indirect ) distrobuilder-3.0/go.sum000066400000000000000000001675311456216713500154110ustar00rootroot00000000000000bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/AdaLogics/go-fuzz-headers v0.0.0-20210401092550-0a8691dafd0d/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AdamKorcz/go-fuzz-headers v0.0.0-20210312213058-32f4d319f0d2/go.mod h1:VPevheIvXETHZT/ddjwarP3POR5p/cnH9Hy5yoFnQjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/Microsoft/hcsshim v0.11.2 h1:63w4x0s9PjbJGGTQTNgCTExPCkyDXhx2AUVQDPDBAek= github.com/Microsoft/hcsshim v0.11.2/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/antchfx/xpath v1.2.5 h1:hqZ+wtQ+KIOV/S3bGZcIhpgYC26um2bZYP2KVGcR7VY= github.com/antchfx/xpath v1.2.5/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= 
github.com/apex/log v1.4.0/go.mod h1:UMNC4vQNC7hb5gyr47r18ylK1n34rV7GO+gb0wpXvcE= github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0= github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA= github.com/apex/logs v0.0.7/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.7.8 h1:RkwgOW3AVUT3H/dyT0W03Dc8AzlpMG65lX48KftOFSM= github.com/containerd/containerd v1.7.8/go.mod h1:L/Hn9qylJtUFT7cPeM0Sr3fATj+WjHwRQ0lyrYk3OPY= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/coreos/go-systemd 
v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/distribution v0.0.0-20171011171712-7484e51bf6af/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20191101170500-ac7306503d23/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 h1:fmFk0Wt3bBxxwZnu48jqMdaOR/IZ4vdtJFuaFV8MpIE= github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3/go.mod h1:bJWSKrZyQvfTnb2OudyUjurSG4/edverV7n82+K3JiM= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsouza/go-dockerclient v1.6.4/go.mod h1:GOdftxWLWIbIWKbIMDroKFJzPdg6Iw7r+jX1DDZdVsA= github.com/fsouza/go-dockerclient v1.10.0 h1:ppSBsbR60I1DFbV4Ag7LlHlHakHFRNLk9XakATW1yVQ= github.com/fsouza/go-dockerclient v1.10.0/go.mod h1:+iNzAW78AzClIBTZ6WFjkaMvOgz68GyCJ236b1opLTs= github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf 
v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM= github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= github.com/golangci/golangci-lint v1.17.2-0.20190909185456-6163a8a79084/go.mod h1:jXakAOSd+FMU9dP3D6IfBK7HyD1q/RLHI9NOY8veycY= github.com/golangci/gosec 
v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU= github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-github/v56 v56.0.0 h1:TysL7dMa/r7wsQi44BjqlwaHvwlFlqkK8CtBWCX3gb4= github.com/google/go-github/v56 v56.0.0/go.mod h1:D8cdcX98YWJvi7TLo7zM4/h8ZTx6u6fwGEkCdisopo0= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/schema v1.2.0 h1:YufUaxZYCKGFuAq3c96BOhjgd5nmXiOY9NGzF247Tsc= github.com/gorilla/schema v1.2.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror 
v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/heroku/docker-registry-client v0.0.0-20181004091502-47ecf50fd8d4/go.mod h1:ceV82AfTGFCOL/b0cdpP54uKVSL1Gef0TBSTGFDuqyY= github.com/heroku/docker-registry-client v0.0.0-20211012143308-9463674c8930 h1:mNL9ktJqBuzPTV/QP/fKd4y1uOFvfiv6zhe0G7lg9OA= github.com/heroku/docker-registry-client v0.0.0-20211012143308-9463674c8930/go.mod h1:Yho0S7KhsnHQRCC5lDraYF1SsLMeWtf/tKdufKu3TJA= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jeremija/gosubmit v0.2.7 h1:At0OhGCFGPXyjPYAsCchoBUhE099pcBXmsb4iZqROIc= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty 
v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lxc/incus v0.0.0-20231030213510-385b6509cfce h1:tWrq+DvbE+gDLnR9C56v8crNAzw/KP7c9gM5+6KowRo= github.com/lxc/incus v0.0.0-20231030213510-385b6509cfce/go.mod h1:PSZe8EU2RhdpIqkCF3T7KQruU+RBfDqSTHz9VhhMen0= github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= 
github.com/mudler/docker-companion v0.4.6-0.20211015133729-bd4704fad372 h1:UZI8phFB+jUWQEQbZfWmo8lkVaH09JYkHdiUJvEIT1g= github.com/mudler/docker-companion v0.4.6-0.20211015133729-bd4704fad372/go.mod h1:W9meZ2mgTcd/EBtkLPTq6p4SpFFp28ZGPNID5Rrj5oY= github.com/muhlemmer/gu v0.3.1 h1:7EAqmFrW7n3hETvuAdmFmn4hS8W+z3LgKtrnow+YzNM= github.com/muhlemmer/gu v0.3.1/go.mod h1:YHtHR+gxM+bKEIIs7Hmi9sPT3ZDUvTN/i88wQpZkrdM= github.com/muhlemmer/httpforwarded v0.1.0 h1:x4DLrzXdliq8mprgUMR0olDvHGkou5BJsK/vWUetyzY= github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.1.9 h1:XR0VIHTGce5eWPkaPesqTBrhW2yAcaraWfsEalNwQLM= github.com/opencontainers/runc v1.1.9/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools 
v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/umoci v0.4.7/go.mod h1:lgJ4bnwJezsN1o/5d7t/xdRPvmf8TvBko5kKYJsYvgo= github.com/opencontainers/umoci v0.4.8-0.20211009121349-9c76304c034d h1:aTf3VD/a24obMN/7xMM+5t5hVQpzTQeYnYS8aERRl8o= github.com/opencontainers/umoci v0.4.8-0.20211009121349-9c76304c034d/go.mod h1:kO0Bh4G4BZUh2QSlqsCR/OCtdqLjmt3mvD6okZhMBlU= github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rootless-containers/proto/go-proto v0.0.0-20210921234734-69430b6543fb/go.mod h1:LLjEAc6zmycfeN7/1fxIphWQPjHpTt7ElqT7eVf8e4A= github.com/rootless-containers/proto/go-proto v0.0.0-20230421021042-4cd87ebadd67 h1:58jvc5cZ+hGKidQ4Z37/+rj9eQxRRjOOsqNEwPSZXR4= github.com/rootless-containers/proto/go-proto v0.0.0-20230421021042-4cd87ebadd67/go.mod h1:LLjEAc6zmycfeN7/1fxIphWQPjHpTt7ElqT7eVf8e4A= github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name 
v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc= github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vbatts/go-mtree v0.5.0/go.mod h1:7JbaNHyBMng+RP8C3Q4E+4Ca8JnGQA2R/MB+jb4tSOk= github.com/vbatts/go-mtree v0.5.3 h1:S/jYlfG8rZ+a0bhZd+RANXejy7M4Js8fq9U+XoWTd5w= github.com/vbatts/go-mtree v0.5.3/go.mod h1:eXsdoPMdL2jcJx6HweWi9lYQxBsTp4lNhqqAjgkZUg8= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zitadel/oidc/v2 v2.11.0 h1:Am4/yQr4iiM5bznRgF3FOp+wLdKx2gzSU73uyI9vvBE= github.com/zitadel/oidc/v2 v2.11.0/go.mod 
h1:enFSVBQI6aE0TEB1ntjXs9r6O6DEosxX4uhEBLBVD8o= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text 
v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= 
google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/antchfx/htmlquery.v1 v1.2.2 h1:mpG5JLZb/uQFz5Q/HwZxBWmxzsrUJjLhn5eZTN3QEiA= gopkg.in/antchfx/htmlquery.v1 v1.2.2/go.mod h1:5YDCffzQv5+40qGenVeBl9Jp5L2ksn+DznurZY/Qukk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/flosch/pongo2.v3 v3.0.0-20141028000813-5e81b817a0c4 h1:eyQQg/uGuZ3ndaBhqteakHpVW+dSOPalilfC9RpM2TA= gopkg.in/flosch/pongo2.v3 v3.0.0-20141028000813-5e81b817a0c4/go.mod 
h1:bJkYqV5pg6+Z7MsSu/hSb1zsPT955hBW2QHLE1jm4wA= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= distrobuilder-3.0/image/000077500000000000000000000000001456216713500153235ustar00rootroot00000000000000distrobuilder-3.0/image/incus.go000066400000000000000000000132401456216713500167730ustar00rootroot00000000000000package image import ( "context" "fmt" "os" "path/filepath" "strconv" "time" "github.com/lxc/incus/shared/api" "gopkg.in/yaml.v2" "github.com/lxc/distrobuilder/shared" ) // An IncusImage represents an Incus image. type IncusImage struct { sourceDir string targetDir string cacheDir string Metadata api.ImageMetadata definition shared.Definition ctx context.Context } // NewIncusImage returns an IncusImage. func NewIncusImage(ctx context.Context, sourceDir, targetDir, cacheDir string, definition shared.Definition) *IncusImage { return &IncusImage{ sourceDir, targetDir, cacheDir, api.ImageMetadata{ Properties: make(map[string]string), Templates: make(map[string]*api.ImageMetadataTemplate), }, definition, ctx, } } // Build creates an Incus image. 
func (l *IncusImage) Build(unified bool, compression string, vm bool) (string, string, error) { err := l.createMetadata() if err != nil { return "", "", fmt.Errorf("Failed to create metadata: %w", err) } file, err := os.Create(filepath.Join(l.cacheDir, "metadata.yaml")) if err != nil { return "", "", fmt.Errorf("Failed to create metadata.yaml: %w", err) } defer file.Close() data, err := yaml.Marshal(l.Metadata) if err != nil { return "", "", fmt.Errorf("Failed to marshal yaml: %w", err) } _, err = file.Write(data) if err != nil { return "", "", fmt.Errorf("Failed to write metadata: %w", err) } paths := []string{"metadata.yaml"} // Only include templates directory in the tarball if it's present. info, err := os.Stat(filepath.Join(l.cacheDir, "templates")) if err == nil && info.IsDir() { paths = append(paths, "templates") } var fname string if l.definition.Image.Name != "" { // Use a custom name for the unified tarball. fname, _ = shared.RenderTemplate(l.definition.Image.Name, l.definition) } else { // Default name for the unified tarball. fname = "incus" } rawImage := filepath.Join(l.cacheDir, fmt.Sprintf("%s.raw", fname)) qcowImage := filepath.Join(l.cacheDir, fmt.Sprintf("%s.img", fname)) if vm { // Create compressed qcow2 image. err = shared.RunCommand(l.ctx, nil, nil, "qemu-img", "convert", "-c", "-O", "qcow2", rawImage, qcowImage) if err != nil { return "", "", fmt.Errorf("Failed to create qcow2 image %q: %w", qcowImage, err) } defer os.RemoveAll(rawImage) } imageFile := "" rootfsFile := "" if unified { targetTarball := filepath.Join(l.targetDir, fmt.Sprintf("%s.tar", fname)) if vm { // Rename image to rootfs.img err = os.Rename(qcowImage, filepath.Join(filepath.Dir(qcowImage), "rootfs.img")) if err != nil { return "", "", fmt.Errorf("Failed to rename image %q -> %q: %w", qcowImage, filepath.Join(filepath.Dir(qcowImage), "rootfs.img"), err) } _, err = shared.Pack(l.ctx, targetTarball, "", l.cacheDir, "rootfs.img") } else { // Add the rootfs to the tarball, prefix all files with "rootfs". // We intentionally don't set any compression here, as PackUpdate (further down) cannot deal with compressed tarballs. _, err = shared.Pack(l.ctx, targetTarball, "", l.sourceDir, "--transform", "s,^./,rootfs/,", ".") } if err != nil { return "", "", fmt.Errorf("Failed to pack tarball %q: %w", targetTarball, err) } defer func() { if vm { os.RemoveAll(qcowImage) } }() // Add the metadata to the tarball which is located in the cache directory imageFile, err = shared.PackUpdate(l.ctx, targetTarball, compression, l.cacheDir, paths...) if err != nil { return "", "", fmt.Errorf("Failed to add metadata to tarball %q: %w", targetTarball, err) } } else { if vm { rootfsFile = filepath.Join(l.targetDir, "disk.qcow2") err = shared.Copy(qcowImage, rootfsFile) } else { rootfsFile = filepath.Join(l.targetDir, "rootfs.squashfs") args := []string{l.sourceDir, rootfsFile, "-noappend", "-b", "1M", "-no-exports", "-no-progress", "-no-recovery"} compression, level, parseErr := shared.ParseSquashfsCompression(compression) if parseErr != nil { return "", "", fmt.Errorf("Failed to parse compression level: %w", err) } if level != nil { args = append(args, "-comp", compression, "-Xcompression-level", strconv.Itoa(*level)) } else { args = append(args, "-comp", compression) } // Create rootfs as squashfs. err = shared.RunCommand(l.ctx, nil, nil, "mksquashfs", args...) } if err != nil { return "", "", fmt.Errorf("Failed to create squashfs or copy image: %w", err) } // Create metadata tarball. 
imageFile, err = shared.Pack(l.ctx, filepath.Join(l.targetDir, "incus.tar"), compression, l.cacheDir, paths...) if err != nil { return "", "", fmt.Errorf("Failed to create metadata tarball: %w", err) } } return imageFile, rootfsFile, nil } func (l *IncusImage) createMetadata() error { var err error l.Metadata.Architecture = l.definition.Image.Architecture l.Metadata.CreationDate = time.Now().UTC().Unix() l.Metadata.Properties["architecture"] = l.definition.Image.ArchitectureMapped l.Metadata.Properties["os"] = l.definition.Image.Distribution l.Metadata.Properties["release"] = l.definition.Image.Release l.Metadata.Properties["variant"] = l.definition.Image.Variant l.Metadata.Properties["serial"] = l.definition.Image.Serial l.Metadata.Properties["description"], err = shared.RenderTemplate( l.definition.Image.Description, l.definition) if err != nil { return fmt.Errorf("Failed to render template: %w", err) } l.Metadata.Properties["name"], err = shared.RenderTemplate( l.definition.Image.Name, l.definition) if err != nil { return fmt.Errorf("Failed to render template: %w", err) } l.Metadata.ExpiryDate = shared.GetExpiryDate(time.Now(), l.definition.Image.Expiry).Unix() return nil } distrobuilder-3.0/image/incus_test.go000066400000000000000000000103621456216713500200340ustar00rootroot00000000000000package image import ( "context" "fmt" "log" "os" "path/filepath" "strings" "testing" "github.com/stretchr/testify/require" "golang.org/x/text/cases" "golang.org/x/text/language" "github.com/lxc/distrobuilder/shared" ) var incusDef = shared.Definition{ Image: shared.DefinitionImage{ Description: "{{ image.distribution|capfirst }} {{ image. release }}", Distribution: "ubuntu", Release: "17.10", Architecture: "x86_64", Expiry: "30d", Name: "{{ image.distribution|lower }}-{{ image.release }}-{{ image.architecture }}-{{ image.serial }}", Serial: "testing", }, Source: shared.DefinitionSource{ Downloader: "debootstrap", }, Packages: shared.DefinitionPackages{ Manager: "apt", }, } func setupIncus(t *testing.T) *IncusImage { cacheDir := filepath.Join(os.TempDir(), "distrobuilder-test") err := os.MkdirAll(filepath.Join(cacheDir, "rootfs"), 0755) require.NoError(t, err) err = os.MkdirAll(filepath.Join(cacheDir, "templates"), 0755) require.NoError(t, err) image := NewIncusImage(context.TODO(), cacheDir, "", cacheDir, incusDef) fail := true defer func() { if fail { teardownIncus(t) } }() // Check cache directory require.Equal(t, cacheDir, image.cacheDir) require.Equal(t, incusDef, image.definition) incusDef.SetDefaults() err = incusDef.Validate() require.NoError(t, err) fail = false return image } func teardownIncus(t *testing.T) { os.RemoveAll(filepath.Join(os.TempDir(), "distrobuilder-test")) } func TestIncusBuild(t *testing.T) { image := setupIncus(t) defer teardownIncus(t) testIncusBuildSplitImage(t, image) testIncusBuildUnifiedImage(t, image) } func testIncusBuildSplitImage(t *testing.T, image *IncusImage) { // Create split tarball and squashfs. 
imageFile, rootfsFile, err := image.Build(false, "xz", false) require.NoError(t, err) require.FileExists(t, "incus.tar.xz") require.FileExists(t, "rootfs.squashfs") require.Equal(t, "rootfs.squashfs", filepath.Base(rootfsFile)) require.Equal(t, "incus.tar.xz", filepath.Base(imageFile)) os.Remove("incus.tar.xz") os.Remove("rootfs.squashfs") imageFile, rootfsFile, err = image.Build(false, "gzip", false) require.NoError(t, err) require.FileExists(t, "incus.tar.gz") require.FileExists(t, "rootfs.squashfs") require.Equal(t, "rootfs.squashfs", filepath.Base(rootfsFile)) require.Equal(t, "incus.tar.gz", filepath.Base(imageFile)) os.Remove("incus.tar.gz") os.Remove("rootfs.squashfs") } func testIncusBuildUnifiedImage(t *testing.T, image *IncusImage) { // Create unified tarball with custom name. _, _, err := image.Build(true, "xz", false) require.NoError(t, err) defer os.Remove("ubuntu-17.10-x86_64-testing.tar.xz") require.FileExists(t, "ubuntu-17.10-x86_64-testing.tar.xz") _, _, err = image.Build(true, "gzip", false) require.NoError(t, err) defer os.Remove("ubuntu-17.10-x86_64-testing.tar.gz") require.FileExists(t, "ubuntu-17.10-x86_64-testing.tar.gz") // Create unified tarball with default name. image.definition.Image.Name = "" _, _, err = image.Build(true, "xz", false) require.NoError(t, err) defer os.Remove("incus.tar.xz") require.FileExists(t, "incus.tar.xz") } func TestIncusCreateMetadata(t *testing.T) { image := setupIncus(t) defer teardownIncus(t) err := image.createMetadata() require.NoError(t, err) tests := []struct { name string have string expected string }{ { "Architecture", image.Metadata.Architecture, "x86_64", }, { "Properties[architecture]", image.Metadata.Properties["architecture"], "x86_64", }, { "Properties[os]", image.Metadata.Properties["os"], incusDef.Image.Distribution, }, { "Properties[release]", image.Metadata.Properties["release"], incusDef.Image.Release, }, { "Properties[description]", image.Metadata.Properties["description"], fmt.Sprintf("%s %s", cases.Title(language.English).String(incusDef.Image.Distribution), incusDef.Image.Release), }, { "Properties[name]", image.Metadata.Properties["name"], fmt.Sprintf("%s-%s-%s-%s", strings.ToLower(incusDef.Image.Distribution), incusDef.Image.Release, "x86_64", incusDef.Image.Serial), }, } for i, tt := range tests { log.Printf("Running test #%d: %s", i, tt.name) require.Equal(t, tt.expected, tt.have) } } distrobuilder-3.0/image/lxc.go000066400000000000000000000147321456216713500164470ustar00rootroot00000000000000package image import ( "context" "fmt" "os" "path/filepath" "strings" "time" incus "github.com/lxc/incus/shared/util" "github.com/lxc/distrobuilder/shared" ) const maxLXCCompatLevel = 5 // LXCImage represents a LXC image. type LXCImage struct { sourceDir string targetDir string cacheDir string definition shared.Definition ctx context.Context } // NewLXCImage returns a LXCImage. func NewLXCImage(ctx context.Context, sourceDir, targetDir, cacheDir string, definition shared.Definition) *LXCImage { img := LXCImage{ sourceDir, targetDir, cacheDir, definition, ctx, } // create metadata directory err := os.MkdirAll(filepath.Join(cacheDir, "metadata"), 0755) if err != nil { return nil } return &img } // AddTemplate adds an entry to the templates file. 
func (l *LXCImage) AddTemplate(path string) error { metaDir := filepath.Join(l.cacheDir, "metadata") file, err := os.OpenFile(filepath.Join(metaDir, "templates"), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { return fmt.Errorf("Failed to open file %q: %w", filepath.Join(metaDir, "templates"), err) } defer file.Close() _, err = file.WriteString(fmt.Sprintf("%v\n", path)) if err != nil { return fmt.Errorf("Failed to write to template file: %w", err) } return nil } // Build creates a LXC image. func (l *LXCImage) Build(compression string) error { err := l.createMetadata() if err != nil { return fmt.Errorf("Failed to create metadata: %w", err) } err = l.packMetadata() if err != nil { return fmt.Errorf("Failed to pack metadata: %w", err) } _, err = shared.Pack(l.ctx, filepath.Join(l.targetDir, "rootfs.tar"), compression, l.sourceDir, ".") if err != nil { return fmt.Errorf("Failed to pack %q: %w", filepath.Join(l.targetDir, "rootfs.tar"), err) } return nil } func (l *LXCImage) createMetadata() error { metaDir := filepath.Join(l.cacheDir, "metadata") imageTargets := shared.ImageTargetUndefined | shared.ImageTargetContainer | shared.ImageTargetAll for _, c := range l.definition.Targets.LXC.Config { if !shared.ApplyFilter(&c, l.definition.Image.Release, l.definition.Image.ArchitectureMapped, l.definition.Image.Variant, l.definition.Targets.Type, imageTargets) { continue } // If not specified, create files up to ${maxLXCCompatLevel} if c.Before == 0 { c.Before = maxLXCCompatLevel + 1 } for i := uint(1); i < maxLXCCompatLevel+1; i++ { // Bound checking if c.After < c.Before { if i <= c.After || i >= c.Before { continue } } else if c.After >= c.Before { if i <= c.After && i >= c.Before { continue } } switch c.Type { case "all": err := l.writeConfig(i, filepath.Join(metaDir, "config"), c.Content) if err != nil { return fmt.Errorf("Failed to write config %q: %w", filepath.Join(metaDir, "config"), err) } err = l.writeConfig(i, filepath.Join(metaDir, "config-user"), c.Content) if err != nil { return fmt.Errorf("Failed to write config %q: %w", filepath.Join(metaDir, "config-user"), err) } case "system": err := l.writeConfig(i, filepath.Join(metaDir, "config"), c.Content) if err != nil { return fmt.Errorf("Failed to write config %q: %w", filepath.Join(metaDir, "config"), err) } case "user": err := l.writeConfig(i, filepath.Join(metaDir, "config-user"), c.Content) if err != nil { return fmt.Errorf("Failed to write config %q: %w", filepath.Join(metaDir, "config-user"), err) } } } } err := l.writeMetadata(filepath.Join(metaDir, "create-message"), l.definition.Targets.LXC.CreateMessage, false) if err != nil { return fmt.Errorf("Error writing 'create-message': %w", err) } err = l.writeMetadata(filepath.Join(metaDir, "expiry"), fmt.Sprint(shared.GetExpiryDate(time.Now(), l.definition.Image.Expiry).Unix()), false) if err != nil { return fmt.Errorf("Error writing 'expiry': %w", err) } var excludesUser string if incus.PathExists(filepath.Join(l.sourceDir, "dev")) { err := filepath.Walk(filepath.Join(l.sourceDir, "dev"), func(path string, info os.FileInfo, err error) error { if err != nil { return err } if info.Mode()&os.ModeDevice != 0 { excludesUser += fmt.Sprintf(".%s\n", strings.TrimPrefix(path, l.sourceDir)) } return nil }) if err != nil { return fmt.Errorf("Error while walking /dev: %w", err) } } err = l.writeMetadata(filepath.Join(metaDir, "excludes-user"), excludesUser, false) if err != nil { return fmt.Errorf("Error writing 'excludes-user': %w", err) } return nil } func (l *LXCImage) 
packMetadata() error { files := []string{"create-message", "expiry", "excludes-user"} // Get all config and config-user files configs, err := filepath.Glob(filepath.Join(l.cacheDir, "metadata", "config*")) if err != nil { return fmt.Errorf("Failed to match file pattern: %w", err) } for _, c := range configs { files = append(files, filepath.Base(c)) } if incus.PathExists(filepath.Join(l.cacheDir, "metadata", "templates")) { files = append(files, "templates") } _, err = shared.Pack(l.ctx, filepath.Join(l.targetDir, "meta.tar"), "xz", filepath.Join(l.cacheDir, "metadata"), files...) if err != nil { return fmt.Errorf("Failed to create metadata: %w", err) } return nil } func (l *LXCImage) writeMetadata(filename, content string, appendContent bool) error { var file *os.File var err error // Open the file either in append or create mode if appendContent { file, err = os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { return fmt.Errorf("Failed to open file %q: %w", filename, err) } } else { file, err = os.Create(filename) if err != nil { return fmt.Errorf("Failed to create file %q: %w", filename, err) } } defer file.Close() out, err := shared.RenderTemplate(content, l.definition) if err != nil { return fmt.Errorf("Failed to render template: %w", err) } // Append final new line if missing if !strings.HasSuffix(out, "\n") { out += "\n" } // Write the content _, err = file.WriteString(out) if err != nil { return fmt.Errorf("Failed to write string: %w", err) } return nil } func (l *LXCImage) writeConfig(compatLevel uint, filename, content string) error { // Only add suffix if it's not the latest compatLevel if compatLevel != maxLXCCompatLevel { filename = fmt.Sprintf("%s.%d", filename, compatLevel) } err := l.writeMetadata(filename, content, true) if err != nil { return fmt.Errorf("Error writing '%s': %w", filepath.Base(filename), err) } return nil } distrobuilder-3.0/image/lxc_test.go000066400000000000000000000165611456216713500175100ustar00rootroot00000000000000package image import ( "bytes" "context" "fmt" "io" "log" "os" "path/filepath" "testing" "github.com/stretchr/testify/require" "golang.org/x/sys/unix" "golang.org/x/text/cases" "golang.org/x/text/language" "github.com/lxc/distrobuilder/shared" ) var lxcDef = shared.Definition{ Image: shared.DefinitionImage{ Distribution: "ubuntu", Release: "17.10", Architecture: "amd64", Expiry: "30d", }, Targets: shared.DefinitionTarget{ LXC: shared.DefinitionTargetLXC{ CreateMessage: "Welcome to {{ image.distribution|capfirst}} {{ image.release }}", Config: []shared.DefinitionTargetLXCConfig{ { Type: "all", Before: 5, Content: "all_before_5", }, { Type: "user", Before: 5, Content: "user_before_5", }, { Type: "all", After: 4, Content: "all_after_4", }, { Type: "user", After: 4, Content: "user_after_4", }, { Type: "all", Content: "all", }, { Type: "system", Before: 2, Content: "system_before_2", }, { Type: "system", Before: 2, After: 4, Content: "system_before_2_after_4", }, { Type: "user", Before: 3, After: 3, Content: "user_before_3_after_3", }, { Type: "system", Before: 4, After: 2, Content: "system_before_4_after_2", }, }, }, }, } func lxcCacheDir() string { wd, _ := os.Getwd() return filepath.Join(wd, "distrobuilder-test") } func setupLXC() *LXCImage { return NewLXCImage(context.TODO(), lxcCacheDir(), "", lxcCacheDir(), lxcDef) } func teardownLXC() { os.RemoveAll(lxcCacheDir()) } func TestNewLXCImage(t *testing.T) { image := NewLXCImage(context.TODO(), lxcCacheDir(), "", lxcCacheDir(), lxcDef) defer teardownLXC() 
require.Equal(t, lxcCacheDir(), image.cacheDir) require.Equal(t, lxcDef, image.definition) } func TestLXCAddTemplate(t *testing.T) { image := setupLXC() defer teardownLXC() // Make sure templates file is empty. _, err := os.Stat(filepath.Join(lxcCacheDir(), "metadata", "templates")) require.EqualError(t, err, fmt.Sprintf("stat %s: no such file or directory", filepath.Join(lxcCacheDir(), "metadata", "templates"))) // Add first template entry. err = image.AddTemplate("/path/file1") require.NoError(t, err) file, err := os.Open(filepath.Join(lxcCacheDir(), "metadata", "templates")) require.NoError(t, err) // Copy file content to buffer. var buffer bytes.Buffer _, err = io.Copy(&buffer, file) require.NoError(t, err) file.Close() require.Equal(t, "/path/file1\n", buffer.String()) // Add second template entry. err = image.AddTemplate("/path/file2") require.NoError(t, err) file, err = os.Open(filepath.Join(lxcCacheDir(), "metadata", "templates")) require.NoError(t, err) // Copy file content to buffer. buffer.Reset() _, err = io.Copy(&buffer, file) require.NoError(t, err) file.Close() require.Equal(t, "/path/file1\n/path/file2\n", buffer.String()) } func TestLXCBuild(t *testing.T) { image := setupLXC() defer teardownLXC() err := os.MkdirAll(filepath.Join(lxcCacheDir(), "rootfs"), 0755) require.NoError(t, err) err = image.Build("xz") require.NoError(t, err) defer func() { os.Remove("meta.tar.xz") os.Remove("rootfs.tar.xz") }() err = image.Build("gzip") require.NoError(t, err) defer func() { os.Remove("meta.tar.gz") os.Remove("rootfs.tar.gz") }() } func TestLXCCreateMetadataBasic(t *testing.T) { defaultImage := setupLXC() defer teardownLXC() tests := []struct { name string shouldFail bool expectedError string prepareImage func(LXCImage) *LXCImage }{ { "valid metadata", false, "", func(l LXCImage) *LXCImage { return &l }, }, { "invalid config template", true, "Error writing 'config': .+", func(l LXCImage) *LXCImage { l.definition.Targets.LXC.Config = []shared.DefinitionTargetLXCConfig{ { Type: "all", After: 4, Content: "{{ invalid }", }, } return &l }, }, { "invalid create-message template", true, "Error writing 'create-message': .+", func(l LXCImage) *LXCImage { l.definition.Targets.LXC.CreateMessage = "{{ invalid }" return &l }, }, { "existing dev directory", false, "", func(l LXCImage) *LXCImage { // Create /dev and device file. 
err := os.MkdirAll(filepath.Join(lxcCacheDir(), "rootfs", "dev"), 0755) require.NoError(t, err) err = unix.Mknod(filepath.Join(lxcCacheDir(), "rootfs", "dev", "null"), unix.S_IFCHR, 0) require.NoError(t, err) return &l }, }, } for i, tt := range tests { log.Printf("Running test #%d: %s", i, tt.name) image := tt.prepareImage(*defaultImage) err := image.createMetadata() if tt.shouldFail { require.Regexp(t, tt.expectedError, err) } else { require.NoError(t, err) } } // Verify create-message template f, err := os.Open(filepath.Join(lxcCacheDir(), "metadata", "create-message")) require.NoError(t, err) defer f.Close() var buf bytes.Buffer _, err = io.Copy(&buf, f) require.NoError(t, err) require.Equal(t, fmt.Sprintf("Welcome to %s %s\n", cases.Title(language.English).String(lxcDef.Image.Distribution), lxcDef.Image.Release), buf.String()) } func TestLXCCreateMetadataConfig(t *testing.T) { image := setupLXC() defer teardownLXC() tests := []struct { configFile string expected string }{ { "config", "all_after_4\nall\nsystem_before_2_after_4\n", }, { "config.1", "all_before_5\nall\nsystem_before_2\nsystem_before_2_after_4\n", }, { "config.2", "all_before_5\nall\n", }, { "config.3", "all_before_5\nall\nsystem_before_4_after_2\n", }, { "config.4", "all_before_5\nall\n", }, { "config-user", "all_after_4\nuser_after_4\nall\nuser_before_3_after_3\n", }, { "config-user.1", "all_before_5\nuser_before_5\nall\nuser_before_3_after_3\n", }, { "config-user.2", "all_before_5\nuser_before_5\nall\nuser_before_3_after_3\n", }, { "config-user.3", "all_before_5\nuser_before_5\nall\n", }, { "config-user.4", "all_before_5\nuser_before_5\nall\nuser_before_3_after_3\n", }, } err := image.createMetadata() require.NoError(t, err) for _, tt := range tests { log.Printf("Checking '%s'", tt.configFile) file, err := os.Open(filepath.Join(lxcCacheDir(), "metadata", tt.configFile)) require.NoError(t, err) var buffer bytes.Buffer _, err = io.Copy(&buffer, file) file.Close() require.NoError(t, err) require.Equal(t, tt.expected, buffer.String()) } } func TestLXCPackMetadata(t *testing.T) { image := setupLXC() defer func() { teardownLXC() os.Remove("meta.tar.xz") }() err := image.createMetadata() require.NoError(t, err) err = image.packMetadata() require.NoError(t, err) // Include templates directory. 
err = image.AddTemplate("/path/file") require.NoError(t, err) err = image.packMetadata() require.NoError(t, err) // Provoke error by removing the metadata directory os.RemoveAll(filepath.Join(lxcCacheDir(), "metadata")) err = image.packMetadata() require.Error(t, err) } func TestLXCWriteMetadata(t *testing.T) { image := setupLXC() defer teardownLXC() // Should fail due to invalid path err := image.writeMetadata("/path/file", "", false) require.Error(t, err) // Should succeed err = image.writeMetadata("test", "metadata", false) require.NoError(t, err) os.Remove("test") } distrobuilder-3.0/managers/000077500000000000000000000000001456216713500160365ustar00rootroot00000000000000distrobuilder-3.0/managers/apk.go000066400000000000000000000016641456216713500171470ustar00rootroot00000000000000package managers import ( "fmt" "os" "github.com/lxc/distrobuilder/shared" ) type apk struct { common } func (m *apk) load() error { m.commands = managerCommands{ clean: "apk", install: "apk", refresh: "apk", remove: "apk", update: "apk", } m.flags = managerFlags{ global: []string{ "--no-cache", }, install: []string{ "add", }, remove: []string{ "del", "--rdepends", }, refresh: []string{ "update", }, update: []string{ "upgrade", }, } return nil } func (m *apk) manageRepository(repoAction shared.DefinitionPackagesRepository) error { repoFile := "/etc/apk/repositories" f, err := os.OpenFile(repoFile, os.O_WRONLY|os.O_APPEND, 0644) if err != nil { return fmt.Errorf("Failed to open %q: %w", repoFile, err) } defer f.Close() _, err = f.WriteString(repoAction.URL + "\n") if err != nil { return fmt.Errorf("Failed to write string to file: %w", err) } return nil } distrobuilder-3.0/managers/apt.go000066400000000000000000000066251456216713500171620ustar00rootroot00000000000000package managers import ( "bytes" "fmt" "io" "os" "path/filepath" "strings" incus "github.com/lxc/incus/shared/util" "github.com/lxc/distrobuilder/shared" ) type apt struct { common } func (m *apt) load() error { m.commands = managerCommands{ clean: "apt-get", install: "apt-get", refresh: "apt-get", remove: "apt-get", update: "apt-get", } m.flags = managerFlags{ clean: []string{ "clean", }, global: []string{ "-y", }, install: []string{ "install", }, remove: []string{ "remove", "--auto-remove", }, refresh: []string{ "update", }, update: []string{ "dist-upgrade", }, } return nil } func (m *apt) manageRepository(repoAction shared.DefinitionPackagesRepository) error { var targetFile string if repoAction.Name == "sources.list" { targetFile = filepath.Join("/etc/apt", repoAction.Name) } else { targetFile = filepath.Join("/etc/apt/sources.list.d", repoAction.Name) if !strings.HasSuffix(targetFile, ".list") { targetFile = fmt.Sprintf("%s.list", targetFile) } } if !incus.PathExists(filepath.Dir(targetFile)) { err := os.MkdirAll(filepath.Dir(targetFile), 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", filepath.Dir(targetFile), err) } } f, err := os.OpenFile(targetFile, os.O_CREATE|os.O_RDWR, 0644) if err != nil { return fmt.Errorf("Failed to open file %q: %w", targetFile, err) } defer f.Close() content, err := io.ReadAll(f) if err != nil { return fmt.Errorf("Failed to read from file %q: %w", targetFile, err) } // Truncate file if it's not generated by distrobuilder if !strings.HasPrefix(string(content), "# Generated by distrobuilder\n") { err = f.Truncate(0) if err != nil { return fmt.Errorf("Failed to truncate %q: %w", targetFile, err) } _, err = f.Seek(0, 0) if err != nil { return fmt.Errorf("Failed to seek on file %q: %w", 
targetFile, err) } _, err = f.WriteString("# Generated by distrobuilder\n") if err != nil { return fmt.Errorf("Failed to write to file %q: %w", targetFile, err) } } _, err = f.WriteString(repoAction.URL) if err != nil { return fmt.Errorf("Failed to write to file %q: %w", targetFile, err) } // Append final new line if missing if !strings.HasSuffix(repoAction.URL, "\n") { _, err = f.WriteString("\n") if err != nil { return fmt.Errorf("Failed to write to file %q: %w", targetFile, err) } } if repoAction.Key != "" { var reader io.Reader if strings.HasPrefix(repoAction.Key, "-----BEGIN PGP PUBLIC KEY BLOCK-----") { reader = strings.NewReader(repoAction.Key) } else { // If only key ID is provided, we need gpg to be installed early. err := shared.RunCommand(m.ctx, nil, nil, "gpg", "--recv-keys", repoAction.Key) if err != nil { return fmt.Errorf("Failed to receive GPG keys: %w", err) } var buf bytes.Buffer err = shared.RunCommand(m.ctx, nil, &buf, "gpg", "--export", "--armor", repoAction.Key) if err != nil { return fmt.Errorf("Failed to export GPG keys: %w", err) } reader = &buf } signatureFilePath := filepath.Join("/etc/apt/trusted.gpg.d", fmt.Sprintf("%s.asc", repoAction.Name)) f, err := os.Create(signatureFilePath) if err != nil { return fmt.Errorf("Failed to create file %q: %w", signatureFilePath, err) } defer f.Close() _, err = io.Copy(f, reader) if err != nil { return fmt.Errorf("Failed to copy file: %w", err) } } return nil } distrobuilder-3.0/managers/common.go000066400000000000000000000043461456216713500176640ustar00rootroot00000000000000package managers import ( "context" "github.com/sirupsen/logrus" "github.com/lxc/distrobuilder/shared" ) type common struct { commands managerCommands flags managerFlags hooks managerHooks logger *logrus.Logger definition shared.Definition ctx context.Context } func (c *common) init(ctx context.Context, logger *logrus.Logger, definition shared.Definition) { c.logger = logger c.definition = definition c.ctx = ctx } // Install installs packages to the rootfs. func (c *common) install(pkgs, flags []string) error { if len(c.flags.install) == 0 || pkgs == nil || len(pkgs) == 0 { return nil } args := append(c.flags.global, c.flags.install...) args = append(args, flags...) args = append(args, pkgs...) return shared.RunCommand(c.ctx, nil, nil, c.commands.install, args...) } // Remove removes packages from the rootfs. func (c *common) remove(pkgs, flags []string) error { if len(c.flags.remove) == 0 || pkgs == nil || len(pkgs) == 0 { return nil } args := append(c.flags.global, c.flags.remove...) args = append(args, flags...) args = append(args, pkgs...) return shared.RunCommand(c.ctx, nil, nil, c.commands.remove, args...) } // Clean cleans up cached files used by the package managers. func (c *common) clean() error { var err error if len(c.flags.clean) == 0 { return nil } args := append(c.flags.global, c.flags.clean...) err = shared.RunCommand(c.ctx, nil, nil, c.commands.clean, args...) if err != nil { return err } if c.hooks.clean != nil { err = c.hooks.clean() } return err } // Refresh refreshes the local package database. func (c *common) refresh() error { if len(c.flags.refresh) == 0 { return nil } if c.hooks.preRefresh != nil { err := c.hooks.preRefresh() if err != nil { return err } } args := append(c.flags.global, c.flags.refresh...) return shared.RunCommand(c.ctx, nil, nil, c.commands.refresh, args...) } // Update updates all packages. 
func (c *common) update() error { if len(c.flags.update) == 0 { return nil } args := append(c.flags.global, c.flags.update...) return shared.RunCommand(c.ctx, nil, nil, c.commands.update, args...) } func (c *common) manageRepository(repo shared.DefinitionPackagesRepository) error { return nil } distrobuilder-3.0/managers/custom.go000066400000000000000000000015041456216713500176770ustar00rootroot00000000000000package managers type custom struct { common } func (m *custom) load() error { m.commands = managerCommands{ clean: m.definition.Packages.CustomManager.Clean.Command, install: m.definition.Packages.CustomManager.Install.Command, refresh: m.definition.Packages.CustomManager.Refresh.Command, remove: m.definition.Packages.CustomManager.Remove.Command, update: m.definition.Packages.CustomManager.Update.Command, } m.flags = managerFlags{ clean: m.definition.Packages.CustomManager.Clean.Flags, install: m.definition.Packages.CustomManager.Install.Flags, refresh: m.definition.Packages.CustomManager.Refresh.Flags, remove: m.definition.Packages.CustomManager.Remove.Flags, update: m.definition.Packages.CustomManager.Update.Flags, global: m.definition.Packages.CustomManager.Flags, } return nil } distrobuilder-3.0/managers/dnf.go000066400000000000000000000013351456216713500171360ustar00rootroot00000000000000package managers import ( "github.com/lxc/distrobuilder/shared" ) type dnf struct { common } // NewDnf creates a new Manager instance. func (m *dnf) load() error { m.commands = managerCommands{ clean: "dnf", install: "dnf", refresh: "dnf", remove: "dnf", update: "dnf", } m.flags = managerFlags{ global: []string{ "-y", }, install: []string{ "install", "--nobest", }, remove: []string{ "remove", }, refresh: []string{ "makecache", }, update: []string{ "upgrade", "--nobest", }, clean: []string{ "clean", "all", }, } return nil } func (m *dnf) manageRepository(repoAction shared.DefinitionPackagesRepository) error { return yumManageRepository(repoAction) } distrobuilder-3.0/managers/egoportage.go000066400000000000000000000010061456216713500205160ustar00rootroot00000000000000package managers type egoportage struct { common } func (m *egoportage) load() error { m.commands = managerCommands{ clean: "emerge", install: "emerge", refresh: "ego", remove: "emerge", update: "emerge", } m.flags = managerFlags{ global: []string{}, clean: []string{}, install: []string{ "--autounmask-continue", "--quiet-build=y", }, remove: []string{ "--unmerge", }, refresh: []string{ "sync", }, update: []string{ "--update", "@world", }, } return nil } distrobuilder-3.0/managers/equo.go000066400000000000000000000030551456216713500173410ustar00rootroot00000000000000package managers import ( "errors" "github.com/lxc/distrobuilder/shared" ) type equo struct { common } func (m *equo) load() error { m.commands = managerCommands{ clean: "equo", install: "equo", refresh: "equo", remove: "equo", update: "equo", } m.flags = managerFlags{ global: []string{}, clean: []string{ "cleanup", }, install: []string{ "install", }, remove: []string{ "remove", }, refresh: []string{ "update", }, update: []string{ "upgrade", }, } return nil } func (m *equo) manageRepository(repoAction shared.DefinitionPackagesRepository) error { if repoAction.Type == "" || repoAction.Type == "equo" { return m.equoRepoCaller(repoAction) } else if repoAction.Type == "enman" { return m.enmanRepoCaller(repoAction) } return errors.New("Invalid repository Type") } func (m *equo) enmanRepoCaller(repo shared.DefinitionPackagesRepository) error { args := []string{ "add", } if 
repo.Name == "" && repo.URL == "" { return errors.New("Missing both repository url and repository name") } if repo.URL != "" { args = append(args, repo.URL) } else { args = append(args, repo.Name) } return shared.RunCommand(m.ctx, nil, nil, "enman", args...) } func (m *equo) equoRepoCaller(repo shared.DefinitionPackagesRepository) error { if repo.Name == "" { return errors.New("Invalid repository name") } if repo.URL == "" { return errors.New("Invalid repository url") } return shared.RunCommand(m.ctx, nil, nil, "equo", "repo", "add", "--repo", repo.URL, "--pkg", repo.URL, repo.Name) } distrobuilder-3.0/managers/luet.go000066400000000000000000000032531456216713500173410ustar00rootroot00000000000000package managers import ( "errors" "fmt" "os" "path/filepath" "strings" incus "github.com/lxc/incus/shared/util" "github.com/lxc/distrobuilder/shared" ) type luet struct { common } func (m *luet) load() error { m.commands = managerCommands{ clean: "luet", install: "luet", refresh: "luet", remove: "luet", update: "luet", } m.flags = managerFlags{ global: []string{}, clean: []string{ "cleanup", }, install: []string{ "install", }, refresh: []string{ "repo", "update", }, remove: []string{ "uninstall", }, update: []string{ "upgrade", }, } return nil } func (m *luet) manageRepository(repoAction shared.DefinitionPackagesRepository) error { var targetFile string if repoAction.Name == "" { return errors.New("Invalid repository name") } if repoAction.URL == "" { return errors.New("Invalid repository url") } if strings.HasSuffix(repoAction.Name, ".yml") { targetFile = filepath.Join("/etc/luet/repos.conf.d", repoAction.Name) } else { targetFile = filepath.Join("/etc/luet/repos.conf.d", repoAction.Name+".yml") } if !incus.PathExists(filepath.Dir(targetFile)) { err := os.MkdirAll(filepath.Dir(targetFile), 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", filepath.Dir(targetFile), err) } } f, err := os.OpenFile(targetFile, os.O_CREATE|os.O_WRONLY, 0644) if err != nil { return fmt.Errorf("Failed to open file %q: %w", targetFile, err) } defer f.Close() // NOTE: repo.URL is not an URL but the content of the file. _, err = f.WriteString(repoAction.URL) if err != nil { return fmt.Errorf("Failed to write string to %q: %w", targetFile, err) } return nil } distrobuilder-3.0/managers/manager.go000066400000000000000000000136261456216713500200070ustar00rootroot00000000000000package managers import ( "context" "errors" "fmt" "strings" "github.com/sirupsen/logrus" "github.com/lxc/distrobuilder/shared" ) // ErrUnknownManager represents the unknown manager error. var ErrUnknownManager = errors.New("Unknown manager") // managerFlags represents flags for all subcommands of a package manager. type managerFlags struct { global []string install []string remove []string clean []string update []string refresh []string } // managerHooks represents custom hooks. type managerHooks struct { clean func() error preRefresh func() error } // managerCommands represents all commands. type managerCommands struct { clean string install string refresh string remove string update string } // Manager represents a package manager. 
type Manager struct { mgr manager def shared.Definition ctx context.Context } type manager interface { init(ctx context.Context, logger *logrus.Logger, definition shared.Definition) load() error manageRepository(repo shared.DefinitionPackagesRepository) error install(pkgs, flags []string) error remove(pkgs, flags []string) error clean() error refresh() error update() error } var managers = map[string]func() manager{ "": func() manager { return &custom{} }, "apk": func() manager { return &apk{} }, "apt": func() manager { return &apt{} }, "dnf": func() manager { return &dnf{} }, "egoportage": func() manager { return &egoportage{} }, "equo": func() manager { return &equo{} }, "luet": func() manager { return &luet{} }, "opkg": func() manager { return &opkg{} }, "pacman": func() manager { return &pacman{} }, "portage": func() manager { return &portage{} }, "slackpkg": func() manager { return &slackpkg{} }, "xbps": func() manager { return &xbps{} }, "yum": func() manager { return &yum{} }, "zypper": func() manager { return &zypper{} }, } // Load loads and initializes a downloader. func Load(ctx context.Context, managerName string, logger *logrus.Logger, definition shared.Definition) (*Manager, error) { df, ok := managers[managerName] if !ok { return nil, ErrUnknownManager } d := df() d.init(ctx, logger, definition) err := d.load() if err != nil { return nil, fmt.Errorf("Failed to load manager %q: %w", managerName, err) } return &Manager{def: definition, mgr: d, ctx: ctx}, nil } // ManagePackages manages packages. func (m *Manager) ManagePackages(imageTarget shared.ImageTarget) error { var validSets []shared.DefinitionPackagesSet for _, set := range m.def.Packages.Sets { if !shared.ApplyFilter(&set, m.def.Image.Release, m.def.Image.ArchitectureMapped, m.def.Image.Variant, m.def.Targets.Type, imageTarget) { continue } validSets = append(validSets, set) } // If there's nothing to install or remove, and no updates need to be performed, // we can exit here. if len(validSets) == 0 && !m.def.Packages.Update { return nil } err := m.mgr.refresh() if err != nil { return fmt.Errorf("Failed to refresh: %w", err) } if m.def.Packages.Update { err = m.mgr.update() if err != nil { return fmt.Errorf("Failed to update: %w", err) } // Run post update hook for _, action := range m.def.GetRunnableActions("post-update", imageTarget) { if action.Pongo { action.Action, err = shared.RenderTemplate(action.Action, m.def) if err != nil { return fmt.Errorf("Failed to render action: %w", err) } } err = shared.RunScript(m.ctx, action.Action) if err != nil { return fmt.Errorf("Failed to run post-update: %w", err) } } } for _, set := range optimizePackageSets(validSets) { if set.Action == "install" { err = m.mgr.install(set.Packages, set.Flags) } else if set.Action == "remove" { err = m.mgr.remove(set.Packages, set.Flags) } if err != nil { return fmt.Errorf("Failed to %s packages: %w", set.Action, err) } } if m.def.Packages.Cleanup { err = m.mgr.clean() if err != nil { return fmt.Errorf("Failed to clean up packages: %w", err) } } return nil } // ManageRepositories manages repositories. 
func (m *Manager) ManageRepositories(imageTarget shared.ImageTarget) error { var err error if m.def.Packages.Repositories == nil || len(m.def.Packages.Repositories) == 0 { return nil } for _, repo := range m.def.Packages.Repositories { if !shared.ApplyFilter(&repo, m.def.Image.Release, m.def.Image.ArchitectureMapped, m.def.Image.Variant, m.def.Targets.Type, imageTarget) { continue } // Run template on repo.URL repo.URL, err = shared.RenderTemplate(repo.URL, m.def) if err != nil { return fmt.Errorf("Failed to render template: %w", err) } // Run template on repo.Key repo.Key, err = shared.RenderTemplate(repo.Key, m.def) if err != nil { return fmt.Errorf("Failed to render template: %w", err) } err = m.mgr.manageRepository(repo) if err != nil { return fmt.Errorf("Error for repository %s: %w", repo.Name, err) } } return nil } // optimizePackageSets groups consecutive package sets with the same action to // reduce the amount of calls to manager.{Install,Remove}(). It still honors the // order of execution. func optimizePackageSets(sets []shared.DefinitionPackagesSet) []shared.DefinitionPackagesSet { if len(sets) < 2 { return sets } var newSets []shared.DefinitionPackagesSet action := sets[0].Action packages := sets[0].Packages flags := sets[0].Flags for i := 1; i < len(sets); i++ { if sets[i].Action == sets[i-1].Action && strings.Join(sets[i].Flags, " ") == strings.Join(sets[i-1].Flags, " ") { packages = append(packages, sets[i].Packages...) } else { newSets = append(newSets, shared.DefinitionPackagesSet{ Action: action, Packages: packages, Flags: flags, }) action = sets[i].Action packages = sets[i].Packages flags = sets[i].Flags } } newSets = append(newSets, shared.DefinitionPackagesSet{ Action: action, Packages: packages, Flags: flags, }) return newSets } distrobuilder-3.0/managers/manager_test.go000066400000000000000000000030151456216713500210350ustar00rootroot00000000000000package managers import ( "testing" "github.com/stretchr/testify/require" "github.com/lxc/distrobuilder/shared" ) func TestManagePackages(t *testing.T) { sets := []shared.DefinitionPackagesSet{ { Packages: []string{"foo"}, Action: "install", }, { Packages: []string{"bar"}, Action: "install", }, { Packages: []string{"baz"}, Action: "remove", }, { Packages: []string{"lorem"}, Action: "remove", }, { Packages: []string{"ipsum"}, Action: "install", }, { Packages: []string{"dolor"}, Action: "remove", }, } optimizedSets := optimizePackageSets(sets) require.Len(t, optimizedSets, 4) require.Equal(t, optimizedSets[0], shared.DefinitionPackagesSet{Action: "install", Packages: []string{"foo", "bar"}}) require.Equal(t, optimizedSets[1], shared.DefinitionPackagesSet{Action: "remove", Packages: []string{"baz", "lorem"}}) require.Equal(t, optimizedSets[2], shared.DefinitionPackagesSet{Action: "install", Packages: []string{"ipsum"}}) require.Equal(t, optimizedSets[3], shared.DefinitionPackagesSet{Action: "remove", Packages: []string{"dolor"}}) sets = []shared.DefinitionPackagesSet{ { Packages: []string{"foo"}, Action: "install", }, } optimizedSets = optimizePackageSets(sets) require.Len(t, optimizedSets, 1) require.Equal(t, optimizedSets[0], shared.DefinitionPackagesSet{Action: "install", Packages: []string{"foo"}}) sets = []shared.DefinitionPackagesSet{} optimizedSets = optimizePackageSets(sets) require.Len(t, optimizedSets, 0) } distrobuilder-3.0/managers/opkg.go000066400000000000000000000011411456216713500173220ustar00rootroot00000000000000package managers import ( "os" ) type opkg struct { common } func (m *opkg) load() error { 
m.commands = managerCommands{ clean: "rm", install: "opkg", refresh: "opkg", remove: "opkg", update: "echo", } m.flags = managerFlags{ clean: []string{ "-rf", "/tmp/opkg-lists/", }, global: []string{}, install: []string{ "install", }, remove: []string{ "remove", }, refresh: []string{ "update", }, update: []string{ "Not supported", }, } m.hooks = managerHooks{ preRefresh: func() error { return os.MkdirAll("/var/lock", 0755) }, } return nil } distrobuilder-3.0/managers/pacman.go000066400000000000000000000052471456216713500176340ustar00rootroot00000000000000package managers import ( "fmt" "os" "path/filepath" "runtime" incus "github.com/lxc/incus/shared/util" "github.com/lxc/distrobuilder/shared" ) type pacman struct { common } func (m *pacman) load() error { err := m.setMirrorlist() if err != nil { return fmt.Errorf("Failed to set mirrorlist: %w", err) } err = m.setupTrustedKeys() if err != nil { return fmt.Errorf("Failed to setup trusted keys: %w", err) } m.commands = managerCommands{ clean: "pacman", install: "pacman", refresh: "pacman", remove: "pacman", update: "pacman", } m.flags = managerFlags{ clean: []string{ "-Sc", }, global: []string{ "--noconfirm", }, install: []string{ "-S", "--needed", }, remove: []string{ "-Rcs", }, refresh: []string{ "-Syy", }, update: []string{ "-Su", }, } m.hooks = managerHooks{ clean: func() error { path := "/var/cache/pacman/pkg" // List all entries. entries, err := os.ReadDir(path) if err != nil { if os.IsNotExist(err) { return nil } return fmt.Errorf("Failed to list directory '%s': %w", path, err) } // Individually wipe all entries. for _, entry := range entries { entryPath := filepath.Join(path, entry.Name()) err := os.RemoveAll(entryPath) if err != nil && !os.IsNotExist(err) { return fmt.Errorf("Failed to remove '%s': %w", entryPath, err) } } return nil }, } return nil } func (m *pacman) setupTrustedKeys() error { var err error _, err = os.Stat("/etc/pacman.d/gnupg") if err == nil { return nil } err = shared.RunCommand(m.ctx, nil, nil, "pacman-key", "--init") if err != nil { return fmt.Errorf("Error initializing with pacman-key: %w", err) } var keyring string if incus.ValueInSlice(runtime.GOARCH, []string{"arm", "arm64"}) { keyring = "archlinuxarm" } else { keyring = "archlinux" } err = shared.RunCommand(m.ctx, nil, nil, "pacman-key", "--populate", keyring) if err != nil { return fmt.Errorf("Error populating with pacman-key: %w", err) } return nil } func (m *pacman) setMirrorlist() error { f, err := os.Create(filepath.Join("etc", "pacman.d", "mirrorlist")) if err != nil { return fmt.Errorf("Failed to create file %q: %w", filepath.Join("etc", "pacman.d", "mirrorlist"), err) } defer f.Close() var mirror string if incus.ValueInSlice(runtime.GOARCH, []string{"arm", "arm64"}) { mirror = "Server = http://mirror.archlinuxarm.org/$arch/$repo" } else { mirror = "Server = http://mirrors.kernel.org/archlinux/$repo/os/$arch" } _, err = f.WriteString(mirror) if err != nil { return fmt.Errorf("Failed to write to %q: %w", filepath.Join("etc", "pacman.d", "mirrorlist"), err) } return nil } distrobuilder-3.0/managers/portage.go000066400000000000000000000007351456216713500200330ustar00rootroot00000000000000package managers type portage struct { common } func (m *portage) load() error { m.commands = managerCommands{ clean: "emerge", install: "emerge", refresh: "true", remove: "emerge", update: "emerge", } m.flags = managerFlags{ global: []string{}, clean: []string{}, install: []string{ "--autounmask-continue", }, remove: []string{ "--unmerge", }, refresh: []string{}, 
update: []string{ "--update", "@world", }, } return nil } distrobuilder-3.0/managers/slackpkg.go000066400000000000000000000010301456216713500201560ustar00rootroot00000000000000package managers type slackpkg struct { common } func (m *slackpkg) load() error { m.commands = managerCommands{ install: "slackpkg", remove: "slackpkg", refresh: "slackpkg", update: "true", clean: "true", } m.flags = managerFlags{ global: []string{ "-batch=on", "-default_answer=y", }, install: []string{ "install", }, remove: []string{ "remove", }, refresh: []string{ "update", }, update: []string{ "upgrade-all", }, clean: []string{ "clean-system", }, } return nil } distrobuilder-3.0/managers/xbps.go000066400000000000000000000011431456216713500173400ustar00rootroot00000000000000package managers type xbps struct { common } func (m *xbps) load() error { m.commands = managerCommands{ clean: "xbps-remove", install: "xbps-install", refresh: "xbps-install", remove: "xbps-remove", update: "sh", } m.flags = managerFlags{ global: []string{}, clean: []string{ "--yes", "--clean-cache", }, install: []string{ "--yes", }, refresh: []string{ "--sync", }, remove: []string{ "--yes", "--recursive", "--remove-orphans", }, update: []string{ "-c", "xbps-install --yes --update && xbps-install --yes --update", }, } return nil } distrobuilder-3.0/managers/yum.go000066400000000000000000000045051456216713500172030ustar00rootroot00000000000000package managers import ( "bufio" "bytes" "fmt" "os" "path/filepath" "strings" incus "github.com/lxc/incus/shared/util" "github.com/lxc/distrobuilder/shared" ) type yum struct { common } func (m *yum) load() error { m.commands = managerCommands{ clean: "yum", install: "yum", refresh: "yum", remove: "yum", update: "yum", } m.flags = managerFlags{ clean: []string{ "clean", "all", }, global: []string{ "-y", }, install: []string{ "install", }, remove: []string{ "remove", }, refresh: []string{ "makecache", }, update: []string{ "update", }, } var buf bytes.Buffer err := shared.RunCommand(m.ctx, nil, &buf, "yum", "--help") if err != nil { return fmt.Errorf("Failed running yum: %w", err) } scanner := bufio.NewScanner(&buf) for scanner.Scan() { if strings.Contains(scanner.Text(), "--allowerasing") { m.flags.global = append(m.flags.global, "--allowerasing") continue } if strings.Contains(scanner.Text(), "--nobest") { m.flags.update = append(m.flags.update, "--nobest") } } return nil } func (m *yum) manageRepository(repoAction shared.DefinitionPackagesRepository) error { // Run rpmdb --rebuilddb err := shared.RunCommand(m.ctx, nil, nil, "rpmdb", "--rebuilddb") if err != nil { return fmt.Errorf("failed to run rpmdb --rebuilddb: %w", err) } return yumManageRepository(repoAction) } func yumManageRepository(repoAction shared.DefinitionPackagesRepository) error { targetFile := filepath.Join("/etc/yum.repos.d", repoAction.Name) if !strings.HasSuffix(targetFile, ".repo") { targetFile = fmt.Sprintf("%s.repo", targetFile) } if !incus.PathExists(filepath.Dir(targetFile)) { err := os.MkdirAll(filepath.Dir(targetFile), 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", filepath.Dir(targetFile), err) } } f, err := os.Create(targetFile) if err != nil { return fmt.Errorf("Failed to create file %q: %w", targetFile, err) } defer f.Close() _, err = f.WriteString(repoAction.URL) if err != nil { return fmt.Errorf("Failed to write to file %q: %w", targetFile, err) } // Append final new line if missing if !strings.HasSuffix(repoAction.URL, "\n") { _, err = f.WriteString("\n") if err != nil { return 
fmt.Errorf("Failed to write to file %q: %w", targetFile, err) } } return nil } distrobuilder-3.0/managers/zypper.go000066400000000000000000000021461456216713500177210ustar00rootroot00000000000000package managers import ( "errors" "github.com/lxc/distrobuilder/shared" ) type zypper struct { common } func (m *zypper) load() error { m.commands = managerCommands{ clean: "zypper", install: "zypper", refresh: "zypper", remove: "zypper", update: "zypper", } m.flags = managerFlags{ global: []string{ "--non-interactive", "--gpg-auto-import-keys", }, clean: []string{ "clean", "-a", }, install: []string{ "install", "--allow-downgrade", "--replacefiles", }, remove: []string{ "remove", }, refresh: []string{ "refresh", }, update: []string{ "update", }, } return nil } func (m *zypper) manageRepository(repoAction shared.DefinitionPackagesRepository) error { if repoAction.Type != "" && repoAction.Type != "zypper" { return errors.New("Invalid repository Type") } if repoAction.Name == "" { return errors.New("Invalid repository name") } if repoAction.URL == "" { return errors.New("Invalid repository url") } return shared.RunCommand(m.ctx, nil, nil, "zypper", "ar", "--refresh", "--check", repoAction.URL, repoAction.Name) } distrobuilder-3.0/mkdocs.yml000066400000000000000000000001441456216713500162430ustar00rootroot00000000000000site_name: "distrobuilder - system container and VM image builder" theme: readthedocs docs_dir: doc distrobuilder-3.0/shared/000077500000000000000000000000001456216713500155075ustar00rootroot00000000000000distrobuilder-3.0/shared/archive_linux.go000066400000000000000000000044331456216713500207020ustar00rootroot00000000000000package shared import ( "context" "errors" "fmt" "io" "os" "strings" "github.com/lxc/incus/shared/archive" "github.com/lxc/incus/shared/subprocess" "golang.org/x/sys/unix" ) // Unpack unpacks a tarball. func Unpack(file string, path string) error { extractArgs, extension, _, err := archive.DetectCompression(file) if err != nil { return err } command := "" args := []string{} var reader io.Reader if strings.HasPrefix(extension, ".tar") { command = "tar" args = append(args, "--restrict", "--force-local") args = append(args, "-C", path, "--numeric-owner", "--xattrs-include=*") args = append(args, extractArgs...) args = append(args, "-") f, err := os.Open(file) if err != nil { return err } defer f.Close() reader = f } else if strings.HasPrefix(extension, ".squashfs") { // unsquashfs does not support reading from stdin, // so ProgressTracker is not possible. command = "unsquashfs" args = append(args, "-f", "-d", path, "-n", file) } else { return fmt.Errorf("Unsupported image format: %s", extension) } err = subprocess.RunCommandWithFds(context.TODO(), reader, nil, command, args...) if err != nil { // We can't create char/block devices in unpriv containers so ignore related errors. if command == "unsquashfs" { var runError *subprocess.RunError ok := errors.As(err, &runError) if !ok || runError.StdErr().String() == "" { return err } // Confirm that all errors are related to character or block devices. found := false for _, line := range strings.Split(runError.StdErr().String(), "\n") { line = strings.TrimSpace(line) if line == "" { continue } if !strings.Contains(line, "failed to create block device") { continue } if !strings.Contains(line, "failed to create character device") { continue } // We found an actual error. found = true } if !found { // All good, assume everything unpacked. 
return nil } } // Check if we ran out of space fs := unix.Statfs_t{} err1 := unix.Statfs(path, &fs) if err1 != nil { return err1 } // Check if we're running out of space if int64(fs.Bfree) < 10 { return fmt.Errorf("Unable to unpack image, run out of disk space") } return fmt.Errorf("Unpack failed: %w", err) } return nil } distrobuilder-3.0/shared/chroot.go000066400000000000000000000240041456216713500173340ustar00rootroot00000000000000package shared import ( "fmt" "os" "path/filepath" "regexp" "strconv" incus "github.com/lxc/incus/shared/util" "golang.org/x/sys/unix" ) // ChrootMount defines mount args. type ChrootMount struct { Source string Target string FSType string Flags uintptr Data string IsDir bool } // ActiveChroots is a map of all active chroots and their exit functions. var ActiveChroots = make(map[string]func() error) func setupMounts(rootfs string, mounts []ChrootMount) error { // Create a temporary mount path err := os.MkdirAll(filepath.Join(rootfs, ".distrobuilder"), 0700) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", filepath.Join(rootfs, ".distrobuilder"), err) } for i, mount := range mounts { // Target path tmpTarget := filepath.Join(rootfs, ".distrobuilder", fmt.Sprintf("%d", i)) // Create the target mountpoint if mount.IsDir { err := os.MkdirAll(tmpTarget, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", tmpTarget, err) } } else { f, err := os.Create(tmpTarget) if err != nil { return fmt.Errorf("Failed to create file %q: %w", tmpTarget, err) } f.Close() } // Mount to the temporary path err := unix.Mount(mount.Source, tmpTarget, mount.FSType, mount.Flags, mount.Data) if err != nil { return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err) } } return nil } func moveMounts(mounts []ChrootMount) error { for i, mount := range mounts { // Source path tmpSource := filepath.Join("/", ".distrobuilder", fmt.Sprintf("%d", i)) // Resolve symlinks target := mount.Target for { // Get information on current target fi, err := os.Lstat(target) if err != nil { break } // If not a symlink, we're done if fi.Mode()&os.ModeSymlink == 0 { break } // If a symlink, resolve it newTarget, err := os.Readlink(target) if err != nil { break } target = newTarget } // If the target's parent directory is a symlink, we need to resolve that as well. 
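// Unlike the target itself above, the parent directory is only resolved one level deep here.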
targetDir := filepath.Dir(target) if incus.PathExists(targetDir) { // Get information on current target fi, err := os.Lstat(targetDir) if err != nil { return fmt.Errorf("Failed to stat directory %q: %w", targetDir, err) } // If a symlink, resolve it if fi.Mode()&os.ModeSymlink != 0 { newTarget, err := os.Readlink(targetDir) if err != nil { return fmt.Errorf("Failed to get destination of %q: %w", targetDir, err) } targetDir = newTarget } } // Create parent paths if missing err := os.MkdirAll(targetDir, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", targetDir, err) } // Create target path if mount.IsDir { err = os.MkdirAll(target, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", target, err) } } else { err := os.WriteFile(target, nil, 0644) if err != nil { return fmt.Errorf("Failed to create file %q: %w", target, err) } } // Move the mount to its destination err = unix.Mount(tmpSource, target, "", unix.MS_MOVE, "") if err != nil { return fmt.Errorf("Failed to mount '%s': %w", mount.Source, err) } } // Cleanup our temporary path err := os.RemoveAll(filepath.Join("/", ".distrobuilder")) if err != nil { return fmt.Errorf("Failed to remove directory %q: %w", filepath.Join("/", ".distrobuilder"), err) } return nil } func killChrootProcesses(rootfs string) error { // List all files under /proc proc, err := os.Open(filepath.Join(rootfs, "proc")) if err != nil { return fmt.Errorf("Failed to open file %q: %w", filepath.Join(rootfs, "proc"), err) } dirs, err := proc.Readdirnames(0) if err != nil { return fmt.Errorf("Failed to read directory content of %q: %w", filepath.Join(rootfs, "proc"), err) } // Get all processes and kill them re := regexp.MustCompile(`\d+`) for _, dir := range dirs { if re.MatchString(dir) { link, _ := os.Readlink(filepath.Join(rootfs, "proc", dir, "root")) if link == rootfs { pid, _ := strconv.Atoi(dir) err = unix.Kill(pid, unix.SIGKILL) if err != nil { return fmt.Errorf("Failed killing process: %w", err) } } } } return nil } // SetupChroot sets up mount and files, a reverter and then chroots for you. 
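// Usage sketch (not from upstream docs): exitChroot, err := SetupChroot(rootfs, def, nil); run commands inside the chroot; then call exitChroot() to reset the environment, return to the host root, kill leftover chroot processes and unmount the rootfs.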
func SetupChroot(rootfs string, definition Definition, m []ChrootMount) (func() error, error) { // Mount the rootfs err := unix.Mount(rootfs, rootfs, "", unix.MS_BIND, "") if err != nil { return nil, fmt.Errorf("Failed to mount '%s': %w", rootfs, err) } // Setup all other needed mounts mounts := []ChrootMount{ {"none", "/proc", "proc", 0, "", true}, {"none", "/sys", "sysfs", 0, "", true}, {"none", "/run", "tmpfs", 0, "", true}, {"none", "/tmp", "tmpfs", 0, "", true}, {"none", "/dev", "tmpfs", 0, "", true}, {"none", "/dev/shm", "tmpfs", 0, "", true}, {"/etc/resolv.conf", "/etc/resolv.conf", "", unix.MS_BIND, "", false}, } // Keep a reference to the host rootfs and cwd root, err := os.Open("/") if err != nil { return nil, err } cwd, err := os.Getwd() if err != nil { return nil, err } // Setup all needed mounts in a temporary location if len(m) > 0 { err = setupMounts(rootfs, append(mounts, m...)) } else { err = setupMounts(rootfs, mounts) } if err != nil { return nil, fmt.Errorf("Failed to mount filesystems: %w", err) } // Chroot into the container's rootfs err = unix.Chroot(rootfs) if err != nil { root.Close() return nil, err } err = unix.Chdir("/") if err != nil { return nil, err } // Move all the mounts into place err = moveMounts(append(mounts, m...)) if err != nil { return nil, err } // Populate /dev directory instead of bind mounting it from the host err = populateDev() if err != nil { return nil, fmt.Errorf("Failed to populate /dev: %w", err) } // Change permission for /dev/shm err = unix.Chmod("/dev/shm", 01777) if err != nil { return nil, fmt.Errorf("Failed to chmod /dev/shm: %w", err) } var env Environment envs := definition.Environment if envs.ClearDefaults { env = Environment{} } else { env = Environment{ "PATH": EnvVariable{ Value: "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin", Set: true, }, "SHELL": EnvVariable{ Value: "/bin/sh", Set: true, }, "TERM": EnvVariable{ Value: "xterm", Set: true, }, "DEBIAN_FRONTEND": EnvVariable{ Value: "noninteractive", Set: true, }, } } if envs.EnvVariables != nil && len(envs.EnvVariables) > 0 { imageTargets := ImageTargetUndefined | ImageTargetAll if definition.Targets.Type == DefinitionFilterTypeContainer { imageTargets |= ImageTargetContainer } else if definition.Targets.Type == DefinitionFilterTypeVM { imageTargets |= ImageTargetVM } for _, e := range envs.EnvVariables { if !ApplyFilter(&e, definition.Image.Release, definition.Image.ArchitectureMapped, definition.Image.Variant, definition.Targets.Type, imageTargets) { continue } entry, ok := env[e.Key] if ok { entry.Value = e.Value entry.Set = true } else { env[e.Key] = EnvVariable{ Value: e.Value, Set: true, } } } } // Set environment variables oldEnv := SetEnvVariables(env) // Setup policy-rc.d override policyCleanup := false if incus.PathExists("/usr/sbin/") && !incus.PathExists("/usr/sbin/policy-rc.d") { err = os.WriteFile("/usr/sbin/policy-rc.d", []byte(`#!/bin/sh exit 101 `), 0755) if err != nil { return nil, err } policyCleanup = true } exitFunc := func() error { defer root.Close() // Cleanup policy-rc.d if policyCleanup { err = os.Remove("/usr/sbin/policy-rc.d") if err != nil { return fmt.Errorf("Failed to remove %q: %w", "/usr/sbin/policy-rc.d", err) } } // Reset old environment variables SetEnvVariables(oldEnv) // Switch back to the host rootfs err = root.Chdir() if err != nil { return fmt.Errorf("Failed to chdir: %w", err) } err = unix.Chroot(".") if err != nil { return fmt.Errorf("Failed to chroot: %w", err) } err = unix.Chdir(cwd) if err != nil { return 
fmt.Errorf("Failed to chdir: %w", err) } // This will kill all processes in the chroot and allow to cleanly // unmount everything. err = killChrootProcesses(rootfs) if err != nil { return fmt.Errorf("Failed killing chroot processes: %w", err) } // And now unmount the entire tree err = unix.Unmount(rootfs, unix.MNT_DETACH) if err != nil { return fmt.Errorf("Failed unmounting rootfs: %w", err) } devPath := filepath.Join(rootfs, "dev") // Wipe $rootfs/dev err := os.RemoveAll(devPath) if err != nil { return fmt.Errorf("Failed to remove directory %q: %w", devPath, err) } ActiveChroots[rootfs] = nil return os.MkdirAll(devPath, 0755) } ActiveChroots[rootfs] = exitFunc return exitFunc, nil } func populateDev() error { devs := []struct { Path string Major uint32 Minor uint32 Mode uint32 }{ {"/dev/console", 5, 1, unix.S_IFCHR | 0640}, {"/dev/full", 1, 7, unix.S_IFCHR | 0666}, {"/dev/null", 1, 3, unix.S_IFCHR | 0666}, {"/dev/random", 1, 8, unix.S_IFCHR | 0666}, {"/dev/tty", 5, 0, unix.S_IFCHR | 0666}, {"/dev/urandom", 1, 9, unix.S_IFCHR | 0666}, {"/dev/zero", 1, 5, unix.S_IFCHR | 0666}, } for _, d := range devs { if incus.PathExists(d.Path) { continue } dev := unix.Mkdev(d.Major, d.Minor) err := unix.Mknod(d.Path, d.Mode, int(dev)) if err != nil { return fmt.Errorf("Failed to create %q: %w", d.Path, err) } // For some odd reason, unix.Mknod will not set the mode correctly. // This fixes that. err = unix.Chmod(d.Path, d.Mode) if err != nil { return fmt.Errorf("Failed to chmod %q: %w", d.Path, err) } } symlinks := []struct { Symlink string Target string }{ {"/dev/fd", "/proc/self/fd"}, {"/dev/stdin", "/proc/self/fd/0"}, {"/dev/stdout", "/proc/self/fd/1"}, {"/dev/stderr", "/proc/self/fd/2"}, } for _, l := range symlinks { err := os.Symlink(l.Target, l.Symlink) if err != nil { return fmt.Errorf("Failed to create link %q -> %q: %w", l.Symlink, l.Target, err) } } return nil } distrobuilder-3.0/shared/definition.go000066400000000000000000000502171456216713500201730ustar00rootroot00000000000000package shared import ( "errors" "fmt" "reflect" "strconv" "strings" "time" "github.com/lxc/incus/shared/osarch" incusArch "github.com/lxc/incus/shared/osarch" "github.com/lxc/incus/shared/util" ) // ImageTarget represents the image target. type ImageTarget int const ( // ImageTargetAll is used for all targets. ImageTargetAll ImageTarget = 1 // ImageTargetContainer is used for container targets. ImageTargetContainer ImageTarget = 1 << 1 // ImageTargetVM is used for VM targets. ImageTargetVM ImageTarget = 1 << 2 // ImageTargetUndefined is used when no type has been specified. ImageTargetUndefined ImageTarget = 1 << 3 ) // DefinitionFilterType represents the filter type. type DefinitionFilterType string const ( // DefinitionFilterTypeVM is used for VMs. DefinitionFilterTypeVM DefinitionFilterType = "vm" // DefinitionFilterTypeContainer is used for containers. DefinitionFilterTypeContainer DefinitionFilterType = "container" ) // UnmarshalYAML validates the filter type. func (d *DefinitionFilterType) UnmarshalYAML(unmarshal func(interface{}) error) error { var filterType string err := unmarshal(&filterType) if err != nil { return err } if DefinitionFilterType(filterType) != DefinitionFilterTypeContainer && DefinitionFilterType(filterType) != DefinitionFilterTypeVM { return fmt.Errorf("Invalid filter type %q", filterType) } *d = DefinitionFilterType(filterType) return nil } // Filter represents a filter. 
type Filter interface { GetReleases() []string GetArchitectures() []string GetVariants() []string GetTypes() []DefinitionFilterType } // A DefinitionFilter defines filters for various actions. type DefinitionFilter struct { Releases []string `yaml:"releases,omitempty"` Architectures []string `yaml:"architectures,omitempty"` Variants []string `yaml:"variants,omitempty"` Types []DefinitionFilterType `yaml:"types,omitempty"` } // GetReleases returns a list of releases. func (d *DefinitionFilter) GetReleases() []string { return d.Releases } // GetArchitectures returns a list of architectures. func (d *DefinitionFilter) GetArchitectures() []string { return d.Architectures } // GetVariants returns a list of variants. func (d *DefinitionFilter) GetVariants() []string { return d.Variants } // GetTypes returns a list of types. func (d *DefinitionFilter) GetTypes() []DefinitionFilterType { return d.Types } // A DefinitionPackagesSet is a set of packages which are to be installed // or removed. type DefinitionPackagesSet struct { DefinitionFilter `yaml:",inline"` Packages []string `yaml:"packages"` Action string `yaml:"action"` Early bool `yaml:"early,omitempty"` Flags []string `yaml:"flags,omitempty"` } // A DefinitionPackagesRepository contains data of a specific repository. type DefinitionPackagesRepository struct { DefinitionFilter `yaml:",inline"` Name string `yaml:"name"` // Name of the repository URL string `yaml:"url"` // URL (may differ based on manager) Type string `yaml:"type,omitempty"` // For distros that have more than one repository manager Key string `yaml:"key,omitempty"` // GPG armored keyring } // CustomManagerCmd represents a command for a custom manager. type CustomManagerCmd struct { Command string `yaml:"cmd"` Flags []string `yaml:"flags,omitempty"` } // DefinitionPackagesCustomManager represents a custom package manager. type DefinitionPackagesCustomManager struct { Clean CustomManagerCmd `yaml:"clean"` Install CustomManagerCmd `yaml:"install"` Remove CustomManagerCmd `yaml:"remove"` Refresh CustomManagerCmd `yaml:"refresh"` Update CustomManagerCmd `yaml:"update"` Flags []string `yaml:"flags,omitempty"` } // A DefinitionPackages represents a package handler. type DefinitionPackages struct { Manager string `yaml:"manager,omitempty"` CustomManager *DefinitionPackagesCustomManager `yaml:"custom_manager,omitempty"` Update bool `yaml:"update,omitempty"` Cleanup bool `yaml:"cleanup,omitempty"` Sets []DefinitionPackagesSet `yaml:"sets,omitempty"` Repositories []DefinitionPackagesRepository `yaml:"repositories,omitempty"` } // A DefinitionImage represents the image. type DefinitionImage struct { Description string `yaml:"description"` Distribution string `yaml:"distribution"` Release string `yaml:"release,omitempty"` Architecture string `yaml:"architecture,omitempty"` Expiry string `yaml:"expiry,omitempty"` Variant string `yaml:"variant,omitempty"` Name string `yaml:"name,omitempty"` Serial string `yaml:"serial,omitempty"` // Internal fields (YAML input ignored) ArchitectureMapped string `yaml:"architecture_mapped,omitempty"` ArchitectureKernel string `yaml:"architecture_kernel,omitempty"` ArchitecturePersonality string `yaml:"architecture_personality,omitempty"` } // A DefinitionSource specifies the download type and location. 
type DefinitionSource struct { Downloader string `yaml:"downloader"` URL string `yaml:"url,omitempty"` Keys []string `yaml:"keys,omitempty"` Keyserver string `yaml:"keyserver,omitempty"` Variant string `yaml:"variant,omitempty"` Suite string `yaml:"suite,omitempty"` SameAs string `yaml:"same_as,omitempty"` SkipVerification bool `yaml:"skip_verification,omitempty"` Components []string `yaml:"components,omitempty"` } // A DefinitionTargetLXCConfig represents the config part of the metadata. type DefinitionTargetLXCConfig struct { DefinitionFilter `yaml:",inline"` Type string `yaml:"type"` Before uint `yaml:"before,omitempty"` After uint `yaml:"after,omitempty"` Content string `yaml:"content"` } // A DefinitionTargetLXC represents LXC specific files as part of the metadata. type DefinitionTargetLXC struct { CreateMessage string `yaml:"create_message,omitempty"` Config []DefinitionTargetLXCConfig `yaml:"config,omitempty"` } // DefinitionTargetIncusVM represents Incus VM specific options. type DefinitionTargetIncusVM struct { Size uint64 `yaml:"size,omitempty"` Filesystem string `yaml:"filesystem,omitempty"` } // DefinitionTargetIncus represents Incus specific options. type DefinitionTargetIncus struct { VM DefinitionTargetIncusVM `yaml:"vm,omitempty"` } // A DefinitionTarget specifies target dependent files. type DefinitionTarget struct { LXC DefinitionTargetLXC `yaml:"lxc,omitempty"` Incus DefinitionTargetIncus `yaml:"incus,omitempty"` Type DefinitionFilterType // This field is internal only and used only for simplicity. } // A DefinitionFile represents a file which is to be created inside to chroot. type DefinitionFile struct { DefinitionFilter `yaml:",inline"` Generator string `yaml:"generator"` Path string `yaml:"path,omitempty"` Content string `yaml:"content,omitempty"` Name string `yaml:"name,omitempty"` Template DefinitionFileTemplate `yaml:"template,omitempty"` Templated bool `yaml:"templated,omitempty"` Mode string `yaml:"mode,omitempty"` GID string `yaml:"gid,omitempty"` UID string `yaml:"uid,omitempty"` Pongo bool `yaml:"pongo,omitempty"` Source string `yaml:"source,omitempty"` } // A DefinitionFileTemplate represents the settings used by generators. type DefinitionFileTemplate struct { Properties map[string]string `yaml:"properties,omitempty"` When []string `yaml:"when,omitempty"` } // A DefinitionAction specifies a custom action (script) which is to be run after // a certain action. type DefinitionAction struct { DefinitionFilter `yaml:",inline"` Trigger string `yaml:"trigger"` Action string `yaml:"action"` Pongo bool `yaml:"pongo,omitempty"` } // DefinitionMappings defines custom mappings. type DefinitionMappings struct { Architectures map[string]string `yaml:"architectures,omitempty"` ArchitectureMap string `yaml:"architecture_map,omitempty"` } // DefinitionEnvVars defines custom environment variables. type DefinitionEnvVars struct { DefinitionFilter `yaml:",inline"` Key string `yaml:"key"` Value string `yaml:"value"` } // DefinitionEnv represents the config part of the environment section. type DefinitionEnv struct { ClearDefaults bool `yaml:"clear_defaults,omitempty"` EnvVariables []DefinitionEnvVars `yaml:"variables,omitempty"` } // A Definition a definition. 
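// A minimal definition that passes Validate (sketch, mirroring the valid test case in definition_test.go): image: {distribution: ubuntu, release: artful}, source: {downloader: debootstrap}, packages: {manager: apt}.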
type Definition struct { Image DefinitionImage `yaml:"image"` Source DefinitionSource `yaml:"source"` Targets DefinitionTarget `yaml:"targets,omitempty"` Files []DefinitionFile `yaml:"files,omitempty"` Packages DefinitionPackages `yaml:"packages,omitempty"` Actions []DefinitionAction `yaml:"actions,omitempty"` Mappings DefinitionMappings `yaml:"mappings,omitempty"` Environment DefinitionEnv `yaml:"environment,omitempty"` } // SetValue writes the provided value to a field represented by the yaml tag 'key'. func (d *Definition) SetValue(key string, value string) error { // Walk through the definition and find the field with the given key field, err := getFieldByTag(reflect.ValueOf(d).Elem(), reflect.TypeOf(d).Elem(), key) if err != nil { return fmt.Errorf("Failed to get field by tag: %w", err) } // Fail if the field cannot be set if !field.CanSet() { return fmt.Errorf("Cannot set value for %s", key) } switch field.Kind() { case reflect.Bool: v, err := strconv.ParseBool(value) if err != nil { return fmt.Errorf("Failed to parse bool %q: %w", value, err) } field.SetBool(v) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: v, err := strconv.ParseInt(value, 10, 64) if err != nil { return fmt.Errorf("Failed to parse int %q: %w", value, err) } field.SetInt(v) case reflect.String: field.SetString(value) case reflect.Uint, reflect.Uintptr, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: v, err := strconv.ParseUint(value, 10, 64) if err != nil { return fmt.Errorf("Failed to parse uint %q: %w", value, err) } field.SetUint(v) default: return fmt.Errorf("Unsupported type '%s'", field.Kind()) } return nil } // SetDefaults sets some default values. func (d *Definition) SetDefaults() { // default to local arch if d.Image.Architecture == "" { localArch, _ := osarch.ArchitectureGetLocal() d.Image.Architecture = localArch } // set default expiry of 30 days if d.Image.Expiry == "" { d.Image.Expiry = "30d" } // Set default serial number if d.Image.Serial == "" { d.Image.Serial = time.Now().UTC().Format("20060102_1504") } // Set default variant if d.Image.Variant == "" { d.Image.Variant = "default" } // Set default keyserver if d.Source.Keyserver == "" { d.Source.Keyserver = "hkps.pool.sks-keyservers.net" } // Set default name and description templates if d.Image.Name == "" { d.Image.Name = "{{ image.distribution }}-{{ image.release }}-{{ image.architecture_mapped }}-{{ image.variant }}-{{ image.serial }}" } if d.Image.Description == "" { d.Image.Description = "{{ image.distribution|capfirst }} {{ image.release }} {{ image.architecture_mapped }}{% if image.variant != \"default\" %} ({{ image.variant }}){% endif %} ({{ image.serial }})" } // Set default target type. This will only be overridden if building VMs for Incus. d.Targets.Type = DefinitionFilterTypeContainer } // Validate validates the Definition. 
func (d *Definition) Validate() error { if strings.TrimSpace(d.Image.Distribution) == "" { return errors.New("image.distribution may not be empty") } validDownloaders := []string{ "almalinux-http", "alpinelinux-http", "alt-http", "apertis-http", "archlinux-http", "busybox", "centos-http", "springdalelinux-http", "debootstrap", "rpmbootstrap", "fedora-http", "gentoo-http", "ubuntu-http", "sabayon-http", "docker-http", "oraclelinux-http", "openeuler-http", "opensuse-http", "openwrt-http", "plamolinux-http", "voidlinux-http", "funtoo-http", "rootfs-http", "rockylinux-http", "vyos-http", "slackware-http", "nixos-http", } if !util.ValueInSlice(strings.TrimSpace(d.Source.Downloader), validDownloaders) { return fmt.Errorf("source.downloader must be one of %v", validDownloaders) } if d.Packages.Manager != "" { validManagers := []string{ "apk", "apt", "dnf", "egoportage", "opkg", "pacman", "portage", "yum", "equo", "xbps", "zypper", "luet", "slackpkg", } if !util.ValueInSlice(strings.TrimSpace(d.Packages.Manager), validManagers) { return fmt.Errorf("packages.manager must be one of %v", validManagers) } if d.Packages.CustomManager != nil { return errors.New("cannot have both packages.manager and packages.custom_manager set") } } else { if d.Packages.CustomManager == nil { return errors.New("packages.manager or packages.custom_manager needs to be set") } if d.Packages.CustomManager.Clean.Command == "" { return errors.New("packages.custom_manager requires a clean command") } if d.Packages.CustomManager.Install.Command == "" { return errors.New("packages.custom_manager requires an install command") } if d.Packages.CustomManager.Remove.Command == "" { return errors.New("packages.custom_manager requires a remove command") } if d.Packages.CustomManager.Refresh.Command == "" { return errors.New("packages.custom_manager requires a refresh command") } if d.Packages.CustomManager.Update.Command == "" { return errors.New("packages.custom_manager requires an update command") } } validGenerators := []string{ "dump", "copy", "template", "hostname", "hosts", "remove", "cloud-init", "incus-agent", "fstab", } for _, file := range d.Files { if !util.ValueInSlice(strings.TrimSpace(file.Generator), validGenerators) { return fmt.Errorf("files.*.generator must be one of %v", validGenerators) } } validMappings := []string{ "almalinux", "alpinelinux", "altlinux", "archlinux", "centos", "debian", "gentoo", "plamolinux", "voidlinux", "funtoo", "slackware", } architectureMap := strings.TrimSpace(d.Mappings.ArchitectureMap) if architectureMap != "" { if !util.ValueInSlice(architectureMap, validMappings) { return fmt.Errorf("mappings.architecture_map must be one of %v", validMappings) } } validTriggers := []string{ "post-files", "post-packages", "post-unpack", "post-update", } for _, action := range d.Actions { if !util.ValueInSlice(action.Trigger, validTriggers) { return fmt.Errorf("actions.*.trigger must be one of %v", validTriggers) } } validPackageActions := []string{ "install", "remove", } for _, set := range d.Packages.Sets { if !util.ValueInSlice(set.Action, validPackageActions) { return fmt.Errorf("packages.*.set.*.action must be one of %v", validPackageActions) } } // Mapped architecture (distro name) archMapped, err := d.getMappedArchitecture() if err != nil { return fmt.Errorf("Failed to get mapped architecture: %w", err) } d.Image.ArchitectureMapped = archMapped // Kernel architecture and personality archID, err := incusArch.ArchitectureId(d.Image.Architecture) if err != nil { return fmt.Errorf("Failed to get 
architecture ID: %w", err) } archName, err := incusArch.ArchitectureName(archID) if err != nil { return fmt.Errorf("Failed to get architecture name: %w", err) } d.Image.ArchitectureKernel = archName archPersonality, err := incusArch.ArchitecturePersonality(archID) if err != nil { return fmt.Errorf("Failed to get architecture personality: %w", err) } d.Image.ArchitecturePersonality = archPersonality return nil } // GetRunnableActions returns a list of actions depending on the trigger // and releases. func (d *Definition) GetRunnableActions(trigger string, imageTarget ImageTarget) []DefinitionAction { out := []DefinitionAction{} for _, action := range d.Actions { if action.Trigger != trigger { continue } if !ApplyFilter(&action, d.Image.Release, d.Image.ArchitectureMapped, d.Image.Variant, d.Targets.Type, imageTarget) { continue } out = append(out, action) } return out } // GetEarlyPackages returns a list of packages which are to be installed or removed earlier than the actual package handling // Also removes them from the package set so they aren't attempted to be re-installed again as normal packages. func (d *Definition) GetEarlyPackages(action string) []string { var early []string normal := []DefinitionPackagesSet{} for _, set := range d.Packages.Sets { if set.Early && set.Action == action && ApplyFilter(&set, d.Image.Release, d.Image.ArchitectureMapped, d.Image.Variant, d.Targets.Type, 0) { early = append(early, set.Packages...) } else { normal = append(normal, set) } } d.Packages.Sets = normal return early } func (d *Definition) getMappedArchitecture() (string, error) { var arch string if d.Mappings.ArchitectureMap != "" { // Translate the architecture using the requested map var err error arch, err = GetArch(d.Mappings.ArchitectureMap, d.Image.Architecture) if err != nil { return "", fmt.Errorf("Failed to translate the architecture name: %w", err) } } else if len(d.Mappings.Architectures) > 0 { // Translate the architecture using a user specified mapping var ok bool arch, ok = d.Mappings.Architectures[d.Image.Architecture] if !ok { // If no mapping exists, it means it doesn't need translating arch = d.Image.Architecture } } else { // No map or mappings provided, just go with it as it is arch = d.Image.Architecture } return arch, nil } func getFieldByTag(v reflect.Value, t reflect.Type, tag string) (reflect.Value, error) { parts := strings.SplitN(tag, ".", 2) if t.Kind() == reflect.Slice { // Get index, e.g. '0' from tag 'foo.0' value, err := strconv.Atoi(parts[0]) if err != nil { return reflect.Value{}, err } if t.Elem().Kind() == reflect.Struct { // Make sure we are in range, otherwise return error if value < 0 || value >= v.Len() { return reflect.Value{}, errors.New("Index out of range") } return getFieldByTag(v.Index(value), t.Elem(), parts[1]) } // Primitive type return v.Index(value), nil } if t.Kind() == reflect.Struct { // Find struct field with correct tag for i := 0; i < t.NumField(); i++ { value := t.Field(i).Tag.Get("yaml") if value != "" && strings.Split(value, ",")[0] == parts[0] { if len(parts) == 1 { return v.Field(i), nil } return getFieldByTag(v.Field(i), t.Field(i).Type, parts[1]) } } } // Return its value if it's a primitive type return v, nil } // ApplyFilter returns true if the filter matches. 
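// Example (sketch, see TestApplyFilter): an empty filter matches when the caller passes no target mask or includes ImageTargetUndefined, while a filter with types: [vm] matches only when the image being built targets a VM and the caller accepts ImageTargetVM.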
func ApplyFilter(filter Filter, release string, architecture string, variant string, targetType DefinitionFilterType, acceptedImageTargets ImageTarget) bool { if len(filter.GetReleases()) > 0 && !util.ValueInSlice(release, filter.GetReleases()) { return false } if len(filter.GetArchitectures()) > 0 && !util.ValueInSlice(architecture, filter.GetArchitectures()) { return false } if len(filter.GetVariants()) > 0 && !util.ValueInSlice(variant, filter.GetVariants()) { return false } types := filter.GetTypes() if (acceptedImageTargets == 0 || acceptedImageTargets&ImageTargetUndefined > 0) && len(types) == 0 { return true } hasTargetType := func(targetType DefinitionFilterType) bool { for _, t := range types { if t == targetType { return true } } return false } if acceptedImageTargets&ImageTargetAll > 0 { if len(types) == 2 && hasTargetType(targetType) { return true } } if acceptedImageTargets&ImageTargetContainer > 0 { if targetType == DefinitionFilterTypeContainer && hasTargetType(targetType) { return true } } if acceptedImageTargets&ImageTargetVM > 0 { if targetType == DefinitionFilterTypeVM && hasTargetType(targetType) { return true } } return false } distrobuilder-3.0/shared/definition_test.go000066400000000000000000000366031456216713500212350ustar00rootroot00000000000000package shared import ( "log" "testing" "github.com/lxc/incus/shared/osarch" "github.com/stretchr/testify/require" yaml "gopkg.in/yaml.v2" ) func TestSetDefinitionDefaults(t *testing.T) { def := Definition{} def.SetDefaults() localArch, _ := osarch.ArchitectureGetLocal() require.Equal(t, localArch, def.Image.Architecture) require.Equal(t, "30d", def.Image.Expiry) } func TestValidateDefinition(t *testing.T) { tests := []struct { name string definition Definition expected string shouldFail bool }{ { "valid Definition", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, }, Packages: DefinitionPackages{ Manager: "apt", }, Files: []DefinitionFile{ { Generator: "dump", }, }, Mappings: DefinitionMappings{ ArchitectureMap: "debian", }, }, "", false, }, { "valid Definition without source.keys", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", }, Packages: DefinitionPackages{ Manager: "apt", }, }, "", false, }, { "valid Definition without source.url", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", }, Packages: DefinitionPackages{ Manager: "apt", }, }, "", false, }, { "valid Definition with packages.custom_manager", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", }, Packages: DefinitionPackages{ CustomManager: &DefinitionPackagesCustomManager{ Install: CustomManagerCmd{ Command: "install", }, Remove: CustomManagerCmd{ Command: "remove", }, Clean: CustomManagerCmd{ Command: "clean", }, Update: CustomManagerCmd{ Command: "update", }, Refresh: CustomManagerCmd{ Command: "refresh", }, }, }, }, "", false, }, { "invalid ArchitectureMap", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, }, Packages: DefinitionPackages{ Manager: "apt", }, Files: []DefinitionFile{ { Generator: "dump", }, }, 
Mappings: DefinitionMappings{ ArchitectureMap: "foo", }, }, "mappings.architecture_map must be one of .+", true, }, { "invalid generator", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, }, Packages: DefinitionPackages{ Manager: "apt", }, Files: []DefinitionFile{ { Generator: "foo", }, }, }, "files\\.\\*\\.generator must be one of .+", true, }, { "empty image.distribution", Definition{}, "image.distribution may not be empty", true, }, { "invalid source.downloader", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "foo", }, }, "source.downloader must be one of .+", true, }, { "invalid package.manager", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, }, Packages: DefinitionPackages{ Manager: "foo", }, }, "packages.manager must be one of .+", true, }, { "missing clean command in packages.custom_manager", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, }, Packages: DefinitionPackages{ CustomManager: &DefinitionPackagesCustomManager{}, }, }, "packages.custom_manager requires a clean command", true, }, { "missing install command in packages.custom_manager", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, }, Packages: DefinitionPackages{ CustomManager: &DefinitionPackagesCustomManager{ Clean: CustomManagerCmd{ Command: "clean", }, }, }, }, "packages.custom_manager requires an install command", true, }, { "missing remove command in packages.custom_manager", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, }, Packages: DefinitionPackages{ CustomManager: &DefinitionPackagesCustomManager{ Clean: CustomManagerCmd{ Command: "clean", }, Install: CustomManagerCmd{ Command: "install", }, }, }, }, "packages.custom_manager requires a remove command", true, }, { "missing refresh command in packages.custom_manager", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, }, Packages: DefinitionPackages{ CustomManager: &DefinitionPackagesCustomManager{ Clean: CustomManagerCmd{ Command: "clean", }, Install: CustomManagerCmd{ Command: "install", }, Remove: CustomManagerCmd{ Command: "remove", }, }, }, }, "packages.custom_manager requires a refresh command", true, }, { "missing update command in packages.custom_manager", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, }, Packages: DefinitionPackages{ CustomManager: &DefinitionPackagesCustomManager{ Clean: CustomManagerCmd{ Command: "clean", }, Install: CustomManagerCmd{ Command: "install", }, Remove: CustomManagerCmd{ Command: "remove", }, Refresh: CustomManagerCmd{ Command: "refresh", }, }, }, }, "packages.custom_manager requires an update 
command", true, }, { "package.manager and package.custom_manager set", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, }, Packages: DefinitionPackages{ Manager: "apt", CustomManager: &DefinitionPackagesCustomManager{}, }, }, "cannot have both packages.manager and packages.custom_manager set", true, }, { "package.manager and package.custom_manager unset", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, }, Packages: DefinitionPackages{}, }, "packages.manager or packages.custom_manager needs to be set", true, }, { "invalid action trigger", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, }, Packages: DefinitionPackages{ Manager: "apt", }, Actions: []DefinitionAction{ { Trigger: "post-build", }, }, }, "actions\\.\\*\\.trigger must be one of .+", true, }, { "invalid package action", Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, }, Packages: DefinitionPackages{ Manager: "apt", Sets: []DefinitionPackagesSet{ { Action: "update", }, }, }, }, "packages\\.\\*\\.set\\.\\*\\.action must be one of .+", true, }, } for i, tt := range tests { log.Printf("Running test #%d: %s", i, tt.name) tt.definition.SetDefaults() err := tt.definition.Validate() if tt.shouldFail { require.Regexp(t, tt.expected, err) } else { require.NoError(t, err) } } } func TestDefinitionSetValue(t *testing.T) { d := Definition{ Image: DefinitionImage{ Distribution: "ubuntu", Release: "artful", }, Source: DefinitionSource{ Downloader: "debootstrap", URL: "https://ubuntu.com", Keys: []string{"0xCODE"}, SkipVerification: true, }, Packages: DefinitionPackages{ Manager: "apt", }, Actions: []DefinitionAction{ { Trigger: "post-update", Action: "/bin/true", }, { Trigger: "post-packages", Action: "/bin/false", }, }, } err := d.SetValue("image.release", "bionic") require.NoError(t, err) require.Equal(t, "bionic", d.Image.Release) err = d.SetValue("actions.0.trigger", "post-files") require.NoError(t, err) require.Equal(t, "post-files", d.Actions[0].Trigger) // Index out of bounds err = d.SetValue("actions.3.trigger", "post-files") require.EqualError(t, err, "Failed to get field by tag: Index out of range") // Nonsense err = d.SetValue("image", "[foo: bar]") require.EqualError(t, err, "Unsupported type 'struct'") err = d.SetValue("source.skip_verification", "true") require.NoError(t, err) require.Equal(t, true, d.Source.SkipVerification) } func TestDefinitionFilter(t *testing.T) { input := `packages: sets: - packages: - foo architectures: - amd64` def := Definition{} err := yaml.Unmarshal([]byte(input), &def) require.NoError(t, err) require.Contains(t, def.Packages.Sets[0].Packages, "foo") require.Contains(t, def.Packages.Sets[0].Architectures, "amd64") } func TestApplyFilter(t *testing.T) { repo := DefinitionPackagesRepository{} // Variants repo.Variants = []string{"default"} require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", 0)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "cloud", "vm", 0)) // Architectures repo.Architectures = []string{"amd64", "i386"} require.True(t, 
ApplyFilter(&repo, "foo", "amd64", "default", "vm", 0)) require.True(t, ApplyFilter(&repo, "foo", "i386", "default", "vm", 0)) require.False(t, ApplyFilter(&repo, "foo", "s390", "default", "vm", 0)) // Releases repo.Releases = []string{"foo"} require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", 0)) require.False(t, ApplyFilter(&repo, "bar", "amd64", "default", "vm", 0)) // Targets require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", 0)) require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", 0)) require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", ImageTargetUndefined)) require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", ImageTargetUndefined)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", ImageTargetVM)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", ImageTargetAll|ImageTargetVM)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", ImageTargetContainer|ImageTargetVM)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", ImageTargetVM)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", ImageTargetAll|ImageTargetVM)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", ImageTargetContainer|ImageTargetVM)) repo.Types = []DefinitionFilterType{DefinitionFilterTypeVM} require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", ImageTargetVM)) require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", ImageTargetAll|ImageTargetVM)) require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", ImageTargetContainer|ImageTargetVM)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", ImageTargetVM)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", ImageTargetAll|ImageTargetVM)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", ImageTargetContainer|ImageTargetVM)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", 0)) repo.Types = []DefinitionFilterType{DefinitionFilterTypeContainer} require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", ImageTargetContainer)) require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", ImageTargetAll|ImageTargetContainer)) require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", ImageTargetContainer|ImageTargetVM)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", ImageTargetContainer)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", ImageTargetAll|ImageTargetContainer)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", ImageTargetContainer|ImageTargetVM)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", 0)) repo.Types = []DefinitionFilterType{DefinitionFilterTypeContainer, DefinitionFilterTypeVM} require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", ImageTargetContainer)) require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", ImageTargetAll|ImageTargetContainer)) require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "container", ImageTargetContainer|ImageTargetVM)) require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", ImageTargetAll|ImageTargetContainer)) require.True(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", ImageTargetContainer|ImageTargetVM)) require.False(t, ApplyFilter(&repo, "foo", "amd64", "default", "vm", 
ImageTargetContainer)) } func TestDefinitionFilterTypeUnmarshalYAML(t *testing.T) { data := "vm" var out DefinitionFilterType err := yaml.Unmarshal([]byte(data), &out) require.NoError(t, err) require.Equal(t, DefinitionFilterTypeVM, out) data = "container" err = yaml.Unmarshal([]byte(data), &out) require.NoError(t, err) require.Equal(t, DefinitionFilterTypeContainer, out) data = "containers" err = yaml.Unmarshal([]byte(data), &out) require.EqualError(t, err, `Invalid filter type "containers"`) data = "vms" err = yaml.Unmarshal([]byte(data), &out) require.EqualError(t, err, `Invalid filter type "vms"`) } distrobuilder-3.0/shared/logger.go000066400000000000000000000006321456216713500173160ustar00rootroot00000000000000package shared import ( "os" "github.com/sirupsen/logrus" ) // GetLogger returns a new logger. func GetLogger(debug bool) (*logrus.Logger, error) { logger := logrus.StandardLogger() logger.SetOutput(os.Stdout) formatter := logrus.TextFormatter{ FullTimestamp: true, PadLevelText: true, } logger.Formatter = &formatter if debug { logger.Level = logrus.DebugLevel } return logger, nil } distrobuilder-3.0/shared/osarch.go000066400000000000000000000066431456216713500173260ustar00rootroot00000000000000package shared import ( "fmt" "github.com/lxc/incus/shared/osarch" ) var alpineLinuxArchitectureNames = map[int]string{ osarch.ARCH_32BIT_INTEL_X86: "x86", osarch.ARCH_64BIT_INTEL_X86: "x86_64", osarch.ARCH_32BIT_ARMV7_LITTLE_ENDIAN: "armv7", } var archLinuxArchitectureNames = map[int]string{ osarch.ARCH_64BIT_INTEL_X86: "x86_64", osarch.ARCH_32BIT_ARMV7_LITTLE_ENDIAN: "armv7", osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN: "aarch64", } var centosArchitectureNames = map[int]string{ osarch.ARCH_32BIT_INTEL_X86: "i386", } var debianArchitectureNames = map[int]string{ osarch.ARCH_32BIT_INTEL_X86: "i386", osarch.ARCH_64BIT_INTEL_X86: "amd64", osarch.ARCH_32BIT_ARMV7_LITTLE_ENDIAN: "armhf", osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN: "arm64", osarch.ARCH_32BIT_POWERPC_BIG_ENDIAN: "powerpc", osarch.ARCH_64BIT_POWERPC_BIG_ENDIAN: "powerpc64", osarch.ARCH_64BIT_POWERPC_LITTLE_ENDIAN: "ppc64el", } var gentooArchitectureNames = map[int]string{ osarch.ARCH_32BIT_INTEL_X86: "i686", osarch.ARCH_64BIT_INTEL_X86: "amd64", osarch.ARCH_32BIT_ARMV7_LITTLE_ENDIAN: "armv7a_hardfp", osarch.ARCH_32BIT_POWERPC_BIG_ENDIAN: "ppc", osarch.ARCH_64BIT_POWERPC_BIG_ENDIAN: "ppc64", osarch.ARCH_64BIT_POWERPC_LITTLE_ENDIAN: "ppc64le", osarch.ARCH_64BIT_S390_BIG_ENDIAN: "s390x", } var plamoLinuxArchitectureNames = map[int]string{ osarch.ARCH_32BIT_INTEL_X86: "x86", } var altLinuxArchitectureNames = map[int]string{ osarch.ARCH_32BIT_INTEL_X86: "i586", osarch.ARCH_64BIT_INTEL_X86: "x86_64", osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN: "aarch64", } var voidLinuxArchitectureNames = map[int]string{ osarch.ARCH_32BIT_INTEL_X86: "i686", osarch.ARCH_64BIT_INTEL_X86: "x86_64", osarch.ARCH_32BIT_ARMV7_LITTLE_ENDIAN: "armv7l", osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN: "aarch64", } var funtooArchitectureNames = map[int]string{ osarch.ARCH_32BIT_INTEL_X86: "generic_32", osarch.ARCH_64BIT_INTEL_X86: "generic_64", osarch.ARCH_32BIT_ARMV7_LITTLE_ENDIAN: "armv7a_vfpv3_hardfp", osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN: "arm64_generic", } var slackwareArchitectureNames = map[int]string{ osarch.ARCH_32BIT_INTEL_X86: "i586", osarch.ARCH_64BIT_INTEL_X86: "x86_64", } var distroArchitecture = map[string]map[int]string{ "alpinelinux": alpineLinuxArchitectureNames, "altlinux": altLinuxArchitectureNames, "archlinux": archLinuxArchitectureNames, "centos": 
centosArchitectureNames, "debian": debianArchitectureNames, "gentoo": gentooArchitectureNames, "plamolinux": plamoLinuxArchitectureNames, "voidlinux": voidLinuxArchitectureNames, "funtoo": funtooArchitectureNames, "slackware": slackwareArchitectureNames, } // GetArch returns the correct architecture name used by the specified // distribution. func GetArch(distro, arch string) (string, error) { // Special case armel as it is effectively a different userspace variant // of armv7 without hard-float and so doesn't have its own kernel architecture name if arch == "armel" { return "armel", nil } archMap, ok := distroArchitecture[distro] if !ok { return "unknown", fmt.Errorf("Architecture map isn't supported: %s", distro) } archID, err := osarch.ArchitectureId(arch) if err != nil { return "unknown", err } archName, exists := archMap[archID] if exists { return archName, nil } return arch, nil } distrobuilder-3.0/shared/osarch_test.go000066400000000000000000000015261456216713500203600ustar00rootroot00000000000000package shared import ( "log" "testing" "github.com/stretchr/testify/require" ) func TestGetArch(t *testing.T) { tests := []struct { distro string arch string expected string }{ { "alpinelinux", "x86_64", "x86_64", }, { "centos", "x86_64", "x86_64", }, { "debian", "amd64", "amd64", }, { "debian", "x86_64", "amd64", }, { "debian", "s390x", "s390x", }, } for i, tt := range tests { log.Printf("Running test #%d: %s %s", i, tt.distro, tt.arch) arch, err := GetArch(tt.distro, tt.arch) require.NoError(t, err) require.Equal(t, tt.expected, arch) } _, err := GetArch("distro", "") require.EqualError(t, err, "Architecture map isn't supported: distro") _, err = GetArch("debian", "arch") require.EqualError(t, err, "Architecture isn't supported: arch") } distrobuilder-3.0/shared/util.go000066400000000000000000000227061456216713500170220ustar00rootroot00000000000000package shared import ( "context" "errors" "fmt" "io" "os" "os/exec" "regexp" "strconv" "strings" "time" "github.com/lxc/incus/shared/util" "golang.org/x/sys/unix" "gopkg.in/flosch/pongo2.v3" yaml "gopkg.in/yaml.v2" ) // EnvVariable represents a environment variable. type EnvVariable struct { Value string Set bool } // Environment represents a set of environment variables. type Environment map[string]EnvVariable // Copy copies a file. func Copy(src, dest string) error { var err error srcFile, err := os.Open(src) if err != nil { return fmt.Errorf("Failed to open file %q: %w", src, err) } defer srcFile.Close() destFile, err := os.Create(dest) if err != nil { return fmt.Errorf("Failed to create file %q: %w", dest, err) } defer destFile.Close() _, err = io.Copy(destFile, srcFile) if err != nil { return fmt.Errorf("Failed to copy file: %w", err) } return destFile.Sync() } // RunCommand runs a command. Stdout is written to the given io.Writer. If nil, it's written to the real stdout. Stderr is always written to the real stderr. func RunCommand(ctx context.Context, stdin io.Reader, stdout io.Writer, name string, arg ...string) error { cmd := exec.CommandContext(ctx, name, arg...) if stdin != nil { cmd.Stdin = stdin } if stdout != nil { cmd.Stdout = stdout } else { cmd.Stdout = os.Stdout } cmd.Stderr = os.Stderr return cmd.Run() } // RunScript runs a script hereby setting the SHELL and PATH env variables, // and redirecting the process's stdout and stderr to the real stdout and stderr // respectively. 
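// The script content is written to an anonymous memfd and executed via /proc/self/fd, so nothing is left on disk.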
func RunScript(ctx context.Context, content string) error { fd, err := unix.MemfdCreate("tmp", 0) if err != nil { return fmt.Errorf("Failed to create memfd: %w", err) } defer unix.Close(fd) _, err = unix.Write(int(fd), []byte(content)) if err != nil { return fmt.Errorf("Failed to write to memfd: %w", err) } fdPath := fmt.Sprintf("/proc/self/fd/%d", fd) return RunCommand(ctx, nil, nil, fdPath) } // Pack creates an uncompressed tarball. func Pack(ctx context.Context, filename, compression, path string, args ...string) (string, error) { err := RunCommand(ctx, nil, nil, "tar", append([]string{"--xattrs", "-cf", filename, "-C", path, "--sort=name"}, args...)...) if err != nil { // Clean up incomplete tarball os.Remove(filename) return "", fmt.Errorf("Failed to create tarball: %w", err) } return compressTarball(ctx, filename, compression) } // PackUpdate updates an existing tarball. func PackUpdate(ctx context.Context, filename, compression, path string, args ...string) (string, error) { err := RunCommand(ctx, nil, nil, "tar", append([]string{"--xattrs", "-uf", filename, "-C", path, "--sort=name"}, args...)...) if err != nil { return "", fmt.Errorf("Failed to update tarball: %w", err) } return compressTarball(ctx, filename, compression) } // compressTarball compresses a tarball, or not. func compressTarball(ctx context.Context, filename, compression string) (string, error) { fileExtension := "" args := []string{"-f", filename} compression, level, err := ParseCompression(compression) if err != nil { return "", fmt.Errorf("Failed to parse compression level: %w", err) } if level != nil { if compression == "zstd" && *level > 19 { args = append(args, "--ultra") } args = append(args, "-"+strconv.Itoa(*level)) } // If supported, use as many threads as possible. if util.ValueInSlice(compression, []string{"zstd", "xz", "lzma"}) { args = append(args, "--threads=0") } switch compression { case "lzop", "zstd": // Remove the uncompressed file as the compress fails to do so. defer os.Remove(filename) fallthrough case "bzip2", "xz", "lzip", "lzma", "gzip": err := RunCommand(ctx, nil, nil, compression, args...) if err != nil { return "", fmt.Errorf("Failed to compress tarball %q: %w", filename, err) } } switch compression { case "lzop": fileExtension = "lzo" case "zstd": fileExtension = "zst" case "bzip2": fileExtension = "bz2" case "xz": fileExtension = "xz" case "lzip": fileExtension = "lz" case "lzma": fileExtension = "lzma" case "gzip": fileExtension = "gz" } if fileExtension == "" { return filename, nil } return fmt.Sprintf("%s.%s", filename, fileExtension), nil } // GetExpiryDate returns an expiry date based on the creationDate and format. func GetExpiryDate(creationDate time.Time, format string) time.Time { regex := regexp.MustCompile(`(?:(\d+)(s|m|h|d|w))*`) expiryDate := creationDate for _, match := range regex.FindAllStringSubmatch(format, -1) { // Ignore empty matches if match[0] == "" { continue } var duration time.Duration switch match[2] { case "s": duration = time.Second case "m": duration = time.Minute case "h": duration = time.Hour case "d": duration = 24 * time.Hour case "w": duration = 7 * 24 * time.Hour } // Ignore any error since it will be an integer. value, _ := strconv.Atoi(match[1]) expiryDate = expiryDate.Add(time.Duration(value) * duration) } return expiryDate } // RenderTemplate renders a pongo2 template. 
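// Example (sketch, matching TestRenderTemplate): RenderTemplate("{{ image.distribution }} {{ image.release }}", def) yields "Ubuntu Bionic" for a Definition with those image fields; output that still contains "{{" or "{%" is rendered again recursively.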
func RenderTemplate(template string, iface interface{}) (string, error) { // Serialize interface data, err := yaml.Marshal(iface) if err != nil { return "", err } // Decode document and write it to a pongo2 Context var ctx pongo2.Context err = yaml.Unmarshal(data, &ctx) if err != nil { return "", fmt.Errorf("Failed unmarshalling data: %w", err) } // Load template from string tpl, err := pongo2.FromString("{% autoescape off %}" + template + "{% endautoescape %}") if err != nil { return "", err } // Get rendered template ret, err := tpl.Execute(ctx) if err != nil { return ret, err } // Looks like we're nesting templates so run pongo again if strings.Contains(ret, "{{") || strings.Contains(ret, "{%") { return RenderTemplate(ret, iface) } return ret, err } // SetEnvVariables sets the provided environment variables and returns the // old ones. func SetEnvVariables(env Environment) Environment { oldEnv := Environment{} for k, v := range env { // Check whether the env variables are set at the moment oldVal, set := os.LookupEnv(k) // Store old env variables oldEnv[k] = EnvVariable{ Value: oldVal, Set: set, } if v.Set { os.Setenv(k, v.Value) } else { os.Unsetenv(k) } } return oldEnv } // RsyncLocal copies src to dest using rsync. func RsyncLocal(ctx context.Context, src string, dest string) error { err := RunCommand(ctx, nil, nil, "rsync", "-aHASX", "--devices", src, dest) if err != nil { return fmt.Errorf("Failed to copy %q to %q: %w", src, dest, err) } return nil } // Retry retries a function up to times. This is especially useful for networking. func Retry(f func() error, attempts uint) error { var err error for i := uint(0); i < attempts; i++ { err = f() // Stop retrying if the call succeeded or if the context has been cancelled. if err == nil || err != nil && errors.Is(err, context.Canceled) { break } time.Sleep(time.Second) } return err } // ParseCompression extracts the compression method and level (if any) from the // compression flag. func ParseCompression(compression string) (string, *int, error) { levelRegex := regexp.MustCompile(`^([\w]+)-(\d{1,2})$`) match := levelRegex.FindStringSubmatch(compression) if match != nil { compression = match[1] level, err := strconv.Atoi(match[2]) if err != nil { return "", nil, err } switch compression { case "zstd": if 1 <= level && level <= 22 { return compression, &level, nil } case "bzip2", "gzip", "lzo", "lzop": // The standalone tool is named lzop, but mksquashfs // accepts only lzo. For convenience, accept both. if compression == "lzo" { compression = "lzop" } if 1 <= level && level <= 9 { return compression, &level, nil } case "lzip", "lzma", "xz": if 0 <= level && level <= 9 { return compression, &level, nil } default: return "", nil, fmt.Errorf("Compression method %q does not support specifying levels", compression) } return "", nil, fmt.Errorf("Invalid compression level %q for method %q", level, compression) } if compression == "lzo" { compression = "lzop" } return compression, nil, nil } // ParseSquashfsCompression extracts the compression method and level (if any) // from the compression flag for use with mksquashfs. 
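// Example (sketch): "zstd-19" parses to method "zstd" with level 19, "lzop" is normalised to "lzo" for mksquashfs, and unknown methods return an error.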
func ParseSquashfsCompression(compression string) (string, *int, error) { levelRegex := regexp.MustCompile(`^([\w]+)-(\d{1,2})$`) match := levelRegex.FindStringSubmatch(compression) if match != nil { compression = match[1] level, err := strconv.Atoi(match[2]) if err != nil { return "", nil, err } switch compression { case "zstd": if 1 <= level && level <= 22 { return compression, &level, nil } case "gzip", "lzo", "lzop": // mkskquashfs accepts only lzo, but the standalone // tool is named lzop. For convenience, accept both. if compression == "lzop" { compression = "lzo" } if 1 <= level && level <= 9 { return compression, &level, nil } default: return "", nil, fmt.Errorf("Squashfs compression method %q does not support specifying levels", compression) } return "", nil, fmt.Errorf("Invalid squashfs compression level %q for method %q", level, compression) } if compression == "lzop" { compression = "lzo" } if util.ValueInSlice(compression, []string{"gzip", "lzo", "lz4", "xz", "zstd", "lzma"}) { return compression, nil, nil } return "", nil, fmt.Errorf("Invalid squashfs compression method %q", compression) } distrobuilder-3.0/shared/util_test.go000066400000000000000000000075511456216713500200620ustar00rootroot00000000000000package shared import ( "log" "os" "testing" "github.com/stretchr/testify/require" "gopkg.in/flosch/pongo2.v3" ) func TestRenderTemplate(t *testing.T) { tests := []struct { name string iface interface{} template string expected string shouldFail bool }{ { "valid template with yaml tags", Definition{ Image: DefinitionImage{ Distribution: "Ubuntu", Release: "Bionic", }, }, "{{ image.distribution }} {{ image.release }}", "Ubuntu Bionic", false, }, { "valid template without yaml tags", pongo2.Context{ "foo": "bar", }, "{{ foo }}", "bar", false, }, { "variable not in context", pongo2.Context{}, "{{ foo }}", "", false, }, { "invalid template", pongo2.Context{ "foo": nil, }, "{{ foo }", "", true, }, { "invalid context", pongo2.Context{ "foo.bar": nil, }, "{{ foo.bar }}", "", true, }, } for i, tt := range tests { log.Printf("Running test #%d: %s", i, tt.name) ret, err := RenderTemplate(tt.template, tt.iface) if tt.shouldFail { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, tt.expected, ret) } } } func TestSetEnvVariables(t *testing.T) { // Initial variables os.Setenv("FOO", "bar") env := Environment{ "FOO": EnvVariable{ Value: "bla", Set: true, }, "BAR": EnvVariable{ Value: "blub", Set: true, }, } // Set new env variables oldEnv := SetEnvVariables(env) for k, v := range env { val, set := os.LookupEnv(k) require.True(t, set) require.Equal(t, v.Value, val) } // Reset env variables SetEnvVariables(oldEnv) val, set := os.LookupEnv("FOO") require.True(t, set) require.Equal(t, val, "bar") val, set = os.LookupEnv("BAR") require.False(t, set, "Expected 'BAR' to be unset") require.Empty(t, val) } func TestParseCompression(t *testing.T) { tests := []struct { compression string expectedCompression string expectLevel bool expectedLevel int shouldFail bool }{ { "gzip", "gzip", false, 0 /* irrelevant */, false, }, { "gzip-1", "gzip", true, 1, false, }, { "gzip-10", "", false, 0, true, }, { "zstd-22", "zstd", true, 22, false, }, { "gzip-0", "", false, 0, true, }, { "unknown-1", "", false, 0, true, }, { "lzo", "lzop", false, 0 /* irrelevant */, false, }, { "lzo-9", "lzop", true, 9, false, }, } for i, tt := range tests { log.Printf("Running test #%d: %s", i, tt.compression) compression, level, err := ParseCompression(tt.compression) if tt.shouldFail { require.Error(t, err) } 
else { require.NoError(t, err) require.Equal(t, tt.expectedCompression, compression) if tt.expectLevel { require.NotNil(t, level) require.Equal(t, tt.expectedLevel, *level) } } } } func TestSquashfsParseCompression(t *testing.T) { tests := []struct { compression string expectedCompression string expectLevel bool expectedLevel int shouldFail bool }{ { "gzip", "gzip", false, 0 /* irrelevant */, false, }, { "gzip-1", "gzip", true, 1, false, }, { "gzip-10", "", false, 0, true, }, { "zstd-22", "zstd", true, 22, false, }, { "gzip-0", "", false, 0, true, }, { "invalid", "", false, 0, true, }, { "xz-1", "", false, 0, true, }, { "lzop", "lzo", false, 0 /* irrelevant */, false, }, { "lzop-9", "lzo", true, 9, false, }, } for i, tt := range tests { log.Printf("Running test #%d: %s", i, tt.compression) compression, level, err := ParseSquashfsCompression(tt.compression) if tt.shouldFail { require.Error(t, err) } else { require.NoError(t, err) require.Equal(t, tt.expectedCompression, compression) if tt.expectLevel { require.NotNil(t, level) require.Equal(t, tt.expectedLevel, *level) } } } } distrobuilder-3.0/shared/version/000077500000000000000000000000001456216713500171745ustar00rootroot00000000000000distrobuilder-3.0/shared/version/version.go000066400000000000000000000001331456216713500212050ustar00rootroot00000000000000package version // Version contains the distrobuilder version number. var Version = "3.0" distrobuilder-3.0/sources/000077500000000000000000000000001456216713500157245ustar00rootroot00000000000000distrobuilder-3.0/sources/almalinux-http.go000066400000000000000000000152361456216713500212310ustar00rootroot00000000000000package sources import ( "crypto/sha256" "errors" "fmt" "io" "net/http" "net/url" "os" "path" "path/filepath" "regexp" "strings" "github.com/lxc/distrobuilder/shared" ) type almalinux struct { commonRHEL fname string majorVersion string } // Run downloads the tarball and unpacks it. func (s *almalinux) Run() error { var err error s.majorVersion = strings.Split(s.definition.Image.Release, ".")[0] baseURL := fmt.Sprintf("%s/%s/isos/%s/", s.definition.Source.URL, strings.ToLower(s.definition.Image.Release), s.definition.Image.ArchitectureMapped) s.fname, err = s.getRelease(s.definition.Source.URL, s.definition.Image.Release, s.definition.Source.Variant, s.definition.Image.ArchitectureMapped) if err != nil { return fmt.Errorf("Failed to get release: %w", err) } fpath := s.getTargetDir() // Skip download if raw image exists and has already been decompressed. 
if strings.HasSuffix(s.fname, ".raw.xz") { imagePath := filepath.Join(fpath, filepath.Base(strings.TrimSuffix(s.fname, ".xz"))) stat, err := os.Stat(imagePath) if err == nil && stat.Size() > 0 { s.logger.WithField("file", filepath.Join(fpath, strings.TrimSuffix(s.fname, ".xz"))).Info("Unpacking raw image") return s.unpackRaw(filepath.Join(fpath, strings.TrimSuffix(s.fname, ".xz")), s.rootfsDir, s.rawRunner) } } url, err := url.Parse(baseURL) if err != nil { return fmt.Errorf("Failed to parse URL %q: %w", baseURL, err) } checksumFile := "" if !s.definition.Source.SkipVerification { // Force gpg checks when using http if url.Scheme != "https" { if len(s.definition.Source.Keys) == 0 { return errors.New("GPG keys are required if downloading from HTTP") } if s.definition.Image.ArchitectureMapped == "armhfp" { checksumFile = "sha256sum.txt" } else { if strings.HasPrefix(s.definition.Image.Release, "8") { checksumFile = "CHECKSUM" } else { checksumFile = "sha256sum.txt.asc" } } fpath, err := s.DownloadHash(s.definition.Image, baseURL+checksumFile, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", baseURL+checksumFile, err) } // Only verify file if possible. if strings.HasSuffix(checksumFile, ".asc") { valid, err := s.VerifyFile(filepath.Join(fpath, checksumFile), "") if err != nil { return fmt.Errorf("Failed to verify %q: %w", checksumFile, err) } if !valid { return fmt.Errorf("Invalid signature for %q", filepath.Join(fpath, checksumFile)) } } } } _, err = s.DownloadHash(s.definition.Image, baseURL+s.fname, checksumFile, sha256.New()) if err != nil { return fmt.Errorf("Failed to download %q: %w", baseURL+s.fname, err) } if strings.HasSuffix(s.fname, ".raw.xz") || strings.HasSuffix(s.fname, ".raw") { s.logger.WithField("file", filepath.Join(fpath, s.fname)).Info("Unpacking raw image") return s.unpackRaw(filepath.Join(fpath, s.fname), s.rootfsDir, s.rawRunner) } s.logger.WithField("file", filepath.Join(fpath, s.fname)).Info("Unpacking ISO") return s.unpackISO(filepath.Join(fpath, s.fname), s.rootfsDir, s.isoRunner) } func (s *almalinux) rawRunner() error { err := shared.RunScript(s.ctx, fmt.Sprintf(`#!/bin/sh set -eux # Create required files touch /etc/mtab /etc/fstab # Create a minimal rootfs mkdir /rootfs yum --installroot=/rootfs --disablerepo=* --enablerepo=base -y --releasever=%s install basesystem almalinux-release yum rm -rf /rootfs/var/cache/yum `, s.majorVersion)) if err != nil { return fmt.Errorf("Failed to run script: %w", err) } return nil } func (s *almalinux) isoRunner(gpgKeysPath string) error { err := shared.RunScript(s.ctx, fmt.Sprintf(`#!/bin/sh set -eux GPG_KEYS="%s" # Create required files touch /etc/mtab /etc/fstab yum_args="" mkdir -p /etc/yum.repos.d if [ -d /mnt/cdrom ]; then # Install initial package set cd /mnt/cdrom/Packages rpm -ivh --nodeps $(ls rpm-*.rpm | head -n1) rpm -ivh --nodeps $(ls yum-*.rpm | head -n1) # Add cdrom repo cat <<- EOF > /etc/yum.repos.d/cdrom.repo [cdrom] name=Install CD-ROM baseurl=file:///mnt/cdrom enabled=0 EOF if [ -n "${GPG_KEYS}" ]; then echo gpgcheck=1 >> /etc/yum.repos.d/cdrom.repo echo gpgkey=${GPG_KEYS} >> /etc/yum.repos.d/cdrom.repo else echo gpgcheck=0 >> /etc/yum.repos.d/cdrom.repo fi yum_args="--disablerepo=* --enablerepo=cdrom" yum ${yum_args} -y reinstall yum else if ! 
[ -f /etc/pki/rpm-gpg/RPM-GPG-KEY-AlmaLinux ]; then mv /etc/pki/rpm-gpg/RPM-GPG-KEY-AlmaLinux-* /etc/pki/rpm-gpg/RPM-GPG-KEY-AlmaLinux fi cat <<- "EOF" > /etc/yum.repos.d/almalinux.repo [baseos] name=AlmaLinux $releasever - BaseOS baseurl=https://repo.almalinux.org/almalinux/$releasever/BaseOS/$basearch/os/ gpgcheck=1 enabled=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-AlmaLinux EOF # Use dnf in the boot iso since yum isn't available alias yum=dnf fi pkgs="basesystem almalinux-release yum" # Create a minimal rootfs mkdir /rootfs yum ${yum_args} --installroot=/rootfs -y --releasever=%s --skip-broken install ${pkgs} rm -rf /rootfs/var/cache/yum `, gpgKeysPath, s.majorVersion)) if err != nil { return fmt.Errorf("Failed to run script: %w", err) } return nil } func (s *almalinux) getRelease(URL, release, variant, arch string) (string, error) { fullURL := URL + path.Join("/", strings.ToLower(release), "isos", arch) var ( err error resp *http.Response ) err = shared.Retry(func() error { resp, err = http.Get(fullURL) if err != nil { return fmt.Errorf("Failed to GET %q: %w", fullURL, err) } return nil }, 3) if err != nil { return "", err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to read body: %w", err) } re := s.getRegexes(arch, variant, release) for _, r := range re { matches := r.FindAllString(string(body), -1) if len(matches) > 0 { return matches[len(matches)-1], nil } } return "", nil } func (s *almalinux) getRegexes(arch string, variant string, release string) []*regexp.Regexp { releaseFields := strings.Split(release, ".") var re []string switch len(releaseFields) { case 1: re = append(re, fmt.Sprintf("AlmaLinux-%s(\\.\\d+)*-%s-(?i:%s)(-\\d+)?.iso", releaseFields[0], arch, variant)) re = append(re, fmt.Sprintf("AlmaLinux-%s(.\\d+)*-(beta|rc)-\\d-%s-(?i:%s).iso", releaseFields[0], arch, variant)) case 2: re = append(re, fmt.Sprintf("AlmaLinux-%s\\.%s-%s-(?i:%s)(-\\d+)?.iso", releaseFields[0], releaseFields[1], arch, variant)) re = append(re, fmt.Sprintf("AlmaLinux-%s\\.%s-(beta|rc)-\\d-%s-(?i:%s).iso", releaseFields[0], releaseFields[1], arch, variant)) } regexes := make([]*regexp.Regexp, len(re)) for i, r := range re { regexes[i] = regexp.MustCompile(r) } return regexes } distrobuilder-3.0/sources/alpine-http.go000066400000000000000000000111161456216713500205000ustar00rootroot00000000000000package sources import ( "crypto/sha256" "errors" "fmt" "io" "net/http" "net/url" "os" "path/filepath" "regexp" "strings" "github.com/lxc/distrobuilder/shared" ) type alpineLinux struct { common } func (s *alpineLinux) Run() error { var releaseShort string releaseFull := s.definition.Image.Release if s.definition.Image.Release == "edge" { if s.definition.Source.SameAs == "" { return errors.New("You can't use Alpine edge without setting same_as") } releaseFull = s.definition.Source.SameAs } releaseField := strings.Split(releaseFull, ".") if len(releaseField) == 2 { releaseShort = fmt.Sprintf("v%s", releaseFull) } else if len(releaseField) == 3 { releaseShort = fmt.Sprintf("v%s.%s", releaseField[0], releaseField[1]) } else { return fmt.Errorf("Bad Alpine release: %s", releaseFull) } baseURL := fmt.Sprintf("%s/%s/releases/%s", s.definition.Source.URL, releaseShort, s.definition.Image.ArchitectureMapped) if len(releaseField) == 2 { var err error releaseFull, err = s.getLatestRelease(baseURL, releaseFull, s.definition.Image.ArchitectureMapped) if err != nil { return fmt.Errorf("Failed to find latest release: %w", err) } } fname := 
fmt.Sprintf("alpine-minirootfs-%s-%s.tar.gz", releaseFull, s.definition.Image.ArchitectureMapped) tarball := fmt.Sprintf("%s/%s", baseURL, fname) url, err := url.Parse(tarball) if err != nil { return fmt.Errorf("Failed to parse URL %q: %w", tarball, err) } if !s.definition.Source.SkipVerification && url.Scheme != "https" && len(s.definition.Source.Keys) == 0 { return errors.New("GPG keys are required if downloading from HTTP") } var fpath string if s.definition.Source.SkipVerification { fpath, err = s.DownloadHash(s.definition.Image, tarball, "", nil) } else { fpath, err = s.DownloadHash(s.definition.Image, tarball, tarball+".sha256", sha256.New()) } if err != nil { return fmt.Errorf("Failed to download %q: %w", tarball, err) } // Force gpg checks when using http if !s.definition.Source.SkipVerification && url.Scheme != "https" { _, err = s.DownloadHash(s.definition.Image, tarball+".asc", "", nil) if err != nil { return fmt.Errorf("Failed downloading %q: %w", tarball+".asc", err) } valid, err := s.VerifyFile( filepath.Join(fpath, fname), filepath.Join(fpath, fname+".asc")) if err != nil { return fmt.Errorf("Failed to download %q: %w", tarball+".asc", err) } if !valid { return fmt.Errorf("Invalid signature for %q", filepath.Join(fpath, fname)) } } s.logger.WithField("file", filepath.Join(fpath, fname)).Info("Unpacking image") // Unpack err = shared.Unpack(filepath.Join(fpath, fname), s.rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", fname, err) } // Handle edge builds if s.definition.Image.Release == "edge" { // Upgrade to edge exitChroot, err := shared.SetupChroot(s.rootfsDir, s.definition, nil) if err != nil { return fmt.Errorf("Failed to set up chroot: %w", err) } err = shared.RunCommand(s.ctx, nil, nil, "sed", "-i", "-e", "s/v[[:digit:]]\\.[[:digit:]]\\+/edge/g", "/etc/apk/repositories") if err != nil { { err := exitChroot() if err != nil { s.logger.WithField("err", err).Warn("Failed exiting chroot") } } return fmt.Errorf("Failed to edit apk repositories: %w", err) } err = shared.RunCommand(s.ctx, nil, nil, "apk", "upgrade", "--update-cache", "--available") if err != nil { { err := exitChroot() if err != nil { s.logger.WithField("err", err).Warn("Failed exiting chroot") } } return fmt.Errorf("Failed to upgrade edge build: %w", err) } err = exitChroot() if err != nil { return fmt.Errorf("Failed exiting chroot: %w", err) } } // Fix bad permissions in Alpine tarballs err = os.Chmod(s.rootfsDir, 0755) if err != nil { return fmt.Errorf("Failed to chmod %q: %w", s.rootfsDir, err) } return nil } func (s *alpineLinux) getLatestRelease(baseURL, release string, arch string) (string, error) { var ( resp *http.Response err error ) err = shared.Retry(func() error { resp, err = http.Get(baseURL) if err != nil { return fmt.Errorf("Failed to GET %q: %w", baseURL, err) } return nil }, 3) if err != nil { return "", err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to ready body: %w", err) } regex := regexp.MustCompile(fmt.Sprintf(">alpine-minirootfs-(%s\\.\\d+)-%s.tar.gz<", release, arch)) releases := regex.FindAllStringSubmatch(string(body), -1) if len(releases) > 0 { return releases[len(releases)-1][1], nil } return "", nil } distrobuilder-3.0/sources/alt-http.go000066400000000000000000000037371456216713500200220ustar00rootroot00000000000000package sources import ( "crypto/sha256" "errors" "fmt" "net/url" "path/filepath" "strings" "github.com/lxc/distrobuilder/shared" ) type altLinux struct { common } func (s 
*altLinux) Run() error { arch := s.definition.Image.ArchitectureMapped if arch == "armhf" { arch = "armh" } baseURL := fmt.Sprintf( "%s/%s/cloud/%s/", s.definition.Source.URL, s.definition.Image.Release, arch, ) fname := fmt.Sprintf("alt-%s-rootfs-systemd-%s.tar.xz", strings.ToLower(s.definition.Image.Release), arch) url, err := url.Parse(baseURL) if err != nil { return fmt.Errorf("Failed to parse URL %q: %w", baseURL, err) } checksumFile := "" if !s.definition.Source.SkipVerification { if len(s.definition.Source.Keys) != 0 { checksumFile = baseURL + "SHA256SUMS" fpath, err := s.DownloadHash(s.definition.Image, checksumFile+".gpg", "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", checksumFile+".gpg", err) } _, err = s.DownloadHash(s.definition.Image, checksumFile, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", checksumFile, err) } valid, err := s.VerifyFile( filepath.Join(fpath, "SHA256SUMS"), filepath.Join(fpath, "SHA256SUMS.gpg")) if err != nil { return fmt.Errorf("Failed to verify file: %w", err) } if !valid { return fmt.Errorf("Invalid signature for %q", "SHA256SUMS") } } else { // Force gpg checks when using http if url.Scheme != "https" { return errors.New("GPG keys are required if downloading from HTTP") } } } fpath, err := s.DownloadHash(s.definition.Image, baseURL+fname, checksumFile, sha256.New()) if err != nil { return fmt.Errorf("Failed to download %q: %w", baseURL+fname, err) } s.logger.WithField("file", filepath.Join(fpath, fname)).Info("Unpacking image") // Unpack err = shared.Unpack(filepath.Join(fpath, fname), s.rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", fname, err) } return nil } distrobuilder-3.0/sources/apertis-http.go000066400000000000000000000055621456216713500207070ustar00rootroot00000000000000package sources import ( "errors" "fmt" "io" "net/http" "net/url" "path/filepath" "regexp" "strings" "github.com/lxc/distrobuilder/shared" ) type apertis struct { common } // Run downloads the tarball and unpacks it. func (s *apertis) Run() error { release := s.definition.Image.Release exactRelease := release // https://images.apertis.org/daily/v2020dev0/20190830.0/amd64/minimal/ospack_v2020dev0-amd64-minimal_20190830.0.tar.gz baseURL := fmt.Sprintf("%s/%s/%s", s.definition.Source.URL, s.definition.Source.Variant, release) var ( resp *http.Response err error ) err = shared.Retry(func() error { resp, err = http.Head(baseURL) if err != nil { return fmt.Errorf("Failed to HEAD %q: %w", baseURL, err) } return nil }, 3) if err != nil { return err } if resp.StatusCode == http.StatusNotFound { // Possibly, release is a specific release (18.12.0 instead of 18.12). Lets trim the prefix and continue. 
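// For example, a release of "18.12.0" is reduced to "18.12" here: the regexp
// below matches the trailing ".0" and strings.TrimSuffix drops it before the
// base URL is rebuilt.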
re := regexp.MustCompile(`\.\d+$`) release = strings.TrimSuffix(release, re.FindString(release)) baseURL = fmt.Sprintf("%s/%s/%s", s.definition.Source.URL, s.definition.Source.Variant, release) } else { exactRelease, err = s.getLatestRelease(baseURL, release) if err != nil { return fmt.Errorf("Failed to get latest release: %w", err) } } baseURL = fmt.Sprintf("%s/%s/%s/%s/", baseURL, exactRelease, s.definition.Image.ArchitectureMapped, s.definition.Image.Variant) fname := fmt.Sprintf("ospack_%s-%s-%s_%s.tar.gz", release, s.definition.Image.ArchitectureMapped, s.definition.Image.Variant, exactRelease) url, err := url.Parse(baseURL) if err != nil { return fmt.Errorf("Failed to parse %q: %w", baseURL, err) } // Force gpg checks when using http if url.Scheme != "https" { return errors.New("Only HTTPS server is supported") } fpath, err := s.DownloadHash(s.definition.Image, baseURL+fname, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", baseURL+fname, err) } s.logger.WithField("file", filepath.Join(fpath, fname)).Info("Unpacking image") // Unpack err = shared.Unpack(filepath.Join(fpath, fname), s.rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", fname, err) } return nil } func (s *apertis) getLatestRelease(baseURL, release string) (string, error) { var ( resp *http.Response err error ) err = shared.Retry(func() error { resp, err = http.Get(baseURL) if err != nil { return fmt.Errorf("Failed to GET %q: %w", baseURL, err) } return nil }, 3) if err != nil { return "", err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to ready body: %w", err) } regex := regexp.MustCompile(fmt.Sprintf(">(%s\\.\\d+)/<", release)) releases := regex.FindAllStringSubmatch(string(body), -1) if len(releases) > 0 { return releases[len(releases)-1][1], nil } return "", nil } distrobuilder-3.0/sources/apertis-http_test.go000066400000000000000000000007511456216713500217410ustar00rootroot00000000000000package sources import ( "fmt" "testing" "github.com/stretchr/testify/require" ) func TestApertisHTTP_getLatestRelease(t *testing.T) { s := &apertis{} tests := []struct { release string want string }{ { "18.12", "18.12.0", }, } for _, tt := range tests { baseURL := fmt.Sprintf("https://images.apertis.org/release/%s", tt.release) release, err := s.getLatestRelease(baseURL, tt.release) require.NoError(t, err) require.Equal(t, tt.want, release) } } distrobuilder-3.0/sources/archlinux-http.go000066400000000000000000000073331456216713500212330ustar00rootroot00000000000000package sources import ( "errors" "fmt" "net/url" "os" "path" "path/filepath" "regexp" "sort" "strings" "gopkg.in/antchfx/htmlquery.v1" "github.com/lxc/distrobuilder/shared" ) type archlinux struct { common } // Run downloads an Arch Linux tarball. func (s *archlinux) Run() error { release := s.definition.Image.Release // Releases are only available for the x86_64 architecture. ARM only has // a "latest" tarball. 
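// (For instance, on ARM the code below always fetches
// ArchLinuxARM-<arch>-latest.tar.gz, so no release lookup is needed there.)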
if s.definition.Image.ArchitectureMapped == "x86_64" && release == "" { var err error // Get latest release release, err = s.getLatestRelease(s.definition.Source.URL, s.definition.Image.ArchitectureMapped) if err != nil { return fmt.Errorf("Failed to get latest release: %w", err) } } var fname string var tarball string if s.definition.Image.ArchitectureMapped == "x86_64" { fname = fmt.Sprintf("archlinux-bootstrap-%s-%s.tar.gz", release, s.definition.Image.ArchitectureMapped) tarball = fmt.Sprintf("%s/%s/%s", s.definition.Source.URL, release, fname) } else { fname = fmt.Sprintf("ArchLinuxARM-%s-latest.tar.gz", s.definition.Image.ArchitectureMapped) tarball = fmt.Sprintf("%s/os/%s", s.definition.Source.URL, fname) } url, err := url.Parse(tarball) if err != nil { return fmt.Errorf("Failed to parse URL %q: %w", tarball, err) } if !s.definition.Source.SkipVerification && url.Scheme != "https" && len(s.definition.Source.Keys) == 0 { return errors.New("GPG keys are required if downloading from HTTP") } fpath, err := s.DownloadHash(s.definition.Image, tarball, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", tarball, err) } // Force gpg checks when using http if !s.definition.Source.SkipVerification && url.Scheme != "https" { _, err = s.DownloadHash(s.definition.Image, tarball+".sig", "", nil) if err != nil { return fmt.Errorf("Failed downloading %q: %w", tarball+".sig", err) } valid, err := s.VerifyFile( filepath.Join(fpath, fname), filepath.Join(fpath, fname+".sig")) if err != nil { return fmt.Errorf("Failed to verify %q: %w", fname, err) } if !valid { return fmt.Errorf("Invalid signature for %q", fname) } } s.logger.WithField("file", filepath.Join(fpath, fname)).Info("Unpacking image") // Unpack err = shared.Unpack(filepath.Join(fpath, fname), s.rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack file %q: %w", filepath.Join(fpath, fname), err) } // Move everything inside 'root.' 
(which was is the tarball) to its // parent directory files, err := filepath.Glob(fmt.Sprintf("%s/*", filepath.Join(s.rootfsDir, "root."+s.definition.Image.ArchitectureMapped))) if err != nil { return fmt.Errorf("Failed to get files: %w", err) } for _, file := range files { err = os.Rename(file, filepath.Join(s.rootfsDir, path.Base(file))) if err != nil { return fmt.Errorf("Failed to rename file %q: %w", file, err) } } path := filepath.Join(s.rootfsDir, "root."+s.definition.Image.ArchitectureMapped) err = os.RemoveAll(path) if err != nil { return fmt.Errorf("Failed to remove %q: %w", path, err) } return nil } func (s *archlinux) getLatestRelease(URL string, arch string) (string, error) { doc, err := htmlquery.LoadURL(URL) if err != nil { return "", fmt.Errorf("Failed to load URL %q: %w", URL, err) } re := regexp.MustCompile(`^\d{4}\.\d{2}\.\d{2}/?$`) var releases []string for _, node := range htmlquery.Find(doc, `//a[@href]/text()`) { if re.MatchString(node.Data) { releases = append(releases, strings.TrimSuffix(node.Data, "/")) } } if len(releases) == 0 { return "", errors.New("Failed to determine latest release") } // Sort releases in case they're out-of-order sort.Strings(releases) return releases[len(releases)-1], nil } distrobuilder-3.0/sources/archlinux-http_test.go000066400000000000000000000005331456216713500222650ustar00rootroot00000000000000package sources import ( "regexp" "testing" "github.com/stretchr/testify/require" ) func TestArchLinuxGetLatestRelease(t *testing.T) { var src archlinux release, err := src.getLatestRelease("https://archive.archlinux.org/iso/", "x86_64") require.NoError(t, err) require.Regexp(t, regexp.MustCompile(`^\d{4}\.\d{2}\.\d{2}$`), release) } distrobuilder-3.0/sources/busybox.go000066400000000000000000000052141456216713500177500ustar00rootroot00000000000000package sources import ( "bufio" "bytes" "crypto/sha256" "fmt" "os" "path/filepath" "github.com/lxc/distrobuilder/shared" ) type busybox struct { common } // Run downloads a busybox tarball. 
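// In slightly more detail (see the body below): the release tarball is
// downloaded, a static busybox binary is built from source and installed as
// /bin/busybox inside the rootfs, and one symlink per applet reported by
// "busybox --list-full" is created, along the lines of:
//
//	<rootfs>/usr/bin/wget -> /bin/busybox (illustrative applet)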
func (s *busybox) Run() error { fname := fmt.Sprintf("busybox-%s.tar.bz2", s.definition.Image.Release) tarball := fmt.Sprintf("%s/%s", s.definition.Source.URL, fname) var ( fpath string err error ) if s.definition.Source.SkipVerification { fpath, err = s.DownloadHash(s.definition.Image, tarball, "", nil) } else { fpath, err = s.DownloadHash(s.definition.Image, tarball, tarball+".sha256", sha256.New()) } if err != nil { return fmt.Errorf("Failed to download %q: %w", tarball, err) } sourceDir := filepath.Join(s.cacheDir, "src") err = os.MkdirAll(sourceDir, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", sourceDir, err) } s.logger.WithField("file", filepath.Join(fpath, fname)).Info("Unpacking image") // Unpack err = shared.Unpack(filepath.Join(fpath, fname), sourceDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", fname, err) } sourceDir = filepath.Join(sourceDir, fmt.Sprintf("busybox-%s", s.definition.Image.Release)) err = shared.RunScript(s.ctx, fmt.Sprintf(`#!/bin/sh set -eux source_dir=%s rootfs_dir=%s cwd="$(pwd)" cd "${source_dir}" make defconfig sed -ri 's/# CONFIG_STATIC .*/CONFIG_STATIC=y/g' .config make cd "${cwd}" mkdir -p "${rootfs_dir}/bin" mv ${source_dir}/busybox "${rootfs_dir}/bin/busybox" `, sourceDir, s.rootfsDir)) if err != nil { return fmt.Errorf("Failed to build busybox: %w", err) } var buf bytes.Buffer err = shared.RunCommand(s.ctx, os.Stdin, &buf, filepath.Join(s.rootfsDir, "bin", "busybox"), "--list-full") if err != nil { return fmt.Errorf("Failed to install busybox: %w", err) } scanner := bufio.NewScanner(&buf) for scanner.Scan() { path := filepath.Join(s.rootfsDir, scanner.Text()) if path == "" || path == "bin/busybox" { continue } s.logger.Debugf("Creating directory %q", path) err = os.MkdirAll(filepath.Dir(path), 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", filepath.Dir(path), err) } s.logger.Debugf("Creating symlink %q -> %q", path, "/bin/busybox") err = os.Symlink("/bin/busybox", path) if err != nil { return fmt.Errorf("Failed to create symlink %q -> /bin/busybox: %w", path, err) } } for _, path := range []string{"dev", "mnt", "proc", "root", "sys", "tmp"} { err := os.Mkdir(filepath.Join(s.rootfsDir, path), 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", filepath.Join(s.rootfsDir, path), err) } } return nil } distrobuilder-3.0/sources/centos-http.go000066400000000000000000000400501456216713500205220ustar00rootroot00000000000000package sources import ( "crypto/sha256" "errors" "fmt" "io" "net/http" "net/url" "os" "path" "path/filepath" "regexp" "strings" "github.com/lxc/distrobuilder/shared" ) type centOS struct { commonRHEL fname string majorVersion string } func (s *centOS) Run() error { if strings.HasSuffix(s.definition.Image.Release, "-Stream") { s.majorVersion = strings.ToLower(s.definition.Image.Release) } else { s.majorVersion = strings.Split(s.definition.Image.Release, ".")[0] } var err error baseURL := fmt.Sprintf("%s/%s/isos/%s/", s.definition.Source.URL, strings.ToLower(s.definition.Image.Release), s.definition.Image.ArchitectureMapped) if s.definition.Image.Release == "9-Stream" { baseURL = fmt.Sprintf("%s/%s/BaseOS/%s/iso/", s.definition.Source.URL, strings.ToLower(s.definition.Image.Release), s.definition.Image.ArchitectureMapped) } s.fname, err = s.getRelease(s.definition.Source.URL, s.definition.Image.Release, s.definition.Source.Variant, s.definition.Image.ArchitectureMapped) if err != nil { return fmt.Errorf("Failed to get release: %w", err) } 
fpath := s.getTargetDir() // Skip download if raw image exists and has already been decompressed. if strings.HasSuffix(s.fname, ".raw.xz") { imagePath := filepath.Join(fpath, filepath.Base(strings.TrimSuffix(s.fname, ".xz"))) stat, err := os.Stat(imagePath) if err == nil && stat.Size() > 0 { tarball := filepath.Join(fpath, strings.TrimSuffix(s.fname, ".xz")) err = s.unpackRaw(tarball, s.rootfsDir, s.rawRunner) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", tarball, err) } return nil } } url, err := url.Parse(baseURL) if err != nil { return fmt.Errorf("Failed to parse URL %q: %w", baseURL, err) } checksumFile := "" if !s.definition.Source.SkipVerification { // Force gpg checks when using http if url.Scheme != "https" { if len(s.definition.Source.Keys) == 0 { return errors.New("GPG keys are required if downloading from HTTP") } if s.definition.Image.ArchitectureMapped == "armhfp" { checksumFile = "sha256sum.txt" } else { checksumFile = "sha256sum.txt.asc" if strings.HasPrefix(s.definition.Image.Release, "9") { checksumFile = "SHA256SUM" } else if strings.HasPrefix(s.definition.Image.Release, "8") { checksumFile = "CHECKSUM" } } fpath, err := s.DownloadHash(s.definition.Image, baseURL+checksumFile, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", baseURL+checksumFile, err) } // Only verify file if possible. if strings.HasSuffix(checksumFile, ".asc") { valid, err := s.VerifyFile(filepath.Join(fpath, checksumFile), "") if err != nil { return fmt.Errorf("Failed to verify %q: %w", checksumFile, err) } if !valid { return fmt.Errorf("Invalid signature for %q", checksumFile) } } } } _, err = s.DownloadHash(s.definition.Image, baseURL+s.fname, checksumFile, sha256.New()) if err != nil { return fmt.Errorf("Failed to download %q: %w", baseURL+s.fname, err) } source := filepath.Join(fpath, s.fname) s.logger.WithField("file", source).Info("Unpacking image") if strings.HasSuffix(s.fname, ".raw.xz") || strings.HasSuffix(s.fname, ".raw") { err = s.unpackRaw(source, s.rootfsDir, s.rawRunner) } else { err = s.unpackISO(source, s.rootfsDir, s.isoRunner) } if err != nil { return fmt.Errorf("Failed to unpack %q: %w", source, err) } return nil } func (s *centOS) rawRunner() error { err := shared.RunScript(s.ctx, fmt.Sprintf(`#!/bin/sh set -eux version="%s" # Create required files touch /etc/mtab /etc/fstab # Create a minimal rootfs mkdir /rootfs if [ "${version}" = 7 ]; then repo="base" else repo="BaseOS" fi yum --installroot=/rootfs --disablerepo=* --enablerepo=${repo} -y --releasever=${version} install basesystem centos-release yum rm -rf /rootfs/var/cache/yum # Disable CentOS kernel repo if [ -e /rootfs/etc/yum.repos.d/CentOS-armhfp-kernel.repo ]; then sed -ri 's/^enabled=.*/enabled=0/g' /rootfs/etc/yum.repos.d/CentOS-armhfp-kernel.repo fi `, s.majorVersion)) if err != nil { return fmt.Errorf("Failed to run script: %w", err) } return nil } func (s *centOS) isoRunner(gpgKeysPath string) error { err := shared.RunScript(s.ctx, fmt.Sprintf(`#!/bin/sh set -eux GPG_KEYS="%s" # Create required files touch /etc/mtab /etc/fstab yum_args="" mkdir -p /etc/yum.repos.d mkdir -p /etc/pki/rpm-gpg # Add GPG keys for aarch64, Arm32, and ppc64 cat <<-"EOF" >/etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7-aarch64 -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v2.0.22 (GNU/Linux) mQENBFW3h2gBCADPM3WDbdHNnMAB0FPvVRIBjGpkpeWT5rsbMZbk35tCx7MbAhMk zcN519xw7DGVLigFd68S3W2Lrde6ioyVQ1SVSJ7z84U4uYUfSa858Dskxxy021Ip NrocTrziy773v1gCPwA5xeT89bgzsMVMzCSy0U7TeqMDhN2urEMG5CCEpy0K9XZv 
bpUexhn7TbP10g5BzC9igd498QcW/69Oz5OK7WcZOtqmGn78pGBCH2ly+IqIV6ZS 9yXC6jOmOnA8fM0gKJAelhQALd77cULMSGbu96ReG3BEFlgWQjbtZG3L5BvMVInw MkUQEntHvjp6oHtPiIAc3VtLq0IxWVygFHNRABEBAAG0cENlbnRPUyBBbHRBcmNo IFNJRyAtIEFBcmNoNjQgKGh0dHA6Ly93aWtpLmNlbnRvcy5vcmcvU3BlY2lhbElu dGVyZXN0R3JvdXAvQWx0QXJjaC9BQXJjaDY0KSA8c2VjdXJpdHlAY2VudG9zLm9y Zz6JATkEEwECACMFAlW3h2gCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIXgAAK CRBsfLbvMF1J1pSFCACQbLvjwCFdgr0DpVJZ0o50Dcl8jYzZtd/NZOBNYXi/TQza c6DFhiAj72zkgOGb+xznUXJJIiOLCgyJBUdJQSRx/EfVb9ftd4kSOA/wErOhDV71 Hyww9M/gz82SjHF9qq8ofDto6ZfJMfiLX4aZwR39jZzS5Gm+bH5FfgxlwG0V88fu aKlzsn3p975uD659tSKae4xLysxkBG6oDaXvnWI2/UGC724gN+R3aKe9kI0wk8wA h5Qzf7+jRk0qb859rryno1rBpuzxJcwg5qvN2PXG3xDFOHG+3LX3mV3UnVAqCjHO zyGnzAAiNfBwgMyu6bu4lXd4hbZKy73RwnouQkuA =qiwp -----END PGP PUBLIC KEY BLOCK----- EOF cat <<-"EOF" >/etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-AltArch-Arm32 -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v2.0.22 (GNU/Linux) mQENBFZYUi8BCADtrzJRu2Q3WGb5ExbmAB8CGWDAbVOTLZBA0bSj+i63LsUDdHkU sKpOGEaRPhagB27lkVUMOkcOIodYAbQZDbF788KDxeF4BopORbGXdo14OMEmoVq6 rWPDoYs7Zv7G8blQa0IBE/BqdjYxyXZ0CSt+OLQ8r3G8ZB//SbZSTWWJcp2aN5oE 79yB+tEfYznGzETZY8gzBOcKIk/ifYVNHHS65ldgOd3KQK7/vjWVc9LDOLcFcwXj YABSaUTsc3SkYKQ71SuxLssBWxSGaiZWBdN7s0FZFMDagWtKW1jQDlIhoRSULfpL m5Y306pEqNOdiNgAnipXPL4NzWv0zFVHoWaFABEBAAG0bUNlbnRPUyBBbHRBcmNo IFNJRyAtIEFybTMyIChodHRwczovL3dpa2kuY2VudG9zLm9yZy9TcGVjaWFsSW50 ZXJlc3RHcm91cC9BbHRBcmNoL0FybTMyKSA8c2VjdXJpdHlAY2VudG9zLm9yZz6J ATkEEwECACMFAlZYUi8CGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIXgAAKCRDK /vEbYlBf5qvhB/9R8GXKz71u66U1VTvlDEh4tz7LzKNUBAtEH9fvox1Y8Mh1+VKK h7WtAWXsAkBvy7HeJ/GCUgvbgBjc7qpVjq/dipUTt+c51TLkoSa0msv4aJnA5azU 7+9qD/qvnjEZVgstFGyTQ+m5v9N3KdAWyw2Xi1V820bmmj+vlVzGFbQo2UPps+7d bXZ9xI9Lmme/KD4tctjg9lnoCXmFIHGZfMVCoCyk42+p5EHlSZhYIRyIIhjpELlL gllMZz1Bdp+V51zndIm7Fe1d6jcSEjpPjRecIxfr5PBLAu3j/VbjBK90u8AKSKY9 q5eFcyxxA1r2IdmItGVwz73gSz8WkJoh8QeN =72OZ -----END PGP PUBLIC KEY BLOCK----- EOF cat <<-"EOF" >/etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-AltArch-7-ppc64 -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v2.0.22 (GNU/Linux) mQENBFZYUWkBCADomwJs4B6eBhhHmkBxaTQBNg2SicdZZWfb9+VArLqZ+Qyez3YQ V1Bq2dBaDv2HIpTI8AHyT/KL/VuF1cdmGK8Q+uhqVxbFIP3giuaNHdV+DLx7suid aKP0MA/1fs5x4RDvRmHVm0bPRwUWK84aWyh2Ux1D9I8HWsmDamAVKUinocnWWG0K sNsV2uTuHeXYrJB0lex1nD1ColEa4CjmRxHMFYhoaFfw+mUUJ6rrN+zPdettxzbe HPBVhNWpfOcQdEIrPWwhMCJJYOnPQ7OpZBZ7088Bc7JVA4RHMo54MuuU2t1Th71H l7hcF9ueIKXqnsoAWFoG+p4UOy+OHU11THp3ABEBAAG0aUNlbnRPUyBBbHRBcmNo IFNJRyAtIFBvd2VyUEMgKGh0dHBzOi8vd2lraS5jZW50b3Mub3JnL1NwZWNpYWxJ bnRlcmVzdEdyb3VwL0FsdEFyY2gpIDxzZWN1cml0eUBjZW50b3Mub3JnPokBOQQT AQIAIwUCVlhRaQIbAwcLCQgHAwIBBhUIAgkKCwQWAgMBAh4BAheAAAoJEKlju9v1 M/T6HPsH/jLoRihPGZrdNjnVRSx/7hzQ+csdpgwRYSgJOeLTJAmemXYxiAQ0Wh+Z AiDA6hdUu973Y/aTZbOoX+trb6SaEquGLLxhFgC21whVYfRznxE3FQv02a/hjp/3 a+i0GDT4ExSNuMxAqEewnWTymHS8bAsPGKuEMk9zElMZgeM6RrZUT+RL/ybjw5Mi H8mP/tEcR1jAsm30BSoWV0nKHMXLpuOVTQS2V3ngzMWoA/l/9t7CafhkpV7IGfnB HwQChc3L9fyZ/LwCo0WR1mHbzoPq+K4fwOnjdFEbgUSvfQ3+QiXXrfWt7C9IYAmA /6cxo9vG1NH6sQ3BJiEyJNaWj3q2c5U= =E+yp -----END PGP PUBLIC KEY BLOCK----- EOF if [ -d /mnt/cdrom ]; then # Install initial package set cd /mnt/cdrom/Packages rpm -ivh --nodeps $(ls rpm-*.rpm | head -n1) rpm -ivh --nodeps $(ls yum-*.rpm | head -n1) # Add cdrom repo cat <<- EOF > /etc/yum.repos.d/cdrom.repo [cdrom] name=Install CD-ROM baseurl=file:///mnt/cdrom enabled=0 EOF gpg_keys_official="file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7-aarch64 file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-AltArch-Arm32 
file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-AltArch-7-ppc64" if [ -n "${GPG_KEYS}" ]; then echo gpgcheck=1 >> /etc/yum.repos.d/cdrom.repo echo gpgkey=${gpg_keys_official} ${GPG_KEYS} >> /etc/yum.repos.d/cdrom.repo else echo gpgcheck=0 >> /etc/yum.repos.d/cdrom.repo fi # Disable fastestmirror plugin if [ -f /etc/yum/pluginconf.d/fastestmirror.conf ]; then sed -ri 's/enabled=1/enabled=0/g' /etc/yum/pluginconf.d/fastestmirror.conf fi yum_args="--disablerepo=* --enablerepo=cdrom" yum ${yum_args} -y reinstall yum else if ! [ -f /etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial ]; then mkdir -p /etc/pki/rpm-gpg cat <<- "EOF" > /etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v2.0.22 (GNU/Linux) mQINBFzMWxkBEADHrskpBgN9OphmhRkc7P/YrsAGSvvl7kfu+e9KAaU6f5MeAVyn rIoM43syyGkgFyWgjZM8/rur7EMPY2yt+2q/1ZfLVCRn9856JqTIq0XRpDUe4nKQ 8BlA7wDVZoSDxUZkSuTIyExbDf0cpw89Tcf62Mxmi8jh74vRlPy1PgjWL5494b3X 5fxDidH4bqPZyxTBqPrUFuo+EfUVEqiGF94Ppq6ZUvrBGOVo1V1+Ifm9CGEK597c aevcGc1RFlgxIgN84UpuDjPR9/zSndwJ7XsXYvZ6HXcKGagRKsfYDWGPkA5cOL/e f+yObOnC43yPUvpggQ4KaNJ6+SMTZOKikM8yciyBwLqwrjo8FlJgkv8Vfag/2UR7 JINbyqHHoLUhQ2m6HXSwK4YjtwidF9EUkaBZWrrskYR3IRZLXlWqeOi/+ezYOW0m vufrkcvsh+TKlVVnuwmEPjJ8mwUSpsLdfPJo1DHsd8FS03SCKPaXFdD7ePfEjiYk nHpQaKE01aWVSLUiygn7F7rYemGqV9Vt7tBw5pz0vqSC72a5E3zFzIIuHx6aANry Gat3aqU3qtBXOrA/dPkX9cWE+UR5wo/A2UdKJZLlGhM2WRJ3ltmGT48V9CeS6N9Y m4CKdzvg7EWjlTlFrd/8WJ2KoqOE9leDPeXRPncubJfJ6LLIHyG09h9kKQARAQAB tDpDZW50T1MgKENlbnRPUyBPZmZpY2lhbCBTaWduaW5nIEtleSkgPHNlY3VyaXR5 QGNlbnRvcy5vcmc+iQI3BBMBAgAhBQJczFsZAhsDBgsJCAcDAgYVCAIJCgsDFgIB Ah4BAheAAAoJEAW1VbOEg8ZdjOsP/2ygSxH9jqffOU9SKyJDlraL2gIutqZ3B8pl Gy/Qnb9QD1EJVb4ZxOEhcY2W9VJfIpnf3yBuAto7zvKe/G1nxH4Bt6WTJQCkUjcs N3qPWsx1VslsAEz7bXGiHym6Ay4xF28bQ9XYIokIQXd0T2rD3/lNGxNtORZ2bKjD vOzYzvh2idUIY1DgGWJ11gtHFIA9CvHcW+SMPEhkcKZJAO51ayFBqTSSpiorVwTq a0cB+cgmCQOI4/MY+kIvzoexfG7xhkUqe0wxmph9RQQxlTbNQDCdaxSgwbF2T+gw byaDvkS4xtR6Soj7BKjKAmcnf5fn4C5Or0KLUqMzBtDMbfQQihn62iZJN6ZZ/4dg q4HTqyVpyuzMXsFpJ9L/FqH2DJ4exGGpBv00ba/Zauy7GsqOc5PnNBsYaHCply0X 407DRx51t9YwYI/ttValuehq9+gRJpOTTKp6AjZn/a5Yt3h6jDgpNfM/EyLFIY9z V6CXqQQ/8JRvaik/JsGCf+eeLZOw4koIjZGEAg04iuyNTjhx0e/QHEVcYAqNLhXG rCTTbCn3NSUO9qxEXC+K/1m1kaXoCGA0UWlVGZ1JSifbbMx0yxq/brpEZPUYm+32 o8XfbocBWljFUJ+6aljTvZ3LQLKTSPW7TFO+GXycAOmCGhlXh2tlc6iTc41PACqy yy+mHmSv =kkH7 -----END PGP PUBLIC KEY BLOCK----- EOF fi # --- CentOS 7 if grep -q "CentOS Linux 7" /etc/os-release; then cat <<- "EOF" > /etc/yum.repos.d/CentOS-Base.repo [BaseOS] name=CentOS-$releasever - Base baseurl=https://muug.ca/mirror/centos/$releasever/BaseOS/$basearch gpgcheck=1 enabled=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial EOF fi # --- Only on Stream 8 if grep -q "CentOS Stream 8" /etc/os-release; then cat <<- "EOF" > /etc/yum.repos.d/CentOS-Base.repo [BaseOS] name=CentOS-$releasever - Base baseurl=https://muug.ca/mirror/centos/$releasever/BaseOS/$basearch/os gpgcheck=1 enabled=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial EOF cat <<- "EOF" > /etc/yum.repos.d/CentOS-Appstream.repo [AppStream] name=CentOS-$releasever - Base baseurl=https://muug.ca/mirror/centos/$releasever/AppStream/$basearch/os gpgcheck=1 enabled=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial EOF fi # --- Only on Stream 9 if grep -q "CentOS Stream 9" /etc/os-release; then cat <<- "EOF" > /etc/yum.repos.d/CentOS-Base.repo [baseos] name=CentOS Stream $releasever - BaseOS baseurl=https://mirror.xenyth.net/centos-stream/$releasever/BaseOS/$basearch/os gpgcheck=1 enabled=1 
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial [appstream] name=CentOS Stream $releasever - AppStream baseurl=https://mirror.xenyth.net/centos-stream/$releasever/AppStream/$basearch/os gpgcheck=1 enabled=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial EOF fi # Use dnf in the boot iso since yum isn't available alias yum=dnf fi # Disable fastestmirror plugin if [ -f /etc/yum/pluginconf.d/fastestmirror.conf ]; then sed -ri 's/enabled=1/enabled=0/g' /etc/yum/pluginconf.d/fastestmirror.conf fi pkgs="basesystem centos-release yum" if grep -q "CentOS Stream" /etc/os-release; then pkgs="${pkgs} centos-stream-repos" fi # Create a minimal rootfs mkdir /rootfs yum ${yum_args} --installroot=/rootfs -y --releasever=%s --skip-broken install ${pkgs} rm -rf /rootfs/var/cache/yum `, gpgKeysPath, s.majorVersion)) if err != nil { return fmt.Errorf("Failed to run script: %w", err) } return nil } func (s *centOS) getRelease(URL, release, variant, arch string) (string, error) { releaseFields := strings.Split(release, ".") u := URL + path.Join("/", strings.ToLower(release), "isos", arch) if release == "9-Stream" { u = URL + path.Join("/", strings.ToLower(release), "BaseOS", arch, "iso") } var ( resp *http.Response err error ) err = shared.Retry(func() error { resp, err = http.Get(u) if err != nil { return fmt.Errorf("Failed to get URL %q: %w", u, err) } return nil }, 3) if err != nil { return "", err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to read body: %w", err) } if len(releaseFields) == 3 && !strings.Contains(URL, "vault.centos.org") { return "", errors.New("Patch releases are only supported when using vault.centos.org as the mirror") } if strings.HasSuffix(releaseFields[0], "-Stream") { fields := strings.Split(releaseFields[0], "-") // Convert -Stream to Stream- releaseFields[0] = fmt.Sprintf("%s-%s", fields[1], fields[0]) } re := s.getRegexes(arch, variant, release) for _, r := range re { matches := r.FindAllString(string(body), -1) if len(matches) > 0 { return matches[len(matches)-1], nil } } return "", errors.New("Failed to find release") } func (s *centOS) getRegexes(arch string, variant string, release string) []*regexp.Regexp { releaseFields := strings.Split(release, ".") if strings.HasSuffix(releaseFields[0], "-Stream") { fields := strings.Split(releaseFields[0], "-") // Convert -Stream to Stream- releaseFields[0] = fmt.Sprintf("%s-%s", fields[1], fields[0]) } var re []string switch len(releaseFields) { case 1: if arch == "armhfp" { re = append(re, fmt.Sprintf("CentOS-Userland-%s-armv7hl-(RootFS|generic)-(?i:%s)(-\\d+)?-sda.raw.xz", releaseFields[0], variant)) } else { re = append(re, fmt.Sprintf("CentOS-%s(.\\d+)*-%s-(?i:%s)(-\\d+)?.iso", releaseFields[0], arch, variant)) re = append(re, fmt.Sprintf("CentOS-%s(.\\d+)*-%s(-\\d+)?-(?i:%s).iso", releaseFields[0], arch, variant)) re = append(re, fmt.Sprintf("CentOS-%s(-\\d+.0)-%s-(?i:%s).iso", releaseFields[0], arch, variant)) } case 2: if arch == "armhfp" { re = append(re, fmt.Sprintf("CentOS-Userland-%s.%s-armv7hl-RootFS-(?i:%s)(-\\d+)?-sda.raw.xz", releaseFields[0], releaseFields[1], variant)) } else { re = append(re, fmt.Sprintf("CentOS-%s.%s-%s-(?i:%s)(-\\d+)?.iso", releaseFields[0], releaseFields[1], arch, variant)) re = append(re, fmt.Sprintf("CentOS-%s-%s-%s-(?i:%s).iso", releaseFields[0], arch, releaseFields[1], variant)) } case 3: if arch == "x86_64" { re = append(re, fmt.Sprintf("CentOS-%s.%s-%s-%s-(?i:%s)(-\\d+)?.iso", releaseFields[0], releaseFields[1], 
releaseFields[2], arch, variant)) if len(releaseFields[1]) == 1 { re = append(re, fmt.Sprintf("CentOS-%s-%s-(?i:%s)-%s-0%s.iso", releaseFields[0], arch, variant, releaseFields[2], releaseFields[1])) } else { re = append(re, fmt.Sprintf("CentOS-%s-%s-(?i:%s)-%s-%s.iso", releaseFields[0], arch, variant, releaseFields[2], releaseFields[1])) } re = append(re, fmt.Sprintf("CentOS-%s-%s-(?i:%s)-%s.iso", releaseFields[0], arch, variant, releaseFields[2])) re = append(re, fmt.Sprintf("CentOS-%s-%s-%s-(?i:%s).iso", releaseFields[0], arch, releaseFields[2], variant)) } } regexes := make([]*regexp.Regexp, len(re)) for i, r := range re { regexes[i] = regexp.MustCompile(r) } return regexes } distrobuilder-3.0/sources/common.go000066400000000000000000000136661456216713500175570ustar00rootroot00000000000000package sources import ( "context" "fmt" "hash" "io" "net/http" "os" "os/exec" "path" "path/filepath" "strings" "time" "github.com/lxc/incus/shared/ioprogress" incus "github.com/lxc/incus/shared/util" "github.com/sirupsen/logrus" "github.com/lxc/distrobuilder/shared" ) type common struct { logger *logrus.Logger definition shared.Definition rootfsDir string cacheDir string sourcesDir string ctx context.Context client *http.Client } func (s *common) init(ctx context.Context, logger *logrus.Logger, definition shared.Definition, rootfsDir string, cacheDir string, sourcesDir string) { s.logger = logger s.definition = definition s.rootfsDir = rootfsDir s.cacheDir = cacheDir s.sourcesDir = sourcesDir s.ctx = ctx transport := http.DefaultTransport.(*http.Transport).Clone() // Increase TLS handshake timeout for mirrors which need a bit more time. transport.TLSHandshakeTimeout = 60 * time.Second s.client = &http.Client{ Transport: transport, } } func (s *common) getTargetDir() string { dir := filepath.Join(s.sourcesDir, fmt.Sprintf("%s-%s-%s", s.definition.Image.Distribution, s.definition.Image.Release, s.definition.Image.ArchitectureMapped)) dir = strings.Replace(dir, " ", "", -1) dir = strings.ToLower(dir) return dir } // DownloadHash downloads a file. If a checksum file is provided, it will try and // match the hash. 
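//
// A typical call, as made by the HTTP sources in this package (the URLs are
// placeholders, not real endpoints):
//
//	fpath, err := s.DownloadHash(s.definition.Image, "https://example.com/rootfs.tar.xz",
//		"https://example.com/SHA256SUMS", sha256.New())
//	// on success, fpath is the cache directory that now holds rootfs.tar.xz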
func (s *common) DownloadHash(def shared.DefinitionImage, file, checksum string, hashFunc hash.Hash) (string, error) { var ( hashes []string err error ) destDir := s.getTargetDir() err = os.MkdirAll(destDir, 0755) if err != nil { return "", err } if checksum != "" { if hashFunc != nil { hashFunc.Reset() } hashLen := 0 if hashFunc != nil { hashLen = hashFunc.Size() * 2 } err := shared.Retry(func() error { hashes, err = downloadChecksum(s.ctx, s.client, destDir, checksum, file, hashFunc, hashLen) return err }, 3) if err != nil { return "", fmt.Errorf("Error while downloading checksum: %w", err) } } imagePath := filepath.Join(destDir, filepath.Base(file)) stat, err := os.Stat(imagePath) if err == nil && stat.Size() > 0 { image, err := os.Open(imagePath) if err != nil { return "", err } defer image.Close() if checksum != "" { if hashFunc != nil { hashFunc.Reset() } _, err = io.Copy(hashFunc, image) if err != nil { return "", err } result := fmt.Sprintf("%x", hashFunc.Sum(nil)) var hash string for _, h := range hashes { if result == h { hash = h break } } if hash == "" { return "", fmt.Errorf("Hash mismatch for %s: %s != %v", imagePath, result, hashes) } } return destDir, nil } image, err := os.Create(imagePath) if err != nil { return "", err } defer image.Close() progress := func(progress ioprogress.ProgressData) { fmt.Printf("%s\r", progress.Text) } done := make(chan struct{}) defer close(done) if checksum == "" { err = shared.Retry(func() error { _, err = incus.DownloadFileHash(s.ctx, s.client, "distrobuilder", progress, nil, imagePath, file, "", nil, image) if err != nil { os.Remove(imagePath) } return err }, 3) } else { // Check all file hashes in case multiple have been provided. err = shared.Retry(func() error { for _, h := range hashes { if hashFunc != nil { hashFunc.Reset() } _, err = incus.DownloadFileHash(s.ctx, s.client, "distrobuilder", progress, nil, imagePath, file, h, hashFunc, image) if err == nil { break } } if err != nil { os.Remove(imagePath) } return err }, 3) } if err != nil { return "", err } fmt.Println("") return destDir, nil } // GetSignedContent verifies the provided file, and returns its decrypted (plain) content. func (s *common) GetSignedContent(signedFile string) ([]byte, error) { keyring, err := s.CreateGPGKeyring() if err != nil { return nil, err } gpgDir := path.Dir(keyring) defer os.RemoveAll(gpgDir) out, err := exec.Command("gpg", "--homedir", gpgDir, "--keyring", keyring, "--decrypt", signedFile).Output() if err != nil { return nil, fmt.Errorf("Failed to get file content: %s: %w", out, err) } return out, nil } // VerifyFile verifies a file using gpg. func (s *common) VerifyFile(signedFile, signatureFile string) (bool, error) { keyring, err := s.CreateGPGKeyring() if err != nil { return false, err } gpgDir := path.Dir(keyring) defer os.RemoveAll(gpgDir) var out strings.Builder if signatureFile != "" { err := shared.RunCommand(s.ctx, nil, &out, "gpg", "--homedir", gpgDir, "--keyring", keyring, "--verify", signatureFile, signedFile) if err != nil { return false, fmt.Errorf("Failed to verify: %s: %w", out.String(), err) } } else { err := shared.RunCommand(s.ctx, nil, &out, "gpg", "--homedir", gpgDir, "--keyring", keyring, "--verify", signedFile) if err != nil { return false, fmt.Errorf("Failed to verify: %s: %w", out.String(), err) } } return true, nil } // CreateGPGKeyring creates a new GPG keyring. 
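//
// The keyring lives in a temporary directory below the target cache directory;
// callers are expected to remove it themselves, as VerifyFile above does:
//
//	keyring, err := s.CreateGPGKeyring()
//	if err != nil {
//		return false, err
//	}
//	defer os.RemoveAll(path.Dir(keyring))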
func (s *common) CreateGPGKeyring() (string, error) { err := os.MkdirAll(s.getTargetDir(), 0700) if err != nil { return "", err } gpgDir, err := os.MkdirTemp(s.getTargetDir(), "gpg.") if err != nil { return "", fmt.Errorf("Failed to create gpg directory: %w", err) } err = os.MkdirAll(gpgDir, 0700) if err != nil { return "", err } var ok bool for i := 0; i < 3; i++ { ok, err = recvGPGKeys(s.ctx, gpgDir, s.definition.Source.Keyserver, s.definition.Source.Keys) if ok { break } time.Sleep(2 * time.Second) } if !ok { return "", err } var out strings.Builder // Export keys to support gpg1 and gpg2 err = shared.RunCommand(s.ctx, nil, &out, "gpg", "--homedir", gpgDir, "--export", "--output", filepath.Join(gpgDir, "distrobuilder.gpg")) if err != nil { os.RemoveAll(gpgDir) return "", fmt.Errorf("Failed to export keyring: %s: %w", out.String(), err) } return filepath.Join(gpgDir, "distrobuilder.gpg"), nil } distrobuilder-3.0/sources/common_test.go000066400000000000000000000054061456216713500206070ustar00rootroot00000000000000package sources import ( "context" "log" "os" "path" "path/filepath" "testing" incus "github.com/lxc/incus/shared/util" "github.com/stretchr/testify/require" "github.com/lxc/distrobuilder/shared" ) func TestVerifyFile(t *testing.T) { wd, err := os.Getwd() if err != nil { t.Fatalf("Failed to retrieve working directory: %v", err) } testdataDir := filepath.Join(wd, "..", "testdata") keys := []string{"0x5DE8949A899C8D99"} keyserver := "keyserver.ubuntu.com" tests := []struct { name string signedFile string signatureFile string keys []string keyserver string shouldFail bool }{ { "testfile with detached signature", filepath.Join(testdataDir, "testfile"), filepath.Join(testdataDir, "testfile.sig"), keys, keyserver, false, }, { "testfile with cleartext signature", filepath.Join(testdataDir, "testfile.asc"), "", keys, keyserver, false, }, { "testfile with invalid cleartext signature", filepath.Join(testdataDir, "testfile-invalid.asc"), "", keys, keyserver, true, }, { "testfile with normal signature", filepath.Join(testdataDir, "testfile.gpg"), "", keys, keyserver, false, }, { "no keys", filepath.Join(testdataDir, "testfile"), filepath.Join(testdataDir, "testfile.sig"), []string{}, keyserver, true, }, { "invalid key", filepath.Join(testdataDir, "testfile.asc"), "", []string{"0x46181433FBB75451"}, keyserver, true, }, } c := common{ sourcesDir: os.TempDir(), definition: shared.Definition{ Source: shared.DefinitionSource{}, }, ctx: context.TODO(), } for i, tt := range tests { log.Printf("Running test #%d: %s", i, tt.name) c.definition = shared.Definition{ Source: shared.DefinitionSource{ Keyserver: tt.keyserver, Keys: tt.keys, }, } valid, err := c.VerifyFile(tt.signedFile, tt.signatureFile) if tt.shouldFail { require.Error(t, err) require.False(t, valid) } else { require.NoError(t, err) require.True(t, valid) } } } func TestCreateGPGKeyring(t *testing.T) { c := common{ sourcesDir: os.TempDir(), definition: shared.Definition{ Source: shared.DefinitionSource{ Keyserver: "keyserver.ubuntu.com", Keys: []string{"0x5DE8949A899C8D99"}, }, }, ctx: context.TODO(), } keyring, err := c.CreateGPGKeyring() require.NoError(t, err) require.FileExists(t, keyring) os.RemoveAll(path.Dir(keyring)) c.definition = shared.Definition{} // This shouldn't fail, but the keyring file should not be created since // there are no keys to be exported. 
keyring, err = c.CreateGPGKeyring() require.NoError(t, err) require.False(t, incus.PathExists(keyring), "File should not exist") os.RemoveAll(path.Dir(keyring)) } distrobuilder-3.0/sources/debootstrap.go000066400000000000000000000057601456216713500206110ustar00rootroot00000000000000package sources import ( "fmt" "os" "path" "path/filepath" "strings" incus "github.com/lxc/incus/shared/util" "github.com/lxc/distrobuilder/shared" ) type debootstrap struct { common } // Run runs debootstrap. func (s *debootstrap) Run() error { var args []string distro := strings.ToLower(s.definition.Image.Distribution) release := strings.ToLower(s.definition.Image.Release) // Enable merged /usr by default, and disable it for certain distros/releases if distro == "ubuntu" && incus.ValueInSlice(release, []string{"xenial", "bionic"}) || distro == "mint" && incus.ValueInSlice(release, []string{"tara", "tessa", "tina", "tricia", "ulyana"}) || distro == "devuan" { args = append(args, "--no-merged-usr") } else { args = append(args, "--merged-usr") } os.RemoveAll(s.rootfsDir) if s.definition.Source.Variant != "" { args = append(args, "--variant", s.definition.Source.Variant) } if s.definition.Image.ArchitectureMapped != "" { args = append(args, "--arch", s.definition.Image.ArchitectureMapped) } if s.definition.Source.SkipVerification { args = append(args, "--no-check-gpg") } earlyPackagesInstall := s.definition.GetEarlyPackages("install") earlyPackagesRemove := s.definition.GetEarlyPackages("remove") if len(earlyPackagesInstall) > 0 { args = append(args, fmt.Sprintf("--include=%s", strings.Join(earlyPackagesInstall, ","))) } if len(earlyPackagesRemove) > 0 { args = append(args, fmt.Sprintf("--exclude=%s", strings.Join(earlyPackagesRemove, ","))) } if len(s.definition.Source.Components) > 0 { args = append(args, fmt.Sprintf("--components=%s", strings.Join(s.definition.Source.Components, ","))) } if len(s.definition.Source.Keys) > 0 { keyring, err := s.CreateGPGKeyring() if err != nil { return fmt.Errorf("Failed to create GPG keyring: %w", err) } defer os.RemoveAll(path.Dir(keyring)) args = append(args, "--keyring", keyring) } // If source.suite is set, debootstrap will use this instead of // image.release as its first positional argument (SUITE). This is important // for derivatives which don't have their own sources, e.g. Linux Mint. if s.definition.Source.Suite != "" { args = append(args, s.definition.Source.Suite, s.rootfsDir) } else { args = append(args, s.definition.Image.Release, s.rootfsDir) } if s.definition.Source.URL != "" { args = append(args, s.definition.Source.URL) } // If s.definition.Source.SameAs is set, create a symlink in /usr/share/debootstrap/scripts // pointing release to s.definition.Source.SameAs. scriptPath := filepath.Join("/usr/share/debootstrap/scripts", s.definition.Image.Release) if !incus.PathExists(scriptPath) && s.definition.Source.SameAs != "" { err := os.Symlink(s.definition.Source.SameAs, scriptPath) if err != nil { return fmt.Errorf("Failed to create symlink: %w", err) } defer os.Remove(scriptPath) } err := shared.RunCommand(s.ctx, nil, nil, "debootstrap", args...) if err != nil { return fmt.Errorf(`Failed to run "debootstrap": %w`, err) } return nil } distrobuilder-3.0/sources/docker.go000066400000000000000000000015261456216713500175260ustar00rootroot00000000000000package sources import ( "fmt" "os" "path/filepath" dcapi "github.com/mudler/docker-companion/api" ) type docker struct { common } // Run downloads and unpacks a docker image. 
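// The registry and its credentials can be overridden through the
// DOCKER_REGISTRY_BASE, DOCKER_REGISTRY_BASE_USER and DOCKER_REGISTRY_BASE_PASS
// environment variables; when DOCKER_REGISTRY_BASE is unset, the default
// https://registry-1.docker.io registry is used.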
func (s *docker) Run() error { absRootfsDir, err := filepath.Abs(s.rootfsDir) if err != nil { return fmt.Errorf("Failed to get absolute path of %s: %w", s.rootfsDir, err) } // If DOCKER_REGISTRY_BASE is not set, the default https://registry-1.docker.io is used. err = dcapi.DownloadAndUnpackImage(s.definition.Source.URL, absRootfsDir, &dcapi.DownloadOpts{ RegistryBase: os.Getenv("DOCKER_REGISTRY_BASE"), RegistryUsername: os.Getenv("DOCKER_REGISTRY_BASE_USER"), RegistryPassword: os.Getenv("DOCKER_REGISTRY_BASE_PASS"), KeepLayers: false, }) if err != nil { return fmt.Errorf("Failed to download and unpack image: %w", err) } return nil } distrobuilder-3.0/sources/fedora-http.go000066400000000000000000000105251456216713500204730ustar00rootroot00000000000000package sources import ( "encoding/json" "errors" "fmt" "io" "net/http" "os" "path/filepath" "regexp" "sort" "github.com/lxc/distrobuilder/shared" ) type fedora struct { common } // Run downloads a container base image and unpacks it and its layers. func (s *fedora) Run() error { baseURL := fmt.Sprintf("%s/packages/Fedora-Container-Base", s.definition.Source.URL) // Get latest build build, err := s.getLatestBuild(baseURL, s.definition.Image.Release) if err != nil { return fmt.Errorf("Failed to get latest build: %w", err) } fname := fmt.Sprintf("Fedora-Container-Base-%s-%s.%s.tar.xz", s.definition.Image.Release, build, s.definition.Image.ArchitectureMapped) // Download image sourceURL := fmt.Sprintf("%s/%s/%s/images/%s", baseURL, s.definition.Image.Release, build, fname) fpath, err := s.DownloadHash(s.definition.Image, sourceURL, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", sourceURL, err) } s.logger.WithField("file", filepath.Join(fpath, fname)).Info("Unpacking image") // Unpack the base image err = shared.Unpack(filepath.Join(fpath, fname), s.rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", filepath.Join(fpath, fname), err) } s.logger.Info("Unpacking layers") // Unpack the rest of the image (/bin, /sbin, /usr, etc.) err = s.unpackLayers(s.rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack: %w", err) } return nil } func (s *fedora) unpackLayers(rootfsDir string) error { // Read manifest file which contains the path to the layers file, err := os.Open(filepath.Join(rootfsDir, "manifest.json")) if err != nil { return fmt.Errorf("Failed to open %q: %w", filepath.Join(rootfsDir, "manifest.json"), err) } defer file.Close() data, err := io.ReadAll(file) if err != nil { return fmt.Errorf("Failed to read file %q: %w", file.Name(), err) } // Structure of the manifest excluding RepoTags var manifests []struct { Layers []string Config string } err = json.Unmarshal(data, &manifests) if err != nil { return fmt.Errorf("Failed to unmarshal JSON data: %w", err) } pathsToRemove := []string{ filepath.Join(rootfsDir, "manifest.json"), filepath.Join(rootfsDir, "repositories"), } // Unpack tarballs (or layers) which contain the rest of the rootfs, and // remove files not relevant to the image.
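// Each layer path in the manifest is relative to the unpacked rootfs directory
// and typically has the form "<layer-id>/layer.tar" (illustrative; the exact
// naming comes from the image archive itself).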
for _, manifest := range manifests { for _, layer := range manifest.Layers { s.logger.WithField("file", filepath.Join(rootfsDir, layer)).Info("Unpacking layer") err := shared.Unpack(filepath.Join(rootfsDir, layer), rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", filepath.Join(rootfsDir, layer), err) } pathsToRemove = append(pathsToRemove, filepath.Join(rootfsDir, filepath.Dir(layer))) } pathsToRemove = append(pathsToRemove, filepath.Join(rootfsDir, manifest.Config)) } // Clean up /tmp since there are unnecessary files there files, err := filepath.Glob(filepath.Join(rootfsDir, "tmp", "*")) if err != nil { return fmt.Errorf("Failed to find matching files: %w", err) } pathsToRemove = append(pathsToRemove, files...) // Clean up /root since there are unnecessary files there files, err = filepath.Glob(filepath.Join(rootfsDir, "root", "*")) if err != nil { return fmt.Errorf("Failed to find matching files: %w", err) } pathsToRemove = append(pathsToRemove, files...) for _, f := range pathsToRemove { os.RemoveAll(f) } return nil } func (s *fedora) getLatestBuild(URL, release string) (string, error) { var ( resp *http.Response err error ) err = shared.Retry(func() error { resp, err = http.Get(fmt.Sprintf("%s/%s", URL, release)) if err != nil { return fmt.Errorf("Failed to GET %q: %w", fmt.Sprintf("%s/%s", URL, release), err) } return nil }, 3) if err != nil { return "", err } defer resp.Body.Close() content, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to read body: %w", err) } // Builds are formatted in one of two ways: // -
<yyyymmdd>.<n> // - <yyyymmdd>
.n. re := regexp.MustCompile(`\d{8}\.(n\.)?\d`) // Find all builds matches := re.FindAllString(string(content), -1) if len(matches) == 0 { return "", errors.New("Unable to find latest build") } // Sort builds sort.Strings(matches) // Return latest build return matches[len(matches)-1], nil } distrobuilder-3.0/sources/funtoo-http.go000066400000000000000000000071431456216713500205470ustar00rootroot00000000000000package sources import ( "errors" "fmt" "net/http" "net/url" "path/filepath" "regexp" "sort" "strings" "gopkg.in/antchfx/htmlquery.v1" "github.com/lxc/distrobuilder/shared" ) type funtoo struct { common } // Run downloads a Funtoo stage3 tarball. func (s *funtoo) Run() error { topLevelArch := s.definition.Image.ArchitectureMapped if topLevelArch == "generic_32" { topLevelArch = "x86-32bit" } else if topLevelArch == "generic_64" { topLevelArch = "x86-64bit" } else if topLevelArch == "armv7a_vfpv3_hardfp" { topLevelArch = "arm-32bit" } else if topLevelArch == "arm64_generic" { topLevelArch = "arm-64bit" } // Keep release backward compatible to old implementation // and to permit to have yet the funtoo/1.4 alias. if s.definition.Image.Release == "1.4" { s.definition.Image.Release = "1.4-release-std" } baseURL := fmt.Sprintf("%s/%s/%s/%s", s.definition.Source.URL, s.definition.Image.Release, topLevelArch, s.definition.Image.ArchitectureMapped) releaseDates, err := s.getReleaseDates(baseURL) if err != nil { return fmt.Errorf("Failed to get release dates: %w", err) } var fname string var tarball string // Find a valid release tarball for i := len(releaseDates) - 1; i >= 0; i-- { fname = fmt.Sprintf("stage3-%s-%s-%s.tar.xz", s.definition.Image.ArchitectureMapped, s.definition.Image.Release, releaseDates[i]) tarball = fmt.Sprintf("%s/%s/%s", baseURL, releaseDates[i], fname) var ( resp *http.Response err error ) err = shared.Retry(func() error { resp, err = http.Head(tarball) if err != nil { return fmt.Errorf("Failed to call HEAD on %q: %w", tarball, err) } return nil }, 3) if err != nil { return err } if resp.StatusCode == http.StatusNotFound { continue } break } url, err := url.Parse(tarball) if err != nil { return fmt.Errorf("Failed to parse URL %q: %w", tarball, err) } if !s.definition.Source.SkipVerification && url.Scheme != "https" && len(s.definition.Source.Keys) == 0 { return errors.New("GPG keys are required if downloading from HTTP") } var fpath string fpath, err = s.DownloadHash(s.definition.Image, tarball, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", tarball, err) } // Force gpg checks when using http if !s.definition.Source.SkipVerification && url.Scheme != "https" { _, err = s.DownloadHash(s.definition.Image, tarball+".gpg", "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", tarball+".gpg", err) } valid, err := s.VerifyFile( filepath.Join(fpath, fname), filepath.Join(fpath, fname+".gpg")) if err != nil { return fmt.Errorf("Failed to verify file: %w", err) } if !valid { return fmt.Errorf("Invalid signature for %q", filepath.Join(fpath, fname)) } } s.logger.WithField("file", filepath.Join(fpath, fname)).Info("Unpacking image") // Unpack err = shared.Unpack(filepath.Join(fpath, fname), s.rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", filepath.Join(fpath, fname), err) } return nil } func (s *funtoo) getReleaseDates(URL string) ([]string, error) { doc, err := htmlquery.LoadURL(URL) if err != nil { return nil, fmt.Errorf("Failed to load URL %q: %w", URL, err) } re := regexp.MustCompile(`^\d{4}\-\d{2}\-\d{2}/?$`) 
var dirs []string for _, node := range htmlquery.Find(doc, `//a[@href]/text()`) { if re.MatchString(node.Data) { dirs = append(dirs, strings.TrimSuffix(node.Data, "/")) } } if len(dirs) == 0 { return nil, errors.New("Failed to get release dates") } // Sort dirs in case they're out-of-order sort.Strings(dirs) return dirs, nil } distrobuilder-3.0/sources/gentoo.go000066400000000000000000000133631456216713500175540ustar00rootroot00000000000000package sources import ( "crypto/sha512" "errors" "fmt" "io" "net/http" "net/url" "os" "path/filepath" "regexp" "strings" "github.com/lxc/distrobuilder/shared" ) type gentoo struct { common } // Run downloads a Gentoo stage3 tarball. func (s *gentoo) Run() error { topLevelArch := s.definition.Image.ArchitectureMapped if topLevelArch == "i686" { topLevelArch = "x86" } else if strings.HasPrefix(topLevelArch, "arm") && topLevelArch != "arm64" { topLevelArch = "arm" } else if strings.HasPrefix(topLevelArch, "ppc") { topLevelArch = "ppc" } else if strings.HasPrefix(topLevelArch, "s390") { topLevelArch = "s390" } var baseURL string if s.definition.Source.Variant != "" { baseURL = fmt.Sprintf("%s/releases/%s/autobuilds/current-stage3-%s-%s", s.definition.Source.URL, topLevelArch, s.definition.Image.ArchitectureMapped, s.definition.Source.Variant) } else { baseURL = fmt.Sprintf("%s/releases/%s/autobuilds/current-stage3-%s", s.definition.Source.URL, topLevelArch, s.definition.Image.ArchitectureMapped) } fname, err := s.getLatestBuild(baseURL, s.definition.Image.ArchitectureMapped, s.definition.Source.Variant) if err != nil { return fmt.Errorf("Failed to get latest build: %w", err) } tarball := fmt.Sprintf("%s/%s", baseURL, fname) url, err := url.Parse(tarball) if err != nil { return fmt.Errorf("Failed to parse %q: %w", tarball, err) } if !s.definition.Source.SkipVerification && url.Scheme != "https" && len(s.definition.Source.Keys) == 0 { return errors.New("GPG keys are required if downloading from HTTP") } var fpath string if s.definition.Source.SkipVerification { fpath, err = s.DownloadHash(s.definition.Image, tarball, "", nil) } else { fpath, err = s.DownloadHash(s.definition.Image, tarball, tarball+".DIGESTS", sha512.New()) } if err != nil { return fmt.Errorf("Failed to download %q: %w", tarball, err) } // Force gpg checks when using http if !s.definition.Source.SkipVerification && url.Scheme != "https" { _, err = s.DownloadHash(s.definition.Image, tarball+".DIGESTS.asc", "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", tarball+".DIGESTS.asc", err) } valid, err := s.VerifyFile( filepath.Join(fpath, fname+".DIGESTS.asc"), "") if err != nil { return fmt.Errorf("Failed to verify %q: %w", filepath.Join(fpath, fname+".DIGESTS.asc"), err) } if !valid { return fmt.Errorf("Failed to verify %q", fname+".DIGESTS.asc") } } s.logger.WithField("file", filepath.Join(fpath, fname)).Info("Unpacking image") // Unpack err = shared.Unpack(filepath.Join(fpath, fname), s.rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", filepath.Join(fpath, fname), err) } // Download portage tree snapshot. This avoid having to run `emerge --sync` every time which often fails. 
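// For example (mirror URL is illustrative), with source.url set to
// https://distfiles.gentoo.org the snapshot below resolves to
// https://distfiles.gentoo.org/snapshots/portage-latest.tar.xz.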
baseURL = fmt.Sprintf("%s/snapshots", s.definition.Source.URL) fname = "portage-latest.tar.xz" tarball = fmt.Sprintf("%s/%s", baseURL, fname) fpath, err = s.DownloadHash(s.definition.Image, tarball, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", tarball, err) } // Force gpg checks when using http if !s.definition.Source.SkipVerification && url.Scheme != "https" { _, err = s.DownloadHash(s.definition.Image, tarball+".gpgsig", "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", tarball+".gpgsig", err) } valid, err := s.VerifyFile( filepath.Join(fpath, fname+".gpgsig"), "") if err != nil { return fmt.Errorf("Failed to verify %q: %w", filepath.Join(fpath, fname+".gpgsig"), err) } if !valid { return fmt.Errorf("Failed to verify %q", fname+".gpgsig") } } s.logger.WithField("file", filepath.Join(fpath, fname)).Info("Unpacking image") // Unpack err = shared.Unpack(filepath.Join(fpath, fname), filepath.Join(s.rootfsDir, "var/db/repos")) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", filepath.Join(fpath, fname), err) } err = os.RemoveAll(filepath.Join(s.rootfsDir, "var/db/repos/gentoo")) if err != nil { return fmt.Errorf("Failed to remove %q: %w", filepath.Join(s.rootfsDir, "var/db/repos/gentoo"), err) } err = os.Rename(filepath.Join(s.rootfsDir, "var/db/repos/portage"), filepath.Join(s.rootfsDir, "var/db/repos/gentoo")) if err != nil { return fmt.Errorf("Failed to rename %q: %w", filepath.Join(s.rootfsDir, "var/db/repos/portage"), err) } return nil } func (s *gentoo) getLatestBuild(baseURL, arch, variant string) (string, error) { var ( resp *http.Response err error ) err = shared.Retry(func() error { resp, err = http.Get(baseURL) if err != nil { return fmt.Errorf("Failed to GET %q: %w", baseURL, err) } return nil }, 3) if err != nil { return "", err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to read body: %w", err) } var regex *regexp.Regexp // Look for .tar.xz if variant != "" { regex = regexp.MustCompile(fmt.Sprintf(`"stage3-%s-%s-.*.tar.xz">`, arch, variant)) } else { regex = regexp.MustCompile(fmt.Sprintf(`"stage3-%s-.*.tar.xz">`, arch)) } // Find all stage3 related files matches := regex.FindAllString(string(body), -1) if len(matches) > 0 { // Take the first match since they're all the same anyway return strings.Trim(matches[0], `<>"`), nil } // Look for .tar.bz2 if variant != "" { regex = regexp.MustCompile(fmt.Sprintf(`"stage3-%s-%s-.*.tar.bz2">`, arch, variant)) } else { regex = regexp.MustCompile(fmt.Sprintf(`">stage3-%s-.*.tar.bz2">`, arch)) } // Find all stage3 related files matches = regex.FindAllString(string(body), -1) if len(matches) > 0 { // Take the first match since they're all the same anyway return strings.Trim(matches[0], `<>"`), nil } return "", errors.New("Failed to get match") } distrobuilder-3.0/sources/nixos-http.go000066400000000000000000000012321456216713500203660ustar00rootroot00000000000000package sources import ( "fmt" "path/filepath" "github.com/lxc/distrobuilder/shared" ) type nixos struct { common } func (s *nixos) Run() error { tarballURL := fmt.Sprintf("https://hydra.nixos.org/job/nixos/trunk-combined/nixos.lxdContainerImage.%s-linux/latest/download-by-type/file/system-tarball", s.definition.Image.ArchitectureMapped) fpath, err := s.DownloadHash(s.definition.Image, tarballURL, "", nil) if err != nil { return fmt.Errorf("Failed downloading tarball: %w", err) } err = shared.Unpack(filepath.Join(fpath, "system-tarball"), s.rootfsDir) if err != 
nil { return fmt.Errorf("Failed unpacking rootfs: %w", err) } return nil } distrobuilder-3.0/sources/openeuler-http.go000066400000000000000000000074651456216713500212420ustar00rootroot00000000000000package sources import ( "crypto/sha256" "fmt" "io" "net/http" "net/url" "path/filepath" "regexp" "strings" "github.com/lxc/distrobuilder/shared" ) type openEuler struct { commonRHEL fileName string checksumFile string } const ( isoFileName = "openEuler-%s-%s-dvd.iso" shaFileName = "openEuler-%s-%s-dvd.iso.sha256sum" ) func (s *openEuler) getLatestRelease(baseURL, release string) (string, error) { var err error var resp *http.Response if len(release) == 0 { return "", fmt.Errorf("Invalid release: %s", release) } _, err = url.Parse(baseURL) if err != nil { return "", fmt.Errorf("Failed to parse URL %s: %w", baseURL, err) } resp, err = http.Get(baseURL) if err != nil { return "", fmt.Errorf("Failed to read url: %w", err) } body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to read body: %w", err) } regex := regexp.MustCompile(fmt.Sprintf(`openEuler-%s((-LTS)?(-SP[0-9])?)?`, release)) releases := regex.FindAllString(string(body), -1) if len(releases) > 0 { return strings.TrimPrefix(releases[len(releases)-1], "openEuler-"), nil } return "", fmt.Errorf("Failed to find latest release for %s", release) } func (s *openEuler) Run() error { var err error release, err := s.getLatestRelease(s.definition.Source.URL, s.definition.Image.Release) if err != nil { return fmt.Errorf("Failed to get latest release by %s: %w", s.definition.Image.Release, err) } baseURL := fmt.Sprintf("%s/openEuler-%s/ISO/%s/", s.definition.Source.URL, release, s.definition.Image.Architecture) fpath := s.getTargetDir() s.fileName = fmt.Sprintf(isoFileName, release, s.definition.Image.Architecture) s.checksumFile = fmt.Sprintf(shaFileName, release, s.definition.Image.Architecture) _, err = url.Parse(baseURL) if err != nil { return fmt.Errorf("Failed to parse URL %s: %w", baseURL, err) } _, err = s.DownloadHash(s.definition.Image, baseURL+s.fileName, baseURL+s.checksumFile, sha256.New()) if err != nil { return fmt.Errorf("Failed to download %s: %w", baseURL+s.fileName, err) } source := filepath.Join(fpath, s.fileName) s.logger.Info("Unpacking image folder", "rootfsDir", s.rootfsDir, "cacheDir", s.cacheDir) err = s.unpackISO(source, s.rootfsDir, s.isoRunner) if err != nil { return fmt.Errorf("Failed to unpack %s: %w", source, err) } return nil } func (s *openEuler) isoRunner(gpgKeysPath string) error { err := shared.RunScript(s.ctx, fmt.Sprintf(`#!/bin/sh set -eux GPG_KEYS="%s" # Create required files touch /etc/mtab /etc/fstab yum_args="" mkdir -p /etc/yum.repos.d if which dnf; then alias yum=dnf else # for openEuler packageDir and repoDir always exist. 
# Install initial package set cd /mnt/cdrom/Packages rpm -ivh --nodeps $(ls rpm-*.rpm | head -n1) rpm -ivh --nodeps $(ls yum-*.rpm | head -n1) fi # Add cdrom repo cat <<- EOF > /etc/yum.repos.d/cdrom.repo [cdrom] name=Install CD-ROM baseurl=file:///mnt/cdrom enabled=0 EOF gpg_keys_official="file:///etc/pki/rpm-gpg/RPM-GPG-KEY-openEuler" if [ -n "${GPG_KEYS}" ]; then echo gpgcheck=1 >> /etc/yum.repos.d/cdrom.repo echo gpgkey=${gpg_keys_official} ${GPG_KEYS} >> /etc/yum.repos.d/cdrom.repo else echo gpgcheck=0 >> /etc/yum.repos.d/cdrom.repo fi yum_args="--disablerepo=* --enablerepo=cdrom" # newest install.img doesnt have rpm installed, # so install rpm firstly if [ -z "$(which rpmkeys)" ]; then cd /mnt/cdrom/Packages yum ${yum_args} -y install rpm --nogpgcheck fi yum ${yum_args} -y install yum dnf pkgs="basesystem openEuler-release yum" # Create a minimal rootfs mkdir /rootfs yum ${yum_args} --installroot=/rootfs -y --skip-broken install ${pkgs} rm -rf /rootfs/var/cache/yum rm -rf /etc/yum.repos.d/cdrom.repo # Remove all files in mnt packages rm -rf /mnt/cdrom `, gpgKeysPath)) if err != nil { return fmt.Errorf("Failed to run script: %w", err) } return nil } distrobuilder-3.0/sources/openeuler-http_test.go000066400000000000000000000023671456216713500222750ustar00rootroot00000000000000package sources import ( "testing" "github.com/stretchr/testify/require" ) func TestGetLatestRelease(t *testing.T) { s := &openEuler{} tests := []struct { url string release string want string shouldFail bool }{ { "https://repo.openeuler.org/", "22.03", "22.03-LTS-SP2", false, }, { "https://repo.openeuler.org/", "20.03", "20.03-LTS-SP3", false, }, { "https://repo.openeuler.org/", "20.03-LTS", "20.03-LTS-SP3", false, }, { "https://repo.openeuler.org/", "20.03-LTS-SP1", "20.03-LTS-SP1", false, }, { "https://repo.openeuler.org/", "21.03", "21.03", false, }, { "https://repo.openeuler.org/", "22.00", // non-existed release "", true, }, { "https://repo.openeuler.org/", "BadRelease", // invalid format "", true, }, { "https://repo.openeuler.org/", "", // null string "", true, }, { "foobar", // invalid url "22.03", "", true, }, } for _, test := range tests { release, err := s.getLatestRelease(test.url, test.release) if test.shouldFail { require.NotNil(t, err) } else { require.NoError(t, err) require.NotEmpty(t, release) require.Equal(t, test.want, release) } } } distrobuilder-3.0/sources/opensuse-http.go000066400000000000000000000126601456216713500210760ustar00rootroot00000000000000package sources import ( "crypto/sha256" "errors" "fmt" "io" "net/http" "net/url" "os" "path" "path/filepath" "regexp" "sort" "strings" "gopkg.in/antchfx/htmlquery.v1" "github.com/lxc/distrobuilder/shared" ) type opensuse struct { common } // Run downloads an OpenSUSE tarball. 
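// As an illustrative example of the path resolution implemented in
// getPathToTarball below: release "tumbleweed" on x86_64 is looked up under
// <mirror>/repositories/Virtualization:/containers:/images:/openSUSE-Tumbleweed/container/,
// while a Leap release such as "15.5" uses <mirror>/distribution/leap/15.5/appliances/.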
func (s *opensuse) Run() error { var baseURL string var fname string if s.definition.Source.URL == "" { s.definition.Source.URL = "https://mirrorcache-us.opensuse.org/download" } tarballPath, err := s.getPathToTarball(s.definition.Source.URL, s.definition.Image.Release, s.definition.Image.ArchitectureMapped) if err != nil { return fmt.Errorf("Failed to get tarball path: %w", err) } var resp *http.Response err = shared.Retry(func() error { resp, err = http.Head(tarballPath) if err != nil { return fmt.Errorf("Failed to HEAD %q: %w", tarballPath, err) } return nil }, 3) if err != nil { return err } baseURL, fname = path.Split(resp.Request.URL.String()) url, err := url.Parse(fmt.Sprintf("%s%s", baseURL, fname)) if err != nil { return fmt.Errorf("Failed to parse %q: %w", fmt.Sprintf("%s%s", baseURL, fname), err) } fpath, err := s.DownloadHash(s.definition.Image, url.String(), "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", url.String(), err) } _, err = s.DownloadHash(s.definition.Image, url.String()+".sha256", "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", url.String()+".sha256", err) } if !s.definition.Source.SkipVerification { err = s.verifyTarball(filepath.Join(fpath, fname), s.definition) if err != nil { return fmt.Errorf("Failed to verify %q: %w", filepath.Join(fpath, fname), err) } } s.logger.WithField("file", filepath.Join(fpath, fname)).Info("Unpacking image") // Unpack err = shared.Unpack(filepath.Join(fpath, fname), s.rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", filepath.Join(fpath, fname), err) } return nil } func (s *opensuse) verifyTarball(imagePath string, definition shared.Definition) error { var err error var checksum []byte checksumPath := imagePath + ".sha256" valid, err := s.VerifyFile(checksumPath, "") if err == nil && valid { checksum, err = s.GetSignedContent(checksumPath) } else { checksum, err = os.ReadFile(checksumPath) } if err != nil { return fmt.Errorf("Failed to read checksum file: %w", err) } image, err := os.Open(imagePath) if err != nil { return fmt.Errorf("Failed to open %q: %w", imagePath, err) } defer image.Close() hash := sha256.New() _, err = io.Copy(hash, image) if err != nil { return fmt.Errorf("Failed to copy tarball content: %w", err) } result := fmt.Sprintf("%x", hash.Sum(nil)) checksumStr := strings.TrimSpace(strings.Split(string(checksum), " ")[0]) if result != checksumStr { return fmt.Errorf("Hash mismatch for %s: %s != %s", imagePath, result, checksumStr) } return nil } func (s *opensuse) getPathToTarball(baseURL string, release string, arch string) (string, error) { u, err := url.Parse(baseURL) if err != nil { return "", fmt.Errorf("Failed to parse URL %q: %w", baseURL, err) } var tarballName string if strings.ToLower(release) == "tumbleweed" { u.Path = path.Join(u.Path, "repositories", "Virtualization:", "containers:", "images:", "openSUSE-Tumbleweed") switch arch { case "i686", "x86_64": u.Path = path.Join(u.Path, "container") case "aarch64": u.Path = path.Join(u.Path, "container_ARM") case "ppc64le": u.Path = path.Join(u.Path, "container_PowerPC") case "s390x": u.Path = path.Join(u.Path, "container_zSystems") default: return "", fmt.Errorf("Unsupported architecture %q", arch) } release = "tumbleweed" } else { u.Path = path.Join(u.Path, "distribution", "leap", release, "appliances") release = "leap" } tarballName, err = s.getTarballName(u, release, arch) if err != nil { return "", fmt.Errorf("Failed to get tarball name: %w", err) } u.Path = path.Join(u.Path, tarballName) 
return u.String(), nil } func (s *opensuse) getTarballName(u *url.URL, release, arch string) (string, error) { doc, err := htmlquery.LoadURL(u.String()) if err != nil { return "", fmt.Errorf("Failed to load URL %q: %w", u.String(), err) } if doc == nil { return "", errors.New("Empty HTML document") } // Translate x86 architectures. if strings.HasSuffix(arch, "86") { arch = "ix86" } nodes := htmlquery.Find(doc, `//a/@href`) re := regexp.MustCompile(fmt.Sprintf("^opensuse-%s-image.*%s.*\\.tar.xz$", release, arch)) var builds []string for _, n := range nodes { text := strings.TrimPrefix(htmlquery.InnerText(n), "./") if !re.MatchString(text) { continue } if strings.Contains(text, "Build") { builds = append(builds, text) } else { if !s.validateURL(*u, text) { continue } return text, nil } } if len(builds) > 0 { // Unfortunately, the link to the latest build is missing, hence we need // to manually select the latest build. sort.Strings(builds) for i := len(builds) - 1; i >= 0; i-- { if !s.validateURL(*u, builds[i]) { continue } return builds[i], nil } } return "", errors.New("Failed to find tarball name") } func (s *opensuse) validateURL(u url.URL, tarball string) bool { u.Path = path.Join(u.Path, tarball) resp, err := http.Head(u.String()) if err != nil { return false } // Check whether the link to the tarball is valid. if resp.StatusCode == http.StatusNotFound { return false } return true } distrobuilder-3.0/sources/openwrt-http.go000066400000000000000000000115201456216713500207250ustar00rootroot00000000000000package sources import ( "crypto/sha256" "errors" "fmt" "io" "net/http" "net/url" "path/filepath" "regexp" "strings" "github.com/lxc/distrobuilder/shared" ) type openwrt struct { common } // Run downloads the tarball and unpacks it. func (s *openwrt) Run() error { var baseURL string release := s.definition.Image.Release releaseInFilename := strings.ToLower(release) + "-" var architecturePath string switch s.definition.Image.ArchitectureMapped { case "x86_64": architecturePath = strings.Replace(s.definition.Image.ArchitectureMapped, "_", "/", 1) case "armv7l": if strings.HasPrefix(release, "21.02") || strings.HasPrefix(release, "22.03") { architecturePath = "armvirt/32" } else { architecturePath = "armsr/armv7" } case "aarch64": if strings.HasPrefix(release, "21.02") || strings.HasPrefix(release, "22.03") { architecturePath = "armvirt/64" } else { architecturePath = "armsr/armv8" } } // Figure out the correct release if release == "snapshot" { // Build a daily snapshot. baseURL = fmt.Sprintf("%s/snapshots/targets/%s/", s.definition.Source.URL, architecturePath) releaseInFilename = "" } else { baseURL = fmt.Sprintf("%s/releases", s.definition.Source.URL) matched, err := regexp.MatchString(`^\d+\.\d+$`, release) if err != nil { return fmt.Errorf("Failed to match release: %w", err) } if matched { // A release of the form '18.06' has been provided. We need to find // out the latest service release of the form '18.06.0'. 
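// Sketch of what getLatestServiceRelease does (version numbers illustrative):
// it GETs the releases index page, collects every entry of the form
// "22.03.N" matching the requested major.minor, and returns the last one
// listed, e.g. "22.03.5".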
release, err = s.getLatestServiceRelease(baseURL, release) if err != nil { return fmt.Errorf("Failed to get latest service release: %w", err) } releaseInFilename = strings.ToLower(release) + "-" } baseURL = fmt.Sprintf("%s/%s/targets/%s/", baseURL, release, architecturePath) } var fname string if strings.HasPrefix(release, "21.02") || strings.HasPrefix(release, "22.03") { switch s.definition.Image.ArchitectureMapped { case "x86_64": fname = fmt.Sprintf("openwrt-%s%s-rootfs.tar.gz", releaseInFilename, strings.Replace(architecturePath, "/", "-", 1)) case "armv7l": fallthrough case "aarch64": fname = fmt.Sprintf("openwrt-%s%s-default-rootfs.tar.gz", releaseInFilename, strings.Replace(architecturePath, "/", "-", 1)) } } else { switch s.definition.Image.ArchitectureMapped { case "x86_64": fallthrough case "armv7l": fallthrough case "aarch64": fname = fmt.Sprintf("openwrt-%s%s-rootfs.tar.gz", releaseInFilename, strings.Replace(architecturePath, "/", "-", 1)) } } var ( resp *http.Response err error ) err = shared.Retry(func() error { resp, err = http.Head(baseURL) if err != nil { return fmt.Errorf("Failed to HEAD %q: %w", baseURL, err) } return nil }, 3) if err != nil { return err } // Use fallback image "generic" if resp.StatusCode == http.StatusNotFound && s.definition.Image.ArchitectureMapped == "x86_64" { baseURL = strings.ReplaceAll(baseURL, "x86/64", "x86/generic") baseURL = strings.ReplaceAll(baseURL, "x86-64", "x86-generic") fname = strings.ReplaceAll(fname, "x86-64", "x86-generic") } url, err := url.Parse(baseURL) if err != nil { return fmt.Errorf("Failed to parse %q: %w", baseURL, err) } checksumFile := "" if !s.definition.Source.SkipVerification { if len(s.definition.Source.Keys) != 0 { checksumFile = baseURL + "sha256sums" _, err := s.DownloadHash(s.definition.Image, checksumFile, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", checksumFile, err) } } else { // Force gpg checks when using http if url.Scheme != "https" { return errors.New("GPG keys are required if downloading from HTTP") } } } fpath, err := s.DownloadHash(s.definition.Image, baseURL+fname, checksumFile, sha256.New()) if err != nil { return fmt.Errorf("Failed to download %q: %w", baseURL+fname, err) } s.logger.WithField("file", filepath.Join(fpath, fname)).Info("Unpacking image") // Unpack err = shared.Unpack(filepath.Join(fpath, fname), s.rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", filepath.Join(fpath, fname), err) } return nil } func (s *openwrt) getLatestServiceRelease(baseURL, release string) (string, error) { var ( resp *http.Response err error ) err = shared.Retry(func() error { resp, err = http.Get(baseURL) if err != nil { return fmt.Errorf("Failed to GET %q: %w", baseURL, err) } return nil }, 3) if err != nil { return "", err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to ready body: %w", err) } regex := regexp.MustCompile(fmt.Sprintf(">(%s\\.\\d+)<", release)) releases := regex.FindAllStringSubmatch(string(body), -1) if len(releases) > 0 { return releases[len(releases)-1][1], nil } return "", errors.New("Failed to find latest service release") } distrobuilder-3.0/sources/openwrt-http_test.go000066400000000000000000000011721456216713500217660ustar00rootroot00000000000000package sources import ( "regexp" "testing" "github.com/stretchr/testify/require" ) func TestOpenWrtHTTP_getLatestServiceRelease(t *testing.T) { s := &openwrt{} tests := []struct { release string want *regexp.Regexp }{ { "21.02", 
regexp.MustCompile(`21\.02\.\d+`), }, { "22.03", regexp.MustCompile(`22\.03\.\d+`), }, { "23.05", regexp.MustCompile(`23\.05\.\d+`), }, } for _, tt := range tests { baseURL := "https://downloads.openwrt.org/releases/" release, err := s.getLatestServiceRelease(baseURL, tt.release) require.NoError(t, err) require.Regexp(t, tt.want, release) } } distrobuilder-3.0/sources/oraclelinux-http.go000066400000000000000000000252321456216713500215610ustar00rootroot00000000000000package sources import ( "errors" "fmt" "net/http" "os" "path/filepath" "regexp" "sort" "strings" incus "github.com/lxc/incus/shared/util" "golang.org/x/sys/unix" "gopkg.in/antchfx/htmlquery.v1" "github.com/lxc/distrobuilder/shared" ) type oraclelinux struct { commonRHEL majorVersion string architecture string } // Run downloads Oracle Linux. func (s *oraclelinux) Run() error { s.majorVersion = s.definition.Image.Release s.architecture = s.definition.Image.ArchitectureMapped baseURL := fmt.Sprintf("%s/OL%s", s.definition.Source.URL, s.definition.Image.Release) updates, err := s.getUpdates(baseURL) if err != nil { s.logger.WithField("err", err).Warn("Failed to get updates") } var latestUpdate string var fname string if len(updates) == 0 { s.logger.Info("Trying to find updates through iteration") fname = fmt.Sprintf("%s-boot.iso", s.architecture) // Try finding updates through iteration. for i := 10; i >= 0; i-- { latestUpdate = fmt.Sprintf("u%d", i) if s.definition.Image.Release == "9" { if s.architecture == "x86_64" { fname = fmt.Sprintf("OracleLinux-R9-U%d-%s-boot.iso", i, s.architecture) } else if s.architecture == "aarch64" { fname = fmt.Sprintf("OracleLinux-R9-U%d-%s-dvd.iso", i, s.architecture) } } fullURL := fmt.Sprintf("%s/%s/%s/%s", baseURL, latestUpdate, s.architecture, fname) var ( resp *http.Response err error ) err = shared.Retry(func() error { resp, err = http.Head(fullURL) if err != nil { return errors.New("") } return nil }, 3) if err != nil { continue } if resp.StatusCode == http.StatusOK { break } } } // Only consider updates providing a boot image since we're not interested in the // DVD ISO. 
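// Sketch of the selection logic that follows (update names illustrative):
// given updates such as [u0, u1, ..., u9], the loop walks the list from its
// last entry backwards and keeps the first update whose boot ISO answers an
// HTTP HEAD request with status 200.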
for i := len(updates) - 1; i > 0; i-- { URL := fmt.Sprintf("%s/%s/%s", baseURL, updates[i], s.architecture) fname, err = s.getISO(URL, s.architecture) if err != nil { continue } fullURL := fmt.Sprintf("%s/%s", URL, fname) var ( resp *http.Response err error ) err = shared.Retry(func() error { resp, err = http.Head(fullURL) if err != nil { return errors.New("") } return nil }, 3) if err != nil { continue } if resp.StatusCode == http.StatusOK { latestUpdate = updates[i] break } } source := fmt.Sprintf("%s/%s/%s/%s", baseURL, latestUpdate, s.architecture, fname) fpath, err := s.DownloadHash(s.definition.Image, source, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", source, err) } s.logger.WithField("file", filepath.Join(fpath, fname)).Info("Unpacking ISO") err = s.unpackISO(latestUpdate[1:], filepath.Join(fpath, fname), s.rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack ISO: %w", err) } return nil } func (s *oraclelinux) unpackISO(latestUpdate, filePath, rootfsDir string) error { isoDir := filepath.Join(s.cacheDir, "iso") squashfsDir := filepath.Join(s.cacheDir, "squashfs") roRootDir := filepath.Join(s.cacheDir, "rootfs.ro") tempRootDir := filepath.Join(s.cacheDir, "rootfs") for _, dir := range []string{isoDir, squashfsDir, roRootDir} { err := os.MkdirAll(dir, 0755) if err != nil { return fmt.Errorf("Failed to create %q: %w", dir, err) } } // this is easier than doing the whole loop thing ourselves err := shared.RunCommand(s.ctx, nil, nil, "mount", "-t", "iso9660", "-o", "ro", filePath, isoDir) if err != nil { return fmt.Errorf("Failed to mount %q: %w", filePath, err) } defer func() { _ = unix.Unmount(isoDir, 0) }() var rootfsImage string squashfsImage := filepath.Join(isoDir, "LiveOS", "squashfs.img") if incus.PathExists(squashfsImage) { // The squashfs.img contains an image containing the rootfs, so first // mount squashfs.img err = shared.RunCommand(s.ctx, nil, nil, "mount", "-t", "squashfs", "-o", "ro", squashfsImage, squashfsDir) if err != nil { return fmt.Errorf("Failed to mount %q: %w", squashfsImage, err) } defer func() { _ = unix.Unmount(squashfsDir, 0) }() rootfsImage = filepath.Join(squashfsDir, "LiveOS", "rootfs.img") } else { rootfsImage = filepath.Join(isoDir, "images", "install.img") } // Remove rootfsDir otherwise rsync will copy the content into the directory // itself err = os.RemoveAll(rootfsDir) if err != nil { return fmt.Errorf("Failed to remove %q: %w", rootfsDir, err) } s.logger.WithField("file", rootfsImage).Info("Unpacking root image") err = s.unpackRootfsImage(rootfsImage, tempRootDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", rootfsImage, err) } // Determine rpm and yum packages var baseURL string if s.majorVersion == "7" { baseURL = fmt.Sprintf("https://yum.oracle.com/repo/OracleLinux/OL%s/%s/base/%s", s.majorVersion, latestUpdate, s.architecture) } else { baseURL = fmt.Sprintf("https://yum.oracle.com/repo/OracleLinux/OL%s/%s/baseos/base/%s", s.majorVersion, latestUpdate, s.architecture) } doc, err := htmlquery.LoadURL(fmt.Sprintf("%s/index.html", baseURL)) if err != nil { return fmt.Errorf("Failed to load URL %q: %w", fmt.Sprintf("%s/index.html", baseURL), err) } regexRpm := regexp.MustCompile(`^getPackage/rpm-\d+.+\.rpm$`) regexYum := regexp.MustCompile(`^getPackage/yum-\d+.+\.rpm$`) var yumPkgs []string var rpmPkgs []string for _, a := range htmlquery.Find(doc, `//a/@href`) { if regexRpm.MatchString(a.FirstChild.Data) { rpmPkgs = append(rpmPkgs, a.FirstChild.Data) continue } if 
regexYum.MatchString(a.FirstChild.Data) { yumPkgs = append(yumPkgs, a.FirstChild.Data) continue } } sort.Strings(yumPkgs) sort.Strings(rpmPkgs) if len(rpmPkgs) > 0 && len(yumPkgs) > 0 { yumPkg := yumPkgs[len(yumPkgs)-1] rpmPkg := rpmPkgs[len(rpmPkgs)-1] array := [][]string{ {filepath.Join(tempRootDir, filepath.Base(rpmPkg)), fmt.Sprintf("%s/%s", baseURL, rpmPkg)}, {filepath.Join(tempRootDir, filepath.Base(yumPkg)), fmt.Sprintf("%s/%s", baseURL, yumPkg)}, {filepath.Join(tempRootDir, "RPM-GPG-KEY-oracle"), "https://oss.oracle.com/ol6/RPM-GPG-KEY-oracle"}, } for _, elem := range array { f, err := os.Create(elem[0]) if err != nil { return fmt.Errorf("Failed to create file %q: %w", elem[0], err) } defer f.Close() _, err = incus.DownloadFileHash(s.ctx, http.DefaultClient, "", nil, nil, elem[0], elem[1], "", nil, f) if err != nil { return fmt.Errorf("Failed to download %q: %w", elem[1], err) } f.Close() } } // Setup the mounts and chroot into the rootfs exitChroot, err := shared.SetupChroot(tempRootDir, shared.Definition{}, nil) if err != nil { return fmt.Errorf("Failed to setup chroot: %w", err) } if !incus.PathExists("/bin") && incus.PathExists("/usr/bin") { err = os.Symlink("/usr/bin", "/bin") if err != nil { return fmt.Errorf("Failed to create /bin symlink: %w", err) } } err = shared.RunScript(s.ctx, fmt.Sprintf(`#!/bin/sh set -eux version="%s" latest_update="%s" arch="%s" # Create required files touch /etc/mtab /etc/fstab mkdir -p /etc/yum.repos.d /rootfs if [ "${version}" = "7" ]; then baseurl=https://yum.oracle.com/repo/OracleLinux/OL${version}/${latest_update}/base/${arch}/ baseurl_latest=https://yum.oracle.com/repo/OracleLinux/OL${version}/latest/${arch}/ else baseurl=https://yum.oracle.com/repo/OracleLinux/OL${version}/${latest_update}/baseos/base/${arch}/ baseurl_latest=https://yum.oracle.com/repo/OracleLinux/OL${version}/baseos/latest/${arch}/ fi if which dnf; then alias yum=dnf gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle else gpgkey=file:///RPM-GPG-KEY-oracle # Fetch and install rpm and yum from the Oracle repo _rpm=$(curl -s ${baseurl}/index.html | grep -Eo '>rpm-[[:digit:]][^ ]+\.rpm<' | tail -1 | sed 's|[<>]||g') _yum=$(curl -s ${baseurl}/index.html | grep -Eo '>yum-[[:digit:]][^ ]+\.rpm<' | tail -1 | sed 's|[<>]||g') rpm -ivh --nodeps "${_rpm}" "${_yum}" rpm --import RPM-GPG-KEY-oracle fi # Add repo cat <<- EOF > /etc/yum.repos.d/base.repo [base] name=Oracle Linux baseurl=${baseurl} enabled=1 gpgcheck=1 gpgkey=${gpgkey} EOF cat <<- EOF > /etc/yum.repos.d/base_latest.repo [latest] name=Oracle Linux baseurl=${baseurl_latest} enabled=1 gpgcheck=1 gpgkey=${gpgkey} EOF rm -rf /var/rootfs/* yum install --disablerepo=latest --releasever=${version} --installroot=/rootfs -y basesystem oraclelinux-release mkdir -p /rootfs/etc/yum.repos.d cp /etc/yum.repos.d/*.repo /rootfs/etc/yum.repos.d/ if [ "${version}" = "7" ] && [ "${arch}" = "aarch64" ]; then yum install --releasever=${version} --installroot=/rootfs -y libcom_err fi yum install --disablerepo=latest --releasever=${version} --installroot=/rootfs -y yum rm -rf /rootfs/var/cache/yum if [ -f RPM-GPG-KEY-oracle ] && ! 
[ -f /rootfs/etc/pki/rpm-gpg/RPM-GPG-KEY-oracle ]; then mkdir -p /rootfs/etc/pki/rpm-gpg/ cp RPM-GPG-KEY-oracle /rootfs/etc/pki/rpm-gpg/ fi cat <<- EOF > /rootfs/etc/yum.repos.d/base.repo [base] name=Oracle Linux baseurl=${baseurl} enabled=1 gpgcheck=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle EOF `, s.majorVersion, latestUpdate, s.architecture)) if err != nil { { err := exitChroot() if err != nil { s.logger.WithField("err", err).Warn("Failed exiting chroot") } } return fmt.Errorf("Failed to run script: %w", err) } err = exitChroot() if err != nil { return fmt.Errorf("Failed exiting chroot: %w", err) } err = shared.RsyncLocal(s.ctx, tempRootDir+"/rootfs/", rootfsDir) if err != nil { return fmt.Errorf(`Failed to run "rsync": %w`, err) } return nil } func (s *oraclelinux) getISO(URL string, architecture string) (string, error) { var re *regexp.Regexp if architecture == "x86_64" { re = regexp.MustCompile(fmt.Sprintf("%s-boot(-\\d{8})?.iso", architecture)) } else if architecture == "aarch64" { re = regexp.MustCompile(fmt.Sprintf("%s-boot-uek(-\\d{8})?.iso", architecture)) } else { return "", fmt.Errorf("Unsupported architecture %q", architecture) } doc, err := htmlquery.LoadURL(URL) if err != nil { return "", fmt.Errorf("Failed to load URL %q: %w", URL, err) } var isos []string for _, a := range htmlquery.Find(doc, "//a/@href") { if re.MatchString(a.FirstChild.Data) { isos = append(isos, a.FirstChild.Data) } } if len(isos) == 0 { return "", errors.New("No isos found") } return isos[len(isos)-1], nil } func (s *oraclelinux) getUpdates(URL string) ([]string, error) { re := regexp.MustCompile(`^[uU]\d+/$`) doc, err := htmlquery.LoadURL(URL) if err != nil { return nil, fmt.Errorf("Failed to load URL %q: %w", URL, err) } var updates []string for _, a := range htmlquery.Find(doc, "//a/@href") { if re.MatchString(a.FirstChild.Data) { updates = append(updates, strings.TrimSuffix(a.FirstChild.Data, "/")) } } if len(updates) == 0 { return nil, errors.New("No updates found") } return updates, nil } distrobuilder-3.0/sources/plamolinux-http.go000066400000000000000000000105751456216713500214300ustar00rootroot00000000000000package sources import ( "errors" "fmt" "net/url" "path" "path/filepath" "strconv" "strings" incus "github.com/lxc/incus/shared/util" "gopkg.in/antchfx/htmlquery.v1" "github.com/lxc/distrobuilder/shared" ) type plamolinux struct { common } // Run downloads Plamo Linux. 
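// For example (release string is illustrative), image.release "7.x" is parsed
// to the integer 7, packages are fetched from the Plamo-7.x/<arch>/plamo tree,
// and the pkgtools8 package tools are used (releases below 7 fall back to
// hdsetup).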
func (s *plamolinux) Run() error { releaseStr := strings.TrimSuffix(s.definition.Image.Release, ".x") release, err := strconv.Atoi(releaseStr) if err != nil { return fmt.Errorf("Failed to convert %q: %w", releaseStr, err) } u, err := url.Parse(s.definition.Source.URL) if err != nil { return fmt.Errorf("Failed to parse %q: %w", s.definition.Source.URL, err) } mirrorPath := path.Join(u.Path, fmt.Sprintf("Plamo-%s.x", releaseStr), s.definition.Image.ArchitectureMapped, "plamo") paths := []string{path.Join(mirrorPath, "00_base")} ignoredPkgs := []string{"alsa_utils", "grub", "kernel", "lilo", "linux_firmware", "microcode_ctl", "linux_firmwares", "cpufreqd", "cpufrequtils", "gpm", "ntp", "kmod", "kmscon"} if release < 7 { paths = append(paths, path.Join(mirrorPath, "01_minimum")) } var pkgDir string for _, p := range paths { u.Path = p pkgDir, err = s.downloadFiles(s.definition.Image, u.String(), ignoredPkgs) if err != nil { return fmt.Errorf("Failed to download packages: %w", err) } } var pkgTool string // Find package tool if release < 7 { pkgTool = "hdsetup" } else { pkgTool = "pkgtools8" } matches, err := filepath.Glob(filepath.Join(pkgDir, fmt.Sprintf("%s-*.t*z*", pkgTool))) if err != nil { return fmt.Errorf("Failed to match pattern: %w", err) } if len(matches) == 0 { return errors.New("Couldn't find any matching package") } else if len(matches) > 1 { return errors.New("Found more than one matching package") } err = shared.RunCommand(s.ctx, nil, nil, "tar", "-pxf", matches[0], "-C", pkgDir, "sbin/") if err != nil { return fmt.Errorf("Failed to unpack %q: %w", matches[0], err) } rootfsDirAbs, err := filepath.Abs(s.rootfsDir) if err != nil { return fmt.Errorf("Failed to get absolute path: %w", err) } err = shared.RunScript(s.ctx, fmt.Sprintf(`#!/bin/sh set -eux # Input variables PKG_DIR="%s" ROOTFS_DIR="%s" # Environment export PATH="${PATH}:${PKG_DIR}/sbin:${PKG_DIR}/sbin/installer" export LC_ALL="C" export LANG="C" # Fix name of installer directory if [ -d "${PKG_DIR}/sbin/installer_new" ]; then [ -d "${PKG_DIR}/sbin/installer" ] && rm -r "${PKG_DIR}/sbin/installer" mv "${PKG_DIR}/sbin/installer_new" "${PKG_DIR}/sbin/installer" fi # Fix filename of pkgtools8 files pkg_scripts="installpkg installpkg2 installpkg2.mes makepkg updatepkg removepkg" for s in $pkg_scripts do if [ -f "${PKG_DIR}/sbin/new_$s" ]; then ( cd "${PKG_DIR}/sbin" && mv new_"$s" $s ) fi done # generate symblic link to static-zstd ( cd "${PKG_DIR}/sbin/installer" && ln -sf zstd-* zstd ) # Don't call ldconfig sed -i "/ldconfig/!s@/sbin@${PKG_DIR}&@g" ${PKG_DIR}/sbin/installpkg* # Don't override PATH sed -i "/^export PATH/d" ${PKG_DIR}/sbin/installpkg* # Install all packages for pkg in $(ls -cr ${PKG_DIR}/*.t*z*); do installpkg -root ${ROOTFS_DIR} -priority ADD ${pkg} done `, pkgDir, rootfsDirAbs)) if err != nil { return fmt.Errorf("Failed to run script: %w", err) } return nil } func (s *plamolinux) downloadFiles(def shared.DefinitionImage, URL string, ignoredPkgs []string) (string, error) { doc, err := htmlquery.LoadURL(URL) if err != nil { return "", fmt.Errorf("Failed to load URL %q: %w", URL, err) } if doc == nil { return "", errors.New("Empty HTML document") } nodes := htmlquery.Find(doc, `//a/@href`) var dir string for _, n := range nodes { target := htmlquery.InnerText(n) if strings.HasSuffix(target, ".txz") || strings.HasSuffix(target, ".tzst") { pkgName := strings.Split(target, "-")[0] if incus.ValueInSlice(pkgName, ignoredPkgs) { continue } // package dir, err = s.DownloadHash(def, fmt.Sprintf("%s/%s", URL, target), 
"", nil) if err != nil { return "", fmt.Errorf("Failed to download %q: %w", fmt.Sprintf("%s/%s", URL, target), err) } } else if strings.HasSuffix(target, ".txz/") || strings.HasSuffix(target, ".tzst/") { // directory u, err := url.Parse(URL) if err != nil { return "", fmt.Errorf("Failed to parse %q: %w", URL, err) } u.Path = path.Join(u.Path, target) return s.downloadFiles(def, u.String(), ignoredPkgs) } } return dir, nil } distrobuilder-3.0/sources/rhel-common.go000066400000000000000000000210571456216713500205000ustar00rootroot00000000000000package sources import ( "bytes" "fmt" "os" "path/filepath" "strconv" "strings" incus "github.com/lxc/incus/shared/util" "golang.org/x/sys/unix" "github.com/lxc/distrobuilder/shared" ) type commonRHEL struct { common } func (c *commonRHEL) unpackISO(filePath, rootfsDir string, scriptRunner func(string) error) error { isoDir, err := os.MkdirTemp(c.cacheDir, "temp_") if err != nil { return fmt.Errorf("Failed to create temporary directory: %w", err) } defer os.RemoveAll(isoDir) squashfsDir, err := os.MkdirTemp(c.cacheDir, "temp_") if err != nil { return fmt.Errorf("Failed to create temporary directory: %w", err) } defer os.RemoveAll(squashfsDir) tempRootDir, err := os.MkdirTemp(c.cacheDir, "temp_") if err != nil { return fmt.Errorf("Failed to create temporary directory: %w", err) } defer os.RemoveAll(tempRootDir) // this is easier than doing the whole loop thing ourselves err = shared.RunCommand(c.ctx, nil, nil, "mount", "-t", "iso9660", "-o", "ro", filePath, isoDir) if err != nil { return fmt.Errorf("Failed to mount %q: %w", filePath, err) } defer func() { _ = unix.Unmount(isoDir, 0) }() var rootfsImage string squashfsImage := filepath.Join(isoDir, "LiveOS", "squashfs.img") if incus.PathExists(squashfsImage) { // The squashfs.img contains an image containing the rootfs, so first // mount squashfs.img err = shared.RunCommand(c.ctx, nil, nil, "mount", "-t", "squashfs", "-o", "ro", squashfsImage, squashfsDir) if err != nil { return fmt.Errorf("Failed to mount %q: %w", squashfsImage, err) } defer func() { _ = unix.Unmount(squashfsDir, 0) }() rootfsImage = filepath.Join(squashfsDir, "LiveOS", "rootfs.img") } else { rootfsImage = filepath.Join(isoDir, "images", "install.img") } // Remove rootfsDir otherwise rsync will copy the content into the directory // itself err = os.RemoveAll(rootfsDir) if err != nil { return fmt.Errorf("Failed to remove directory %q: %w", rootfsDir, err) } c.logger.WithField("file", rootfsImage).Info("Unpacking root image") err = c.unpackRootfsImage(rootfsImage, tempRootDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", rootfsImage, err) } gpgKeysPath := "" packagesDir := filepath.Join(isoDir, "Packages") repodataDir := filepath.Join(isoDir, "repodata") if !incus.PathExists(packagesDir) { packagesDir = filepath.Join(isoDir, "BaseOS", "Packages") } if !incus.PathExists(repodataDir) { repodataDir = filepath.Join(isoDir, "BaseOS", "repodata") } if incus.PathExists(packagesDir) { entries, err := os.ReadDir(packagesDir) if err != nil { return fmt.Errorf("Failed reading directory %q: %w", packagesDir, err) } // If the BaseOS package dir is empty, try a different one, and also change the repodata directory. // This is the case for Rocky Linux 9. 
if len(entries) == 0 { packagesDir = filepath.Join(isoDir, c.definition.Source.Variant, "Packages") repodataDir = filepath.Join(isoDir, c.definition.Source.Variant, "repodata") } } if incus.PathExists(packagesDir) && incus.PathExists(repodataDir) { // Create cdrom repo for yum err = os.MkdirAll(filepath.Join(tempRootDir, "mnt", "cdrom"), 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", filepath.Join(tempRootDir, "mnt", "cdrom"), err) } // Copy repo relevant files to the cdrom err = shared.RsyncLocal(c.ctx, packagesDir, filepath.Join(tempRootDir, "mnt", "cdrom")) if err != nil { return fmt.Errorf("Failed to copy Packages: %w", err) } err = shared.RsyncLocal(c.ctx, repodataDir, filepath.Join(tempRootDir, "mnt", "cdrom")) if err != nil { return fmt.Errorf("Failed to copy repodata: %w", err) } // Find all relevant GPG keys gpgKeys, err := filepath.Glob(filepath.Join(isoDir, "RPM-GPG-KEY-*")) if err != nil { return fmt.Errorf("Failed to match gpg keys: %w", err) } // Copy the keys to the cdrom for _, key := range gpgKeys { fmt.Printf("key=%v\n", key) if len(gpgKeysPath) > 0 { gpgKeysPath += " " } gpgKeysPath += fmt.Sprintf("file:///mnt/cdrom/%s", filepath.Base(key)) err = shared.RsyncLocal(c.ctx, key, filepath.Join(tempRootDir, "mnt", "cdrom")) if err != nil { return fmt.Errorf(`Failed to run "rsync": %w`, err) } } } // Setup the mounts and chroot into the rootfs exitChroot, err := shared.SetupChroot(tempRootDir, shared.Definition{}, nil) if err != nil { return fmt.Errorf("Failed to setup chroot: %w", err) } err = scriptRunner(gpgKeysPath) if err != nil { { err := exitChroot() if err != nil { c.logger.WithField("err", err).Warn("Failed exiting chroot") } } return fmt.Errorf("Failed to run script: %w", err) } err = exitChroot() if err != nil { return fmt.Errorf("Failed exiting chroot: %w", err) } err = shared.RsyncLocal(c.ctx, tempRootDir+"/rootfs/", rootfsDir) if err != nil { return fmt.Errorf(`Failed to run "rsync": %w`, err) } return nil } func (c *commonRHEL) unpackRootfsImage(imageFile string, target string) error { installDir, err := os.MkdirTemp(c.cacheDir, "temp_") if err != nil { return fmt.Errorf("Failed to create temporary directory: %w", err) } defer func() { _ = os.RemoveAll(installDir) }() fsType := "squashfs" if filepath.Base(imageFile) == "rootfs.img" { fsType = "ext4" } err = shared.RunCommand(c.ctx, nil, nil, "mount", "-t", fsType, "-o", "ro", imageFile, installDir) if err != nil { return fmt.Errorf("Failed to mount %q: %w", imageFile, err) } defer func() { _ = unix.Unmount(installDir, 0) }() rootfsDir := installDir rootfsFile := filepath.Join(installDir, "LiveOS", "rootfs.img") if incus.PathExists(rootfsFile) { rootfsDir, err = os.MkdirTemp(c.cacheDir, "temp_") if err != nil { return fmt.Errorf("Failed to create temporary directory: %w", err) } defer os.RemoveAll(rootfsDir) err = shared.RunCommand(c.ctx, nil, nil, "mount", "-t", "ext4", "-o", "ro", rootfsFile, rootfsDir) if err != nil { return fmt.Errorf("Failed to mount %q: %w", rootfsFile, err) } defer func() { _ = unix.Unmount(rootfsDir, 0) }() } // Since rootfs is read-only, we need to copy it to a temporary rootfs // directory in order to create the minimal rootfs. 
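// Note the trailing slash on the source path below: rsync copies the
// *contents* of rootfsDir into target rather than creating a nested
// rootfs directory inside it.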
err = shared.RsyncLocal(c.ctx, rootfsDir+"/", target) if err != nil { return fmt.Errorf(`Failed to run "rsync": %w`, err) } return nil } func (c *commonRHEL) unpackRaw(filePath, rootfsDir string, scriptRunner func() error) error { roRootDir := filepath.Join(c.cacheDir, "rootfs.ro") tempRootDir := filepath.Join(c.cacheDir, "rootfs") err := os.MkdirAll(roRootDir, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", roRootDir, err) } if strings.HasSuffix(filePath, ".raw.xz") { // Uncompress raw image err := shared.RunCommand(c.ctx, nil, nil, "unxz", filePath) if err != nil { return fmt.Errorf(`Failed to run "unxz": %w`, err) } } rawFilePath := strings.TrimSuffix(filePath, ".xz") // Figure out the offset var buf bytes.Buffer err = shared.RunCommand(c.ctx, nil, &buf, "fdisk", "-l", "-o", "Start", rawFilePath) if err != nil { return fmt.Errorf(`Failed to run "fdisk": %w`, err) } output := strings.Split(buf.String(), "\n") offsetStr := strings.TrimSpace(output[len(output)-2]) offset, err := strconv.Atoi(offsetStr) if err != nil { return fmt.Errorf("Failed to convert %q: %w", offsetStr, err) } // Mount the partition read-only since we don't want to accidentally modify it. err = shared.RunCommand(c.ctx, nil, nil, "mount", "-t", "ext4", "-o", fmt.Sprintf("ro,loop,offset=%d", offset*512), rawFilePath, roRootDir) if err != nil { return fmt.Errorf("Failed to mount %q: %w", rawFilePath, err) } defer func() { _ = unix.Unmount(roRootDir, 0) }() // Since roRootDir is read-only, we need to copy it to a temporary rootfs // directory in order to create the minimal rootfs. err = shared.RsyncLocal(c.ctx, roRootDir+"/", tempRootDir) if err != nil { return fmt.Errorf(`Failed to run "rsync": %w`, err) } // Setup the mounts and chroot into the rootfs exitChroot, err := shared.SetupChroot(tempRootDir, shared.Definition{}, nil) if err != nil { return fmt.Errorf("Failed to setup chroot: %w", err) } err = scriptRunner() if err != nil { { err := exitChroot() if err != nil { c.logger.WithField("err", err).Warn("Failed exiting chroot") } } return fmt.Errorf("Failed to run script: %w", err) } err = exitChroot() if err != nil { return fmt.Errorf("Failed exiting chroot: %w", err) } err = shared.RsyncLocal(c.ctx, tempRootDir+"/rootfs/", rootfsDir) if err != nil { return fmt.Errorf(`Failed to run "rsync": %w`, err) } return nil } distrobuilder-3.0/sources/rocky-http.go000066400000000000000000000241741456216713500203670ustar00rootroot00000000000000package sources import ( "crypto/sha256" "errors" "fmt" "io" "net/http" "net/url" "path" "path/filepath" "regexp" "strings" "github.com/lxc/distrobuilder/shared" ) type rockylinux struct { commonRHEL fname string } // Run downloads the tarball and unpacks it. 
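// As an illustrative example of the name resolution in getRelease/getRegexes
// below: release "9.3", variant "minimal" and architecture "x86_64" match an
// ISO named Rocky-9.3-x86_64-minimal.iso under <mirror>/9.3/isos/x86_64/.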
func (s *rockylinux) Run() error { var err error baseURL := fmt.Sprintf("%s/%s/isos/%s/", s.definition.Source.URL, strings.ToLower(s.definition.Image.Release), s.definition.Image.ArchitectureMapped) s.fname, err = s.getRelease(s.definition.Source.URL, s.definition.Image.Release, s.definition.Source.Variant, s.definition.Image.ArchitectureMapped) if err != nil { return fmt.Errorf("Failed to get release: %w", err) } fpath := s.getTargetDir() url, err := url.Parse(baseURL) if err != nil { return fmt.Errorf("Failed to parse URL %q: %w", baseURL, err) } checksumFile := "" if !s.definition.Source.SkipVerification { // Force gpg checks when using http if url.Scheme != "https" { if len(s.definition.Source.Keys) == 0 { return errors.New("GPG keys are required if downloading from HTTP") } checksumFile = "CHECKSUM" _, err := s.DownloadHash(s.definition.Image, baseURL+checksumFile, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", baseURL+checksumFile, err) } } } _, err = s.DownloadHash(s.definition.Image, baseURL+s.fname, checksumFile, sha256.New()) if err != nil { return fmt.Errorf("Failed to download %q: %w", baseURL+s.fname, err) } s.logger.WithField("file", filepath.Join(fpath, s.fname)).Info("Unpacking ISO") err = s.unpackISO(filepath.Join(fpath, s.fname), s.rootfsDir, s.isoRunner) if err != nil { return fmt.Errorf("Failed to unpack ISO: %w", err) } return nil } func (s *rockylinux) isoRunner(gpgKeysPath string) error { repoURL := "mirrorlist=http://mirrors.rockylinux.org/mirrorlist?arch=\\$basearch&repo=BaseOS-\\$releasever" if strings.Contains(s.definition.Source.URL, "/vault/") { repoURL = fmt.Sprintf("baseurl=http://dl.rockylinux.org/vault/rocky/%s/BaseOS/\\$basearch/os/", s.definition.Image.Release) } err := shared.RunScript(s.ctx, fmt.Sprintf(`#!/bin/sh set -eux GPG_KEYS="%s" RELEASE="%s" REPO_URL="%s" # Create required files touch /etc/mtab /etc/fstab yum_args="" mkdir -p /etc/yum.repos.d if [ -d /mnt/cdrom ]; then # Install initial package set cd /mnt/cdrom/Packages rpm -ivh --nodeps $(ls yum-*.rpm | head -n1) # Add cdrom repo cat <<- EOF > /etc/yum.repos.d/cdrom.repo [cdrom] name=Install CD-ROM baseurl=file:///mnt/cdrom enabled=0 EOF if [ "${RELEASE}" -eq 9 ]; then cat <<- "EOF" > /etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9 -----BEGIN PGP PUBLIC KEY BLOCK----- Version: resf.keykeeper.v1 Comment: Keykeeper xsFNBGJ5RksBEADF/Lzssm7uryV6+VHAgL36klyCVcHwvx9Bk853LBOuHVEZWsme kbJF3fQG7i7gfCKGuV5XW15xINToe4fBThZteGJziboSZRpkEQ2z3lYcbg34X7+d co833lkBNgz1v6QO7PmAdY/x76Q6Hx0J9yiJWd+4j+vRi4hbWuh64vUtTd7rPwk8 0y3g4oK1YT0NR0Xm/QUO9vWmkSTVflQ6y82HhHIUrG+1vQnSOrWaC0O1lqUI3Nuo b6jTARCmbaPsi+XVQnBbsnPPq6Tblwc+NYJSqj5d9nT0uEXT7Zovj4Je5oWVFXp9 P1OWkbo2z5XkKjoeobM/zKDESJR78h+YQAN9IOKFjL/u/Gzrk1oEgByCABXOX+H5 hfucrq5U3bbcKy4e5tYgnnZxqpELv3fN/2l8iZknHEh5aYNT5WXVHpD/8u2rMmwm I9YTEMueEtmVy0ZV3opUzOlC+3ZUwjmvAJtdfJyeVW/VMy3Hw3Ih0Fij91rO613V 7n72ggVlJiX25jYyT4AXlaGfAOMndJNVgBps0RArOBYsJRPnvfHlLi5cfjVd7vYx QhGX9ODYuvyJ/rW70dMVikeSjlBDKS08tvdqOgtiYy4yhtY4ijQC9BmCE9H9gOxU FN297iLimAxr0EVsED96fP96TbDGILWsfJuxAvoqmpkElv8J+P1/F7to2QARAQAB zU9Sb2NreSBFbnRlcnByaXNlIFNvZnR3YXJlIEZvdW5kYXRpb24gLSBSZWxlYXNl IGtleSAyMDIyIDxyZWxlbmdAcm9ja3lsaW51eC5vcmc+wsGKBBMBCAA0BQJieUZL FiEEIcslauFvxUxuZSlJcC1CbTUNJ10CGwMCHgECGQEDCwkHAhUIAxYAAgIiAQAK CRBwLUJtNQ0nXWQ5D/9472seOyRO6//bQ2ns3w9lE+aTLlJ5CY0GSTb4xNuyv+AD IXpgvLSMtTR0fp9GV3vMw6QIWsehDqt7O5xKWi+3tYdaXRpb1cvnh8r/oCcvI4uL k8kImNgsx+Cj+drKeQo03vFxBTDi1BTQFkfEt32fA2Aw5gYcGElM717sNMAMQFEH P+OW5hYDH4kcLbtUypPXFbcXUbaf6jUjfiEp5lLjqquzAyDPLlkzMr5RVa9n3/rI 
R6OQp5loPVzCRZMgDLALBU2TcFXLVP+6hAW8qM77c+q/rOysP+Yd+N7GAd0fvEvA mfeA4Y6dP0mMRu96EEAJ1qSKFWUul6K6nuqy+JTxktpw8F/IBAz44na17Tf02MJH GCUWyM0n5vuO5kK+Ykkkwd+v43ZlqDnwG7akDkLwgj6O0QNx2TGkdgt3+C6aHN5S MiF0pi0qYbiN9LO0e05Ai2r3zTFC/pCaBWlG1ph2jx1pDy4yUVPfswWFNfe5I+4i CMHPRFsZNYxQnIA2Prtgt2YMwz3VIGI6DT/Z56Joqw4eOfaJTTQSXCANts/gD7qW D3SZXPc7wQD63TpDEjJdqhmepaTECbxN7x/p+GwIZYWJN+AYhvrfGXfjud3eDu8/ i+YIbPKH1TAOMwiyxC106mIL705p+ORf5zATZMyB8Y0OvRIz5aKkBDFZM2QN6A== =PzIf -----END PGP PUBLIC KEY BLOCK----- EOF # Override the GPG key as the one inside the ISO doesn't work. GPG_KEYS=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Rocky-9 fi if [ -n "${GPG_KEYS}" ]; then echo gpgcheck=1 >> /etc/yum.repos.d/cdrom.repo echo gpgkey=${GPG_KEYS} >> /etc/yum.repos.d/cdrom.repo else echo gpgcheck=0 >> /etc/yum.repos.d/cdrom.repo fi yum_args="--disablerepo=* --enablerepo=cdrom" yum ${yum_args} -y --releasever="${RELEASE}" reinstall yum else if ! [ -f /etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial ]; then mkdir -p /etc/pki/rpm-gpg if [ "${RELEASE}" -eq 8 ]; then cat <<- "EOF" > /etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v2.0.22 (GNU/Linux) mQINBGAofzYBEAC6yS1azw6f3wmaVd//3aSy6O2c9+jeetulRQvg2LvhRRS1eNqp /x9tbBhfohu/tlDkGpYHV7diePgMml9SZDy1sKlI3tDhx6GZ3xwF0fd1vWBZpmNk D9gRkUmYBeLotmcXQZ8ZpWLicosFtDpJEYpLUhuIgTKwt4gxJrHvkWsGQiBkJxKD u3/RlL4IYA3Ot9iuCBflc91EyAw1Yj0gKcDzbOqjvlGtS3ASXgxPqSfU0uLC9USF uKDnP2tcnlKKGfj0u6VkqISliSuRAzjlKho9Meond+mMIFOTT6qp4xyu+9Dj3IjZ IC6rBXRU3xi8z0qYptoFZ6hx70NV5u+0XUzDMXdjQ5S859RYJKijiwmfMC7gZQAf OkdOcicNzen/TwD/slhiCDssHBNEe86Wwu5kmDoCri7GJlYOlWU42Xi0o1JkVltN D8ZId+EBDIms7ugSwGOVSxyZs43q2IAfFYCRtyKHFlgHBRe9/KTWPUrnsfKxGJgC Do3Yb63/IYTvfTJptVfhQtL1AhEAeF1I+buVoJRmBEyYKD9BdU4xQN39VrZKziO3 hDIGng/eK6PaPhUdq6XqvmnsZ2h+KVbyoj4cTo2gKCB2XA7O2HLQsuGduHzYKNjf QR9j0djjwTrsvGvzfEzchP19723vYf7GdcLvqtPqzpxSX2FNARpCGXBw9wARAQAB tDNSZWxlYXNlIEVuZ2luZWVyaW5nIDxpbmZyYXN0cnVjdHVyZUByb2NreWxpbnV4 Lm9yZz6JAk4EEwEIADgWIQRwUcRwqSn0VM6+N7cVr12sbXRaYAUCYCh/NgIbDwUL CQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRAVr12sbXRaYLFmEACSMvoO1FDdyAbu 1m6xEzDhs7FgnZeQNzLZECv2j+ggFSJXezlNVOZ5I1I8umBan2ywfKQD8M+IjmrW k9/7h9i54t8RS/RN7KNo7ECGnKXqXDPzBBTs1Gwo1WzltAoaDKUfXqQ4oJ4aCP/q /XPVWEzgpJO1XEezvCq8VXisutyDiXEjjMIeBczxb1hbamQX+jLTIQ1MDJ4Zo1YP zlUqrHW434XC2b1/WbSaylq8Wk9cksca5J+g3FqTlgiWozyy0uxygIRjb6iTzKXk V7SYxeXp3hNTuoUgiFkjh5/0yKWCwx7aQqlHar9GjpxmBDAO0kzOlgtTw//EqTwR KnYZLig9FW0PhwvZJUigr0cvs/XXTTb77z/i/dfHkrjVTTYenNyXogPtTtSyxqca 61fbPf0B/S3N43PW8URXBRS0sykpX4SxKu+PwKCqf+OJ7hMEVAapqzTt1q9T7zyB QwvCVx8s7WWvXbs2d6ZUrArklgjHoHQcdxJKdhuRmD34AuXWCLW+gH8rJWZpuNl3 +WsPZX4PvjKDgMw6YMcV7zhWX6c0SevKtzt7WP3XoKDuPhK1PMGJQqQ7spegGB+5 DZvsJS48Ip0S45Qfmj82ibXaCBJHTNZE8Zs+rdTjQ9DS5qvzRA1sRA1dBb/7OLYE JmeWf4VZyebm+gc50szsg6Ut2yT8hw== =AiP8 -----END PGP PUBLIC KEY BLOCK----- EOF else cat <<- "EOF" > /etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial -----BEGIN PGP PUBLIC KEY BLOCK----- Version: resf.keykeeper.v1 Comment: Keykeeper xsFNBGJ5RksBEADF/Lzssm7uryV6+VHAgL36klyCVcHwvx9Bk853LBOuHVEZWsme kbJF3fQG7i7gfCKGuV5XW15xINToe4fBThZteGJziboSZRpkEQ2z3lYcbg34X7+d co833lkBNgz1v6QO7PmAdY/x76Q6Hx0J9yiJWd+4j+vRi4hbWuh64vUtTd7rPwk8 0y3g4oK1YT0NR0Xm/QUO9vWmkSTVflQ6y82HhHIUrG+1vQnSOrWaC0O1lqUI3Nuo b6jTARCmbaPsi+XVQnBbsnPPq6Tblwc+NYJSqj5d9nT0uEXT7Zovj4Je5oWVFXp9 P1OWkbo2z5XkKjoeobM/zKDESJR78h+YQAN9IOKFjL/u/Gzrk1oEgByCABXOX+H5 hfucrq5U3bbcKy4e5tYgnnZxqpELv3fN/2l8iZknHEh5aYNT5WXVHpD/8u2rMmwm I9YTEMueEtmVy0ZV3opUzOlC+3ZUwjmvAJtdfJyeVW/VMy3Hw3Ih0Fij91rO613V 7n72ggVlJiX25jYyT4AXlaGfAOMndJNVgBps0RArOBYsJRPnvfHlLi5cfjVd7vYx 
QhGX9ODYuvyJ/rW70dMVikeSjlBDKS08tvdqOgtiYy4yhtY4ijQC9BmCE9H9gOxU FN297iLimAxr0EVsED96fP96TbDGILWsfJuxAvoqmpkElv8J+P1/F7to2QARAQAB zU9Sb2NreSBFbnRlcnByaXNlIFNvZnR3YXJlIEZvdW5kYXRpb24gLSBSZWxlYXNl IGtleSAyMDIyIDxyZWxlbmdAcm9ja3lsaW51eC5vcmc+wsGKBBMBCAA0BQJieUZL FiEEIcslauFvxUxuZSlJcC1CbTUNJ10CGwMCHgECGQEDCwkHAhUIAxYAAgIiAQAK CRBwLUJtNQ0nXWQ5D/9472seOyRO6//bQ2ns3w9lE+aTLlJ5CY0GSTb4xNuyv+AD IXpgvLSMtTR0fp9GV3vMw6QIWsehDqt7O5xKWi+3tYdaXRpb1cvnh8r/oCcvI4uL k8kImNgsx+Cj+drKeQo03vFxBTDi1BTQFkfEt32fA2Aw5gYcGElM717sNMAMQFEH P+OW5hYDH4kcLbtUypPXFbcXUbaf6jUjfiEp5lLjqquzAyDPLlkzMr5RVa9n3/rI R6OQp5loPVzCRZMgDLALBU2TcFXLVP+6hAW8qM77c+q/rOysP+Yd+N7GAd0fvEvA mfeA4Y6dP0mMRu96EEAJ1qSKFWUul6K6nuqy+JTxktpw8F/IBAz44na17Tf02MJH GCUWyM0n5vuO5kK+Ykkkwd+v43ZlqDnwG7akDkLwgj6O0QNx2TGkdgt3+C6aHN5S MiF0pi0qYbiN9LO0e05Ai2r3zTFC/pCaBWlG1ph2jx1pDy4yUVPfswWFNfe5I+4i CMHPRFsZNYxQnIA2Prtgt2YMwz3VIGI6DT/Z56Joqw4eOfaJTTQSXCANts/gD7qW D3SZXPc7wQD63TpDEjJdqhmepaTECbxN7x/p+GwIZYWJN+AYhvrfGXfjud3eDu8/ i+YIbPKH1TAOMwiyxC106mIL705p+ORf5zATZMyB8Y0OvRIz5aKkBDFZM2QN6A== =PzIf -----END PGP PUBLIC KEY BLOCK----- EOF fi fi cat <<- EOF > /etc/yum.repos.d/Rocky-BaseOS.repo [BaseOS] name=Rocky-\$releasever - Base ${REPO_URL} gpgcheck=1 enabled=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial EOF # Use dnf in the boot iso since yum isn't available alias yum=dnf fi pkgs="basesystem Rocky-release yum" # Create a minimal rootfs mkdir /rootfs yum ${yum_args} --installroot=/rootfs -y --releasever="${RELEASE}" --skip-broken install ${pkgs} rm -rf /rootfs/var/cache/yum `, gpgKeysPath, s.definition.Image.Release, repoURL)) if err != nil { return fmt.Errorf("Failed to run ISO script: %w", err) } return nil } func (s *rockylinux) getRelease(URL, release, variant, arch string) (string, error) { u := URL + path.Join("/", strings.ToLower(release), "isos", arch) resp, err := http.Get(u) if err != nil { return "", fmt.Errorf("Failed to GET %q: %w", u, err) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to read body: %w", err) } re := s.getRegexes(arch, variant, release) for _, r := range re { matches := r.FindAllString(string(body), -1) if len(matches) > 0 { return matches[len(matches)-1], nil } } return "", errors.New("Failed to find release") } func (s *rockylinux) getRegexes(arch string, variant string, release string) []*regexp.Regexp { releaseFields := strings.Split(release, ".") var re []string switch len(releaseFields) { case 1: re = append(re, fmt.Sprintf("Rocky-%s(.\\d+)*-%s-(?i:%s).iso", releaseFields[0], arch, variant)) case 2: re = append(re, fmt.Sprintf("Rocky-%s.%s-%s-(?i:%s).iso", releaseFields[0], releaseFields[1], arch, variant)) } regexes := make([]*regexp.Regexp, len(re)) for i, r := range re { regexes[i] = regexp.MustCompile(r) } return regexes } distrobuilder-3.0/sources/rootfs-http.go000066400000000000000000000017441456216713500205520ustar00rootroot00000000000000package sources import ( "fmt" "net/url" "path" "path/filepath" "github.com/lxc/distrobuilder/shared" ) type rootfs struct { common } // Run downloads a tarball. func (s *rootfs) Run() error { URL, err := url.Parse(s.definition.Source.URL) if err != nil { return fmt.Errorf("Failed to parse URL: %w", err) } var fpath string var filename string if URL.Scheme == "file" { fpath = filepath.Dir(URL.Path) filename = filepath.Base(URL.Path) } else { fpath, err = s.DownloadHash(s.definition.Image, s.definition.Source.URL, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", s. 
definition.Source.URL, err) } filename = path.Base(s.definition.Source.URL) } s.logger.WithField("file", filepath.Join(fpath, filename)).Info("Unpacking image") // Unpack err = shared.Unpack(filepath.Join(fpath, filename), s.rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", filepath.Join(fpath, filename), err) } return nil } distrobuilder-3.0/sources/rpmbootstrap.go000066400000000000000000000041721456216713500210130ustar00rootroot00000000000000package sources import ( "fmt" "os" "path" "github.com/lxc/distrobuilder/shared" ) type rpmbootstrap struct { common } func (s *rpmbootstrap) yumordnf() (cmd string, err error) { // check whether yum or dnf command exists for _, cmd = range []string{"yum", "dnf"} { if err = shared.RunCommand(s.ctx, nil, nil, cmd, "--version"); err == nil { return } } cmd = "" err = fmt.Errorf("Command yum or dnf not found, sudo apt-get install yum or sudo apt-get install dnf and try again") return } func (s *rpmbootstrap) repodirs() (dir string, err error) { reposdir := path.Join(s.sourcesDir, "etc", "yum.repos.d") err = os.MkdirAll(reposdir, 0755) if err != nil { return "", err } distribution := s.definition.Image.Distribution content := s.definition.Source.URL if distribution == "" || content == "" { err = fmt.Errorf("No valid distribution and source url specified") return "", err } err = os.WriteFile(path.Join(reposdir, distribution+".repo"), []byte(content), 0644) if err != nil { return "", err } return reposdir, nil } // Run runs yum --installroot. func (s *rpmbootstrap) Run() (err error) { cmd, err := s.yumordnf() if err != nil { return err } repodir, err := s.repodirs() if err != nil { return err } release := s.definition.Image.Release args := []string{fmt.Sprintf("--installroot=%s", s.rootfsDir), fmt.Sprintf("--releasever=%s", release), fmt.Sprintf("--setopt=reposdir=%s", repodir), "install", "-y"} os.RemoveAll(s.rootfsDir) earlyPackagesRemove := s.definition.GetEarlyPackages("remove") for _, pkg := range earlyPackagesRemove { args = append(args, fmt.Sprintf("--exclude=%s", pkg)) } pkgs := []string{"yum", "dnf"} components := s.definition.Source.Components for _, pkg := range components { pkg, err = shared.RenderTemplate(pkg, s.definition) if err != nil { return err } pkgs = append(pkgs, pkg) } earlyPackagesInstall := s.definition.GetEarlyPackages("install") pkgs = append(pkgs, earlyPackagesInstall...) args = append(args, pkgs...) // Install if err = shared.RunCommand(s.ctx, nil, nil, cmd, args...); err != nil { return err } return nil } distrobuilder-3.0/sources/slackware-http.go000066400000000000000000000132221456216713500212040ustar00rootroot00000000000000package sources import ( "errors" "fmt" "net/url" "path" "path/filepath" "strings" incus "github.com/lxc/incus/shared/util" "gopkg.in/antchfx/htmlquery.v1" "github.com/lxc/distrobuilder/shared" ) type slackware struct { common } // Run downloads Slackware Linux. 
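// It downloads the required package set for the selected architecture, unpacks pkgtools, and installs the packages into the rootfs with installpkg.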
func (s *slackware) Run() error { u, err := url.Parse(s.definition.Source.URL) if err != nil { return fmt.Errorf("Failed to parse %q: %w", s.definition.Source.URL, err) } mirrorPath := "" slackpkgPath := "" // set mirror path based on architecture if s.definition.Image.ArchitectureMapped == "i586" { mirrorPath = path.Join(u.Path, fmt.Sprintf("slackware-%s", s.definition.Image.Release), "slackware") slackpkgPath = s.definition.Source.URL + fmt.Sprintf("slackware-%s", s.definition.Image.Release) } else if s.definition.Image.ArchitectureMapped == "x86_64" { mirrorPath = path.Join(u.Path, fmt.Sprintf("slackware64-%s", s.definition.Image.Release), "slackware64") slackpkgPath = s.definition.Source.URL + fmt.Sprintf("slackware64-%s", s.definition.Image.Release) } else { return fmt.Errorf("Invalid architecture: %s", s.definition.Image.Architecture) } // base software packages and libraries paths := []string{path.Join(mirrorPath, "a")} // additional required libraries paths = append(paths, path.Join(mirrorPath, "ap"), path.Join(mirrorPath, "d"), path.Join(mirrorPath, "l"), path.Join(mirrorPath, "n")) requiredPkgs := []string{"sysvinit", "sysvinit-scripts", "aaa_base", "aaa_elflibs", "aaa_libraries", "coreutils", "glibc-solibs", "aaa_glibc-solibs", "aaa_terminfo", "pam", "cracklib", "libpwquality", "e2fsprogs", "nvi", "pkgtools", "shadow", "tar", "xz", "bash", "etc", "gzip", "pcre2", "libpsl", "wget", "gnupg", "elvis", "slackpkg", "ncurses", "bin", "bzip2", "grep", "acl", "pcre", "gmp", "attr", "sed", "dialog", "file", "gawk", "time", "gettext", "libcgroup", "patch", "sysfsutils", "time", "tree", "utempter", "which", "util-linux", "elogind", "libseccomp", "mpfr", "libunistring", "diffutils", "procps", "findutils", "iproute2", "dhcpcd", "openssl", "perl", "ca-certificates", "inetd", "iputils", "libmnl", "network-scripts", "libaio", "glibc", "nano", "hostname"} var pkgDir string for _, p := range paths { u.Path = p pkgDir, err = s.downloadFiles(s.definition.Image, u.String(), requiredPkgs) if err != nil { return fmt.Errorf("Failed to download packages: %w", err) } } // find package tools matches, err := filepath.Glob(filepath.Join(pkgDir, "pkgtools-*.t*z")) if err != nil { return fmt.Errorf("Failed to match pattern: %w", err) } err = shared.RunCommand(s.ctx, nil, nil, "tar", "-pxf", matches[0], "-C", pkgDir, "sbin/") if err != nil { return fmt.Errorf("Failed to unpack %q: %w", matches[0], err) } rootfsDirAbs, err := filepath.Abs(s.rootfsDir) if err != nil { return fmt.Errorf("Failed to get absolute path: %w", err) } // build rootfs err = shared.RunScript(s.ctx, fmt.Sprintf(`#!/bin/sh set -eux # Input variables PKG_DIR="%s" ROOTFS_DIR="%s" # Environment export LC_ALL="C" export LANG="C" # Don't override PATH sed -i "/^export PATH/d" ${PKG_DIR}/sbin/installpkg* # Install all packages # not compatible with versions < 13.37 for pkg in $(ls -cr ${PKG_DIR}/*.t*z); do # Prevent install script for sysvinit from trying to run /sbin/init if (echo ${pkg} | grep -E 'sysvinit-[0-9]') ; then mkdir -p ${PKG_DIR}/sysvinit && cd ${PKG_DIR}/sysvinit tar -xf ${pkg} sed -i 's@/sbin/init@#/sbin/init@' install/doinst.sh tar -cJf ${pkg} * ${PKG_DIR}/sbin/installpkg --terse --root ${ROOTFS_DIR} ${pkg} cd - else ${PKG_DIR}/sbin/installpkg --terse --root ${ROOTFS_DIR} ${pkg} fi done # Disable kernel/sys modifications sed -i 's@/bin/dmesg@#/bin/dmesg@g' ${ROOTFS_DIR}/etc/rc.d/rc.M sed -i 's@/sbin/modprobe@echo@g' ${ROOTFS_DIR}/etc/rc.d/rc.inet1 if [ -f ${ROOTFS_DIR}/etc/rc.d/rc.elogind ]; then sed -i 's@cd /sys/fs/cgroup;@@g' 
${ROOTFS_DIR}/etc/rc.d/rc.elogind fi # Enable networking on eth0 sed -i 's/USE_DHCP\[0\]=""/USE_DHCP\[0\]="yes"/' ${ROOTFS_DIR}/etc/rc.d/rc.inet1.conf sed -i 's/USE_DHCP6\[0\]=""/USE_DHCP6\[0\]="yes"/' ${ROOTFS_DIR}/etc/rc.d/rc.inet1.conf # Some services expect fstab touch ${ROOTFS_DIR}/etc/fstab # Add mirror to slackpkg mkdir -p ${ROOTFS_DIR}/etc/slackpkg echo "%s" > ${ROOTFS_DIR}/etc/slackpkg/mirrors `, pkgDir, rootfsDirAbs, slackpkgPath)) if err != nil { return fmt.Errorf("Failed to run script: %w", err) } return nil } func (s *slackware) downloadFiles(def shared.DefinitionImage, URL string, requiredPkgs []string) (string, error) { doc, err := htmlquery.LoadURL(URL) if err != nil { return "", fmt.Errorf("Failed to load URL %q: %w", URL, err) } if doc == nil { return "", errors.New("Empty HTML document") } nodes := htmlquery.Find(doc, `//a/@href`) var dir string for _, n := range nodes { target := htmlquery.InnerText(n) if strings.HasSuffix(target, ".txz") || strings.HasSuffix(target, ".tgz") { pkgName := strings.Split(target, "-")[0] twoPkgName := strings.Split(target, "-")[0] + "-" + strings.Split(target, "-")[1] if !((incus.ValueInSlice(pkgName, requiredPkgs)) || (incus.ValueInSlice(twoPkgName, requiredPkgs))) { continue } // package dir, err = s.DownloadHash(def, fmt.Sprintf("%s/%s", URL, target), "", nil) if err != nil { return "", fmt.Errorf("Failed to download %q: %w", fmt.Sprintf("%s/%s", URL, target), err) } } else if strings.HasSuffix(target, ".txz/") || strings.HasSuffix(target, ".tgz/") { // directory u, err := url.Parse(URL) if err != nil { return "", fmt.Errorf("Failed to parse %q: %w", URL, err) } u.Path = path.Join(u.Path, target) return s.downloadFiles(def, u.String(), requiredPkgs) } } return dir, nil } distrobuilder-3.0/sources/source.go000066400000000000000000000052441456216713500175600ustar00rootroot00000000000000package sources import ( "context" "errors" "github.com/sirupsen/logrus" "github.com/lxc/distrobuilder/shared" ) // ErrUnknownDownloader represents the unknown downloader error. var ErrUnknownDownloader = errors.New("Unknown downloader") type downloader interface { init(ctx context.Context, logger *logrus.Logger, definition shared.Definition, rootfsDir string, cacheDir string, sourcesDir string) Downloader } // Downloader represents a source downloader. 
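// Implementations are registered in the downloaders map below and selected by name through Load.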
type Downloader interface { Run() error } var downloaders = map[string]func() downloader{ "almalinux-http": func() downloader { return &almalinux{} }, "alpinelinux-http": func() downloader { return &alpineLinux{} }, "alt-http": func() downloader { return &altLinux{} }, "apertis-http": func() downloader { return &apertis{} }, "archlinux-http": func() downloader { return &archlinux{} }, "busybox": func() downloader { return &busybox{} }, "centos-http": func() downloader { return ¢OS{} }, "debootstrap": func() downloader { return &debootstrap{} }, "docker-http": func() downloader { return &docker{} }, "fedora-http": func() downloader { return &fedora{} }, "funtoo-http": func() downloader { return &funtoo{} }, "gentoo-http": func() downloader { return &gentoo{} }, "nixos-http": func() downloader { return &nixos{} }, "openeuler-http": func() downloader { return &openEuler{} }, "opensuse-http": func() downloader { return &opensuse{} }, "openwrt-http": func() downloader { return &openwrt{} }, "oraclelinux-http": func() downloader { return &oraclelinux{} }, "plamolinux-http": func() downloader { return &plamolinux{} }, "rockylinux-http": func() downloader { return &rockylinux{} }, "rootfs-http": func() downloader { return &rootfs{} }, "rpmbootstrap": func() downloader { return &rpmbootstrap{} }, "springdalelinux-http": func() downloader { return &springdalelinux{} }, "ubuntu-http": func() downloader { return &ubuntu{} }, "voidlinux-http": func() downloader { return &voidlinux{} }, "vyos-http": func() downloader { return &vyos{} }, "slackware-http": func() downloader { return &slackware{} }, } // Load loads and initializes a downloader. func Load(ctx context.Context, downloaderName string, logger *logrus.Logger, definition shared.Definition, rootfsDir string, cacheDir string, sourcesDir string) (Downloader, error) { df, ok := downloaders[downloaderName] if !ok { return nil, ErrUnknownDownloader } d := df() d.init(ctx, logger, definition, rootfsDir, cacheDir, sourcesDir) return d, nil } distrobuilder-3.0/sources/springdalelinux-http.go000066400000000000000000000224441456216713500224460ustar00rootroot00000000000000package sources import ( "fmt" "path/filepath" "strings" "github.com/lxc/distrobuilder/shared" ) type springdalelinux struct { commonRHEL fname string majorVersion string } // Run downloads the tarball and unpacks it. 
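// It fetches boot.iso for the requested release and architecture, then unpacks it and runs isoRunner to bootstrap the rootfs.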
func (s *springdalelinux) Run() error { s.majorVersion = strings.Split(s.definition.Image.Release, ".")[0] // Example: http://puias.princeton.edu/data/puias/8.3/x86_64/os/images/boot.iso baseURL := fmt.Sprintf("%s/%s/%s/os/images/", s.definition.Source.URL, strings.ToLower(s.definition.Image.Release), s.definition.Image.ArchitectureMapped) s.fname = "boot.iso" fpath := s.getTargetDir() _, err := s.DownloadHash(s.definition.Image, baseURL+s.fname, "", nil) if err != nil { return fmt.Errorf("Error downloading %q: %w", baseURL+s.fname, err) } s.logger.WithField("file", filepath.Join(fpath, s.fname)).Info("Unpacking ISO") err = s.unpackISO(filepath.Join(fpath, s.fname), s.rootfsDir, s.isoRunner) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", filepath.Join(fpath, s.fname), err) } return nil } func (s *springdalelinux) isoRunner(gpgKeysPath string) error { err := shared.RunScript(s.ctx, fmt.Sprintf(`#!/bin/sh set -eux GPG_KEYS="%s" # Create required files touch /etc/mtab /etc/fstab yum_args="" mkdir -p /etc/yum.repos.d if [ -d /mnt/cdrom ]; then # Install initial package set cd /mnt/cdrom/Packages rpm -ivh --nodeps $(ls rpm-*.rpm | head -n1) rpm -ivh --nodeps $(ls yum-*.rpm | head -n1) # Add cdrom repo cat <<- EOF > /etc/yum.repos.d/cdrom.repo [cdrom] name=Install CD-ROM baseurl=file:///mnt/cdrom enabled=0 EOF if [ -n "${GPG_KEYS}" ]; then echo gpgcheck=1 >> /etc/yum.repos.d/cdrom.repo echo gpgkey=${GPG_KEYS} >> /etc/yum.repos.d/cdrom.repo else echo gpgcheck=0 >> /etc/yum.repos.d/cdrom.repo fi yum_args="--disablerepo=* --enablerepo=cdrom" yum ${yum_args} -y reinstall yum else . /etc/os-release mkdir -p /etc/pki/rpm-gpg if [ "${VERSION_ID:0:1}" -eq "9" ]; then cat <<- "EOF" > /etc/pki/rpm-gpg/RPM-GPG-KEY-springdale -----BEGIN PGP PUBLIC KEY BLOCK----- mQINBGKMDvgBEADXuRn0LRppwvUH6Ljkjc0V62bBEKIYmFw8ED0WIcSVgNtSAA4k HX2w335jDZA5gVLxnxQ4OvsWEQQ4vu8BLA6AfuJHTn/Z03ON5h090drCfns2aEYo PTRB3BrzazhaEnYFJUU9zisN1py8eiiwITrSDb0/DwjFwbo9QZbGbvZh1xnmJOGd 8S5DzkOgBjd3+0I+agUXZvnFeRAyW7GHD78rsLImj/IihGr0/PwTCBJ4QKv6nAIi qRTRhqxLM10ImlJKemvmJcRUi+BLvA15tYokKI7yn1wF+XTzisF7Ol80vyUSylOV TrV5s6H4/CBXDXG4QAGvWZOgtCf+PYzmWytDp/fzhmqKxnuVvNQHSuFy3Wt9rZ+F P9cYqCuX2rkt0fLnb4/X2w6kPzkVM5c5nEAiESiF8dYuxqYirAARpoumPrDd7HUU kBvIAF8wWRJOWebeF0qCcV1EPqwNpkKHDij78y9ufw4T/1Hw3NMjccmA5kci9QVJ 02TDLIhRArziOg1jc0pqq/r1CFVTPLitt5bOznRVfl+CIyHQNz7WW/fzBCUtUMob RPHyoFm9/qVcgLZW6PAzRFeOf4NdIDvVpS7kwcEOWpVFjBKgCWIlBG8QK/jKMkwl USzvEv7RqLYLze0jdDhjr3rXpOARdoOKSLo6X1TUs7rEUaaY++avdpabpQARAQAB tF1TcHJpbmdkYWxlIE9wZW4gRW50ZXJwcmlzZSBMaW51eCA5IChSUE0gU2lnbmlu ZyBrZXkgZm9yIFNETDkpIDxzcHJpbmdkYWxlQG1hdGgucHJpbmNldG9uLmVkdT6J AlIEEwEIADwWIQQn3+jMtnIJ4pDQy8j35Wtav0fFsgUCYowO+AIbAwULCQgHAgMi AgEGFQoJCAsCBBYCAwECHgcCF4AACgkQ9+VrWr9HxbKngA//Sod2JoKUBxPyHFsq MNnxA3dy1fSyMn4xso/qeJZ6Hdpf17w4qD0bl7vDWjLGMFepoAf3a1qzDZN+Q1K8 34YOtOb3+V09dfKebgsAptlz5Wavhl66Fo2j8uUeuRKtcL2qX0jzNysAd0VghNRN qjEfCItwZJ9KNylF1dGVSe1NcL3DXsALYlW9/NTdfl3LuHPaJH1eqV5oFigd3lm2 ELJpteUp223CN20DSQDkLuyw1AOtz2Ui629GgHL9aZDZefcXA+ab1GncNFefBAw6 KayAlAKc9afVO63mTWnHhade90czn0owoWMVjRfrvX/t/XXLLpGHCwPWiMaimPgy g+grbaNuHPY4wcPNBqV2ztcMhls+FK3I9fvqG2LP35QqDclTRDTjMjzm42Nkk2I5 nzgL43m2m1qPh3pqn8iuhkEpWtBGJ3UzgzzB8WWhTo94po7b87ehZpzmlnnb4wg/ oEevHsak4NzN5XvsZUtkx86VWL5P4PMZfJqH/RYkbb/WEbHZcgoaYCFITgdUjCil u3b7Ppqrwqnff+KbpdZPPEfhZ8rb9GHJ7C7btCeTm+6qm+BjNrddr9N/wxFelGhh IMPMEqP/IxJ1nCSgpeA59vT4b01qGyvAAWAQ/vM1a+kApY80sLOawilDsMkY7rZt WHsMUtyqJT2skTdx0/yhLmr1uE65Ag0EYowO+AEQAKFJwWVz7DFBA4/p57DcfBlZ AmZmiGE16UcwpE/u0LvJtP/lxGsaFFGAAQndYEQ8j0tHj5x8RkQWmVy8VZWYbUln 
LvZ9FzeDSLAX7ik1t9Be3MyjfVHlfQFwhLix2ttGqSsoDztXl8OWHczTv+CtR0Gp FXIusk8kv4PnJaSHtzfzn23C3O4UypTn6BAfeRYs9Jr4LVoMPorH8UcYarbH+X/F 0zn6oH17ChnV3PNyLkDgsgVeCX7ZepcIP7fkbWyy09GqLN66ZifNbgH9dZGtIb4B wEAVxmB9DYQXv5DEK4J82ZLGFPnQoj1LrEMOQOzVTWRrIrf+jwIgr//XmqzwGB/y ryaRq38e3SM3nrAVRdBd68y6kSQhFVbyhLQA0grkQwVLhg3qkwkRK2xXrcflONvE Tzmv2U5B+8GictI4xcMYJu2d8Oqs32Lc340PsLgFlHqv6p2EjWPnpZWcOHoxF92k AJfbxWgSMPYRFp4/TjXXEsbSg3diy0IGpuWgQk+/oONZ7T4WGwoiFp72VBwMNcrJ 7m4qciZHOT3qZlXx1qD+AWZFons+w6Nx0xDx2TYghkXLIpVBE8aSrLbsNNX0TKNN BHHg8dqpgT3YU1T1PvTXuJTmG4ktqe8VYfG6MTITjIIhsZ21LUO3jkXW7arCyosE BjaHSZZW+vcgrQYvy0JfABEBAAGJAjYEGAEIACAWIQQn3+jMtnIJ4pDQy8j35Wta v0fFsgUCYowO+AIbDAAKCRD35Wtav0fFsqlKEADVfGT5hHmFEPKombRqDbAz/acU Xdj5sjSFWTRRhzKxCEonfDsHK6ZgrCSNZZ+N5PT/W5Sk6LQ6vRsnQ3TTzpYwLsO/ P6iuDtTda2euG+lAuU/vzjIYFn+3LJ74DOIdPUxQusRgJSiclmRwqq19L3dLjSLg ufCloGgOBdfCeYq0P9V0Aa5bOv0eEI2ZTovboCRHoGyDMgxJL1+06qIGkXbuR8XZ cUv7tOJAYCgDHxaUm5IZc/VyNWJLauUu9Cp93vW1OdDt8MN+v7Gbfca6eb0cGGR+ OOr5AYGyWTaqHP/e05kkL3tAuVNMyhqxRoUkckFFwbs/EBiZ+N+8HTO5yuedZaYh K3MmL2vRKlilZvseXJjOKWlT7A1xmvTVf9l6ZRO5E+t6/B94Oio9okwKOtPENGxq GNwph/VAXD/igoo7DdzSLZDllYVBRHpIsImtzsbJ/LSWmF9aYIo4Q/AVtFxG2z3a gNZmOb0aDPiv7du+TzJnbat9lb2Elc1Bw8QKMHlyfcyPNkkfs+GLhPNYqDIFVpAr pEINesushfzhnBA3KMIfagpFxe2ZLDfxTPWbm+nVftdexNpWL7ZZkleW2POdRmW2 XjTaSn0bN/bbzqh16pEPEirKKnaXx8gSE5IYqLqOC9Yw4iYIBYrzjvBOalxjVT48 GrticHp1XlAnezB5yw== =1nA4 -----END PGP PUBLIC KEY BLOCK----- EOF elif ! [ -f /etc/pki/rpm-gpg/RPM-GPG-KEY-springdale ]; then cat <<- "EOF" > /etc/pki/rpm-gpg/RPM-GPG-KEY-springdale -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v2.0.14 (GNU/Linux) mQILBEzhkmYBEAD1+atknNh1ufb0QZNKl6uMnOnAL2bk8v6VmUjVTNTJ6JBXdREX omVRlEDv1Jw9/11tDEI0JzZieqogahJ3GDqENXhLlfpgev+YHES6jrH8XEYkin/T wxuUyKvqoAk60wqC6Uv/TkHpVOYP9WHtNbybl3Pu8f2tzqdrE20HmQgEzVjTyHEw 33WeAXWlhb/rIMcJ/edk0/hQ0Yu1NR34g/R/1azfz4uyx0qtrTDvaS7qxX5ZSUYZ tHzS8JjFpzRHI3A++Jv6yXMdzekn54TYob+DeKoRsdtfI0KryOv+92tcw4yM35CK d4apq8t35v56wsRXhwVZWEYn3BfIsdDoCF4XXH8nx0B1KJii6uYo1iiyIU10cT+g YCPVIzT+B/FqAmltx3VJg87sZ6QVFp3oFtIOlu14NQBv3wqUneCNK88EZAxi/kqy kFLq4NfWpn2dVZYRlJddWTXZ+qqaYQ6aI9HyGx5/QTI1hsxcOykrm8woZvptYeRQ IfkDFPgqTmdTqFaL5TQc43FvOLss9kBJ2FSbi3WpRIJW0/AQGc9/97SUnC0T075O R3cK+dQvI3QV3/p1UEowXlSHDFc5CrqKT11zMG9KE4UGm08yDbz0d5IbH0Cpzuku vidjBlsaQ/+OldjSFjHhCFLz55lLya6eMpPaScEKqoWNYzuNv9kDgVWXNwAGKbRN UFVJQVMgTGludXggRGlzdHJpYnV0aW9uIChSUE0gSW50ZWdyaXR5IFNpZ25hdHVy ZSkgPHB1aWFzQG1hdGgucHJpbmNldG9uLmVkdT6JAjQEEwECAB4FAkzhkmYCGwMG CwkIBwMCAxUCAwMWAgECHgECF4AACgkQFs/DM0GkCUi22xAAtoeFPRpYAoaq6+Ru nRX5GCDQl6DlOtVxLVclNZzGpnw8Extid+AOLqDXcfncyf04YhlEHj4misz/rDCI a5bRWoNjPHAgHpzCX7+I6pNr1hY9SOW94BEdng9IGGK0XhFBzflmySLZEC9E2ZYe RgWKJcDbyM9sDc2g440ICkn8DOWTvKMcQ7f0AzYtARXfmAEMqgqzNV+0wDJmdEHY 7rif51U8bCOKns/UFKSA3WqUKhn5v2xo4OVqkm+bVG3z04KRAIUWIZIK8RHEp6wk clls8afYSufJmmUeczbE/wDqEgMSE3qGlcQRxTO3EMORb3nwWo5QAA4I/QPFrFoC QZQbaLNOx8P7dnfDoarJrUPYBBUFmMKvHUnSwv696QZhz70RvgjTHcbSyrnmE76C /XU0zUpeWN6FEb77zA1pIlqVf4hqRs+PCaG2sytBQVYEpYgGnoUPSWIT4a6NJtn+ WwJHOqRYrGGTM0Z6V7IgMAkqiwEECn5eDUXYhqwUsyuVkbeOBWTc6nhsPIH3QC86 sL0X/1hztP5sDoCne6SY3X3IyglvApvsKn0TOcVCvbYNhbg2bfPdvfmtSAV3/iMU yPw2JgfcvLeF1tiMQ7i5PfgyOn0Y3/lZjcclYHa1P5PEoTCA2lU7jLm0lmgIrh4N vYD0DGRZTGNEJYUDZFgRyynIOxk= =mKoc -----END PGP PUBLIC KEY BLOCK----- EOF fi if [[ ${VERSION_ID:0:1} == "7" ]] then fname="$(curl -s http://springdale.princeton.edu/data/springdale/7/x86_64/os/Packages/ | grep -Eo 'yum-[[:digit:]].+\.noarch\.rpm')" if [ -n "${fname}" ]; then wget http://springdale.princeton.edu/data/springdale/7/x86_64/os/Packages/"${fname}" rpm -ivh 
--nodeps yum*.rpm fi fi if [[ ${VERSION_ID:0:1} == "8" || ${VERSION_ID:0:1} == "9" ]] then cat <<- "EOF" > /etc/yum.repos.d/Springdale-Base.repo [sdl8-baseos] name=Springdale core Base $releasever - $basearch mirrorlist=http://springdale.princeton.edu/data/springdale/$releasever/$basearch/os/BaseOS/mirrorlist #baseurl=http://springdale.princeton.edu/data/springdale/$releasever/$basearch/os/BaseOS gpgcheck=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-springdale [sdl8-baseos-updates] name=Springdale core Updates $releasever - $basearch mirrorlist=http://springdale.princeton.edu/data/springdale/updates/$releasever/BaseOS/$basearch/mirrorlist #baseurl=http://springdale.princeton.edu/data/springdale/updates/$releasever/BaseOS/$basearch gpgcheck=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-springdale EOF # Use dnf in the boot iso since yum isn't available alias yum=dnf fi if [[ ${VERSION_ID:0:1} == "7" ]] then cat <<- "EOF" > /etc/yum.repos.d/Springdale-Base.repo [core] name=Springdale core Base $releasever - $basearch #baseurl=file:///springdale/$releasever/$basearch/os #mirrorlist=http://mirror.math.princeton.edu/pub/springdale/puias/$releasever/$basearch/os/mirrorlist baseurl=http://springdale.princeton.edu/data/springdale/$releasever/$basearch/os gpgcheck=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-springdale [updates] name=Springdale core Updates $releasever - $basearch #baseurl=file:///springdale/updates/$releasever/en/os/$basearch #mirrorlist=http://mirror.math.princeton.edu/pub/springdale/puias/updates/$releasever/en/os/$basearch/mirrorlist baseurl=http://springdale.princeton.edu/data/springdale/updates/$releasever/en/os/$basearch gpgcheck=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-springdale EOF fi fi pkgs="basesystem springdale-release yum" # Create a minimal rootfs mkdir /rootfs echo "install rootfs" yum ${yum_args} --installroot=/rootfs -y --releasever=%s --skip-broken install ${pkgs} rm -rf /rootfs/var/cache/yum `, gpgKeysPath, s.majorVersion)) if err != nil { return fmt.Errorf("Failed to run ISO script: %w", err) } return nil } distrobuilder-3.0/sources/ubuntu-http.go000066400000000000000000000117031456216713500205540ustar00rootroot00000000000000package sources import ( "crypto/sha256" "errors" "fmt" "io" "net/http" "net/url" "os" "path/filepath" "regexp" "strings" "github.com/lxc/distrobuilder/shared" ) type ubuntu struct { common fname string fpath string } // Run downloads the tarball and unpacks it. 
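// The "default" variant fetches an ubuntu-base release tarball, while the "core" variant fetches an ubuntu-core image.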
func (s *ubuntu) Run() error { err := s.downloadImage(s.definition) if err != nil { return fmt.Errorf("Failed to download image: %w", err) } return s.unpack(filepath.Join(s.fpath, s.fname), s.rootfsDir) } func (s *ubuntu) downloadImage(definition shared.Definition) error { var baseURL string var err error switch strings.ToLower(s.definition.Image.Variant) { case "default": baseURL = fmt.Sprintf("%s/releases/%s/release/", s.definition.Source.URL, s.definition.Image.Release) if strings.ContainsAny(s.definition.Image.Release, "0123456789") { s.fname = fmt.Sprintf("ubuntu-base-%s-base-%s.tar.gz", s.definition.Image.Release, s.definition.Image.ArchitectureMapped) } else { // if release is non-numerical, find the latest release s.fname, err = getLatestRelease(baseURL, s.definition.Image.Release, s.definition.Image.ArchitectureMapped) if err != nil { return fmt.Errorf("Failed to get latest release: %w", err) } } case "core": baseURL = fmt.Sprintf("%s/%s/stable/current/", s.definition.Source.URL, s.definition.Image.Release) s.fname = fmt.Sprintf("ubuntu-core-%s-%s.img.xz", s.definition.Image.Release, s.definition.Image.ArchitectureMapped) default: return fmt.Errorf("Unknown Ubuntu variant %q", s.definition.Image.Variant) } url, err := url.Parse(baseURL) if err != nil { return fmt.Errorf("Failed to parse URL %q: %w", baseURL, err) } var fpath string checksumFile := "" // Force gpg checks when using http if !s.definition.Source.SkipVerification && url.Scheme != "https" { if len(s.definition.Source.Keys) == 0 { return errors.New("GPG keys are required if downloading from HTTP") } checksumFile = baseURL + "SHA256SUMS" fpath, err = s.DownloadHash(s.definition.Image, baseURL+"SHA256SUMS.gpg", "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", baseURL+"SHA256SUMS.gpg", err) } _, err = s.DownloadHash(s.definition.Image, checksumFile, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", checksumFile, err) } valid, err := s.VerifyFile( filepath.Join(fpath, "SHA256SUMS"), filepath.Join(fpath, "SHA256SUMS.gpg")) if err != nil { return fmt.Errorf(`Failed to verify "SHA256SUMS": %w`, err) } if !valid { return errors.New(`Invalid signature for "SHA256SUMS"`) } } s.fpath, err = s.DownloadHash(s.definition.Image, baseURL+s.fname, checksumFile, sha256.New()) if err != nil { return fmt.Errorf("Failed to download %q: %w", baseURL+s.fname, err) } return nil } func (s ubuntu) unpack(filePath, rootDir string) error { err := os.RemoveAll(rootDir) if err != nil { return fmt.Errorf("Failed to remove directory %q: %w", rootDir, err) } err = os.MkdirAll(rootDir, 0755) if err != nil { return fmt.Errorf("Failed to create directory %q: %w", rootDir, err) } s.logger.WithField("file", filePath).Info("Unpacking image") err = shared.Unpack(filePath, rootDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", filePath, err) } return nil } func getLatestRelease(baseURL, release, arch string) (string, error) { var ( resp *http.Response err error ) err = shared.Retry(func() error { resp, err = http.Get(baseURL) if err != nil { return fmt.Errorf("Failed to GET %q: %w", baseURL, err) } return nil }, 3) if err != nil { return "", err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to read body: %w", err) } regex := regexp.MustCompile(fmt.Sprintf("ubuntu-base-\\d{2}\\.\\d{2}(\\.\\d+)?-base-%s.tar.gz", arch)) releases := regex.FindAllString(string(body), -1) if len(releases) > 1 { return string(releases[len(releases)-1]), nil } 
return "", errors.New("Failed to find latest release") } func getLatestCoreBaseImage(baseURL, release, arch string) (string, error) { u, err := url.Parse(fmt.Sprintf("%s/ubuntu/%s/%s/default", baseURL, release, arch)) if err != nil { return "", fmt.Errorf("Failed to parse URL %q: %w", fmt.Sprintf("%s/ubuntu/%s/%s/default", baseURL, release, arch), err) } var resp *http.Response err = shared.Retry(func() error { resp, err = http.Get(u.String()) if err != nil { return fmt.Errorf("Failed to GET %q: %w", u.String(), err) } return nil }, 3) if err != nil { return "", err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to read body: %w", err) } regex := regexp.MustCompile(`\d{8}_\d{2}:\d{2}`) releases := regex.FindAllString(string(body), -1) if len(releases) > 1 { return fmt.Sprintf("%s/%s/rootfs.tar.xz", u.String(), releases[len(releases)-1]), nil } return "", errors.New("Failed to find latest core base image") } distrobuilder-3.0/sources/ubuntu-http_test.go000066400000000000000000000004471456216713500216160ustar00rootroot00000000000000package sources import ( "testing" "github.com/stretchr/testify/require" ) func TestUbuntuGetLatestCoreBaseImage(t *testing.T) { release, err := getLatestCoreBaseImage("https://images.linuxcontainers.org/images", "bionic", "amd64") require.NoError(t, err) require.NotEmpty(t, release) } distrobuilder-3.0/sources/utils.go000066400000000000000000000117741456216713500174250ustar00rootroot00000000000000package sources import ( "bufio" "bytes" "context" "errors" "fmt" "hash" "io" "net/http" "os" "os/exec" "path/filepath" "regexp" "strings" incus "github.com/lxc/incus/shared/util" ) // downloadChecksum downloads or opens URL, and matches fname against the // checksums inside of the downloaded or opened file. 
func downloadChecksum(ctx context.Context, client *http.Client, targetDir string, URL string, fname string, hashFunc hash.Hash, hashLen int) ([]string, error) { var ( tempFile *os.File err error ) // do not re-download checksum file if it's already present fi, err := os.Stat(filepath.Join(targetDir, URL)) if err == nil && !fi.IsDir() { tempFile, err = os.Open(filepath.Join(targetDir, URL)) if err != nil { return nil, err } defer os.Remove(tempFile.Name()) } else { tempFile, err = os.CreateTemp(targetDir, "hash.") if err != nil { return nil, err } defer os.Remove(tempFile.Name()) done := make(chan struct{}) defer close(done) _, err = incus.DownloadFileHash(ctx, client, "distrobuilder", nil, nil, "", URL, "", hashFunc, tempFile) // ignore hash mismatch if err != nil && !strings.HasPrefix(err.Error(), "Hash mismatch") { return nil, err } } _, err = tempFile.Seek(0, 0) if err != nil { return nil, fmt.Errorf("Failed setting offset in file %q: %w", tempFile.Name(), err) } checksum := getChecksum(filepath.Base(fname), hashLen, tempFile) if checksum != nil { return checksum, nil } return nil, errors.New("Could not find checksum") } func getChecksum(fname string, hashLen int, r io.Reader) []string { scanner := bufio.NewScanner(r) var matches []string var result []string regex := regexp.MustCompile("[[:xdigit:]]+") for scanner.Scan() { if !strings.Contains(scanner.Text(), fname) { continue } for _, s := range strings.Split(scanner.Text(), " ") { if !regex.MatchString(s) { continue } if hashLen == 0 || hashLen == len(strings.TrimSpace(s)) { matches = append(matches, scanner.Text()) } } } // Check common checksum file (pattern: " ") with the exact filename for _, m := range matches { fields := strings.Split(m, " ") if strings.TrimSpace(fields[len(fields)-1]) == fname { result = append(result, strings.TrimSpace(fields[0])) } } if len(result) > 0 { return result } // Check common checksum file (pattern: " ") which contains the filename for _, m := range matches { fields := strings.Split(m, " ") if strings.Contains(strings.TrimSpace(fields[len(fields)-1]), fname) { result = append(result, strings.TrimSpace(fields[0])) } } if len(result) > 0 { return result } // Special case: CentOS for _, m := range matches { for _, s := range strings.Split(m, " ") { if !regex.MatchString(s) { continue } if hashLen == 0 || hashLen == len(strings.TrimSpace(s)) { result = append(result, s) } } } if len(result) > 0 { return result } return nil } func recvGPGKeys(ctx context.Context, gpgDir string, keyserver string, keys []string) (bool, error) { args := []string{"--homedir", gpgDir} var fingerprints []string var publicKeys []string for _, k := range keys { if strings.HasPrefix(strings.TrimSpace(k), "-----BEGIN PGP PUBLIC KEY BLOCK-----") { publicKeys = append(publicKeys, strings.TrimSpace(k)) } else { fingerprints = append(fingerprints, strings.TrimSpace(k)) } } for _, f := range publicKeys { args := append(args, "--import") cmd := exec.CommandContext(ctx, "gpg", args...) cmd.Stdin = strings.NewReader(f) cmd.Env = append(os.Environ(), "LANG=C.UTF-8") var buffer bytes.Buffer cmd.Stderr = &buffer err := cmd.Run() if err != nil { return false, fmt.Errorf("Failed to run: %s: %s", strings.Join(cmd.Args, " "), strings.TrimSpace(buffer.String())) } } if keyserver != "" { args = append(args, "--keyserver", keyserver) } args = append(args, append([]string{"--recv-keys"}, fingerprints...)...) cmd := exec.CommandContext(ctx, "gpg", args...) 
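// Force a C.UTF-8 locale so the gpg output parsed below is not localized.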
cmd.Env = append(os.Environ(), "LANG=C.UTF-8") var buffer bytes.Buffer cmd.Stderr = &buffer err := cmd.Run() if err != nil { return false, fmt.Errorf("Failed to run: %s: %s", strings.Join(cmd.Args, " "), strings.TrimSpace(buffer.String())) } // Verify output var importedKeys []string var missingKeys []string lines := strings.Split(buffer.String(), "\n") for _, l := range lines { if strings.HasPrefix(l, "gpg: key ") && (strings.HasSuffix(l, " imported") || strings.HasSuffix(l, " not changed")) { key := strings.Split(l, " ") importedKeys = append(importedKeys, strings.Split(key[2], ":")[0]) } } // Figure out which key(s) couldn't be imported if len(importedKeys) < len(fingerprints) { for _, j := range fingerprints { found := false for _, k := range importedKeys { if strings.HasSuffix(j, k) { found = true } } if !found { missingKeys = append(missingKeys, j) } } return false, fmt.Errorf("Failed to import keys: %s", strings.Join(missingKeys, " ")) } return true, nil } distrobuilder-3.0/sources/utils_test.go000066400000000000000000000157361456216713500204660ustar00rootroot00000000000000package sources import ( "bytes" "io" "testing" "github.com/stretchr/testify/require" ) func Test_getChecksum(t *testing.T) { type args struct { fname string hashLen int r io.Reader } tests := []struct { name string args args want []string }{ { "openwrt-x86-64-rootfs.tar.gz", args{ "openwrt-x86-64-rootfs.tar.gz", 64, bytes.NewBufferString(`8b194c619b65d675da15d190fe7c7d2ce6125debc98452e30890c16212aa7b1c *openwrt-imagebuilder-x86-64.Linux-x86_64.tar.xz d99669ef301129e6ba59417ff41814dd02b4bdbe7254e2c8535de5eae35801ad *openwrt-sdk-x86-64_gcc-8.4.0_musl.Linux-x86_64.tar.xz 84be5c09beb3791c574a35b9e73dcb7b7637482f83ed61fbe07cd0af68987cf8 *openwrt-x86-64-generic-ext4-combined-efi.img.gz 23d9ac551d0cd9c85458d4032ae030f33f5f6b44158866130c3065f2a121b641 *openwrt-x86-64-generic-ext4-combined.img.gz 4462e51e9b325e107b57a3b44aef176837fcee0ae8ccc01c1e239e343c9666e0 *openwrt-x86-64-generic-ext4-rootfs.img.gz 643ff73b119f3ecb36497a0c71213f9dd0129b64e803fa87d7e75b39c730e7fa *openwrt-x86-64-generic-kernel.bin 770fa5a3e47ed12f46114aca6dca16a1a4ba2b6e89e53d5966839ffc5581dc53 *openwrt-x86-64-generic-squashfs-combined-efi.img.gz 1a19c82c0614ad043fa0b854249bf6cc804550359ec453816ffbd426c31ab4a2 *openwrt-x86-64-generic-squashfs-combined.img.gz 3b961a97e3105e02e07c1aba7671186efe559ce0ac078c370d5082a7a6999dbe *openwrt-x86-64-generic-squashfs-rootfs.img.gz 76cc26429a61a516d348735a8d62bf3885d9d37489f20789a77c879dcf8a1025 *openwrt-x86-64-rootfs.tar.gz`), }, []string{"76cc26429a61a516d348735a8d62bf3885d9d37489f20789a77c879dcf8a1025"}, }, { "stage3-ppc64le-20200414T103003Z.tar.xz", args{ "stage3-ppc64le-20200414T103003Z.tar.xz", 128, bytes.NewBufferString(`# BLAKE2 (b2sum) HASH 2c5dc7ce04e4d72204a513e4bfa4bd0129e61a060747537ca748538ea8ed6016656f84c35b4cf2049df91a164977d1d0e506e722443fdb48874e9a0b90c00f7a /var/tmp/catalyst/builds/default/stage3-ppc64le-20200414T103003Z.tar.xz # SHA512 HASH e4b9cb10146502310cbedf14197afa9e94b75f7d59c1c6977bff23bac529e9114e3fddb155cfcad9119e466a39f0fcd8d75354e5237da79c9289fe76ee77693d stage3-ppc64le-20200414T103003Z.tar.xz # BLAKE2 (b2sum) HASH 7e1a1985a41b61ac24c4fdefe7a09237161dc7ff20150f3e02c73115b74778f96c45042ced08e38c931ad6e316dfef80ac3a4c956fcd16528819dd506a320726 /var/tmp/catalyst/builds/default/stage3-ppc64le-20200414T103003Z.tar.xz.CONTENTS # SHA512 HASH 1047f97cbb209fb22d372dffe4461722b5eaf936fc73546a8f036dc52a5d20433921d367288b28b3de5154cad1253b40d32233104c2be45732ebfa413bd9b09b 
stage3-ppc64le-20200414T103003Z.tar.xz.CONTENTS`), }, []string{"e4b9cb10146502310cbedf14197afa9e94b75f7d59c1c6977bff23bac529e9114e3fddb155cfcad9119e466a39f0fcd8d75354e5237da79c9289fe76ee77693d"}, }, { "CentOS-8-x86_64-1905-dvd1.iso", args{ "CentOS-8-x86_64-1905-dvd1.iso", 64, bytes.NewBufferString(`-----BEGIN PGP SIGNED MESSAGE----- Hash: SHA256 # CentOS-8-x86_64-1905-boot.iso: 559939584 bytes SHA256 (CentOS-8-x86_64-1905-boot.iso) = a7993a0d4b7fef2433e0d4f53530b63c715d3aadbe91f152ee5c3621139a2cbc # CentOS-8-x86_64-1905-dvd1.iso: 7135559680 bytes SHA256 (CentOS-8-x86_64-1905-dvd1.iso) = ea17ef71e0df3f6bf1d4bf1fc25bec1a76d1f211c115d39618fe688be34503e8 -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQIVAwUBXYirdQW1VbOEg8ZdAQigchAAj+LbZtV7BQTnfB3i+fzECuomjsTZE8Ki zUs9fLA67aayBL1KiavIzURMgjqj/+dXWr73Kv49pELngrznPlEPOclCaPkAKSe0 V2Nj56AUhT/tHGcBoNvD0UrC0nCObMLx6PI2FDEozEELyQR32Syjtb0y5CDnxRvX 6JeGWPWQsf+jXdZS/GUUh39XR5va5YAwues0qLfqNf7nfUk07tmU0pcCG+vRN13H 45av+1/49zbxn4Y/Km2AaAbmqX8LlQpppVYE2K5V73YsG3o6eSU1DwjDijQHYPOK ZUixjbhh5xkOzvhv5HUETvPncbnOez+xLwDPFAMFz/jX/4BgLWpA1/PM/3xcFFij qXBlZh+QLWm1Z8UCBftDc+RqoktI460cqL/SsnOyHmQ+95QLt20yR46hi3oZ6/Cv cUdXaql3iCNWZUvi27Dr8bExqaVaJn0zeDCItPWUA7NwxXP2TlGs2VXC4E37HQhZ SyuCQZMrwGmDJl7gMOE7kZ/BifKvrycAlvTPvhq0jrUwLvokX8QhoTmAwRdzwGSk 9nS+BkoK7xW5lSATuVYEcCkb2fL+qDKuSBJMuKhQNhPs6rN5OEZL3gU54so7Jyz9 OmR+r+1+/hELjOIsPcR4IiyauJQXXgtJ28G7swMsrl07PYHOU+awvB/N9GyUzNAM RP3G/3Z1T3c= =HgZm -----END PGP SIGNATURE-----`), }, []string{"ea17ef71e0df3f6bf1d4bf1fc25bec1a76d1f211c115d39618fe688be34503e8"}, }, { "CentOS-7-x86_64-Minimal-1908.iso", args{ "CentOS-7-x86_64-Minimal-1908.iso", 64, bytes.NewBufferString(`-----BEGIN PGP SIGNED MESSAGE----- Hash: SHA256 9bba3da2876cb9fcf6c28fb636bcbd01832fe6d84cd7445fa58e44e569b3b4fe CentOS-7-x86_64-DVD-1908.iso bd5e6ca18386e8a8e0b5a9e906297b5610095e375e4d02342f07f32022b13acf CentOS-7-x86_64-Everything-1908.iso ba827210d4eb9313fc19120b9b85e7baef234c7f81bc55847a336114ddac20cb CentOS-7-x86_64-LiveGNOME-1908.iso 0ef3310d13f7fc140ec5180dc05369d2f473e802577466825205d17e46ef5a9b CentOS-7-x86_64-LiveKDE-1908.iso 9a2c47d97b9975452f7d582264e9fc16d108ed8252ac6816239a3b58cef5c53d CentOS-7-x86_64-Minimal-1908.iso 6ffa7ad44e8716e4cd6a5c3a85ba5675a935fc0448c260f43b12311356ba85ad CentOS-7-x86_64-NetInstall-1908.iso -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQIVAwUBXYDRPyTGqKf0qA61AQhHcg/+LvGu95Y825HoUpS9JPFIb7axkIj8fx5/ Qw2fN+BQtd7W7jcUNmofaajjWyqP5b5Q0iCyNrbhAT6CO4lVVY1z+OxCefAk/Wve go1fSY5cRn7LRtvDuKrkDHJE+nYCVBg8ksWRBm2Xwx2sy4AxP2PAs7Oh3QvkK+9V 199YPLAQ+m4cFdBTTR3Dl78OEKVgjp5O351n4q0pKp72jxhjCZ+tk+dWGg9JEBSb 53nMkwnqTWZzFYpLqGc3fOfscc38oIvet0y3gVbZLNsE25AwwMxqjlC/Z2TqXwc5 1JoZI7XkKggWH6fA4BuzcOtezGMPMPDaqnNhfAWzYq3CsQAA8aQuQaCnGoG2dNN/ fdhGRrbXdpAFbKhfQ/dbKSvDGNvZTFfRfD9m5AJ/ddUAv7DFr4VeVur1KMTqtVO2 NvcLRn7BnkN7ZRqvqdT4kDyndWgQCABahqI6OcC8mmc449JecloQK4U1zGhKMRor 33OtMEW/KhnSOu9pK6+CRnPykyIk2yxUCJ11YFXCKNKfX2cmdFf0puUsmefB6O7E 1nVE3n0aZVSVmebl3sjVJvstT2oyVNynnSQ/Fw3NBAiHe5FvgUnVqHQKyg1nnTet hsfTg6egTQUGOB2fVgt7n3p1HIvCjXAjKo6Wa3R8+aoapQ74Gcok3I3rNoL1jWbW Z4iksZrx82g= =L746 -----END PGP SIGNATURE-----`), }, []string{"9a2c47d97b9975452f7d582264e9fc16d108ed8252ac6816239a3b58cef5c53d"}, }, { "stage3-ppc-20200518T100528Z.tar.xz", args{ "stage3-ppc-20200518T100528Z.tar.xz", 128, bytes.NewBufferString(`# BLAKE2B HASH 6298bdc913c83190f6aa5f7399f05f2f1a20b1997479f033a261fa2e8347fd7cee67900a761c47b7c1c8370127e58016dd90d58f2f37b7f0d5e16722ba0650d2 stage3-ppc-20200518T100528Z.tar.xz # SHA512 HASH 
2d0183b8151e4560c317c2903c330f9fbfe2cc37c02ee100a60c9f42253e3ac3ef6db341c177a5e7594131bdcbdebfabe73217c0d4dc86e4dc4d1ce59ad5fbe7 stage3-ppc-20200518T100528Z.tar.xz # BLAKE2B HASH f8aeda7504be4a1374cbd837703138880baf70f8b256ee9f1f2f90cea0b669de62b14112afd2302ff03b6b410cd84f7434a79af3cb197c896a8279ca3068cdfe stage3-ppc-20200518T100528Z.tar.xz.CONTENTS.gz # SHA512 HASH 3a7dede7bcb68a0a32310d1bfbdd8806a17a1720be30907a17673f5f303dee340f5ad9c99d25738fb6f65b5ec224786b7d6b3ecbd5f37185469fbf33ea4c8c92 stage3-ppc-20200518T100528Z.tar.xz.CONTENTS.gz`), }, []string{ "6298bdc913c83190f6aa5f7399f05f2f1a20b1997479f033a261fa2e8347fd7cee67900a761c47b7c1c8370127e58016dd90d58f2f37b7f0d5e16722ba0650d2", "2d0183b8151e4560c317c2903c330f9fbfe2cc37c02ee100a60c9f42253e3ac3ef6db341c177a5e7594131bdcbdebfabe73217c0d4dc86e4dc4d1ce59ad5fbe7", }, }, } for _, tt := range tests { got := getChecksum(tt.args.fname, tt.args.hashLen, tt.args.r) require.Equal(t, tt.want, got) } } distrobuilder-3.0/sources/voidlinux-http.go000066400000000000000000000063161456216713500212570ustar00rootroot00000000000000package sources import ( "crypto/sha256" "errors" "fmt" "io" "net/http" "net/url" "path/filepath" "regexp" "strings" "github.com/lxc/distrobuilder/shared" ) type voidlinux struct { common } // Run downloads a Void Linux rootfs tarball. func (s *voidlinux) Run() error { baseURL := s.definition.Source.URL fname, err := s.getLatestBuild(baseURL, s.definition.Image.ArchitectureMapped, s.definition.Source.Variant) if err != nil { return fmt.Errorf("Failed to get latest build: %w", err) } if fname == "" { return errors.New("Failed to determine latest build") } tarball := fmt.Sprintf("%s/%s", baseURL, fname) digests := fmt.Sprintf("%s/sha256sum.txt", baseURL) signatures := fmt.Sprintf("%s/sha256sum.sig", baseURL) url, err := url.Parse(tarball) if err != nil { return fmt.Errorf("Failed to parse URL %q: %w", tarball, err) } if !s.definition.Source.SkipVerification && url.Scheme != "https" && len(s.definition.Source.Keys) == 0 { return errors.New("GPG keys are required if downloading from HTTP") } var fpath string if s.definition.Source.SkipVerification { fpath, err = s.DownloadHash(s.definition.Image, tarball, "", nil) } else { fpath, err = s.DownloadHash(s.definition.Image, tarball, digests, sha256.New()) } if err != nil { return fmt.Errorf("Failed to download %q: %w", tarball, err) } // Force gpg checks when using http if !s.definition.Source.SkipVerification && url.Scheme != "https" { _, err = s.DownloadHash(s.definition.Image, digests, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", digests, err) } _, err = s.DownloadHash(s.definition.Image, signatures, "", nil) if err != nil { return fmt.Errorf("Failed to download %q: %w", signatures, err) } valid, err := s.VerifyFile( filepath.Join(fpath, "sha256sum.txt"), filepath.Join(fpath, "sha256sum.sig")) if err != nil { return fmt.Errorf(`Failed to verify "sha256sum.txt": %w`, err) } if !valid { return errors.New(`Invalid signature for "sha256sum.txt"`) } } s.logger.WithField("file", filepath.Join(fpath, fname)).Info("Unpacking image") // Unpack err = shared.Unpack(filepath.Join(fpath, fname), s.rootfsDir) if err != nil { return fmt.Errorf("Failed to unpack %q: %w", filepath.Join(fpath, fname), err) } return nil } func (s *voidlinux) getLatestBuild(baseURL, arch, variant string) (string, error) { var ( resp *http.Response err error ) err = shared.Retry(func() error { resp, err = http.Get(baseURL) if err != nil { return fmt.Errorf("Failed to GET %q: %w", baseURL, err) } 
return nil }, 3) if err != nil { return "", err } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("Failed to read body: %w", err) } // Look for .tar.xz selector := arch if variant != "" { selector = fmt.Sprintf("%s-%s", selector, variant) } regex := regexp.MustCompile(fmt.Sprintf(">void-%s-ROOTFS-.*.tar.xz<", selector)) // Find all rootfs related files matches := regex.FindAllString(string(body), -1) if len(matches) > 0 { // Take the first match since they're all the same anyway return strings.Trim(matches[0], "<>"), nil } return "", errors.New("Failed to find latest build") } distrobuilder-3.0/sources/vyos-http.go000066400000000000000000000054621456216713500202370ustar00rootroot00000000000000package sources import ( "context" "fmt" "os" "path/filepath" "github.com/google/go-github/v56/github" "golang.org/x/sys/unix" "github.com/lxc/distrobuilder/shared" ) type vyos struct { common fname string fpath string } func (s *vyos) Run() error { err := s.downloadImage(s.definition) if err != nil { return fmt.Errorf("Failed to download image: %w", err) } return s.unpackISO(filepath.Join(s.fpath, s.fname), s.rootfsDir) } func (s *vyos) downloadImage(definition shared.Definition) error { var err error ctx := context.Background() client := github.NewClient(nil) owner := "vyos" repo := "vyos-rolling-nightly-builds" latestRelease, _, err := client.Repositories.GetLatestRelease(ctx, owner, repo) if err != nil { return fmt.Errorf("Failed to get latest release, %w", err) } isoURL := "" assets := latestRelease.Assets for _, a := range assets { ext := filepath.Ext(a.GetName()) if ext == ".iso" { isoURL = a.GetBrowserDownloadURL() s.fname = a.GetName() } } if isoURL == "" { return fmt.Errorf("Failed to get latest release URL.") } s.fpath, err = s.DownloadHash(s.definition.Image, isoURL, "", nil) return err } func (s *vyos) unpackISO(filePath string, rootfsDir string) error { isoDir, err := os.MkdirTemp(s.cacheDir, "temp_") if err != nil { return fmt.Errorf("Failed creating temporary directory: %w", err) } defer os.RemoveAll(isoDir) squashfsDir, err := os.MkdirTemp(s.cacheDir, "temp_") if err != nil { return fmt.Errorf("Failed creating temporary directory: %w", err) } defer os.RemoveAll(squashfsDir) // this is easier than doing the whole loop thing ourselves err = shared.RunCommand(s.ctx, nil, nil, "mount", "-t", "iso9660", "-o", "ro", filePath, isoDir) if err != nil { return fmt.Errorf("Failed mounting %q: %w", filePath, err) } defer func() { _ = unix.Unmount(isoDir, 0) }() squashfsImage := filepath.Join(isoDir, "live", "filesystem.squashfs") // The squashfs.img contains an image containing the rootfs, so first // mount squashfs.img err = shared.RunCommand(s.ctx, nil, nil, "mount", "-t", "squashfs", "-o", "ro", squashfsImage, squashfsDir) if err != nil { return fmt.Errorf("Failed mounting %q: %w", squashfsImage, err) } defer func() { _ = unix.Unmount(squashfsDir, 0) }() // Remove rootfsDir otherwise rsync will copy the content into the directory // itself err = os.RemoveAll(rootfsDir) if err != nil { return fmt.Errorf("Failed removing directory %q: %w", rootfsDir, err) } s.logger.WithField("file", squashfsImage).Info("Unpacking root image") // Since rootfs is read-only, we need to copy it to a temporary rootfs // directory in order to create the minimal rootfs. 
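// The trailing slash on the source path makes rsync copy the directory contents into rootfsDir rather than the directory itself.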
err = shared.RsyncLocal(s.ctx, squashfsDir+"/", rootfsDir) if err != nil { return fmt.Errorf("Failed running rsync: %w", err) } return nil } distrobuilder-3.0/test/000077500000000000000000000000001456216713500152205ustar00rootroot00000000000000distrobuilder-3.0/test/lint/000077500000000000000000000000001456216713500161665ustar00rootroot00000000000000distrobuilder-3.0/test/lint/mixed-whitespace.sh000077500000000000000000000003371456216713500217700ustar00rootroot00000000000000#!/bin/sh -eu echo "Checking for mixed tabs and spaces in shell scripts..." OUT=$(git grep --untracked -lP '\t' '*.sh' || true) if [ -n "${OUT}" ]; then echo "ERROR: mixed tabs and spaces in script: ${OUT}" exit 1 fi distrobuilder-3.0/test/lint/negated-is-bool.sh000077500000000000000000000004271456216713500215010ustar00rootroot00000000000000#!/bin/sh -eu echo "Checking usage of negated shared.Is(True|False)*() functions..." OUT=$(git grep --untracked -P '!(shared\.)?Is(True|False).*\(' '*.go' || true) if [ -n "${OUT}" ]; then echo "ERROR: negated shared.Is(True|False)*() function in script: ${OUT}" exit 1 fi distrobuilder-3.0/test/lint/newline-after-block.sh000077500000000000000000000020421456216713500223530ustar00rootroot00000000000000#!/bin/sh -eu echo "Checking that functional blocks are followed by newlines..." # Check all .go files except the protobuf bindings (.pb.go) files=$(git ls-files --cached --modified --others '*.go' ':!:*.pb.go') exit_code=0 for file in $files do # This oneliner has a few steps: # 1. sed: # a. Check for lines that contain a single closing brace (plus whitespace). # b. Move the pattern space window forward to the next line. # c. Match lines that start with a word character. This allows for a closing brace on subsequent lines. # d. Print the line number. # 2. xargs: Print the filename next to the line number of the matches (piped). # 3. If there were no matches, the file name without the line number is printed, use grep to filter it out. # 4. Replace the space with a colon to make a clickable link. RESULT=$(sed -n -e '/^\s*}\s*$/{n;/^\s*\w/{;=}}' "$file" | xargs -L 1 echo "$file" | grep -v '\.go$' | sed 's/ /:/g') if [ -n "${RESULT}" ]; then echo "${RESULT}" exit_code=1 fi done exit $exit_code distrobuilder-3.0/test/lint/no-oneline-assign-and-test.sh000077500000000000000000000003031456216713500235630ustar00rootroot00000000000000#!/bin/sh -eu echo "Checking for oneline assign & test..." # Recursively grep go files for if statements that contain assignments. ! git grep --untracked -P -n '^\s+if.*:=.*;.*{\s*$' -- '*.go' distrobuilder-3.0/test/lint/no-short-form-imports.sh000077500000000000000000000003531456216713500227330ustar00rootroot00000000000000#!/bin/sh -eu echo "Checking for short form imports..." OUT=$(git grep --untracked -n -P '^\s*import\s+"' '*.go' | grep -v ':import "C"$' || true) if [ -n "${OUT}" ]; then echo "ERROR: found short form imports: ${OUT}" exit 1 fi distrobuilder-3.0/test/lint/trailing-space.sh000077500000000000000000000003411456216713500214250ustar00rootroot00000000000000#!/bin/sh -eu echo "Checking that there are no trailing spaces in shell scripts..." OUT=$(git grep --untracked -lP "\s$" '*.sh' || true) if [ -n "${OUT}" ]; then echo "ERROR: trailing space in script: ${OUT}" exit 1 fi distrobuilder-3.0/testdata/000077500000000000000000000000001456216713500160525ustar00rootroot00000000000000distrobuilder-3.0/testdata/testfile000066400000000000000000000000271456216713500176130ustar00rootroot00000000000000I need to be verified. 
distrobuilder-3.0/testdata/testfile-invalid.asc000066400000000000000000000016111456216713500220040ustar00rootroot00000000000000-----BEGIN PGP SIGNED MESSAGE----- Hash: SHA512 I need to be verified. -----BEGIN PGP SIGNATURE----- kQIzBAEBCgAdFiEEYfHyDZhnHyXKGYh8mTQI0RN7fVEFAlqFpu8ACgkQmTQI0RN7 fVGj3w/7BCzAkG995rA/7ba371SW/5uifLKxEn/izWzuJsEO40BN0rzV53XsIqew TMhudZo2r1lF7L0KkVChCl/E//aGB5srHRmQlogJqjdyw4qCuVmTe/QMadjo67fS wSqH40p5KCQeLZ33xF60vbMwf7ZwtSesFnCsQyvhu85+FDpuexGKKKDxSmO4WjHV lL5nDZ0vtSghw3yobGWiYBQ/6MqGLkL6yK0LAY50slywTgAb5WtSE2YCTTLeJOEi PEWMWbWoRYmN9ijUowo9YP6cKj4Fz0LtbWMBuHDgvO7Zl/qrb57NxRgBM0cCzAnR zEwjRjcfK7GGk+NyfAGbeabgJT/ATI/51sB3MBJgbd+FcSt4zMUL2qfwFDtrTqK3 7NaKOUh7fVnsGeKY/4DSz0+hJy4qR9JuawDCuiS8CJHzp9LKxKmQDhFfpmFYWOOr Nqc4PifAc0OQ3n1iJGMZ0I5CSP79hRLu7FTyOEhARAz1VMR9lOmEAT+M7RcbENs6 U06mI5h5tyKyBt0cUKQSKtYGKydR2+ZGVkkjEpodcU9RpRzvBFQMU23XdtVPNnya sf3ddNIbkaWkF17oxy7PW4ZFnWbA8wATEnnWi3dPIGhRRdS2qJioXFziW/idSkUB AagCicMVQ1XDX/Hg5HrwUBrGBk1JZ3TTwzZ/kpePgry1XSLuGxI= =vSiP -----END PGP SIGNATURE----- distrobuilder-3.0/testdata/testfile.asc000066400000000000000000000016111456216713500203600ustar00rootroot00000000000000-----BEGIN PGP SIGNED MESSAGE----- Hash: SHA512 I need to be verified. -----BEGIN PGP SIGNATURE----- iQIzBAEBCgAdFiEEYfHyDZhnHyXKGYh8mTQI0RN7fVEFAlqFpu8ACgkQmTQI0RN7 fVGj3w/7BCzAkG995rA/7ba371SW/5uifLKxEn/izWzuJsEO40BN0rzV53XsIqew TMhudZo2r1lF7L0KkVChCl/E//aGB5srHRmQlogJqjdyw4qCuVmTe/QMadjo67fS wSqH40p5KCQeLZ33xF60vbMwf7ZwtSesFnCsQyvhu85+FDpuexGKKKDxSmO4WjHV lL5nDZ0vtSghw3yobGWiYBQ/6MqGLkL6yK0LAY50slywTgAb5WtSE2YCTTLeJOEi PEWMWbWoRYmN9ijUowo9YP6cKj4Fz0LtbWMBuHDgvO7Zl/qrb57NxRgBM0cCzAnR zEwjRjcfK7GGk+NyfAGbeabgJT/ATI/51sB3MBJgbd+FcSt4zMUL2qfwFDtrTqK3 7NaKOUh7fVnsGeKY/4DSz0+hJy4qR9JuawDCuiS8CJHzp9LKxKmQDhFfpmFYWOOr Nqc4PifAc0OQ3n1iJGMZ0I5CSP79hRLu7FTyOEhARAz1VMR9lOmEAT+M7RcbENs6 U06mI5h5tyKyBt0cUKQSKtYGKydR2+ZGVkkjEpodcU9RpRzvBFQMU23XdtVPNnya sf3ddNIbkaWkF17oxy7PW4ZFnWbA8wATEnnWi3dPIGhRRdS2qJioXFziW/idSkUB AagCicMVQ1XDX/Hg5HrwUBrGBk1JZ3TTwzZ/kpePgry1XSLuGxI= =vSiP -----END PGP SIGNATURE----- distrobuilder-3.0/testdata/testfile.gpg000066400000000000000000000011631456216713500203710ustar00rootroot00000000000000l  4{}Q%btestfileZI need to be verified. 3 !a g%|4{}QZ 4{}Q3Ǵ%D 3j$dSgNr? I;! `6;ϤoU9CI=TKկd[ [@4(%[؟vT YٸƁN+WIg挨Vs(I0JsrWXn|M ~voIaCCGXڙe֭ n:U1zQfADl¬;BO2kkbmgCZ)iOOۙR)kߖ^ iclafl6&ku%;jU-3-l`m~-4! 
BQ"mL*>d2F 'K8JhRDL0ZSBAIg6h#uQʱ/ 0+?E'yzm ̒6u6p^4/jAm(7{O.ϤVDG9Â1Ζ (Ht޵1\)Mn\^aY/distrobuilder-3.0/testdata/testfile.sig000066400000000000000000000010661456216713500204000ustar00rootroot000000000000003 !a g%|4{}QZ} 4{}Q(7?Pi2V9}v#~]Ǚ֡rxn;@Fi7kuI{X〮Y ßZi!]+Xǒfw:`PLUYޭ\avB%:#T?!i#3mUAkŠ}$E+eDPV"y_h0UrUK6"O7Q ՉXzߐ' .B HvW-TFWSķȽ G/.N#TJd֘V<CA*A ַ-jNS, N5WW5=\1}FC_Q9G B+lm+*!19J[S= fīt_zv5P*H1ӑhHQ 1/نkS{Ly+7Udistrobuilder-3.0/windows/000077500000000000000000000000001456216713500157335ustar00rootroot00000000000000distrobuilder-3.0/windows/driver_balloon.go000066400000000000000000000141501456216713500212640ustar00rootroot00000000000000package windows var driverBalloon = DriverInfo{ PackageName: "balloon.inf_amd64_c6bc3e0b232c3c2d", SoftwareRegistry: `[\Microsoft\Windows\CurrentVersion\Setup\PnpLockdownFiles\%SystemRoot%/System32/drivers/balloon.sys] "Class"=dword:00000004 "Owners"=hex(7):{{ infFile }},00,00,00,00 "Source"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,44,00,72,00,69,00,76,00,65,00,72,00,53,00,74,00,6f,00,72,00,65,00,5c,00,46,00,69,00,6c,00,65,00,52,00,65,00,70,00,6f,00,73,00,69,00,74,00,6f,00,72,00,79,00,5c,00,{{ packageName|toHex }},5c,00,62,00,61,00,6c,00,6c,00,6f,00,6f,00,6e,00,2e,00,73,00,79,00,73,00,00,00 `, SystemRegistry: `[\ControlSet001\Services\BALLOON] "DisplayName"=hex(1):40,00,{{ infFile|toHex }},2c,00,25,00,42,00,41,00,4c,00,4c,00,4f,00,4f,00,4e,00,2e,00,53,00,56,00,43,00,44,00,45,00,53,00,43,00,25,00,3b,00,56,00,69,00,72,00,74,00,49,00,4f,00,20,00,42,00,61,00,6c,00,6c,00,6f,00,6f,00,6e,00,20,00,53,00,65,00,72,00,76,00,69,00,63,00,65,00,00,00 "ErrorControl"=dword:00000001 "ImagePath"=hex(2):5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,64,00,72,00,69,00,76,00,65,00,72,00,73,00,5c,00,62,00,61,00,6c,00,6c,00,6f,00,6f,00,6e,00,2e,00,73,00,79,00,73,00,00,00 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Start"=dword:00000003 "Type"=dword:00000001 [\ControlSet001\Services\BALLOON\Parameters] [\ControlSet001\Services\BALLOON\Parameters\Wdf] "KmdfLibraryVersion"=hex(1):31,00,2e,00,31,00,35,00,00,00 [\ControlSet001\Services\EventLog\System\BALLOON] "EventMessageFile"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,49,00,6f,00,4c,00,6f,00,67,00,4d,00,73,00,67,00,2e,00,64,00,6c,00,6c,00,3b,00,25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,64,00,72,00,69,00,76,00,65,00,72,00,73,00,5c,00,62,00,61,00,6c,00,6c,00,6f,00,6f,00,6e,00,2e,00,73,00,79,00,73,00,00,00 "TypesSupported"=dword:00000007 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1002] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1002&SUBSYS_00051AF4&REV_00] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1045] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1045&SUBSYS_11001AF4&REV_01] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DeviceIds\{{ classGuid|lower }}] "{{ infFile }}"=hex(0): [\DriverDatabase\DriverInfFiles\{{ infFile }}] @=hex(7):{{ packageName|toHex }},00,00,00,00 "Active"=hex(1):{{ packageName|toHex }},00,00 "Configurations"=hex(7):42,00,41,00,4c,00,4c,00,4f,00,4f,00,4e,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00,00,00 
[\DriverDatabase\DriverPackages\{{ packageName }}] @=hex(1):{{ infFile|toHex }},00,00 "Catalog"=hex(1):42,00,61,00,6c,00,6c,00,6f,00,6f,00,6e,00,2e,00,63,00,61,00,74,00,00,00 "ImportDate"=hex(3):b0,78,d7,bd,ac,e9,d6,01 "InfName"=hex(1):62,00,61,00,6c,00,6c,00,6f,00,6f,00,6e,00,2e,00,69,00,6e,00,66,00,00,00 "OemPath"=hex(1):45,00,3a,00,5c,00,42,00,61,00,6c,00,6c,00,6f,00,6f,00,6e,00,5c,00,77,00,31,00,30,00,5c,00,61,00,6d,00,64,00,36,00,34,00,00,00 "Provider"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "SignerName"=hex(1):00,00 "SignerScore"=dword:0d000004 "StatusFlags"=dword:00000012 "Version"=hex(3):00,ff,09,00,00,00,00,00,7d,e9,36,4d,25,e3,ce,11,bf,c1,08,00,2b,e1,03,18,00,00,8e,c3,86,b8,d6,01,38,4a,68,00,53,00,64,00,00,00,00,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\BALLOON_Device.NT] "ConfigFlags"=dword:00000000 "ConfigScope"=dword:00000105 "Service"=hex(1):42,00,41,00,4c,00,4c,00,4f,00,4f,00,4e,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1002] "Configuration"=hex(1):42,00,41,00,4c,00,4c,00,4f,00,4f,00,4e,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,62,00,61,00,6c,00,6c,00,6f,00,6f,00,6e,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1002&SUBSYS_00051AF4&REV_00] "Configuration"=hex(1):42,00,41,00,4c,00,4c,00,4f,00,4f,00,4e,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,62,00,61,00,6c,00,6c,00,6f,00,6f,00,6e,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1045] "Configuration"=hex(1):42,00,41,00,4c,00,4c,00,4f,00,4f,00,4e,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,62,00,61,00,6c,00,6c,00,6f,00,6f,00,6e,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1045&SUBSYS_11001AF4&REV_01] "Configuration"=hex(1):42,00,41,00,4c,00,4c,00,4f,00,4f,00,4e,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,62,00,61,00,6c,00,6c,00,6f,00,6f,00,6e,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Strings] "balloon.devicedesc"=hex(1):56,00,69,00,72,00,74,00,49,00,4f,00,20,00,42,00,61,00,6c,00,6c,00,6f,00,6f,00,6e,00,20,00,44,00,72,00,69,00,76,00,65,00,72,00,00,00 "vendor"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 `, } distrobuilder-3.0/windows/driver_netkvm.go000066400000000000000000000027431456216713500211470ustar00rootroot00000000000000package windows var driverNetKVM = DriverInfo{ PackageName: "netkvm.inf_amd64_805ee20efb26a964", 
DriversRegistry: `[\DriverDatabase\DeviceIds\pci\VEN_1AF4&DEV_1000] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\pci\VEN_1AF4&DEV_1000&SUBSYS_00011AF4&REV_00] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DeviceIds\pci\VEN_1AF4&DEV_1041] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\pci\VEN_1AF4&DEV_1041&SUBSYS_11001AF4&REV_01] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DeviceIds\{{ classGuid|lower }}] "{{ infFile }}"=hex(0): [\DriverDatabase\DriverInfFiles\{{ infFile }}] @=hex(7):{{ packageName|toHex }},00,00,00,00 "Active"=hex(1):{{ packageName|toHex }},00,00 [\DriverDatabase\DriverPackages\{{ packageName }}] @=hex(1):{{ infFile|toHex }},00,00 "Catalog"=hex(1):6e,00,65,00,74,00,6b,00,76,00,6d,00,2e,00,63,00,61,00,74,00,00,00 "ImportDate"=hex(3):40,8f,c3,dd,bd,e9,d6,01 "InfName"=hex(1):6e,00,65,00,74,00,6b,00,76,00,6d,00,2e,00,69,00,6e,00,66,00,00,00 "OemPath"=hex(1):45,00,3a,00,5c,00,4e,00,65,00,74,00,4b,00,56,00,4d,00,5c,00,77,00,31,00,30,00,5c,00,61,00,6d,00,64,00,36,00,34,00,00,00 "Provider"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "SignerName"=hex(1):00,00 "SignerScore"=dword:0d000004 "StatusFlags"=dword:00000012 "Version"=hex(3):00,ff,09,00,00,00,00,00,72,e9,36,4d,25,e3,ce,11,bf,c1,08,00,2b,e1,03,18,00,00,8e,c3,86,b8,d6,01,38,4a,68,00,53,00,64,00,00,00,00,00,00,00,00,00 `, } distrobuilder-3.0/windows/driver_viofs.go000066400000000000000000000121621456216713500207650ustar00rootroot00000000000000package windows var driverViofs = DriverInfo{ PackageName: "viofs.inf_amd64_0ea369d39d8c7227", SoftwareRegistry: `[\Microsoft\Windows\CurrentVersion\Setup\PnpLockdownFiles\%SystemRoot%/System32/drivers/viofs.sys] "Class"=dword:00000004 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Source"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,44,00,72,00,69,00,76,00,65,00,72,00,53,00,74,00,6f,00,72,00,65,00,5c,00,46,00,69,00,6c,00,65,00,52,00,65,00,70,00,6f,00,73,00,69,00,74,00,6f,00,72,00,79,00,5c,00,76,00,69,00,6f,00,66,00,73,00,2e,00,69,00,6e,00,66,00,5f,00,61,00,6d,00,64,00,36,00,34,00,5f,00,30,00,65,00,61,00,33,00,36,00,39,00,64,00,33,00,39,00,64,00,38,00,63,00,37,00,32,00,32,00,37,00,5c,00,76,00,69,00,6f,00,66,00,73,00,2e,00,73,00,79,00,73,00,00,00 `, SystemRegistry: `[\ControlSet001\Services\VirtioFsDrv] "DisplayName"=hex(1):40,00,{{ infFile|toHex }},2c,00,25,00,56,00,69,00,72,00,74,00,69,00,6f,00,46,00,73,00,2e,00,53,00,65,00,72,00,76,00,69,00,63,00,65,00,25,00,3b,00,56,00,69,00,72,00,74,00,49,00,4f,00,20,00,46,00,53,00,20,00,44,00,72,00,69,00,76,00,65,00,72,00,00,00 "ErrorControl"=dword:00000001 "Group"=hex(1):45,00,78,00,74,00,65,00,6e,00,64,00,65,00,64,00,20,00,42,00,61,00,73,00,65,00,00,00 "ImagePath"=hex(2):5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,64,00,72,00,69,00,76,00,65,00,72,00,73,00,5c,00,76,00,69,00,6f,00,66,00,73,00,2e,00,73,00,79,00,73,00,00,00 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Start"=dword:00000003 "Tag"=dword:0000000a "Type"=dword:00000001 [\ControlSet001\Services\VirtioFsDrv\Parameters] [\ControlSet001\Services\VirtioFsDrv\Parameters\Wdf] "KmdfLibraryVersion"=hex(1):31,00,2e,00,31,00,35,00,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_105A] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_105A&SUBSYS_00041AF4&REV_00] "{{ infFile }}"=hex(3):01,ff,00,00 
[\DriverDatabase\DeviceIds\{{ classGuid|lower }}] "{{ infFile }}"=hex(0): [\DriverDatabase\DriverInfFiles\{{ infFile }}] @=hex(7):{{ packageName|toHex }},00,00,00,00 "Active"=hex(1):{{ packageName|toHex }},00,00 "Configurations"=hex(7):56,00,69,00,72,00,74,00,69,00,6f,00,46,00,73,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}] @=hex(1):{{ infFile|toHex }},00,00 "Catalog"=hex(1):76,00,69,00,6f,00,66,00,73,00,2e,00,63,00,61,00,74,00,00,00 "ImportDate"=hex(3):60,e0,9c,62,9e,ee,d6,01 "InfName"=hex(1):76,00,69,00,6f,00,66,00,73,00,2e,00,69,00,6e,00,66,00,00,00 "OemPath"=hex(1):45,00,3a,00,5c,00,76,00,69,00,6f,00,66,00,73,00,5c,00,77,00,31,00,30,00,5c,00,61,00,6d,00,64,00,36,00,34,00,00,00 "Provider"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "SignerName"=hex(1):00,00 "SignerScore"=dword:0d000004 "StatusFlags"=dword:00000012 "Version"=hex(3):00,ff,09,00,00,00,00,00,7d,e9,36,4d,25,e3,ce,11,bf,c1,08,00,2b,e1,03,18,00,00,8e,c3,86,b8,d6,01,38,4a,68,00,53,00,64,00,00,00,00,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtioFs_Device.NT] "ConfigFlags"=dword:00000000 "ConfigScope"=dword:00000107 "Service"=hex(1):56,00,69,00,72,00,74,00,69,00,6f,00,46,00,73,00,44,00,72,00,76,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtioFs_Device.NT\Device] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtioFs_Device.NT\Device\Interrupt Management] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtioFs_Device.NT\Device\Interrupt Management\MessageSignaledInterruptProperties] "MSISupported"=dword:00000001 "MessageNumberLimit"=dword:00000001 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_105A] "Configuration"=hex(1):56,00,69,00,72,00,74,00,69,00,6f,00,46,00,73,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,69,00,6f,00,66,00,73,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_105A&SUBSYS_00041AF4&REV_00] "Configuration"=hex(1):56,00,69,00,72,00,74,00,69,00,6f,00,46,00,73,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,69,00,6f,00,66,00,73,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Strings] "vendor"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "virtiofs.devicedesc"=hex(1):56,00,69,00,72,00,74,00,49,00,4f,00,20,00,46,00,53,00,20,00,44,00,65,00,76,00,69,00,63,00,65,00,00,00 `, } distrobuilder-3.0/windows/driver_viogpudo.go000066400000000000000000000044341456216713500214760ustar00rootroot00000000000000package windows var driverVioGPUDo = DriverInfo{ PackageName: "viogpudo.inf_amd64_8224060246e67964", DriversRegistry: `[\DriverDatabase\DeviceIds\{{ classGuid|lower }}] "{{ infFile }}"=hex(0): 
[\DriverDatabase\DeviceIds\pci\VEN_1AF4&DEV_1050&SUBSYS_11001AF4&REV_01] "{{ infFile }}"=hex(3):01,f9,00,00 [\DriverDatabase\DriverInfFiles\{{ infFile }}] @=hex(7):{{ packageName|toHex }},00,00,00,00 "Active"=hex(1):{{ packageName|toHex }},00,00 [\DriverDatabase\DriverPackages\{{ packageName }}] @=hex(1):6f,00,65,00,6d,00,30,00,2e,00,69,00,6e,00,66,00,00,00 "Catalog"=hex(1):76,00,69,00,6f,00,67,00,70,00,75,00,64,00,6f,00,2e,00,63,00,61,00,74,00,00,00 "ImportDate"=hex(3):80,13,e8,66,f6,9d,d7,01 "InfName"=hex(1):76,00,69,00,6f,00,67,00,70,00,75,00,64,00,6f,00,2e,00,69,00,6e,00,66,00,00,00 "OemPath"=hex(1):5c,00,5c,00,31,00,39,00,32,00,2e,00,31,00,36,00,38,00,2e,00,31,00,37,00,38,00,2e,00,37,00,30,00,5c,00,73,00,68,00,61,00,72,00,65,00,64,00,5c,00,76,00,69,00,72,00,74,00,69,00,6f,00,5c,00,76,00,69,00,6f,00,67,00,70,00,75,00,64,00,6f,00,5c,00,77,00,31,00,30,00,5c,00,61,00,6d,00,64,00,36,00,34,00,00,00 "Provider"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "SignerName"=hex(1):4d,00,69,00,63,00,72,00,6f,00,73,00,6f,00,66,00,74,00,20,00,57,00,69,00,6e,00,64,00,6f,00,77,00,73,00,20,00,48,00,61,00,72,00,64,00,77,00,61,00,72,00,65,00,20,00,43,00,6f,00,6d,00,70,00,61,00,74,00,69,00,62,00,69,00,6c,00,69,00,74,00,79,00,20,00,50,00,75,00,62,00,6c,00,69,00,73,00,68,00,65,00,72,00,00,00 "SignerScore"=dword:0d000005 "StatusFlags"=dword:00000512 "Version"=hex(3):00,ff,09,00,00,00,00,00,68,e9,36,4d,25,e3,ce,11,bf,c1,08,00,2b,e1,03,18,00,40,ef,05,7a,77,d7,01,b0,4f,68,00,55,00,64,00,00,00,00,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Properties] [\DriverDatabase\DriverPackages\{{ packageName }}\Properties\{4da162c1-5eb1-4140-a444-5064c9814e76}] [\DriverDatabase\DriverPackages\{{ packageName }}\Properties\{4da162c1-5eb1-4140-a444-5064c9814e76}\0009] @=hex(ffff0012):33,00,30,00,30,00,39,00,37,00,37,00,37,00,30,00,5f,00,31,00,34,00,31,00,35,00,35,00,36,00,33,00,31,00,34,00,35,00,36,00,37,00,30,00,36,00,35,00,39,00,32,00,5f,00,31,00,31,00,35,00,32,00,39,00,32,00,31,00,35,00,30,00,35,00,36,00,39,00,33,00,36,00,38,00,33,00,38,00,34,00,38,00,00,00 `, } distrobuilder-3.0/windows/driver_vioinput.go000066400000000000000000000171261456216713500215210ustar00rootroot00000000000000package windows var driverVioinput = DriverInfo{ PackageName: "vioinput.inf_amd64_e4dfa6cdfd16da9a", SoftwareRegistry: `[\Microsoft\Windows\CurrentVersion\Setup\PnpLockdownFiles\%SystemRoot%/System32/drivers/viohidkmdf.sys] "Class"=dword:00000004 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Source"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,44,00,72,00,69,00,76,00,65,00,72,00,53,00,74,00,6f,00,72,00,65,00,5c,00,46,00,69,00,6c,00,65,00,52,00,65,00,70,00,6f,00,73,00,69,00,74,00,6f,00,72,00,79,00,5c,00,76,00,69,00,6f,00,69,00,6e,00,70,00,75,00,74,00,2e,00,69,00,6e,00,66,00,5f,00,61,00,6d,00,64,00,36,00,34,00,5f,00,65,00,34,00,64,00,66,00,61,00,36,00,63,00,64,00,66,00,64,00,31,00,36,00,64,00,61,00,39,00,61,00,5c,00,76,00,69,00,6f,00,68,00,69,00,64,00,6b,00,6d,00,64,00,66,00,2e,00,73,00,79,00,73,00,00,00 [\Microsoft\Windows\CurrentVersion\Setup\PnpLockdownFiles\%SystemRoot%/System32/drivers/vioinput.sys] "Class"=dword:00000004 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 
"Source"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,44,00,72,00,69,00,76,00,65,00,72,00,53,00,74,00,6f,00,72,00,65,00,5c,00,46,00,69,00,6c,00,65,00,52,00,65,00,70,00,6f,00,73,00,69,00,74,00,6f,00,72,00,79,00,5c,00,76,00,69,00,6f,00,69,00,6e,00,70,00,75,00,74,00,2e,00,69,00,6e,00,66,00,5f,00,61,00,6d,00,64,00,36,00,34,00,5f,00,65,00,34,00,64,00,66,00,61,00,36,00,63,00,64,00,66,00,64,00,31,00,36,00,64,00,61,00,39,00,61,00,5c,00,76,00,69,00,6f,00,69,00,6e,00,70,00,75,00,74,00,2e,00,73,00,79,00,73,00,00,00 `, SystemRegistry: `[\DriverDatabase\DeviceIds\{{ classGuid|lower }}] "{{ infFile }}"=hex(0): [\ControlSet001\Services\VirtioInput] "DisplayName"=hex(1):40,00,{{ infFile|toHex }},2c,00,25,00,56,00,69,00,72,00,74,00,69,00,6f,00,49,00,6e,00,70,00,75,00,74,00,2e,00,53,00,65,00,72,00,76,00,69,00,63,00,65,00,44,00,65,00,73,00,63,00,25,00,3b,00,56,00,69,00,72,00,74,00,49,00,4f,00,20,00,49,00,6e,00,70,00,75,00,74,00,20,00,53,00,65,00,72,00,76,00,69,00,63,00,65,00,00,00 "ErrorControl"=dword:00000001 "ImagePath"=hex(2):5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,64,00,72,00,69,00,76,00,65,00,72,00,73,00,5c,00,{{ driverName|toHex }},2e,00,73,00,79,00,73,00,00,00 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Start"=dword:00000003 "Type"=dword:00000001 [\ControlSet001\Services\VirtioInput\Parameters] [\ControlSet001\Services\VirtioInput\Parameters\Wdf] "KmdfLibraryVersion"=hex(1):31,00,2e,00,31,00,35,00,00,00 [\ControlSet001\Services\viohidkmdf] "ErrorControl"=dword:00000001 "ImagePath"=hex(2):5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,64,00,72,00,69,00,76,00,65,00,72,00,73,00,5c,00,76,00,69,00,6f,00,68,00,69,00,64,00,6b,00,6d,00,64,00,66,00,2e,00,73,00,79,00,73,00,00,00 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Start"=dword:00000003 "Type"=dword:00000001 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1052] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1052&SUBSYS_11001AF4&REV_01] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DeviceIds\VIOINPUT] [\DriverDatabase\DeviceIds\VIOINPUT\REV_01] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DriverInfFiles\{{ infFile }}] @=hex(7):{{ packageName|toHex }},00,00,00,00 "Active"=hex(1):{{ packageName|toHex }},00,00 "Configurations"=hex(7):56,00,69,00,72,00,74,00,69,00,6f,00,49,00,6e,00,70,00,75,00,74,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00,56,00,69,00,72,00,74,00,69,00,6f,00,49,00,6e,00,70,00,75,00,74,00,5f,00,43,00,68,00,69,00,6c,00,64,00,2e,00,4e,00,54,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}] @=hex(1):{{ infFile|toHex }},00,00 "Catalog"=hex(1):{{ driverName|toHex }},2e,00,63,00,61,00,74,00,00,00 "ImportDate"=hex(3):b0,1d,ca,bf,fb,e7,d6,01 "InfName"=hex(1):{{ driverName|toHex }},2e,00,69,00,6e,00,66,00,00,00 "OemPath"=hex(1):45,00,3a,00,5c,00,{{ driverName|toHex }},5c,00,77,00,31,00,30,00,5c,00,61,00,6d,00,64,00,36,00,34,00,00,00 "Provider"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "SignerName"=hex(1):00,00 "SignerScore"=dword:0d000004 "StatusFlags"=dword:00000012 "Version"=hex(3):00,ff,09,00,00,00,00,00,a0,17,5a,74,d3,74,d0,11,b6,fe,00,a0,c9,0f,57,da,00,00,8e,c3,86,b8,d6,01,38,4a,68,00,53,00,64,00,00,00,00,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ 
packageName }}\Configurations] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtioInput_Child.NT] "ConfigFlags"=dword:00000000 "ConfigScope"=dword:00000005 "Service"=hex(1):76,00,69,00,6f,00,68,00,69,00,64,00,6b,00,6d,00,64,00,66,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtioInput_Device.NT] "ConfigFlags"=dword:00000000 "ConfigScope"=dword:00000107 "Service"=hex(1):56,00,69,00,72,00,74,00,69,00,6f,00,49,00,6e,00,70,00,75,00,74,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtioInput_Device.NT\Device] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtioInput_Device.NT\Device\Interrupt Management] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtioInput_Device.NT\Device\Interrupt Management\MessageSignaledInterruptProperties] "MSISupported"=dword:00000001 "MessageNumberLimit"=dword:00000002 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1052] "Configuration"=hex(1):56,00,69,00,72,00,74,00,69,00,6f,00,49,00,6e,00,70,00,75,00,74,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,69,00,6f,00,69,00,6e,00,70,00,75,00,74,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1052&SUBSYS_11001AF4&REV_01] "Configuration"=hex(1):56,00,69,00,72,00,74,00,69,00,6f,00,49,00,6e,00,70,00,75,00,74,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,69,00,6f,00,69,00,6e,00,70,00,75,00,74,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\VIOINPUT] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\VIOINPUT\REV_01] "Configuration"=hex(1):56,00,69,00,72,00,74,00,69,00,6f,00,49,00,6e,00,70,00,75,00,74,00,5f,00,43,00,68,00,69,00,6c,00,64,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,69,00,6f,00,69,00,6e,00,70,00,75,00,74,00,2e,00,63,00,68,00,69,00,6c,00,64,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Strings] "vendor"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "virtioinput.childdesc"=hex(1):56,00,69,00,72,00,74,00,49,00,4f,00,20,00,49,00,6e,00,70,00,75,00,74,00,20,00,44,00,72,00,69,00,76,00,65,00,72,00,20,00,48,00,65,00,6c,00,70,00,65,00,72,00,00,00 "virtioinput.devicedesc"=hex(1):56,00,69,00,72,00,74,00,49,00,4f,00,20,00,49,00,6e,00,70,00,75,00,74,00,20,00,44,00,72,00,69,00,76,00,65,00,72,00,00,00 `, } distrobuilder-3.0/windows/driver_viorng.go000066400000000000000000000162121456216713500211430ustar00rootroot00000000000000package windows var driverViorng = DriverInfo{ PackageName: "viorng.inf_amd64_7a2fd1621d2572b5", SoftwareRegistry: `[\Microsoft\Windows\CurrentVersion\Setup\PnpLockdownFiles\%SystemRoot%/System32/drivers/viorng.sys] "Class"=dword:00000004 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 
"Source"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,44,00,72,00,69,00,76,00,65,00,72,00,53,00,74,00,6f,00,72,00,65,00,5c,00,46,00,69,00,6c,00,65,00,52,00,65,00,70,00,6f,00,73,00,69,00,74,00,6f,00,72,00,79,00,5c,00,{{ packageName|toHex }},5c,00,76,00,69,00,6f,00,72,00,6e,00,67,00,2e,00,73,00,79,00,73,00,00,00 [\Microsoft\Windows\CurrentVersion\Setup\PnpLockdownFiles\%SystemRoot%/System32/viorngci.dll] "Class"=dword:00000004 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Source"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,44,00,72,00,69,00,76,00,65,00,72,00,53,00,74,00,6f,00,72,00,65,00,5c,00,46,00,69,00,6c,00,65,00,52,00,65,00,70,00,6f,00,73,00,69,00,74,00,6f,00,72,00,79,00,5c,00,{{ packageName|toHex }},5c,00,76,00,69,00,6f,00,72,00,6e,00,67,00,63,00,69,00,2e,00,64,00,6c,00,6c,00,00,00 [\Microsoft\Windows\CurrentVersion\Setup\PnpLockdownFiles\%SystemRoot%/System32/viorngum.dll] "Class"=dword:00000004 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Source"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,44,00,72,00,69,00,76,00,65,00,72,00,53,00,74,00,6f,00,72,00,65,00,5c,00,46,00,69,00,6c,00,65,00,52,00,65,00,70,00,6f,00,73,00,69,00,74,00,6f,00,72,00,79,00,5c,00,{{ packageName|toHex }},5c,00,76,00,69,00,6f,00,72,00,6e,00,67,00,75,00,6d,00,2e,00,64,00,6c,00,6c,00,00,00 `, SystemRegistry: `[\ControlSet001\Services\VirtRng] "DisplayName"=hex(1):40,00,{{ infFile|toHex }},2c,00,25,00,56,00,69,00,72,00,74,00,52,00,6e,00,67,00,2e,00,53,00,65,00,72,00,76,00,69,00,63,00,65,00,25,00,3b,00,56,00,69,00,72,00,74,00,49,00,4f,00,20,00,52,00,4e,00,47,00,20,00,53,00,65,00,72,00,76,00,69,00,63,00,65,00,00,00 "ErrorControl"=dword:00000001 "Group"=hex(1):45,00,78,00,74,00,65,00,6e,00,64,00,65,00,64,00,20,00,42,00,61,00,73,00,65,00,00,00 "ImagePath"=hex(2):5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,64,00,72,00,69,00,76,00,65,00,72,00,73,00,5c,00,76,00,69,00,6f,00,72,00,6e,00,67,00,2e,00,73,00,79,00,73,00,00,00 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Start"=dword:00000003 "Tag"=dword:0000000a "Type"=dword:00000001 [\ControlSet001\Services\VirtRng\Parameters] [\ControlSet001\Services\VirtRng\Parameters\Wdf] "KmdfLibraryVersion"=hex(1):31,00,2e,00,31,00,35,00,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1005] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1005&SUBSYS_00041AF4&REV_00] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1044] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1044&SUBSYS_11001AF4&REV_01] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DeviceIds\{{ classGuid|lower }}] "{{ infFile }}"=hex(0): [\DriverDatabase\DriverInfFiles\{{ infFile }}] @=hex(7):{{ packageName|toHex }},00,00,00,00 "Active"=hex(1):{{ packageName|toHex }},00,00 "Configurations"=hex(7):56,00,69,00,72,00,74,00,52,00,6e,00,67,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}] @=hex(1):{{ infFile|toHex }},00,00 "Catalog"=hex(1):76,00,69,00,6f,00,72,00,6e,00,67,00,2e,00,63,00,61,00,74,00,00,00 "ImportDate"=hex(3):90,47,15,b3,75,e9,d6,01 
"InfName"=hex(1):76,00,69,00,6f,00,72,00,6e,00,67,00,2e,00,69,00,6e,00,66,00,00,00 "OemPath"=hex(1):45,00,3a,00,5c,00,76,00,69,00,6f,00,72,00,6e,00,67,00,5c,00,77,00,31,00,30,00,5c,00,61,00,6d,00,64,00,36,00,34,00,00,00 "Provider"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "SignerName"=hex(1):00,00 "SignerScore"=dword:0d000004 "StatusFlags"=dword:0000001a "Version"=hex(3):00,ff,09,00,00,00,00,00,7d,e9,36,4d,25,e3,ce,11,bf,c1,08,00,2b,e1,03,18,00,00,8e,c3,86,b8,d6,01,38,4a,68,00,53,00,64,00,00,40,00,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtRng_Device.NT] "ConfigFlags"=dword:00000400 "ConfigScope"=dword:00000107 "Service"=hex(1):56,00,69,00,72,00,74,00,52,00,6e,00,67,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtRng_Device.NT\Device] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtRng_Device.NT\Device\Interrupt Management] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtRng_Device.NT\Device\Interrupt Management\MessageSignaledInterruptProperties] "MSISupported"=dword:00000001 "MessageNumberLimit"=dword:00000001 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1005] "Configuration"=hex(1):56,00,69,00,72,00,74,00,52,00,6e,00,67,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,72,00,6e,00,67,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1005&SUBSYS_00041AF4&REV_00] "Configuration"=hex(1):56,00,69,00,72,00,74,00,52,00,6e,00,67,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,72,00,6e,00,67,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1044] "Configuration"=hex(1):56,00,69,00,72,00,74,00,52,00,6e,00,67,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,72,00,6e,00,67,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1044&SUBSYS_11001AF4&REV_01] "Configuration"=hex(1):56,00,69,00,72,00,74,00,52,00,6e,00,67,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,72,00,6e,00,67,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Strings] "vendor"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "virtrng.devicedesc"=hex(1):56,00,69,00,72,00,74,00,49,00,4f,00,20,00,52,00,4e,00,47,00,20,00,44,00,65,00,76,00,69,00,63,00,65,00,00,00 `, } 
distrobuilder-3.0/windows/driver_vioscsi.go000066400000000000000000000164161456216713500213240ustar00rootroot00000000000000package windows var driverVioscsi = DriverInfo{ PackageName: "vioscsi.inf_amd64_78d23e29bdcf3e06", SoftwareRegistry: `[\Microsoft\Windows\CurrentVersion\Setup\PnpLockdownFiles\%SystemRoot%/System32/drivers/vioscsi.sys] "Class"=dword:00000005 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Source"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,44,00,72,00,69,00,76,00,65,00,72,00,53,00,74,00,6f,00,72,00,65,00,5c,00,46,00,69,00,6c,00,65,00,52,00,65,00,70,00,6f,00,73,00,69,00,74,00,6f,00,72,00,79,00,5c,00,76,00,69,00,6f,00,73,00,63,00,73,00,69,00,2e,00,69,00,6e,00,66,00,5f,00,61,00,6d,00,64,00,36,00,34,00,5f,00,37,00,38,00,64,00,32,00,33,00,65,00,32,00,39,00,62,00,64,00,63,00,66,00,33,00,65,00,30,00,36,00,5c,00,76,00,69,00,6f,00,73,00,63,00,73,00,69,00,2e,00,73,00,79,00,73,00,00,00 `, SystemRegistry: `[\DriverDatabase] "OemInfMap"=hex(3):e0 [\DriverDatabase\DeviceIds\{{ classGuid|lower }}] "{{ infFile }}"=hex(0): [\ControlSet001\Services\EventLog\System\{{ driverName }}] "EventMessageFile"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,49,00,6f,00,4c,00,6f,00,67,00,4d,00,73,00,67,00,2e,00,64,00,6c,00,6c,00,00,00 "TypesSupported"=dword:00000007 [\ControlSet001\Services\{{ driverName }}] "DisplayName"=hex(1):40,00,{{ infFile|toHex }},2c,00,25,00,56,00,69,00,72,00,74,00,69,00,6f,00,53,00,63,00,73,00,69,00,2e,00,53,00,56,00,43,00,44,00,45,00,53,00,43,00,25,00,3b,00,52,00,65,00,64,00,20,00,48,00,61,00,74,00,20,00,56,00,69,00,72,00,74,00,49,00,4f,00,20,00,53,00,43,00,53,00,49,00,20,00,70,00,61,00,73,00,73,00,2d,00,74,00,68,00,72,00,6f,00,75,00,67,00,68,00,20,00,53,00,65,00,72,00,76,00,69,00,63,00,65,00,00,00 "ErrorControl"=dword:00000001 "Group"=hex(1):53,00,43,00,53,00,49,00,20,00,6d,00,69,00,6e,00,69,00,70,00,6f,00,72,00,74,00,00,00 "ImagePath"=hex(2):53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,64,00,72,00,69,00,76,00,65,00,72,00,73,00,5c,00,{{ driverName|toHex }},2e,00,73,00,79,00,73,00,00,00 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Start"=dword:00000000 "Tag"=dword:00000021 "Type"=dword:00000001 [\ControlSet001\Services\{{ driverName }}\Parameters] "BusType"=dword:0000000a [\ControlSet001\Services\{{ driverName }}\Parameters\PnpInterface] "5"=dword:00000001 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1004] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1004&SUBSYS_00081AF4&REV_00] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1048] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1048&SUBSYS_11001AF4&REV_01] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DriverInfFiles\{{ infFile }}] @=hex(7):{{ packageName|toHex }},00,00,00,00 "Active"=hex(1):{{ packageName|toHex }},00,00 "Configurations"=hex(7):73,00,63,00,73,00,69,00,5f,00,69,00,6e,00,73,00,74,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}] @=hex(1):{{ infFile|toHex }},00,00 "Catalog"=hex(1):{{ driverName|toHex }},2e,00,63,00,61,00,74,00,00,00 "ImportDate"=hex(3):30,f6,fd,27,c8,c7,d6,01 "InfName"=hex(1):{{ driverName|toHex }},2e,00,69,00,6e,00,66,00,00,00 
"OemPath"=hex(1):43,00,3a,00,5c,00,55,00,73,00,65,00,72,00,73,00,5c,00,54,00,68,00,6f,00,6d,00,61,00,73,00,5c,00,44,00,6f,00,77,00,6e,00,6c,00,6f,00,61,00,64,00,73,00,5c,00,64,00,72,00,69,00,76,00,65,00,72,00,73,00,00,00 "Provider"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "SignerName"=hex(1):00,00 "SignerScore"=dword:0d000004 "StatusFlags"=dword:00000012 "Version"=hex(3):00,ff,09,00,00,00,00,00,7b,e9,36,4d,25,e3,ce,11,bf,c1,08,00,2b,e1,03,18,00,00,8e,c3,86,b8,d6,01,38,4a,68,00,53,00,64,00,00,00,00,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst] "ConfigFlags"=dword:00000000 "ConfigScope"=dword:00000007 "Service"=hex(1):{{ driverName|toHex }},00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Device] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Device\Interrupt Management] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Device\Interrupt Management\Affinity Policy] "DevicePolicy"=dword:00000005 "DevicePriority"=dword:00000003 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Device\Interrupt Management\MessageSignaledInterruptProperties] "MSISupported"=dword:00000001 "MessageNumberLimit"=dword:00000100 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Services] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Services\{{ driverName }}] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Services\{{ driverName }}\Parameters] "BusType"=dword:0000000a [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Services\{{ driverName }}\Parameters\PnpInterface] "5"=dword:00000001 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1004] "Configuration"=hex(1):73,00,63,00,73,00,69,00,5f,00,69,00,6e,00,73,00,74,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,69,00,6f,00,73,00,63,00,73,00,69,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1004&SUBSYS_00081AF4&REV_00] "Configuration"=hex(1):73,00,63,00,73,00,69,00,5f,00,69,00,6e,00,73,00,74,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,69,00,6f,00,73,00,63,00,73,00,69,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1048] "Configuration"=hex(1):73,00,63,00,73,00,69,00,5f,00,69,00,6e,00,73,00,74,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,69,00,6f,00,73,00,63,00,73,00,69,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1048&SUBSYS_11001AF4&REV_01] "Configuration"=hex(1):73,00,63,00,73,00,69,00,5f,00,69,00,6e,00,73,00,74,00,00,00 
"Description"=hex(1):25,00,76,00,69,00,72,00,74,00,69,00,6f,00,73,00,63,00,73,00,69,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Strings] "vendor"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "virtioscsi.devicedesc"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,20,00,56,00,69,00,72,00,74,00,49,00,4f,00,20,00,53,00,43,00,53,00,49,00,20,00,70,00,61,00,73,00,73,00,2d,00,74,00,68,00,72,00,6f,00,75,00,67,00,68,00,20,00,63,00,6f,00,6e,00,74,00,72,00,6f,00,6c,00,6c,00,65,00,72,00,00,00 `, } distrobuilder-3.0/windows/driver_vioserial.go000066400000000000000000000145051456216713500216370ustar00rootroot00000000000000package windows var driverVioserial = DriverInfo{ PackageName: "vioser.inf_amd64_6af78671192591e1", SoftwareRegistry: `[\Microsoft\Windows\CurrentVersion\Setup\PnpLockdownFiles\%SystemRoot%/System32/drivers/vioser.sys] "Class"=dword:00000004 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Source"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,44,00,72,00,69,00,76,00,65,00,72,00,53,00,74,00,6f,00,72,00,65,00,5c,00,46,00,69,00,6c,00,65,00,52,00,65,00,70,00,6f,00,73,00,69,00,74,00,6f,00,72,00,79,00,5c,00,{{ packageName|toHex }},5c,00,76,00,69,00,6f,00,73,00,65,00,72,00,2e,00,73,00,79,00,73,00,00,00 `, SystemRegistry: `[\ControlSet001\Services\VirtioSerial] "DisplayName"=hex(1):40,00,{{ infFile|toHex }},2c,00,25,00,56,00,69,00,72,00,74,00,69,00,6f,00,53,00,65,00,72,00,69,00,61,00,6c,00,2e,00,53,00,65,00,72,00,76,00,69,00,63,00,65,00,44,00,65,00,73,00,63,00,25,00,3b,00,56,00,69,00,72,00,74,00,49,00,4f,00,20,00,53,00,65,00,72,00,69,00,61,00,6c,00,20,00,53,00,65,00,72,00,76,00,69,00,63,00,65,00,00,00 "ErrorControl"=dword:00000001 "ImagePath"=hex(2):5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,64,00,72,00,69,00,76,00,65,00,72,00,73,00,5c,00,76,00,69,00,6f,00,73,00,65,00,72,00,2e,00,73,00,79,00,73,00,00,00 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Start"=dword:00000003 "Type"=dword:00000001 [\ControlSet001\Services\VirtioSerial\Parameters] [\ControlSet001\Services\VirtioSerial\Parameters\Wdf] "KmdfLibraryVersion"=hex(1):31,00,2e,00,31,00,35,00,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1003] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1003&SUBSYS_00031AF4&REV_00] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1043] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1043&SUBSYS_11001AF4&REV_01] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DeviceIds\{{ classGuid|lower }}] "{{ infFile }}"=hex(0): [\DriverDatabase\DriverInfFiles\{{ infFile }}] @=hex(7):{{ packageName|toHex }},00,00,00,00 "Active"=hex(1):{{ packageName|toHex }},00,00 "Configurations"=hex(7):56,00,69,00,72,00,74,00,69,00,6f,00,53,00,65,00,72,00,69,00,61,00,6c,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}] @=hex(1):{{ infFile|toHex }},00,00 "Catalog"=hex(1):76,00,69,00,6f,00,73,00,65,00,72,00,2e,00,63,00,61,00,74,00,00,00 "ImportDate"=hex(3):10,9d,6e,62,a1,ee,d6,01 "InfName"=hex(1):76,00,69,00,6f,00,73,00,65,00,72,00,2e,00,69,00,6e,00,66,00,00,00 
"OemPath"=hex(1):45,00,3a,00,5c,00,76,00,69,00,6f,00,73,00,65,00,72,00,69,00,61,00,6c,00,5c,00,77,00,31,00,30,00,5c,00,61,00,6d,00,64,00,36,00,34,00,00,00 "Provider"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "SignerName"=hex(1):00,00 "SignerScore"=dword:0d000004 "StatusFlags"=dword:00000012 "Version"=hex(3):00,ff,09,00,00,00,00,00,7d,e9,36,4d,25,e3,ce,11,bf,c1,08,00,2b,e1,03,18,00,00,8e,c3,86,b8,d6,01,38,4a,68,00,53,00,64,00,00,00,00,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtioSerial_Device.NT] "ConfigFlags"=dword:00000000 "ConfigScope"=dword:00000107 "Service"=hex(1):56,00,69,00,72,00,74,00,69,00,6f,00,53,00,65,00,72,00,69,00,61,00,6c,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtioSerial_Device.NT\Device] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtioSerial_Device.NT\Device\Interrupt Management] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\VirtioSerial_Device.NT\Device\Interrupt Management\MessageSignaledInterruptProperties] "MSISupported"=dword:00000001 "MessageNumberLimit"=dword:00000002 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1003] "Configuration"=hex(1):56,00,69,00,72,00,74,00,69,00,6f,00,53,00,65,00,72,00,69,00,61,00,6c,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,69,00,6f,00,73,00,65,00,72,00,69,00,61,00,6c,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1003&SUBSYS_00031AF4&REV_00] "Configuration"=hex(1):56,00,69,00,72,00,74,00,69,00,6f,00,53,00,65,00,72,00,69,00,61,00,6c,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,69,00,6f,00,73,00,65,00,72,00,69,00,61,00,6c,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1043] "Configuration"=hex(1):56,00,69,00,72,00,74,00,69,00,6f,00,53,00,65,00,72,00,69,00,61,00,6c,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,69,00,6f,00,73,00,65,00,72,00,69,00,61,00,6c,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1043&SUBSYS_11001AF4&REV_01] "Configuration"=hex(1):56,00,69,00,72,00,74,00,69,00,6f,00,53,00,65,00,72,00,69,00,61,00,6c,00,5f,00,44,00,65,00,76,00,69,00,63,00,65,00,2e,00,4e,00,54,00,00,00 "Description"=hex(1):25,00,76,00,69,00,72,00,74,00,69,00,6f,00,73,00,65,00,72,00,69,00,61,00,6c,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Strings] 
"vendor"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "virtioserial.devicedesc"=hex(1):56,00,69,00,72,00,74,00,49,00,4f,00,20,00,53,00,65,00,72,00,69,00,61,00,6c,00,20,00,44,00,72,00,69,00,76,00,65,00,72,00,00,00 `, } distrobuilder-3.0/windows/driver_viostor.go000066400000000000000000000163141456216713500213470ustar00rootroot00000000000000package windows var driverViostor = DriverInfo{ PackageName: "viostor.inf_amd64_520417bbc533faba", SoftwareRegistry: `[\Microsoft\Windows\CurrentVersion\Setup\PnpLockdownFiles\%SystemRoot%/System32/drivers/viostor.sys] "Class"=dword:00000004 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Source"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,44,00,72,00,69,00,{{ packageName|toHex }},5c,00,76,00,69,00,6f,00,73,00,74,00,6f,00,72,00,2e,00,73,00,79,00,73,00,00,00 `, SystemRegistry: `[\ControlSet001\Services\EventLog\System\viostor] "EventMessageFile"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,49,00,6f,00,4c,00,6f,00,67,00,4d,00,73,00,67,00,2e,00,64,00,6c,00,6c,00,00,00 "TypesSupported"=dword:00000007 [\ControlSet001\Services\viostor] "ErrorControl"=dword:00000001 "Group"=hex(1):53,00,43,00,53,00,49,00,20,00,6d,00,69,00,6e,00,69,00,70,00,6f,00,72,00,74,00,00,00 "ImagePath"=hex(2):53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,64,00,72,00,69,00,76,00,65,00,72,00,73,00,5c,00,76,00,69,00,6f,00,73,00,74,00,6f,00,72,00,2e,00,73,00,79,00,73,00,00,00 "Owners"=hex(7):{{ infFile|toHex }},00,00,00,00 "Start"=dword:00000000 "Tag"=dword:00000021 "Type"=dword:00000001 [\ControlSet001\Services\viostor\Parameters] "BusType"=dword:00000001 [\ControlSet001\Services\viostor\Parameters\PnpInterface] "5"=dword:00000001 [\DriverDatabase] "OemInfMap"=hex(3):80 "UpdateDate"=hex(3):a0,95,cd,69,92,58,d8,01 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1001] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1001&SUBSYS_00021AF4&REV_00] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1042] "{{ infFile }}"=hex(3):02,ff,00,00 [\DriverDatabase\DeviceIds\PCI\VEN_1AF4&DEV_1042&SUBSYS_11001AF4&REV_01] "{{ infFile }}"=hex(3):01,ff,00,00 [\DriverDatabase\DriverInfFiles\{{ infFile }}] @=hex(7):{{ packageName|toHex }},00,00,00,00 "Active"=hex(1):{{ packageName|toHex }},00,00 "Configurations"=hex(7):73,00,63,00,73,00,69,00,5f,00,69,00,6e,00,73,00,74,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}] @=hex(1):{{ infFile|toHex }},00,00 "Catalog"=hex(1):76,00,69,00,6f,00,73,00,74,00,6f,00,72,00,2e,00,63,00,61,00,74,00,00,00 "FileSize"=hex(b):ae,29,01,00,00,00,00,00 "ImportDate"=hex(3):d0,46,c4,69,92,58,d8,01 "InfName"=hex(1):76,00,69,00,6f,00,73,00,74,00,6f,00,72,00,2e,00,69,00,6e,00,66,00,00,00 "OemPath"=hex(1):44,00,3a,00,5c,00,61,00,6d,00,64,00,36,00,34,00,5c,00,77,00,31,00,31,00,00,00 "Provider"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "SignerName"=hex(1):4d,00,69,00,63,00,72,00,6f,00,73,00,6f,00,66,00,74,00,20,00,57,00,69,00,6e,00,64,00,6f,00,77,00,73,00,20,00,48,00,61,00,72,00,64,00,77,00,61,00,72,00,65,00,20,00,43,00,6f,00,6d,00,70,00,61,00,74,00,69,00,62,00,69,00,6c,00,69,00,74,00,79,00,20,00,50,00,75,00,62,00,6c,00,69,00,73,00,68,00,65,00,72,00,00,00 "SignerScore"=dword:0d000005 "StatusFlags"=dword:00000012 
"Version"=hex(3):00,ff,09,00,00,00,00,00,7b,e9,36,4d,25,e3,ce,11,bf,c1,08,00,2b,e1,03,18,00,c0,97,8c,0f,e7,d7,01,fc,53,68,00,5a,00,64,00,00,00,00,00,00,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst] "ConfigFlags"=dword:00000000 "ConfigScope"=dword:00000007 "Service"=hex(1):76,00,69,00,6f,00,73,00,74,00,6f,00,72,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Device] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Device\Interrupt Management] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Device\Interrupt Management\Affinity Policy] "DevicePolicy"=dword:00000005 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Device\Interrupt Management\MessageSignaledInterruptProperties] "MSISupported"=dword:00000001 "MessageNumberLimit"=dword:00000100 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Services] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Services\viostor] [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Services\viostor\Parameters] "BusType"=dword:00000001 [\DriverDatabase\DriverPackages\{{ packageName }}\Configurations\scsi_inst\Services\viostor\Parameters\PnpInterface] "5"=dword:00000001 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI] [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1001] "Configuration"=hex(1):73,00,63,00,73,00,69,00,5f,00,69,00,6e,00,73,00,74,00,00,00 "Description"=hex(1):25,00,76,00,69,00,6f,00,73,00,74,00,6f,00,72,00,73,00,63,00,73,00,69,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1001&SUBSYS_00021AF4&REV_00] "Configuration"=hex(1):73,00,63,00,73,00,69,00,5f,00,69,00,6e,00,73,00,74,00,00,00 "Description"=hex(1):25,00,76,00,69,00,6f,00,73,00,74,00,6f,00,72,00,73,00,63,00,73,00,69,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1042] "Configuration"=hex(1):73,00,63,00,73,00,69,00,5f,00,69,00,6e,00,73,00,74,00,00,00 "Description"=hex(1):25,00,76,00,69,00,6f,00,73,00,74,00,6f,00,72,00,73,00,63,00,73,00,69,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Descriptors\PCI\VEN_1AF4&DEV_1042&SUBSYS_11001AF4&REV_01] "Configuration"=hex(1):73,00,63,00,73,00,69,00,5f,00,69,00,6e,00,73,00,74,00,00,00 "Description"=hex(1):25,00,76,00,69,00,6f,00,73,00,74,00,6f,00,72,00,73,00,63,00,73,00,69,00,2e,00,64,00,65,00,76,00,69,00,63,00,65,00,64,00,65,00,73,00,63,00,25,00,00,00 "Manufacturer"=hex(1):25,00,76,00,65,00,6e,00,64,00,6f,00,72,00,25,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Properties] [\DriverDatabase\DriverPackages\{{ packageName }}\Properties\{4da162c1-5eb1-4140-a444-5064c9814e76}] [\DriverDatabase\DriverPackages\{{ packageName }}\Properties\{4da162c1-5eb1-4140-a444-5064c9814e76}\0009] 
@=hex(ffff0012):33,00,30,00,30,00,39,00,37,00,37,00,37,00,30,00,5f,00,31,00,33,00,35,00,39,00,34,00,32,00,39,00,33,00,35,00,33,00,31,00,36,00,32,00,37,00,31,00,37,00,32,00,5f,00,31,00,31,00,35,00,32,00,39,00,32,00,31,00,35,00,30,00,35,00,36,00,39,00,34,00,33,00,31,00,34,00,34,00,36,00,36,00,00,00 [\DriverDatabase\DriverPackages\{{ packageName }}\Strings] "vendor"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,2c,00,20,00,49,00,6e,00,63,00,2e,00,00,00 "viostorscsi.devicedesc"=hex(1):52,00,65,00,64,00,20,00,48,00,61,00,74,00,20,00,56,00,69,00,72,00,74,00,49,00,4f,00,20,00,53,00,43,00,53,00,49,00,20,00,63,00,6f,00,6e,00,74,00,72,00,6f,00,6c,00,6c,00,65,00,72,00,00,00 `, } distrobuilder-3.0/windows/drivers.go000066400000000000000000000010341456216713500177360ustar00rootroot00000000000000package windows

// DriverInfo contains driver specific information.
type DriverInfo struct {
	PackageName      string
	SoftwareRegistry string
	SystemRegistry   string
	DriversRegistry  string
}

// Drivers contains all supported drivers.
var Drivers = map[string]DriverInfo{
	"Balloon":   driverBalloon,
	"NetKVM":    driverNetKVM,
	"vioinput":  driverVioinput,
	"viorng":    driverViorng,
	"vioscsi":   driverVioscsi,
	"vioserial": driverVioserial,
	"viofs":     driverViofs,
	"viogpudo":  driverVioGPUDo,
	"viostor":   driverViostor,
}
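Each DriverInfo entry is template data: the {{ ... |filter }} placeholders follow pongo2/Jinja-style syntax, and the rendered output is .reg-style content aimed at the SOFTWARE, SYSTEM and DRIVERS hives, as the field names suggest. The sketch below shows how one entry might be looked up from the Drivers map and rendered. It assumes a pongo2 engine, a toHex filter as in the earlier sketch, and illustrative context keys and values (infFile, classGuid, and so on); it is not the repository's actual wiring.

package main

import (
	"fmt"
	"strings"
	"unicode/utf16"

	"github.com/flosch/pongo2" // assumption: a pongo2-compatible engine; the module path may differ by version
	"github.com/lxc/distrobuilder/windows"
)

// Same assumed encoding as the earlier toHex sketch: UTF-16LE bytes rendered
// as comma-separated hex pairs.
func toHex(s string) string {
	parts := make([]string, 0, len(s))
	for _, u := range utf16.Encode([]rune(s)) {
		parts = append(parts, fmt.Sprintf("%02x,%02x", byte(u), byte(u>>8)))
	}
	return strings.Join(parts, ",")
}

func main() {
	// Make the toHex filter available to the templates. The error is ignored
	// in case the filter has already been registered elsewhere.
	_ = pongo2.RegisterFilter("toHex", func(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
		return pongo2.AsValue(toHex(in.String())), nil
	})

	info := windows.Drivers["viostor"]

	tpl, err := pongo2.FromString(info.SystemRegistry)
	if err != nil {
		panic(err)
	}

	// Context keys mirror the placeholders used in the templates; the values
	// below are illustrative only (the class GUID is the standard SCSIAdapter
	// class, lower-cased by the template's |lower filter).
	reg, err := tpl.Execute(pongo2.Context{
		"infFile":     "oem0.inf",
		"packageName": info.PackageName,
		"driverName":  "viostor",
		"classGuid":   "{4D36E97B-E325-11CE-BFC1-08002BE10318}",
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(reg)
}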