./system_image.egg-info/requires.txt

python-gnupg

./system_image.egg-info/dependency_links.txt

./system_image.egg-info/entry_points.txt

[console_scripts]
system-image-cli = systemimage.main:main
system-image-dbus = systemimage.service:main

./system_image.egg-info/SOURCES.txt

MANIFEST.in
NEWS.rst
README.rst
cli-manpage.rst
coverage-curl.ini
coverage-udm.ini
dbus-manpage.rst
ini-manpage.rst
setup.cfg
setup.py
tox.ini
unittest.cfg
system_image.egg-info/PKG-INFO
system_image.egg-info/SOURCES.txt
system_image.egg-info/dependency_links.txt
system_image.egg-info/entry_points.txt
system_image.egg-info/requires.txt
system_image.egg-info/top_level.txt
systemimage/__init__.py
systemimage/api.py
systemimage/apply.py
systemimage/bag.py
systemimage/candidates.py
systemimage/channel.py
systemimage/config.py
systemimage/curl.py
systemimage/dbus.py
systemimage/device.py
systemimage/download.py
systemimage/gpg.py
systemimage/helpers.py
systemimage/image.py
systemimage/index.py
systemimage/keyring.py
systemimage/logging.py
systemimage/main.py
systemimage/reactor.py
systemimage/scores.py
systemimage/service.py
systemimage/settings.py
systemimage/state.py
systemimage/udm.py
systemimage/version.txt
systemimage/data/__init__.py
systemimage/data/com.canonical.SystemImage.conf
systemimage/data/com.canonical.SystemImage.service
systemimage/docs/__init__.py
systemimage/docs/conf.py
systemimage/docs/readme.rst
systemimage/testing/__init__.py
systemimage/testing/controller.py
systemimage/testing/dbus.py
systemimage/testing/demo.py
systemimage/testing/helpers.py
systemimage/testing/nose.py
systemimage/testing/service.py
systemimage/tests/__init__.py
systemimage/tests/test_api.py
systemimage/tests/test_bag.py
systemimage/tests/test_candidates.py
systemimage/tests/test_channel.py
systemimage/tests/test_config.py
systemimage/tests/test_dbus.py
systemimage/tests/test_download.py
systemimage/tests/test_gpg.py
systemimage/tests/test_helpers.py
systemimage/tests/test_image.py
systemimage/tests/test_index.py
systemimage/tests/test_keyring.py
systemimage/tests/test_main.py
systemimage/tests/test_scores.py
systemimage/tests/test_settings.py
systemimage/tests/test_state.py
systemimage/tests/test_winner.py
systemimage/tests/data/00.ini
systemimage/tests/data/01.ini
systemimage/tests/data/__init__.py
systemimage/tests/data/api.channels_01.json
systemimage/tests/data/api.index_01.json
systemimage/tests/data/api.index_02.json
systemimage/tests/data/api.index_03.json
systemimage/tests/data/archive-master.gpg
systemimage/tests/data/bad_cert.pem
systemimage/tests/data/bad_key.pem
systemimage/tests/data/candidates.index_01.json
systemimage/tests/data/candidates.index_02.json
systemimage/tests/data/candidates.index_03.json
systemimage/tests/data/candidates.index_04.json
systemimage/tests/data/candidates.index_05.json
systemimage/tests/data/candidates.index_06.json
systemimage/tests/data/candidates.index_07.json
systemimage/tests/data/candidates.index_08.json
systemimage/tests/data/candidates.index_09.json
systemimage/tests/data/candidates.index_10.json
systemimage/tests/data/candidates.index_11.json
systemimage/tests/data/candidates.index_12.json
systemimage/tests/data/candidates.index_13.json
systemimage/tests/data/cert.pem
systemimage/tests/data/channel.channels_01.json
systemimage/tests/data/channel.channels_02.json
systemimage/tests/data/channel.channels_03.json
systemimage/tests/data/channel.channels_04.json
systemimage/tests/data/channel.channels_05.json
systemimage/tests/data/com.canonical.SystemImage.service.in
systemimage/tests/data/com.canonical.applications.Downloader.service.in
systemimage/tests/data/config.config_01.ini
systemimage/tests/data/config.config_02.ini
systemimage/tests/data/config.config_03.ini
systemimage/tests/data/config.config_04.ini
systemimage/tests/data/config.config_05.ini
systemimage/tests/data/config.config_06.ini
systemimage/tests/data/config.config_07.ini
systemimage/tests/data/config.config_08.ini
systemimage/tests/data/config.config_09.ini
systemimage/tests/data/config.config_10.ini
systemimage/tests/data/config.config_11.ini
systemimage/tests/data/dbus-system.conf.in
systemimage/tests/data/dbus.channels_01.json
systemimage/tests/data/dbus.index_01.json
systemimage/tests/data/dbus.index_02.json
systemimage/tests/data/dbus.index_03.json
systemimage/tests/data/dbus.index_04.json
systemimage/tests/data/dbus.index_05.json
systemimage/tests/data/dbus.index_06.json
systemimage/tests/data/device-signing.gpg
systemimage/tests/data/download.index_01.json
systemimage/tests/data/expired_cert.pem
systemimage/tests/data/expired_key.pem
systemimage/tests/data/gpg.channels_01.json
systemimage/tests/data/helpers.config_01.ini
systemimage/tests/data/helpers.config_02.ini
systemimage/tests/data/image-master.gpg
systemimage/tests/data/image-signing.gpg
systemimage/tests/data/index.channels_01.json
systemimage/tests/data/index.channels_02.json
systemimage/tests/data/index.channels_03.json
systemimage/tests/data/index.channels_04.json
systemimage/tests/data/index.channels_05.json
systemimage/tests/data/index.index_01.json
systemimage/tests/data/index.index_02.json
systemimage/tests/data/index.index_03.json
systemimage/tests/data/index.index_04.json
systemimage/tests/data/index.index_05.json
systemimage/tests/data/key.pem
systemimage/tests/data/main.channels_01.json
systemimage/tests/data/main.channels_02.json
systemimage/tests/data/main.channels_03.json
systemimage/tests/data/main.config_01.ini
systemimage/tests/data/main.config_02.ini
systemimage/tests/data/main.config_03.ini
systemimage/tests/data/main.config_04.ini
systemimage/tests/data/main.config_05.ini
systemimage/tests/data/main.config_07.ini
systemimage/tests/data/main.index_01.json
systemimage/tests/data/main.index_02.json
systemimage/tests/data/main.index_03.json
systemimage/tests/data/main.index_04.json
systemimage/tests/data/main.index_05.json
systemimage/tests/data/master-secring.gpg
systemimage/tests/data/nasty_cert.pem
systemimage/tests/data/nasty_key.pem
systemimage/tests/data/scores.index_01.json
systemimage/tests/data/scores.index_02.json
systemimage/tests/data/scores.index_03.json
systemimage/tests/data/scores.index_04.json
systemimage/tests/data/scores.index_05.json
systemimage/tests/data/scores.index_06.json
systemimage/tests/data/scores.index_07.json
systemimage/tests/data/spare.gpg
systemimage/tests/data/state.channels_01.json
systemimage/tests/data/state.channels_02.json
systemimage/tests/data/state.channels_03.json
systemimage/tests/data/state.channels_04.json
systemimage/tests/data/state.channels_05.json
systemimage/tests/data/state.channels_06.json
systemimage/tests/data/state.channels_07.json
systemimage/tests/data/state.config_01.ini
systemimage/tests/data/state.config_02.ini
systemimage/tests/data/state.index_01.json
systemimage/tests/data/state.index_02.json
systemimage/tests/data/state.index_03.json
systemimage/tests/data/state.index_04.json
systemimage/tests/data/state.index_05.json
systemimage/tests/data/state.index_06.json
systemimage/tests/data/state.index_07.json
systemimage/tests/data/state.index_08.json
systemimage/tests/data/winner.channels_01.json
systemimage/tests/data/winner.channels_02.json
systemimage/tests/data/winner.index_01.json
systemimage/tests/data/winner.index_02.json
tools/demo.ini
tools/runme.sh

./system_image.egg-info/top_level.txt

systemimage

./system_image.egg-info/PKG-INFO

Metadata-Version: 1.0
Name: system-image
Version: 3.1
Summary: Ubuntu System Image Based Upgrades
Home-page: UNKNOWN
Author: Barry Warsaw
Author-email: barry@ubuntu.com
License: GNU GPLv3
Description: UNKNOWN
Platform: UNKNOWN

./coverage-udm.ini

[run]
branch = true
parallel = true
omit =
    setup*
    systemimage/data/*
    systemimage/docs/*
    systemimage/testing/*
    systemimage/tests/*
    systemimage/curl.py
    /usr/lib/*
    .tox/coverage-*/lib/python*/distutils/*
    .tox/coverage-*/lib/python*/site-packages/pkg_resources*

[paths]
source =
    systemimage
    .tox/coverage-*/lib/python*/site-packages/systemimage

[report]
exclude_lines =
    pragma: no cover
    pragma: no udm

./tools/runme.sh

# Development wrapper for launching a locally built ubuntu-download-manager;
# it stamps the date and arguments of each invocation into u-d-m's own log
# file before exec'ing the daemon.
where=udm/build
root=$HOME/projects/phone/${where}/src/downloads/daemon
logfile=$HOME/.cache/ubuntu-download-manager/ubuntu-download-manager.INFO
# export GLOG_logtostderr=1
# export GLOG_v=100
echo -n `date --rfc-3339=ns` >> ${logfile}
echo -n " " >> ${logfile}
echo $* >> ${logfile}
#exec env -u DBUS_SESSION_BUS_ADDRESS ${root}/ubuntu-download-manager $*
exec ${root}/ubuntu-download-manager $*
./tools/demo.ini

# A demo ini file for local testing.
[service]
base: system-image.ubuntu.com
http_port: 80
https_port: 443
timeout: 1m

[system]
channel: daily
tempdir: /tmp
logfile: /tmp/system-image/logs/client.log
loglevel: info

[gpg]
archive_master: /tmp/system-image/etc/archive-master.tar.xz
image_master: /tmp/system-image/var/image-master.tar.xz
image_signing: /tmp/system-image/var/image-signing.tar.xz
device_signing: /tmp/system-image/var/device-signing.tar.xz

[updater]
cache_partition: /tmp/system-image/android
data_partition: /tmp/system-image/ubuntu

[hooks]
device: systemimage.testing.demo.DemoDevice
scorer: systemimage.scores.WeightedScorer
reboot: systemimage.testing.demo.DemoReboot

[dbus]
lifetime: 2m

./unittest.cfg

[unittest]
verbose = 2
plugins = systemimage.testing.nose

[systemimage]
always-on = True

[log-capture]
always-on = False

./NEWS.rst

=============================
NEWS for system-image updater
=============================

3.1 (2016-03-02)
================
* In ``system-image-cli``, add a ``-m``/``--maximage`` flag which can be used
  to cap a winning upgrade path to a maximum image number.  (LP: #1386302)
* Remove the previously deprecated ``Info()`` D-Bus method.  (LP: #1380678)
* Remove the previously deprecated ``--no-reboot`` command line option.
* Add support for temporarily overriding the wifi-only setting when using
  ubuntu-download-manager.  (LP: #1508081)

  - Added ``ForceAllowGSMDownload()`` method to the D-Bus API.
  - Added ``DownloadStarted`` D-Bus signal, which gets sent when the download
    for an update has begun.
  - Added ``--override-gsm`` flag to ``system-image-cli``.

3.0.2 (2015-09-22)
==================
* Don't crash when one of the .ini files is a dangling symlink.
  (LP: #1495688)

3.0.1 (2015-06-16)
==================
* When `--progress=json` is used, print an error record to stdout if the
  state machine fails.  (LP: #1463061)

3.0 (2015-05-08)
================
* Support a built-in PyCURL-based downloader in addition to the traditional
  ubuntu-download-manager (over D-Bus) downloader.  Auto-detects which
  downloader to use based on whether udm is available on the system bus,
  pycurl is importable, and the setting of the SYSTEMIMAGE_PYCURL
  environment variable.  Initial contribution by Michael Vogt.
  (LP: #1374459)
* Support alternative machine-id files as fall backs if the D-Bus file does
  not exist.  Specifically, add systemd's /etc/machine-id to the list.
  Initial contribution by Michael Vogt.  (LP: #1384859)
* Support multiple configuration files, as in a `config.d` directory.  Now,
  configuration files are named `NN_whatever.ini` where "NN" must be a
  numeric prefix.  Files are loaded in sorted numeric order, with later
  files overriding earlier ones.  Support for both the `client.ini` and
  `channel.ini` files has been removed.  (LP: #1373467)
* The `[system]build_file` variable has been removed.  Build number
  information now must come from the `.ini` files, and last update date
  comes from the newest `.ini` file loaded.
* The `-C` command line option now takes a path to the configuration
  directory.
* Reworked the checking and downloading locks/flags so that they will work
  better with configuration reloading.  (LP: #1412698)
* Support for the `/etc/ubuntu-build` file has been removed.  The build
  number now comes from the configuration files.  (LP: #1377312)
* Move the `archive-master.tar.xz` file to `/usr/share/system-image` for
  better FHS compliance.  (LP: #1377184)
* Since devices do not always reboot to apply changes, the `[hooks]update`
  variable has been renamed to `[hooks]apply`.  (LP: #1381538)
* For testing purposes only, `system-image-cli` now supports an undocumented
  command line switch `--skip-gpg-verification`.  Originally given by Jani
  Monoses.  (LP: #1333414)
* A new D-Bus signal `Applied(bool)` is added, which is returned in response
  to the `ApplyUpdate()` asynchronous method call.  For devices which do not
  need to reboot in order to apply the update, this is the only signal you
  will get.  If your device needs to reboot you will also receive the
  `Rebooting(bool)` signal as with earlier versions.  The semantics of the
  flag argument are the same in both cases, as are the race timing issues
  inherent in these signals.  See the `system-image-dbus(8)` manpage for
  details.  (LP: #1417176)
* As part of LP: #1417176, the `--no-reboot` switch for `system-image-cli(1)`
  has been deprecated.  Use `--no-apply` instead (`-g` is still the
  shortcut).
* Support production factory resets.  `system-image-cli --production-reset`
  and a new D-Bus API method `ProductionReset()` are added.  Given by
  Ricardo Salveti.  (LP: #1419027)
* A new key, `target_version_detail` has been added to the dictionary
  returned by the `.Information()` D-Bus method.  (LP: #1399687)
* The `User-Agent` HTTP header now also includes device and channel names.
  (LP: #1387719)
* Added `--progress` flag to `system-image-cli` for specifying methods for
  reporting progress.  Currently available values are: `dots` (compatible
  with system-image 2.5), `logfile` (compatible with system-image 2.5's
  `--verbose` flag), and `json` for JSON records on stdout.  (LP: #1423622)
* Support for the `SYSTEMIMAGE_DBUS_DAEMON_HUP_SLEEP_SECONDS` environment
  variable has been removed.
* Fix `system-image-cli --list-channels`.  (LP: #1448153)

2.5.1 (2014-10-21)
==================
* Make phased upgrade percentage calculation idempotent for each tuple of
  (channel, target-build-number, machine-id).  Also, modify the candidate
  upgrade path selection process such that if the lowest scored candidate
  path has a phased percentage greater than the device's percentage, the
  candidate will be ignored, and the next lowest scored candidate will be
  checked until either a winner is found or no candidates are left, in which
  case the device is deemed to be up-to-date.  (LP: #1383539)
* `system-image-cli -p/--percentage` is added to allow command line override
  of the device's phased percentage.
* `system-image-cli --dry-run` now also displays the phase percentage of the
  winning candidate upgrade path.

2.5 (2014-09-29)
================
* Remove the previously deprecated `system-image-cli --dbus` command line
  switch.  (LP: #1369717)
* Add a `target_build_number` key to the mapping returned by the
  `.Information()` D-Bus method.  (LP: #1370586)

2.4 (2014-09-16)
================
* The channel.ini file can override the device name by setting
  ``[service]device``.  (LP: #1353178)
* Add optional instrumentation to collect code coverage data during test
  suite run via tox.  (LP: #1324241)
* When an exception occurs in a `system-image-dbus` D-Bus method, signal, or
  callback, this exception is logged in the standard log file, and the
  process exits.  Also, `[system]loglevel` can now take an optional
  ":level" suffix which can be used to set the log level for the D-Bus API
  methods.  By default, they log at `ERROR` level, but can be set lower for
  debugging purposes.  (LP: #1279970)
* Don't crash when releasing an unacquired checking lock.  (LP: #1365646)
* When checking files for `last_update_date()` ignore PermissionErrors and
  just keep checking the fall backs.  (LP: #1365761)
* `system-image-cli --dbus` has been deprecated and will be removed in the
  future.  (LP: #1369714)

2.3.2 (2014-07-31)
==================
* When system-image-{cli,dbus} is run as non-root, use a fallback location
  for the settings.db file, if the parent directory isn't writable.
  (LP: #1349478)

2.3.1 (2014-07-23)
==================
* Fix a traceback that occurs when the `systemimage.testing` subpackage
  isn't available, as is the case when the system-image-dev binary package
  is not installed.

2.3 (2014-07-16)
================
* Support factory resets.  `system-image-cli --factory-reset` and a new
  D-Bus API method `FactoryReset()` are added.  (LP: #1207860)
* Data file checksums are passed to ubuntu-download-manager where available.
  (LP: #1262256)
* Certain duplicate destinations are allowed, if they have matching source
  urls and checksums.  (LP: #1286542)
* When system-image-{cli,dbus} is run as non-root, use a fallback location
  for the log file if the system log file isn't writable.  (LP: #1301995)
* `system-image-cli --list-channels` lists all the available channels,
  including aliases.  (LP: #1251291)
* `system-image-cli --no-reboot` downloads all files and prepares for
  recovery, but does not actually issue a reboot.  (LP: #1279028)
* `system-image-cli --switch CHANNEL` is a convenient alias for
  `system-image-cli -b 0 -c CHANNEL`.  (LP: #1249347)
* Added `--show-settings`, `--get`, `--set`, and `--del` options for
  viewing, changing, and setting all the internal database settings.
  (LP: #1294273)
* Improve memory usage when verifying file checksums.  Given by Michael
  Vogt.  (LP: #1271684)
* In the `UpdatePaused` signal, return a percentage value that's closer to
  reality than hardcoding it to 0.  (LP: #1274131)
* New D-Bus API method `.Information()` which is like `.Info()` except that
  it returns extended information details, as a mapping of strings to
  strings.  These details include a `last_check_date` which is the ISO 8601
  timestamp of the last time an `UpdateAvailableStatus` signal was sent.
  (LP: #1280169)
* Set the GSM flag in ubuntu-download-manager based on the current s-i
  download setting.  (LP: #1339157)
* The system-image-dbus(8) manpage now describes the full D-Bus API.
  (LP: #1340882)
* Fix the D-Bus mock service so that the downloading flag for
  `UpdateAvailableStatus` will correctly return true when checking twice
  under manual downloads.  (LP: #1273354)
* Pay down some tech-debt.  (LP: #1342183)

2.2 (2014-03-05)
================
* When `CheckForUpdate()` is called a second time, while an auto-download is
  in progress, but after the first check is complete, we send an
  `UpdateAvailableStatus` signal with the cached information.
  (LP: #1284217)
* Close a race condition when manually downloading and issuing multiple
  `CheckForUpdate` calls.  (LP: #1287919)
* Support disabling either the HTTP or HTTPS services for update (but not
  both).  The ``[service]http_port`` or ``[service]https_port`` may be set
  to the string ``disabled`` and the disabled protocol will fall back to the
  enabled protocol.  Implementation given by Vojtech Bocek.  (LP: #1278589)
* Allow the channel.ini file to override the ``[service]`` section.
* Now that ubuntu-download-manager performs atomic renames of temporary
  files, system-image no longer needs to do that.  (LP: #1287287)
* When an exception in the state machine occurs while checking for updates,
  the exception is caught and logged.  When using the CLI, the result is an
  exit code of 1.  When using the D-Bus API, an `UpdateAvailableStatus`
  signal is sent with `error_reason` set to the exception string.  This
  exception is *not* propagated back to GLib.  (LP: #1250817)
* Log directory path is passed to ubuntu-download-manager to assist in
  debugging.  Given by Manuel de la Peña.  (LP: #1279532)

2.1 (2014-02-20)
================
* Internal improvements to SignatureError for better debugging.
  (LP: #1279056)
* Better protection against several possible race conditions during
  `CheckForUpdate()` (LP: #1277589)

  - Use a threading.Lock instance as the internal "checking for update"
    barrier instead of a boolean.  This should eliminate the race window
    between testing and acquiring the checking lock.
  - Put an exclusive claim on the `com.canonical.SystemImage` system dbus
    name, and if we cannot get that claim, exit with an error code 2.  This
    prevents multiple instances of the D-Bus system service from running at
    the same time.
* Return the empty string from `ApplyUpdate()` D-Bus method.  This restores
  the original API (patch merged from Ubuntu package, given by Didier
  Roche).  (LP: #1260768)
* Request ubuntu-download-manager to download all files to temporary
  destinations, then atomically rename them into place.  This avoids
  clobbering by multiple processes and mimics changes coming in u-d-m.
* Provide much more detailed logging.

  - `Mediator` instances have a helpful `repr` which also includes the id of
    the `State` object.
  - More logging during state transitions.
  - All emitted D-Bus signals are also logged (at debug level).
* Added `-L` flag to nose test runner, which can be used to specify an
  explicit log file path for debugging.
* Fixed D-Bus error logging.

  - Don't initialize the root logger, since this can interfere with
    python-dbus, which doesn't initialize its loggers correctly.
  - Only use `.format()` based interpolation for `systemimage` logs.
* Give virtualized buildds a fighting chance against D-Bus by

  - using `org.freedesktop.DBus`'s `ReloadConfig()` interface instead of
    SIGHUP.
  - adding a configurable sleep call after the `ReloadConfig()`.  This
    defaults to 0 since de-virtualized and local builds do not need them.
    Set the environment variable `SYSTEMIMAGE_DBUS_DAEMON_HUP_SLEEP_SECONDS`
    to override.
* Run the tox test suite for both Python 3.3 and 3.4.

2.0.5 (2014-01-30)
==================
* MANIFEST.in: Make sure the .bzr directory doesn't end up in the sdist
  tarball.

2.0.4 (2014-01-30)
==================
* No change release to test the new landing process.

2.0.3 (2013-12-11)
==================
* More attempted DEP-8 test failure fixes.

2.0.2 (2013-12-03)
==================
* Fix additional build environment test failures.  (LP: #1256947)

2.0.1 (2013-11-27)
==================
* Fix some build environment test failures.

2.0 (2013-11-13)
================
* Avoid re-downloading data files if previously downloaded files are found
  and are still valid (by checksum and gpg signature).  (LP: #1217098)
* In the D-Bus API, `ApplyUpdate()` is changed from a synchronous method
  returning a string to an asynchronous method not returning anything.
  Instead a `Rebooting(bool)` signal is added with the value being the
  status of the reboot operation (obviously, this signal isn't ever received
  if the reboot succeeds).  (LP: #1247215)
* Remove the old channels.json format.  (LP: #1221843)
* Remove support for old version numbers.  (LP: #1220238)
* Switch to nose2 as the test runner.  (LP: #1238071)

  + Add -P option to provide much nicer test pattern matching.
  + Add -V option to increase `systemimage` logging verbosity during tests
    (separate from nose2's own -v options).
* Write the `ubuntu_command` file atomically.  (LP: #1241236)
* Remove the unused `-u` and `--upgrade` switches.
* Clarify that `--channel` should be used with `--build 0` to switch
  channels.  (LP: #1243612)
* `--info` output will include the alias name if the current channel.ini has
  a `channel_target` variable.
* `--dry-run` output now includes channel switch information when an upgrade
  changes the channel alias mapping.
* Add a workaround for LP: #1245597, caused by a bug in
  ubuntu-download-manager when presented with an empty download list.
* If an existing image-master or image-signing key is found on the file
  system, double check its signature (LP: #1195057) and expiration date
  (LP: #1192717) if it has one, before using it.
* If the winning path includes two URLs which map to the same local
  destination file name, the download should fail.  (LP: #1250181)
* Provide a bit more useful traceback in various places of the state machine
  so that error conditions in system-image-cli make a bit more sense.
  (LP: #1248639)
* Tweak the scoring algorithm to highly discourage candidate upgrade paths
  that don't leave you at the maximum build number.  (LP: #1250553)
* When running system-image-cli under verbosity 1, print dots to stderr so
  that the user knows something is happening.
* Remove unused `state_file` setting from client.ini.

1.9.1 (2013-10-15)
==================
* Further refinement of permission checking/fixing.  (LP: #1240105)
* Work around some failures in DEP 8 tests.  (LP: #1240106)

1.9 (2013-10-14)
================
* Fix file and directory permissions.  A random temporary directory inside
  /tmp (by default, see `[system]tempdir` in client.ini) is securely created
  for actual ephemeral files.  The log file will have 0600 permission.
  (LP: #1235975)
* Download files directly to the cache partition or data partition.
  (LP: #1233521)
* Proactively remove files from the cache and data partitions before
  starting to download anything (except `log` and `last_log` in the cache
  partition).  This avoids various problems that can occur if the reboot
  fails (LP: #1238102) and improves the ability to recover from partial
  downloads without rebooting (LP: #1233521).
* Keep the D-Bus process alive as long as progress is being made (as tracked
  by any calls, internally or externally to D-Bus methods or signals).
  (LP: #1238290)
* Pause/resume downloads.  (LP: #1237360)
* Remove all references to the `[system]threads` variable since it is no
  longer used, after the integration of the download manager.
* Through the use of the psutil library, re-enable some previously skipped
  tests.  (LP: #1206588)

1.8 (2013-10-02)
================
* Support channel alias tracking.  If the channel.ini file has a
  `channel_target` key, and the channel spec in the channel.json file has an
  `alias` key, and these don't match, then the channel alias has changed,
  and we squash the build number to 0 for upgrade path calculation.  An
  explicit `--build` option for system-image-cli still overrides this.
  (LP: #1221844)
* Support *phased updates* where we can ignore some images if their
  'phased-percentage' key is less than a machine-specific value.
  (LP: #1231628)
* Switch the default `auto_download` value back to '1', i.e. download
  automatically but only over wifi.  (LP: #1229807)
* Plumb progress signals from ubuntu-download-manager through the
  system-image D-Bus API.  (LP: #1204618)
* Only send the `UpdateFailed` signal in response to a `CancelUpdate()` call
  if a download is already in progress.  No signal is sent if there's no
  download in progress.  Getting the files to determine whether an update is
  available or not does not count as a "download in progress".
  (LP: #1215946)

1.7 (2013-09-30)
================
* Fix test suite failure on 32 bit systems.  Again.
* Reset the D-Bus reactor timeout every time we see an active signal from
  the D-Bus service we're talking to.  (LP: #1233379)

1.6 (2013-09-30)
================
* Use the new ubuntu-download-manager to manage all requested downloads.
  (LP: #1196991)
* Use /userdata/.last_update file as the "last upgrade date" if the file
  exists.  (LP: #1215943)
* Default D-Bus service timeout is now 1 hour.
* Default D-Bus logging level is now `info`.
* Verbose (i.e. `debug`) logging now includes the scores and paths for all
  upgrade candidates, from highest score (biggest loser) to lowest score
  (winner) last.
* --verbose logging level is now properly propagated to the log file.

1.5.1 (2013-09-08)
==================
* Fix test for 32 bit systems.

1.5 (2013-09-06)
================
* `system-image-cli --dry-run -c CHANNEL` no longer produces a traceback.
  You get "Already up-to-date", but use `-v` for more info.
* `system-image-cli --info` prints additional information:

  - last update time (i.e. the mtime of `/etc/system-image/channel.ini`
    falling back to the mtime of `/etc/ubuntu-build`).
  - version details for ubuntu, the device, and any custom version, if the
    `/etc/system-image/channel.ini` file contains these details.
* D-Bus API changes:

  - `UpdateAvailableStatus` field `last_update_date` has changed its format.
    It's still ISO 8601, but with a space instead of a 'T' separating the
    date from the time.
  - New `Info()` method returns data similar to `system-image-cli --info`.
    (LP: #1215959)
* Support the new channels.json file format with backward compatibility (for
  now) with the old format.  (LP: #1221841)

1.4 (2013-08-30)
================
* Update the `system-image-cli` manpage with the previously added switches.
* Support the new version number regime, which uses sequential version
  numbers starting at 1.  (LP: #1218612)

1.3 (2013-08-29)
================
* Fixed bug in resolving channels with dashes in their name.  (LP: #1217932)
* Add `system-image-cli --filter` option to allow for forcing full or delta
  updates.  (LP: #1208909)
* Command line option changes for `system-image-cli`:

  - Added -i/--info to get current build number, device, and channel.
  - Re-purposed -c/--channel to allow for overriding the channel name.
  - Re-purposed -b/--build to allow for overriding the build number.
  - Added -d/--device to allow for overriding the device name.
* State persistence is disabled for now.  (LP: #1218357)
* LP: #1192575 supported by `system-image-cli -c CHANNEL --filter=full`.

1.2 (2013-08-26)
================
* Add support for an optional /etc/system-image/channel.ini file, and
  shuffle some of the other /etc/system-image/client.ini file options.
  (LP: #1214009)
* Set "auto_download" mode to '0' by default (manual download).  This
  prevents inadvertent downloading over 3G until we integrate the download
  service.
* Add -n/--dry-run option to system-image-cli.  (LP: #1212713)

1.1 (2013-08-23)
================
* Use nose as the test runner.  This allows us to pre-initialize the logging
  to prevent unwanted output.  (LP: #1207117)
* Update the DBus API to the new specification.  (LP: #1212781)

1.0 (2013-08-01)
================
* Add manpage for system-image-dbus.  (LP: #1206617)
* Fix the dbus tests so they can all be run.  (LP: #1205163)
* system-image-dbus must also create the tempdir if it doesn't yet exist,
  just like -cli does.  (LP: #1206515)
* Fix upgrade path scoring and winner resolution when two candidate upgrade
  paths have the same score.  (LP: #1206866)
* Make system-image-cli and system-image-dbus more amenable to being run in
  "demo" mode out of a virtualenv.

  - Update setup.py with run-time dependencies.
  - Add a tools/demo.ini sample configuration file which allows the full
    upgrade procedure to be executed (reboots are a no-op, and the device is
    fixed to 'grouper').
  - Give system-image-cli a --dbus option so that it will perform the update
    over dbus rather than against the internal API.
* Major changes to the way logging is done.

  - The config file now has [system]logfile and [system]loglevel variables
    which control where and how logging goes under normal operation.
  - A single -v on the command line mirrors the log file output to the
    console, and sets both log levels to INFO level.  Two -v on the command
    line also mirrors the output, but sets the log levels to DEBUG.
* Added tools/sd.py which serves as a DBus client for testing and debugging
  purposes.
* Print the channel and device in the log file.  (LP: #1206898)
* Added some useful tools for debugging in a live environment.
  (LP: #1207391)

0.9.2 (2013-07-30)
==================
* system-image-dbus must run on the system bus instead of the session bus.
  Fix contributed by Loïc Minier.  (LP: #1206558)
* Add systemimage/data/com.canonical.SystemImage.conf which will get
  installed into /etc/dbus-1/system.d/ for dbus permissions.  (LP: #1206523)
* Use full path to executable in dbus service file.
* system-image-dbus executable now resides in /usr/sbin
* client.ini: Bump dbus timeout to 10 minutes.

0.9.1 (2013-07-26)
==================
* Further DBus API refinements to better support U/I development.

  - Add a .Exit() method.
  - Calling .Cancel() immediately issues a Canceled signal.
  - .GetUpdate() and .Reboot() no longer issue Canceled signals, but they
    no-op if a .Cancel() has been previously called.

0.9 (2013-07-25)
================
* Rename DBus method IsUpdateAvailable() to CheckForUpdate() and make it
  asynchronous.  Rename the UpdatePending() signal to
  UpdateAvailableStatus() and have it contain a boolean flag which indicates
  whether an update is available or not.  Make GetUpdate() actually
  asynchronous.  (LP: #1204976)
* Add DBus method mocks (LP: #1204528)

0.8 (2013-07-24)
================
* Calculate the device name by querying the system, rather than defining it
  as a key in the client.ini file.  (LP: #1204090)
* Add -c/--channel option to system-image-cli; this prints the
  channel/device name being used.

0.7 (2013-07-22)
================
* No reboot should be issued if there is no update available.
  (LP: #1202915)
* DBus API implemented.  (LP: #1192585)
* system-image-cli -v displays the files being downloaded, but not their
  progress (use -vv for that).  (LP: #1202283)

0.6 (2013-07-15)
================
* Fix Image hashes to fit in 32 bits, fixing FTBFS on i386 and for better
  compatibility with actual phone hardware.  (LP: #1200981)

0.5 (2013-07-12)
================
* Add manpages for system-image-cli and client.ini.  (LP: #1195497)

0.4 (2013-07-10)
================
* Fix reboot bug.  (LP: #1199981)
* Fix ubuntu_command file ordering.  (LP: #1199986)
* Ensure the /var/lib target directory for cached .tar.xz keyring files
  exists before copying them.  (LP: #1199982)
0.3 (2013-07-09)
================
* Update the client.ini file to reflect the actual update service (which is
  now deployed) and the system partitioning on the actual device.
* By default, search for client.ini in /etc/system-image/client.ini.  Also,
  create the /tmp and /var/lib directories if possible and they don't yet
  exist.  (LP: #1199177)
* Fix timeout error when downloading more files than the number of threads.
  (LP: #1199361)
* Preserve all descriptions in all languages from the index.json file.
* State machine changes:

  - Allow the passing of a callback which is used in the big download call.
    This will be used to implement a cancel operation.
  - Add .run_thru() and .run_until() methods used for better step control.
  - Split the "prepare command file" and reboot steps.
* The ubuntu_command file written to the recovery partition now supports the
  currently specified format.  (LP: #1199498)

0.2 (2013-06-27)
================
* Fix distutils packaging bugs exposed by Debian packaging work.
* Rename 'resolver' package to 'systemimage' and script to
  /usr/bin/system-image-cli (LP: #1193142)

0.1 (2013-06-27)
================
* Initial release.

./ini-manpage.rst

================
system-image.ini
================

------------------------------------------------
Ubuntu System Image Upgrader configuration files
------------------------------------------------

:Author: Barry Warsaw
:Date: 2016-01-15
:Copyright: 2013-2016 Canonical Ltd.
:Version: 3.0
:Manual section: 5

DESCRIPTION
===========

``/etc/system-image/config.d`` is the default configuration directory for
the system image upgrader.  It contains ini-style configuration files with
sections that define the service to connect to, as well as local system
resources.  Generally, the options never need to be changed.

The system image upgrader will read all files in this directory that start
with a numeric prefix, followed by an underscore, and then any alphanumeric
suffix, ending in ``.ini``.  E.g. ``07_myconfig.ini``.  The files are read
in sorted numerical order, from lowest prefix number to highest, with later
configuration files able to override any variable in any section.

SYNTAX
======

Sections in the ``.ini`` files are delimited by square brackets, e.g.
``[service]``.  Variables inside a section separate the variable name and
value by a colon.  Blank lines and lines that start with a ``#`` are
ignored.

THE SERVICE SECTION
===================

The section that starts with ``[service]`` defines the remote host name and
ports that provide upgrade images.  Because some files are downloaded over
HTTP and others over HTTPS, both ports must be defined.  This section
contains the following variables:

base
    The host name to connect to containing the upgrade.  This host must
    provide both HTTP and HTTPS services.

http_port
    The port for HTTP connections.  This is an integer, or the string
    ``disabled`` if you wish to disable all HTTP connections and use only
    HTTPS.  It is an error to disable both the HTTP and HTTPS services.

https_port
    The port for HTTPS connections.  This is an integer, or the string
    ``disabled`` if you wish to disable all HTTPS connections and use only
    HTTP.  It is an error to disable both the HTTP and HTTPS services.

channel
    The upgrade channel.

device
    The device name.  If missing or unset (i.e. the empty string), then the
    device is calculated using the ``[hooks]device`` callback.

build_number
    The system's current build number.
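As an illustration of this syntax and of the override behavior described
above, a small local override file might look like the following; the file
name and every value here are hypothetical::

    # /etc/system-image/config.d/99_local.ini (hypothetical)
    [service]
    base: system-image.example.com
    http_port: disabled
    https_port: 443

    [system]
    loglevel: debug:info
    timeout: 2m

Because this file sorts after the lower-numbered files shipped with the
system, its variables win.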
THE SYSTEM SECTION
==================

The section that starts with ``[system]`` defines attributes of the local
system to be upgraded.  Every system has an upgrade *channel* and a *device*
name.  The channel roughly indicates the frequency with which the server
will provide upgrades.  The system is queried for the device.  The channel
and device combine to define a URL path on the server to look for upgrades
appropriate to the given device on the given schedule.  The specification
for these paths is given in `[1]`_.  This section contains the following
variables:

tempdir
    The base temporary directory on the local file system.  When any of the
    system-image processes run, a secure subdirectory inside `tempdir` will
    be created for the duration of the process.

logfile
    The file where logging output will be sent.

loglevel
    The level at which logging information will be emitted.  There are two
    loggers which both log messages to `logfile`.  "systemimage" is the main
    logger, but additional logging can go to the "systemimage.dbus" logger.
    The latter is used in debugging situations to get more information about
    the D-Bus service.

    `loglevel` can be a single case-insensitive string corresponding to the
    following `log levels`_ from least verbose to most verbose: ``DEBUG``,
    ``INFO``, ``WARNING``, ``ERROR``, ``CRITICAL``.  In this case, the
    "systemimage" logger will be placed at this level, while the
    "systemimage.dbus" logger will be placed at the ``ERROR`` level.

    `loglevel` can also describe two levels, separated by a colon.  In this
    case, the main logger is placed at the first level, while the D-Bus
    logger is placed at the second level.  For example: ``debug:info``.

timeout
    The maximum allowed time interval for downloading the individual files.
    The actual time to complete the downloading of all required files may be
    longer than this timeout.  This variable takes a numeric value followed
    by an optional interval marker.  Supported markers are ``w`` for weeks,
    ``d`` for days, ``h`` for hours, ``m`` for minutes, and ``s`` for
    seconds.  When no marker is given, the default is seconds.  Thus a value
    of ``1m`` indicates a timeout of one minute, while a value of ``15``
    indicates a timeout of 15 seconds.  A negative or zero value indicates
    that there is no timeout.
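The interval notation accepted by ``timeout`` (and by ``[dbus]lifetime``,
described below) can be summarized with a short sketch.  This only
illustrates the documented semantics; it is not necessarily how the
project's own helpers implement the parsing::

    import re

    _UNITS = {'w': 60 * 60 * 24 * 7, 'd': 60 * 60 * 24,
              'h': 60 * 60, 'm': 60, 's': 1}

    def as_seconds(value):
        """Convert a string like '1m' or '15' into a number of seconds."""
        mo = re.match(r'^\s*(-?\d+)\s*([wdhms]?)\s*$', value)
        if mo is None:
            raise ValueError(value)
        seconds = int(mo.group(1)) * _UNITS.get(mo.group(2), 1)
        # A negative or zero value means "no timeout".
        return None if seconds <= 0 else seconds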
THE GPG SECTION
===============

The section that starts with ``[gpg]`` defines paths on the local file
system used to cache GPG keyrings in compressed tar format.  The
specification for the contents of these files is given in `[2]`_.  This
section contains the following variables:

archive_master
    The location on the local file system for the archive master keyring.
    This key will never expire and never changes.

image_master
    The location on the local file system for the image master keyring.
    This key will never expire and will change only rarely, if ever.

image_signing
    The location on the local file system for the image signing keyring.
    This key expires after two years, and is updated regularly.

device_signing
    The location on the local file system for the optional device signing
    keyring.  If present, this key expires after one month and is updated
    regularly.

THE UPDATER SECTION
===================

The section that starts with ``[updater]`` defines directories where upgrade
files will be placed for recovery reboot to apply.  This section contains
the following variables:

cache_partition
    The directory bind-mounted read-write from the Android side into the
    Ubuntu side, containing the bulk of the upgrade files.

data_partition
    The directory bind-mounted read-only from the Ubuntu side into the
    Android side, generally containing only the temporary GPG blacklist, if
    present.

THE HOOKS SECTION
=================

The section that starts with ``[hooks]`` provides minimal capability to
customize the upgrader operation by selecting different upgrade path winner
scoring algorithms and different reboot commands.  This section contains the
following variables (see the sketch after this list for how such values are
shaped):

device
    The Python import path to the class implementing the device query
    command.

scorer
    The Python import path to the class implementing the upgrade scoring
    algorithm.

apply
    The Python import path to the class that implements the mechanism for
    applying the update.  This often reboots the device.

    *New in system-image 3.0: ``reboot`` was renamed to ``apply``*
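Each of these variables names a class by its dotted Python import path, as
in the sample ``tools/demo.ini`` above.  As a sketch (assuming nothing about
the project's actual loader), such a value can be resolved like this::

    from importlib import import_module

    def resolve_hook(dotted_path):
        # Split e.g. 'systemimage.scores.WeightedScorer' into its
        # module path and class name, import the module, and fetch
        # the class from it.
        module_path, _, class_name = dotted_path.rpartition('.')
        return getattr(import_module(module_path), class_name)

    scorer_class = resolve_hook('systemimage.scores.WeightedScorer')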
THE DBUS SECTION
================

The section that starts with ``[dbus]`` controls operation of the
``system-image-dbus(8)`` program.  This section contains the following
variables:

lifetime
    The total lifetime of the DBus server.  After this amount of time, it
    will automatically exit.  The format is the same as the
    ``[system]timeout`` variable.

SEE ALSO
========

system-image-cli(1)

[1]: https://wiki.ubuntu.com/ImageBasedUpgrades/Server
[2]: https://wiki.ubuntu.com/ImageBasedUpgrades/GPG

.. _[1]: https://wiki.ubuntu.com/ImageBasedUpgrades/Server
.. _[2]: https://wiki.ubuntu.com/ImageBasedUpgrades/GPG
.. _`log levels`: http://docs.python.org/3/howto/logging.html#when-to-use-logging

./MANIFEST.in

include *.py MANIFEST.in
global-include *.txt *.rst *.json *.ini *.gpg *.pem *.service *.in *.conf *.cfg *.sh
prune build
prune dist
prune .tox
prune .bzr
exclude .bzrignore

./cli-manpage.rst

================
system-image-cli
================

------------------------------------------------
Ubuntu System Image Upgrader command line script
------------------------------------------------

:Author: Barry Warsaw
:Date: 2016-02-25
:Copyright: 2013-2016 Canonical Ltd.
:Version: 3.1
:Manual section: 1

SYNOPSIS
========

system-image-cli [options]

DESCRIPTION
===========

This script upgrades the system to the latest available image (i.e. build
number).  With no options, this script checks the latest version available
on the server and calculates an upgrade path to that version from the
system's current version.  If an upgrade path is found, the relevant files
are downloaded and the upgrade is applied by rebooting the system into
recovery mode.

OPTIONS
=======

-h, --help
    Show the program's help message and exit.

--version
    Show the program's version number and exit.

-b NUMBER, --build NUMBER
    Override the device's current build number just this once.  ``NUMBER``
    must be an integer.  Use ``-b 0`` to force an upgrade.

-c CHANNEL, --channel CHANNEL
    Override the device's upgrade channel just this once.  Use in
    combination with ``--build 0`` to switch channels.

--switch CHANNEL
    This is a convenience alias for the combination of ``-b 0 -c CHANNEL``.
    It is an easier way to switch channels.  If ``--switch`` is given with
    ``-b`` and/or ``-c``, the latter take precedence.

--list-channels
    Lists the available channels, including aliases, and exits.

-d DEVICE, --device DEVICE
    Override the device name just this once.

-f FILTER, --filter FILTER
    Filter the candidate upgrade paths to only contain full or delta
    updates.  ``FILTER`` must be either ``full`` or ``delta``.

-m IMAGENO, --maximage IMAGENO
    Cap a winning upgrade path to image number ``IMAGENO``.  All images with
    a version number greater than ``IMAGENO`` will be ignored.  For example,
    if the winning upgrade path is ``200:204:304`` and you give ``-m 205``,
    the upgrade will not include image number 304.  Note that this capping
    happens *after* the winning upgrade path is selected.

-i, --info
    Show some information about the current device, including the current
    build number, device name, and channel, then exit.

-n, --dry-run
    Calculate and print the upgrade path, but do not download or apply it.

    *New in system-image 2.5.1: output displays the target phase percentage*

-p VALUE, --percentage VALUE
    For testing purposes, force a device specific phase percentage.  The
    value must be an integer between 0 and 100.

    *New in system-image 2.5.1*

-g, --no-apply
    Downloads all files and prepares for, but does not actually apply the
    update.  On devices which require a reboot to apply the update, no
    reboot is performed.

-v, --verbose
    Increase the logging verbosity.  With one ``-v``, logging goes to the
    console in addition to the log file, and logging at ``INFO`` level is
    enabled.  With two ``-v`` (or ``-vv``), logging both to the console and
    to the log file are output at ``DEBUG`` level.

-C DIR, --config DIR
    Use the given configuration directory, otherwise use the system default.
    The program will read all the files in this directory that begin with a
    number, followed by an underscore, and ending in ``.ini`` (e.g.
    ``03_myconfig.ini``).  The files are read in sorted numerical order from
    lowest prefix number to highest, with later configuration files able to
    override any variable in any section.

--factory-reset
    Wipes the data partition and issues a reboot into recovery.  This
    effectively performs a device factory reset.

--show-settings
    Show all the key/value pairs in the settings database.

--progress [dots|logfile|json]
    Report progress in various ways.  `dots` prints some dots every once in
    a while to stderr; this mimics what was available in system-image 2.5.
    `logfile` prints messages at debug level to the system-image log file,
    and is also available in 2.5 (via the `--verbose` flag).  `json` prints
    JSON records to stdout.

    *New in system-image 3.0*

--get KEY
    Print the value for the given key in the settings database.  If the key
    is missing, a default value is printed.  May be given multiple times.

--set KEY=VALUE
    Set the value for the given key in the settings database.  If the key is
    missing it is added.  May be given multiple times.

--del KEY
    Deletes the given key from the settings database.  If the key does not
    exist, this is a no-op.  May be given multiple times.

--override-gsm
    Allows an update to proceed while the device is on GSM and currently set
    to only use wifi.  This is only effective when using
    ``ubuntu-download-manager``.

    **New in system-image 3.1.**
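EXAMPLES
========

A few illustrative invocations using the options above; the channel name
here is hypothetical::

    # Check what an upgrade would do, without downloading anything.
    $ system-image-cli --dry-run

    # Switch the device to a different channel (equivalent to -b 0 -c ...).
    $ system-image-cli --switch some/other-channel

    # Upgrade, emitting JSON progress records on stdout.
    $ system-image-cli --progress json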
FILES
=====

/etc/system-image/[0-9]+*.ini
    Default configuration files.

SEE ALSO
========

system-image.ini(5), system-image-dbus(8)

./setup.cfg

[nosetests]
verbosity = 2
with-systemimageplugin = 1
logging-filter = systemimage

[egg_info]
tag_build =
tag_svn_revision = 0
tag_date = 0

./coverage-curl.ini

[run]
branch = true
parallel = true
omit =
    setup*
    systemimage/data/*
    systemimage/docs/*
    systemimage/testing/*
    systemimage/tests/*
    systemimage/udm.py
    /usr/lib/*
    .tox/coverage-curl/lib/python*/distutils/*
    .tox/coverage-curl/lib/python*/site-packages/pkg_resources*
    .tox/coverage-udm/lib/python*/distutils/*
    .tox/coverage-udm/lib/python*/site-packages/pkg_resources*

[paths]
source =
    systemimage
    .tox/coverage-curl/lib/python*/site-packages/systemimage
    .tox/coverage-udm/lib/python*/site-packages/systemimage

[report]
exclude_lines =
    pragma: no cover
    pragma: no curl

./setup.py

# Copyright (C) 2013-2016 Canonical Ltd.
# Author: Barry Warsaw

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from setuptools import find_packages, setup


with open('systemimage/version.txt') as fp:
    __version__ = fp.read().strip()


setup(
    name='system-image',
    version=__version__,
    description='Ubuntu System Image Based Upgrades',
    author='Barry Warsaw',
    author_email='barry@ubuntu.com',
    license='GNU GPLv3',
    packages=find_packages(),
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'system-image-cli = systemimage.main:main',
            'system-image-dbus = systemimage.service:main',
            ],
        },
    install_requires=[
        'python-gnupg',
        ],
    tests_require=[
        'nose2',
        ],
    test_suite='nose2.collector.collector',
    )

./README.rst

======================
 System Image Updater
======================

This repository contains the client side tools for system image based
upgrades.

For more details, see: https://wiki.ubuntu.com/ImageBasedUpgrades

Testing
=======

To test locally run::

    $ tox

This will run the test suite against all supported Python 3 versions.

You can also run a subset of tests by using a regular expression pattern.
First you need to set up the local virtual environment.  Running `tox` as
above does this as a side-effect, but you can also set up (or update [1]_)
the environment without running the test suite::

    $ tox --notest -r

Once the environment is set up, you can run individual tests like so::

    $ .tox/py34/bin/python -m nose2 -P <pattern>

Multiple `-P` options can be given.  The pattern matches the full test
"name", so you can use a file name (without the `.py` extension), a test
class, a test method, or various other combinations here.  E.g.::

    $ .tox/py34/bin/python -m nose2 -P test_add_existing_key
Other options are available to help with debugging and verbosity.  Try this
to get full help::

    $ .tox/py34/bin/python -m nose2 --help

Project Information
===================

Launchpad project page: https://launchpad.net/ubuntu-system-image

Filing Bugs
===========

File bugs at https://bugs.launchpad.net/ubuntu-system-image

This is preferred rather than using the Ubuntu source package, but if you do
file it against the source package, please also add the project as a
bugtask.  Also, please tag the bug with the `client` tag (since the project
page above also refers to the server and other components of image based
system upgrades).

Author
======

You can contact the primary author/maintainer at

| Barry Warsaw
| barry@ubuntu.com
| barry@canonical.com
| IRC: barry on freenode (#ubuntu-phone)

.. _[1]: Sometimes you need to update the environment, if for example you
   make a change to the entry points in main.py or service.py.

./dbus-manpage.rst

=================
system-image-dbus
=================

-----------------------------------------
Ubuntu System Image Upgrader DBus service
-----------------------------------------

:Author: Barry Warsaw
:Date: 2016-02-25
:Copyright: 2013-2016 Canonical Ltd.
:Version: 3.1
:Manual section: 8

SYNOPSIS
========

system-image-dbus [options]

DESCRIPTION
===========

The DBus service published by this script upgrades the system to the latest
available image (i.e. build number).  With no options, this starts up the
``com.canonical.SystemImage`` service.

OPTIONS
=======

-h, --help
    Show the program's help message and exit.

--version
    Show the program's version number and exit.

-v, --verbose
    Increase the logging verbosity.  With one ``-v``, logging goes to the
    console in addition to the log file, and logging at ``INFO`` level is
    enabled.  With two ``-v`` (or ``-vv``), logging both to the console and
    to the log file are output at ``DEBUG`` level.

-C DIR, --config DIR
    Use the given configuration directory, otherwise use the system default.
    The program will read all the files in this directory that begin with a
    number, followed by an underscore, and ending in ``.ini`` (e.g.
    ``03_myconfig.ini``).  The files are read in sorted numerical order from
    lowest prefix number to highest, with later configuration files able to
    override any variable in any section.

D-BUS API
=========

This process exports a D-Bus API on the bus name
``com.canonical.SystemImage``, object path ``/Service``, and interface
``com.canonical.SystemImage``.  The D-Bus service process is normally
started by D-Bus activation.

The API specification follows.  In all cases, where strings are described,
they are UTF-8 encoded, and in English where appropriate.  All datetimes are
encoded as UTF-8 strings in the UTC timezone using the *combined* format
(i.e. 'T' separating the date and time portions), with 1 second resolution.

The calls may be synchronous or asynchronous.  In the former case, the
return values are described.  In the latter case, a description of the
possible signals a client may receive is given; see the detailed description
of the signals for details of their payload.
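For orientation, a Python client using the standard ``python-dbus`` bindings
would obtain a proxy for this service roughly as follows.  This is an
illustrative sketch, not part of the published API::

    import dbus

    # The service lives on the system bus, per the bus name, object
    # path, and interface documented above.
    bus = dbus.SystemBus()
    proxy = bus.get_object('com.canonical.SystemImage', '/Service')
    service = dbus.Interface(
        proxy, dbus_interface='com.canonical.SystemImage')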
Methods
-------

``CheckForUpdate()``
    This is an **asynchronous** call instructing the client to check whether
    an update is available.  If a check is already in progress, it
    continues.  If the client is in *auto-download* mode (see below), then
    this call will automatically begin to download the update if one is
    available, otherwise the download must be explicitly initiated by a
    ``DownloadUpdate()`` call.

    It is possible for an update to only occur if certain criteria are met,
    e.g. only if the device is on wifi.  ``CheckForUpdate()`` never resumes
    a paused download.

    In all cases, an ``UpdateAvailableStatus`` signal is emitted containing
    the results of the check.  If the device is in auto-download mode, an
    ``UpdateProgress`` signal is sent as soon as the download is started.

``DownloadUpdate()``
    This is an **asynchronous** call used to begin the downloading of an
    available update, and it is a no-op if there is no update to download, a
    download is already in progress, ``CheckForUpdate()`` was not called
    first, or the update status is in an error condition.  If a previous
    download was paused, ``DownloadUpdate()`` resumes the download.

    An ``UpdateProgress()`` signal is sent as soon as the download begins.
    Other status signals as described below will be sent when the download
    terminates.

``ApplyUpdate()``
    This is an **asynchronous** call used to apply a previously downloaded
    update.  After the update has been applied, an ``Applied`` signal is
    sent.  Some devices require a reboot in order to apply the update, and
    such devices may also issue a ``Rebooting`` signal.  However, on devices
    which require a reboot, the timing and emission of both the ``Applied``
    and ``Rebooting`` signals are in a race condition with system shutdown,
    and may not occur.

``CancelUpdate()``
    This is a **synchronous** call to cancel any update check or download in
    progress.  The empty string is returned unless an error occurred, in
    which case the error message is returned.

``PauseDownload()``
    This is a **synchronous** method to pause the current download.  The
    empty string is returned unless an error occurred, in which case the
    error message is returned.

``Information()``
    This is a **synchronous** call which returns an extensible mapping of
    UTF-8 keys to UTF-8 values.  The following keys are currently defined:

    * *current_build_number* - The current build number as an integer.
    * *target_build_number* - If an update is known to be available, this
      will be the build number that an update will leave the device at.  If
      no `CheckForUpdate()` has been previously performed, then the
      *target_build_number* will be "-1".  If a previous check has been
      performed, but no update is available (i.e., the device is already at
      the latest version), then *target_build_number* will be the same as
      *current_build_number*.
    * *device_name* - The name of the device type.
    * *channel_name* - The channel the device is currently on.
    * *last_update_date* - The last time this device was updated as a
      datetime, e.g. "YYYY-MM-DDTHH:MM:SS"
    * *version_detail* - A string containing a comma-separated list of
      key-value pairs providing additional component version details, e.g.
      "ubuntu=123,mako=456,custom=789".
    * *target_version_detail* - Like *version_detail* but contains the
      information from the server.  If an update is known to be available,
      this will be taken from ``index.json`` file's image specification, for
      the image that the upgrade will leave the device at.  If no update is
      available this will be identical to *version_detail*.  If no
      `CheckForUpdate()` has been previously performed, then the
      *target_version_detail* will be the empty string.
    * *last_check_date* - The last time a ``CheckForUpdate()`` call was
      performed.

    *New in system-image 2.3*

    *New in system-image 2.5: target_build_number was added.*

    *New in system-image 3.0: target_version_detail was added.*

``FactoryReset()``
    This is a **synchronous** call which wipes the data partition and issues
    a reboot to recovery.  A ``Rebooting`` signal may be sent, depending on
    timing.

    *New in system-image 2.3*.
``FactoryReset()`` This is a **synchronous** call which wipes the data partition and issues a reboot to recovery. A ``Rebooting`` signal may be sent, depending on timing. *New in system-image 2.3*. ``ProductionReset()`` This is a **synchronous** call which wipes the data partition, sets a flag for factory wipe (used in production), and issues a reboot to recovery. A ``Rebooting`` signal may be sent, depending on timing. *New in system-image 3.0*. ``SetSetting(key, value)`` This is a **synchronous** call to write or update a setting. ``key`` and ``value`` are strings. While any key/value pair may be set, some keys have predefined semantics and values. See below for details. If the new value is different from the old value, or if the key was not previously set, a ``SettingChanged`` signal is sent. For keys with predefined semantics, any invalid value is ignored (i.e. *not* set or stored). Keys with underscore prefixes are reserved for user-defined values. ``GetSetting(key)`` This is a **synchronous** call to read and return a setting. If ``key`` has not been previously set, the empty string is returned. Note that some of the pre-defined keys have default settings. ``ForceAllowGSMDownload()`` This is a **synchronous** call to force the use of the GSM network for an in-progress wifi-only update stalled while the device is on GSM. This is only effective when using ``ubuntu-download-manager``. **New in system-image 3.1.** ``Exit()`` This is a **synchronous** call which causes the D-Bus service process to exit immediately. There is no return value. If ``Exit()`` is never called, the service will still exit normally after some configurable amount of time. D-Bus activation will restart it. Signals ------- ``UpdateAvailableStatus(is_available, downloading, available_version, update_size, last_update_date, error_reason)`` Sent in response to a ``CheckForUpdate()`` call, this signal provides information about the state of the update. The signal includes these pieces of information: * **is_available** - A boolean flag which indicates whether an update is available or not. This will be false if the device's build number is equal to or greater than any candidate build on the server (IOW, there is no candidate available). This flag will be true when there is an update available. * **downloading** - A boolean flag indicating whether a download is in progress. This doesn't include any preliminary downloads needed to determine whether a candidate is available or not (e.g. keyrings, blacklists, channels.json, and index.json files). This flag will be false if a download is paused. * **available_version** - A string specifying the update target candidate version. * **update_size** - An integer providing the total size in bytes of an available upgrade. This does not include any preliminary files needed to determine whether an update is available or not. * **last_update_date** - The `ISO 8601`_ format UTC date (to the second) that the last update was applied to this device. This will be the empty string if no update has been previously applied. * **error_reason** - A string indicating why the download did not start. Only useful if the second argument (downloading) is false; otherwise ignore this value. Depending on the state of the system, some of the arguments of this signal may be ignored. Some example signal values include: * ``UpdateAvailableStatus(true, true, build_number, size, "YYYY-MM-DDTHH:MM:SS", descriptions, "")`` - This means that an update is available and is currently downloading.
The build number of the candidate update is given, as is its total size in bytes, and the descriptions of the updates in all available languages. * ``UpdateAvailableStatus(true, false, build_number, size, "YYYY-MM-DDTHH:MM:SS", descriptions, "paused")`` - This means that an update is available, but it is not yet downloading, possibly because the client is in manual-update mode, or because the download is currently paused. The reason is given in the last argument, and the build number, size, and descriptions are given as above. * ``UpdateAvailableStatus(false, ?, ?, ?, "YYYY-MM-DDTHH:MM:SS", ?, ?)`` - There is no update available. The ISO 8601 date of the last applied update is given, but all other arguments should be ignored. ``DownloadStarted()`` Sent when the download of the update files has started. **New in system-image 3.1.** ``UpdateProgress(percentage, eta)`` Sent periodically while a download is in progress. This signal is not sent when an upgrade is paused. * **percentage** - An integer between 0 and 100 indicating how much of the download (not including preliminary files) has been downloaded so far. This may be 0 if we do not yet know what percentage has been downloaded. * **eta** - The estimated time remaining to complete the download, in float seconds. This may be 0 if we don't have a reasonable estimate. ``UpdatePaused(percentage)`` Sent whenever a download is paused as detected via the download service. * **percentage** - An integer between 0 and 100 indicating how much of the download (not including preliminary files) has been downloaded so far. May be 0 if this information cannot be obtained. ``UpdateDownloaded()`` Sent when the currently in progress update has been completely and successfully downloaded. When this signal is received, it means that the device is ready to have the update applied via ``ApplyUpdate()``. ``UpdateFailed(consecutive_failure_count, last_reason)`` Sent when the update failed for any reason (including cancellation, but only if a download is in progress). The client will remain in the failure state until the next ``CheckForUpdate()`` call. * **consecutive_failure_count** - An integer specifying the number of times in a row that a ``CheckForUpdate()`` has resulted in an update failure. This increments until an update completes successfully (i.e. until the next ``UpdateDownloaded`` signal is issued). * **last_reason** - A string containing the reason why this update failed. ``Applied(status)`` Sent in response to an ``ApplyUpdate()`` call. See the timing caveats for that method. **New in system-image 3.0** * **status** - A boolean indicating whether an update has been applied or not. ``Rebooting(status)`` On devices which require a reboot in order to apply an update, this signal may be sent in response to an ``ApplyUpdate()`` call. See the timing caveats for that method. * **status** - A boolean indicating whether the device has initiated a reboot sequence or not. ``SettingChanged(key, value)`` Sent when a setting is changed. This signal is not sent if the new value is the same as the old value. Both the key and value are strings. * **key** - The key of the value that was changed. * **value** - The new value for the key. Additional API details ---------------------- The ``SetSetting()`` call takes a key string and a value string. The following keys are predefined. * *min_battery* - The minimum battery strength which will allow downloads to proceed. The value is the string representation of a number between 0 and 100 percent.
* *auto_download* - A tri-state value indicating whether downloads should normally proceed automatically if an update is available when a ``CheckForUpdate()`` was issued. The value is the string representation of one of the following integers: * *0* - Never download automatically; i.e. an explicit ``DownloadUpdate()`` call is required to start the download. * *1* - Only download automatically if the device is connected via wifi. *This is the default*. * *2* - Always download the update automatically. * *failures_before_warning* - Unused by the client, but stored here for use by the user interface.
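Continuing the illustrative client sketch from the Methods section above (``iface`` is that sketch's hypothetical proxy object; only the key name and values come from this manual), a client could switch the device to manual downloads like so::

    # '0' means never download automatically; an explicit
    # DownloadUpdate() call is then required for every update.
    iface.SetSetting('auto_download', '0')
    # GetSetting() returns the stored string value.
    assert iface.GetSetting('auto_download') == '0'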
FILES ===== /etc/system-image/[0-9]+*.ini Default configuration files. /etc/dbus-1/system.d/com.canonical.SystemImage.conf DBus service permissions file. /usr/share/dbus-1/system-services/com.canonical.SystemImage.service DBus service definition file. SEE ALSO ======== system-image.ini(5), system-image-cli(1) .. _`ISO 8601`: http://en.wikipedia.org/wiki/ISO_8601 ./systemimage/0000755000015600001650000000000012701500553013422 5ustar jenkinsjenkins./systemimage/index.py0000644000015600001650000000545312701500553015112 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Device/channel indexes.""" __all__ = [ 'Index', ] import json from datetime import datetime, timezone from systemimage.bag import Bag from systemimage.image import Image IN_FMT = '%a %b %d %H:%M:%S %Z %Y' OUT_FMT = '%a %b %d %H:%M:%S UTC %Y' class Index(Bag): @classmethod def from_json(cls, data): """Parse the JSON data and produce an index.""" mapping = json.loads(data) # Parse the global data, of which there is only the timestamp. Even # though the string will contain 'UTC' (which we assert is so since we # can only handle UTC timestamps), strptime() will return a naive # datetime. We'll turn it into an aware datetime in UTC, which is the # only thing that can possibly make sense. timestamp_str = mapping['global']['generated_at'] assert 'UTC' in timestamp_str.split(), 'timestamps must be UTC' naive_generated_at = datetime.strptime(timestamp_str, IN_FMT) generated_at = naive_generated_at.replace(tzinfo=timezone.utc) global_ = Bag(generated_at=generated_at) # Parse the images. images = [] for image_data in mapping['images']: # Descriptions can be any of: # # * description # * description-xx (e.g. description-en) # * description-xx_CC (e.g. description-en_US) # # We want to preserve the keys exactly as given, and because the # extended forms are not Python identifiers, we'll pull these out # into a separate, non-Bag dictionary. descriptions = {} # We're going to mutate the dictionary during iteration. for key in list(image_data): if key.startswith('description'): descriptions[key] = image_data.pop(key) files = image_data.pop('files', []) bundles = [Bag(**bundle_data) for bundle_data in files] image = Image(files=bundles, descriptions=descriptions, **image_data) images.append(image) return cls(global_=global_, images=images) ./systemimage/docs/0000755000015600001650000000000012701500553014352 5ustar jenkinsjenkins./systemimage/docs/__init__.py0000644000015600001650000000000012701500553016451 0ustar jenkinsjenkins./systemimage/docs/conf.py0000644000015600001650000001732512701500553015661 0ustar jenkinsjenkins# -*- coding: utf-8 -*- # # Image Update Resolver documentation build configuration file, created by # sphinx-quickstart on Tue Apr 23 11:56:20 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'readme' # General information about the project. project = u'System Image Update Client' copyright = u'2013-2016, Canonical Ltd.' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '3.1' # The full version, including alpha/beta/rc tags. release = '3.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting.
#modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'ImageUpdateResolverdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('readme', 'ImageUpdateResolver.tex', u'System Image Update Client Documentation', u'Barry Warsaw', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters.
#latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('readme', 'systemimage', u'Ubuntu System Image Update Client Documentation', [u'Barry Warsaw'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('readme', 'ImageUpdateResolver', u'System Image Update Client Documentation', u'Barry Warsaw', 'ImageUpdateResolver', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' ./systemimage/docs/readme.rst0000644000015600001650000002406512701500553016350 0ustar jenkinsjenkins===================== Resolution of updates ===================== This package implements a prototype resolver for determining how to update a device to the latest image. The `full specification`_ is available online. This package doesn't actually perform the updates, but it has several tools that serve as core pieces to the update story. In summary: * A site publishes a summary of the images that are available. * This tool downloads a description of what's available * This tool inspects the current device to determine what is installed * A resolver is executed which compares what's available to what's installed, and returns a set of images that should be applied to get the device to the latest image * Different criteria can be applied to resolution order (tentative). By default resolution is optimized for minimal download size * If not enough disk space is available, the resolver may spit out a partial update, i.e. getting the device closer, but requiring subsequent resolutions to complete the update * If no resolution is possible either because the device is in a funny state (e.g. too old, or apt-get'ted to an unknown state), or because of a lack of available disk space, an error is returned * The output of the resolver is a JSON file containing the results. Another tool reads this JSON file, performs the downloads, putting the files in the proper location and causing a reboot to perform the update. These tasks are out of scope for this tool. Full vs. partial updates ======================== As described in `more detail`_, full and partial images are available. E.g.
for the last 3 months of release:

========== ===================== ==================================
Release id Description           We release
========== ===================== ==================================
201303-0   First month release   full image
201303-1   security update       full image and delta from 201303-0
201303-2   security update       full image and delta from 201303-0
                                 and delta from 201303-1
201304-0   April monthly release full image and delta from 201303-2
201304-1   security update       full image and delta from 201304-0
201304-2   security update       full image and delta from 201304-0
                                 and delta from 201304-1
201304-3   security update       full image and delta from 201304-0
                                 and delta from 201304-2
201304-4   security update       full image and delta from 201304-0
                                 and delta from 201304-3
201304-5   security update       full image and delta from 201304-0
                                 and delta from 201304-4
201305-0   May monthly release   full image and delta from 201304-5
========== ===================== ==================================

Devices should never be more than one full and one delta from the latest version, but it might be necessary, or the system may elect, to update from a series of deltas, e.g. due to size considerations. Builds and images ================= There are two types of builds and associated images: automated monthly images, and manually triggered urgent security and/or bug fix releases. Monthly images contain: * A new full disk image * A partial disk image from the last full image Update images contain: * A new full disk image * A partial disk image from the last full monthly image * A partial disk image from the last update image after the last monthly Images are numbered YYYYMMXX where YYYY is the year, MM is the month, and XX is the build number. Discovery ========= First, the system needs to know about the available channels, and which channel it's interested in. This will usually be the *stable* channel, although some users will want the more bleeding-edge *daily* channel. Other channels may also be available. At the top of the server hierarchy are three files, and directories for each available channel. The files are: * ``phablet.pubkey.asc`` - This is the public key used to sign various other files in the hierarchy. In order to prevent man-in-the-middle attacks, the system must download this key over https. Eventually, this key will be installed in the initial device flash and not typically downloaded over the internet. * ``channels.json`` - This file contains a listing of all the available channels. Its contents are detailed below. * ``channels.json.asc`` - The detached signature of the ``channels.json`` file. Channels -------- The ``channels.json`` contains a listing of all available channels as keys in the top-level mapping. Each channel listing further has a mapping naming the available devices. Each device name is mapped to the path, rooted at the top of the hierarchy, which names the *index* file, in JSON format. This index file contains all the details for the updates which are available for that device, in that channel. The channel files are not expected to change very often, so they can be cached. If a channel/device is requested that is unknown, the top-level channels listing can be reacquired. Occasionally, on a schedule TBD, the cached channels listing can be refreshed.
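For illustration, a ``channels.json`` consistent with this description might look like the following; the channel names, device name, and paths are all invented::

    {
        "stable": {
            "nexus7": "/stable/nexus7/index.json"
        },
        "daily": {
            "nexus7": "/daily/nexus7/index.json"
        }
    }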
Configuration ------------- There is a configuration file for the resolver which is used to define static information about the upgrade process. This includes the base URL for contacting the update server, local file system cache directories, cache entry lifetimes, and the channel and device type for this system's upgrades. As an example::

    # Configuration file for specifying relatively static information about the
    # upgrade resolution process.
    [service]
    base: https://phablet.stgraber.org

    [cache]
    directory: /var/cache/resolver
    lifetime: 14d

    [upgrade]
    channel: stable
    device: nexus7

The device with the above configuration file will upgrade to the stable Nexus 7 image. Indexes ------- The channel/device index file is where all the available images for that combination are described. Only the images defined in this file are available for download for this device in this channel. The index file has three sections: *bundles*, *global*, and *images*. The *global* section currently contains just a UTC date string marking when the index file was generated, and the client updater doesn't really care about this value. The *images* section is a sequence describing every image file that is available for download. There are two types of images, *full* and *delta*. A full image is exactly what you'd expect: it contains the entire root filesystem (for the Ubuntu side) or Android image needed to bring the device up to the stated version. Full image items contain the following keys: * checksum - The SHA1 hash of the zip file * content - Either *android* or *ubuntu-rootfs* describing whether the image is for the Ubuntu or Android side * path - The URL to the zip file, relative to the server root * size - The size of the zip file in bytes * type - Whether the image is a *full* update or *delta* from some previous image * version - A version string, which is **not** guaranteed to be a number, but generally will be in the YYYYMMXX format In addition, *delta* images also have this key: * base - A version string in YYYYMMXX format naming the version from which this delta was generated The *bundles* section is a sequence of all supported image combinations for both the Ubuntu and Android sides. Each bundle item contains the following keys: * images - This should have both an *android* and an *ubuntu-rootfs* key, the values of which are version numbers for the supported bundle of images * version - A version string, guaranteed to be in the format YYYYMMXX where XX starts at 00 and is sortable.
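Putting these keys together, a skeletal ``index.json`` might look like the following sketch; every value here is invented for illustration, and a real file would list many more images::

    {
        "global": {"generated_at": "Mon Apr 29 18:45:27 UTC 2013"},
        "images": [
            {"type": "full", "content": "ubuntu-rootfs",
             "version": "20130400",
             "path": "/stable/nexus7/ubuntu-20130400.zip",
             "checksum": "<sha1 of the zip file>", "size": 123456789},
            {"type": "delta", "content": "ubuntu-rootfs",
             "version": "20130401", "base": "20130400",
             "path": "/stable/nexus7/ubuntu-20130401.delta.zip",
             "checksum": "<sha1 of the zip file>", "size": 2345678}
        ],
        "bundles": [
            {"version": "20130401",
             "images": {"ubuntu-rootfs": "20130401",
                        "android": "20130401"}}
        ]
    }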
Updates ------- These then are the steps to determine whether the device needs to be updated: * Download the ``index.json`` file for the channel/device and verify it * Sort the available *bundles* by version, taking the highest value as the latest bundle. The bundle versions are ignored after this. * Inspect the latest bundle to get the image versions for *ubuntu-rootfs* and *android*. * If the device's current *android* version matches the latest bundle's *android* version, there's nothing to do on the Android side * If the device's current *ubuntu-rootfs* version matches the latest bundle's *ubuntu-rootfs* version, there's nothing to do on the Ubuntu side * If either side's current image version is lower, the device needs updating If the device needs to be updated, then you have to figure out what it can be updated from. In the best case scenario, the device should be at most one full and one delta away from the latest. Here are the steps to determine what needs to be downloaded and applied. This assumes that there's plenty of disk space so multiple deltas are not necessary. * For each of *android* and *ubuntu-rootfs*, find all the deltas which match the version number in the latest bundle. There may be more than one, e.g. delta from the last monthly to this version, and delta from the last delta to this version. * Chase all the bases until you reach a YYYYMM00 version, which names the last monthly that the latest delta is based off of * Now you should have up to two chains of possible updates, running through the individual deltas, or from the latest delta to the latest monthly * Decide which chain you want :) The decision of which chain to use is based on several criteria. It could be that we'll optimize for fewest downloads, in which case we'll take the shortest chain. Maybe we'll optimize for total download size, in which case we'll add up all the image sizes and choose the chain with the smallest total size. There may be other criteria applied to the possible update chains to consider, such as if there's not enough space for either chain to be downloaded entirely.
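The base chasing above can be sketched in a few lines of Python. This is an illustration only, not the project's actual resolver (``systemimage/candidates.py`` and ``systemimage/scores.py`` in this tree implement the real algorithm); it assumes ``images_by_version`` maps version strings to image mappings with the keys from the Indexes section, and for brevity it keeps only one image per version::

    def delta_chain(images_by_version, version):
        # Walk backwards from the target version, following each delta's
        # 'base' key, until a full image terminates the chain.
        chain = []
        image = images_by_version[version]
        while image['type'] == 'delta':
            chain.insert(0, image)
            image = images_by_version[image['base']]
        # `image` is now the full (YYYYMM00 monthly) the deltas build on.
        chain.insert(0, image)
        return chain

    def smallest_chain(chains):
        # Optimizing for total download size: add up each chain's image
        # sizes and pick the cheapest chain.
        return min(chains, key=lambda chain: sum(i['size'] for i in chain))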
""" def __init__(self, callback=None): self._state = State() self._update = None self._callback = callback def __repr__(self): # pragma: no cover fmt = '' args = [id(self), id(self._state), 'None' if self._state.downloader is None else '0x{:x}'.format(id(self._state.downloader)) ] return fmt.format(*args) def cancel(self): self._state.downloader.cancel() def pause(self): self._state.downloader.pause() def resume(self): self._state.downloader.resume() def check_for_update(self): """Is there an update available for this machine? :return: Flag indicating whether an update is available or not. :rtype: bool """ if self._update is None: try: self._state.run_until('download_files') except Exception as error: # Rather than letting this percolate up, eventually reaching # the GLib main loop and thus triggering apport, Let's log the # error and set the relevant information in the class. log.exception('check_for_update failed') self._update = Update(error=str(error)) else: self._update = Update(self._state.winner) return self._update def download(self): """Download the available update.""" # We only want callback progress during the actual download. old_callbacks = self._state.downloader.callbacks[:] try: self._state.downloader.callbacks = [self._callback] self._state.run_until('apply') finally: self._state.downloader.callbacks = old_callbacks def apply(self): """Apply the update.""" # Transition through all remaining states. list(self._state) def factory_reset(self): factory_reset() def production_reset(self): production_reset() def allow_gsm(self): self._state.downloader.allow_gsm() # pragma: no curl ./systemimage/__init__.py0000644000015600001650000000000012701500553015521 0ustar jenkinsjenkins./systemimage/tests/0000755000015600001650000000000012701500553014564 5ustar jenkinsjenkins./systemimage/tests/test_candidates.py0000644000015600001650000003436012701500553020302 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Test the candidate upgrade path algorithm.""" __all__ = [ 'TestCandidateDownloads', 'TestCandidateFilters', 'TestCandidates', 'TestNewVersionRegime', ] import unittest from operator import attrgetter from systemimage.candidates import ( delta_filter, full_filter, get_candidates, iter_path) from systemimage.scores import WeightedScorer from systemimage.testing.helpers import ( configuration, descriptions, get_index) class TestCandidates(unittest.TestCase): def test_no_images(self): # If there are no images defined, there are no candidates. index = get_index('candidates.index_01.json') candidates = get_candidates(index, 1400) self.assertEqual(candidates, []) def test_only_higher_fulls(self): # All the full images have a minversion greater than our version, so # we cannot upgrade to any of them. 
./systemimage/__init__.py0000644000015600001650000000000012701500553015521 0ustar jenkinsjenkins./systemimage/tests/0000755000015600001650000000000012701500553014564 5ustar jenkinsjenkins./systemimage/tests/test_candidates.py0000644000015600001650000003436012701500553020302 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Test the candidate upgrade path algorithm.""" __all__ = [ 'TestCandidateDownloads', 'TestCandidateFilters', 'TestCandidates', 'TestNewVersionRegime', ] import unittest from operator import attrgetter from systemimage.candidates import ( delta_filter, full_filter, get_candidates, iter_path) from systemimage.scores import WeightedScorer from systemimage.testing.helpers import ( configuration, descriptions, get_index) class TestCandidates(unittest.TestCase): def test_no_images(self): # If there are no images defined, there are no candidates. index = get_index('candidates.index_01.json') candidates = get_candidates(index, 1400) self.assertEqual(candidates, []) def test_only_higher_fulls(self): # All the full images have a minversion greater than our version, so # we cannot upgrade to any of them. index = get_index('candidates.index_02.json') candidates = get_candidates(index, 100) self.assertEqual(candidates, []) def test_one_higher_full(self): # Our device is between the minversions of the two available fulls, so # the older one can be upgraded to. index = get_index('candidates.index_02.json') candidates = get_candidates(index, 800) # There is exactly one upgrade path. self.assertEqual(len(candidates), 1) path = candidates[0] # The path has exactly one image. self.assertEqual(len(path), 1) image = path[0] self.assertEqual(list(image.descriptions.values()), ['New full build 1']) def test_fulls_with_no_minversion(self): # Like the previous test, there are two full upgrades, but because # neither of them has a minversion, both are candidates. index = get_index('candidates.index_03.json') candidates = get_candidates(index, 400) self.assertEqual(len(candidates), 2) # Both candidate paths have exactly one image in them. We can't sort # these paths, so just test them both. path0, path1 = candidates self.assertEqual(len(path0), 1) self.assertEqual(len(path1), 1) # One path gets us to version 1300 and the other 1400. images = sorted([path0[0], path1[0]], key=attrgetter('version')) self.assertEqual(list(images[0].descriptions.values()), ['New full build 1']) self.assertEqual(list(images[1].descriptions.values()), ['New full build 2']) def test_no_deltas_based_on_us(self): # There are deltas in the test data, but no fulls. None of the deltas # have a base equal to our build number. index = get_index('candidates.index_04.json') candidates = get_candidates(index, 100) self.assertEqual(candidates, []) def test_one_delta_based_on_us(self): # There is one delta in the test data that is based on us. index = get_index('candidates.index_04.json') candidates = get_candidates(index, 500) self.assertEqual(len(candidates), 1) path = candidates[0] # The path has exactly one image. self.assertEqual(len(path), 1) image = path[0] self.assertEqual(list(image.descriptions.values()), ['Delta 2']) def test_two_deltas_based_on_us(self): # There are two deltas that are based on us, so both are candidates. # They get us to different final versions. index = get_index('candidates.index_05.json') candidates = get_candidates(index, 1100) self.assertEqual(len(candidates), 2) # Both candidate paths have exactly one image in them. We can't sort # these paths, so just test them both. path0, path1 = candidates self.assertEqual(len(path0), 1) self.assertEqual(len(path1), 1) # One path gets us to version 1300 and the other 1400. images = sorted([path0[0], path1[0]], key=attrgetter('version')) self.assertEqual(descriptions(images), ['Delta 2', 'Delta 1']) def test_one_path_with_full_and_deltas(self): # There's one path to upgrade from our version to the final version. # This one starts at a full and includes several deltas. index = get_index('candidates.index_06.json') candidates = get_candidates(index, 1000) self.assertEqual(len(candidates), 1) path = candidates[0] self.assertEqual(len(path), 3) self.assertEqual([image.version for image in path], [1300, 1301, 1302]) self.assertEqual(descriptions(path), ['Full 1', 'Delta 1', 'Delta 2']) def test_one_path_with_deltas(self): # Similar to above, except that because we're upgrading from the # version of the full, the path is only two images long, i.e. the # deltas.
index = get_index('candidates.index_06.json') candidates = get_candidates(index, 1300) self.assertEqual(len(candidates), 1) path = candidates[0] self.assertEqual(len(path), 2) self.assertEqual([image.version for image in path], [1301, 1302]) self.assertEqual(descriptions(path), ['Delta 1', 'Delta 2']) def test_forked_paths(self): # We have a fork in the road. There is a full update, but two deltas # with different versions point to the same base. This will give us # two upgrade paths, both of which include the full. index = get_index('candidates.index_07.json') candidates = get_candidates(index, 1200) self.assertEqual(len(candidates), 2) # We can sort the paths by length. paths = sorted(candidates, key=len) # The shortest path gets us to 1302 in two steps. self.assertEqual(len(paths[0]), 2) self.assertEqual([image.version for image in paths[0]], [1300, 1302]) descriptions = [] for image in paths[0]: # There's only one description per image so order doesn't matter. descriptions.extend(image.descriptions.values()) self.assertEqual(descriptions, ['Full 1', 'Delta 2']) # The longer path gets us to 1302 in three steps. self.assertEqual(len(paths[1]), 3) self.assertEqual([image.version for image in paths[1]], [1300, 1301, 1302]) descriptions = [] for image in paths[1]: # There's only one description per image so order doesn't matter. descriptions.extend(image.descriptions.values()) self.assertEqual(descriptions, ['Full 1', 'Delta 1', 'Delta 3']) class TestCandidateDownloads(unittest.TestCase): maxDiff = None @configuration def test_get_downloads(self): # Path B will win; it has one full and two deltas, none of which have # a bootme flag. Download all their files. index = get_index('candidates.index_08.json') candidates = get_candidates(index, 600) winner = WeightedScorer().choose(candidates, 'devel') descriptions = [] for image in winner: # There's only one description per image so order doesn't matter. descriptions.extend(image.descriptions.values()) self.assertEqual(descriptions, ['Full B', 'Delta B.1', 'Delta B.2']) downloads = list(iter_path(winner)) paths = set(filerec.path for (n, filerec) in downloads) self.assertEqual(paths, set([ '/3/4/5.txt', '/4/5/6.txt', '/5/6/7.txt', '/6/7/8.txt', '/7/8/9.txt', '/8/9/a.txt', '/9/a/b.txt', '/e/d/c.txt', '/f/e/d.txt', ])) signatures = set(filerec.signature for (n, filerec) in downloads) self.assertEqual(signatures, set([ '/3/4/5.txt.asc', '/4/5/6.txt.asc', '/5/6/7.txt.asc', '/6/7/8.txt.asc', '/7/8/9.txt.asc', '/8/9/a.txt.asc', '/9/a/b.txt.asc', '/e/d/c.txt.asc', '/f/e/d.txt.asc', ])) @configuration def test_get_downloads_with_bootme(self): # Path B will win; it has one full and two deltas. The first delta # has a bootme flag so the second delta's files are not downloaded. index = get_index('candidates.index_09.json') candidates = get_candidates(index, 600) winner = WeightedScorer().choose(candidates, 'devel') descriptions = [] for image in winner: # There's only one description per image so order doesn't matter. descriptions.extend(image.descriptions.values()) self.assertEqual(descriptions, ['Full B', 'Delta B.1', 'Delta B.2']) downloads = iter_path(winner) paths = set(filerec.path for (n, filerec) in downloads) self.assertEqual(paths, set([ '/3/4/5.txt', '/4/5/6.txt', '/5/6/7.txt', '/6/7/8.txt', '/7/8/9.txt', '/8/9/a.txt', ])) class TestCandidateFilters(unittest.TestCase): def test_filter_for_fulls(self): # Run a filter over the candidates, such that the only ones left are # those that contain only full upgrades. 
This can truncate any paths # that start with some fulls and then contain some deltas. index = get_index('candidates.index_08.json') candidates = get_candidates(index, 600) filtered = full_filter(candidates) # Since all images start with a full update, we're still left with # three candidates. self.assertEqual(len(filtered), 3) self.assertEqual([image.type for image in filtered[0]], ['full']) self.assertEqual([image.type for image in filtered[1]], ['full']) self.assertEqual([image.type for image in filtered[2]], ['full']) self.assertEqual(descriptions(filtered[0]), ['Full A']) self.assertEqual(descriptions(filtered[1]), ['Full B']) self.assertEqual(descriptions(filtered[2]), ['Full C']) def test_filter_for_fulls_one_candidate(self): # Filter for full updates, where the only candidate has one full image. index = get_index('candidates.index_10.json') candidates = get_candidates(index, 600) filtered = full_filter(candidates) self.assertEqual(filtered, candidates) def test_filter_for_fulls_with_just_delta_candidates(self): # A candidate path that contains only deltas will have no filtered # paths if all the images are delta updates. index = get_index('candidates.index_11.json') candidates = get_candidates(index, 100) self.assertEqual(len(candidates), 1) filtered = full_filter(candidates) self.assertEqual(len(filtered), 0) def test_filter_for_deltas(self): # Filter the candidates, where the only available path is a delta path. index = get_index('candidates.index_11.json') candidates = get_candidates(index, 100) self.assertEqual(len(candidates), 1) filtered = delta_filter(candidates) self.assertEqual(len(filtered), 1) self.assertEqual(candidates, filtered) def test_filter_for_deltas_none_available(self): # Run a filter over the candidates, such that the only ones left are # those that start with and contain only deltas. Since none of the # paths do so, there are no candidates left. index = get_index('candidates.index_08.json') candidates = get_candidates(index, 600) filtered = delta_filter(candidates) self.assertEqual(len(filtered), 0) def test_filter_for_deltas_one_candidate(self): # Filter for delta updates, but the only candidate is a full. index = get_index('candidates.index_10.json') candidates = get_candidates(index, 600) filtered = delta_filter(candidates) self.assertEqual(len(filtered), 0) def test_filter_for_multiple_deltas(self): # The candidate path has multiple deltas. All are preserved. index = get_index('candidates.index_12.json') candidates = get_candidates(index, 100) filtered = delta_filter(candidates) self.assertEqual(len(filtered), 1) path = filtered[0] self.assertEqual(len(path), 3) self.assertEqual(descriptions(path), ['Delta A', 'Delta B', 'Delta C']) class TestNewVersionRegime(unittest.TestCase): """LP: #1218612""" def test_candidates(self): # Path B will win; it has one full and two deltas. index = get_index('candidates.index_13.json') candidates = get_candidates(index, 0) self.assertEqual(len(candidates), 3) path0 = candidates[0] self.assertEqual(descriptions(path0), ['Full A', 'Delta A.1', 'Delta A.2']) path1 = candidates[1] self.assertEqual(descriptions(path1), ['Full B', 'Delta B.1', 'Delta B.2']) path2 = candidates[2] self.assertEqual(descriptions(path2), ['Full C', 'Delta C.1']) # The version numbers use the new regime.
self.assertEqual(path0[0].version, 300) self.assertEqual(path0[1].base, 300) self.assertEqual(path0[1].version, 301) self.assertEqual(path0[2].base, 301) self.assertEqual(path0[2].version, 304) winner = WeightedScorer().choose(candidates, 'devel') self.assertEqual(descriptions(winner), ['Full B', 'Delta B.1', 'Delta B.2']) self.assertEqual(winner[0].version, 200) self.assertEqual(winner[1].base, 200) self.assertEqual(winner[1].version, 201) self.assertEqual(winner[2].base, 201) self.assertEqual(winner[2].version, 304) ./systemimage/tests/test_api.py0000644000015600001650000002517312701500553016756 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Test the DBus API mediator.""" __all__ = [ 'TestAPI', 'TestAPIVersionDetail', ] import os import unittest from pathlib import Path from systemimage.api import Mediator from systemimage.config import config from systemimage.download import Canceled from systemimage.testing.helpers import ( ServerTestBase, chmod, configuration, copy, setup_index, sign, touch_build) from textwrap import dedent from unittest.mock import patch class TestAPI(ServerTestBase): INDEX_FILE = 'api.index_01.json' CHANNEL_FILE = 'api.channels_01.json' CHANNEL = 'stable' DEVICE = 'nexus7' @configuration def test_update_available(self): # Because our build number is lower than the latest available in the # index file, there is an update available. self._setup_server_keyrings() update = Mediator().check_for_update() self.assertTrue(update.is_available) @configuration def test_update_available_cached(self): # If we try to check twice on the same mediator object, the second one # will return the cached update. self._setup_server_keyrings() mediator = Mediator() update_1 = mediator.check_for_update() self.assertTrue(update_1.is_available) update_2 = mediator.check_for_update() self.assertTrue(update_2.is_available) self.assertIs(update_1, update_2) @configuration def test_update_available_version(self): # An update is available. What's the target version number? self._setup_server_keyrings() update = Mediator().check_for_update() self.assertEqual(update.version, '1600') @configuration def test_no_update_available_version(self): # No update is available, so the target version number is zero. self._setup_server_keyrings() touch_build(1600) update = Mediator().check_for_update() self.assertFalse(update.is_available) self.assertEqual(update.version, '') @configuration def test_no_update_available_at_latest(self): # Because our build number is equal to the latest available in the # index file, there is no update available. self._setup_server_keyrings() touch_build(1600) update = Mediator().check_for_update() self.assertFalse(update.is_available) @configuration def test_no_update_available_newer(self): # Because our build number is higher than the latest available in the # index file, there is no update available.
self._setup_server_keyrings() touch_build(1700) update = Mediator().check_for_update() self.assertFalse(update.is_available) @configuration def test_get_details(self): # Get the details of an available update. self._setup_server_keyrings() # Index 14 has a more interesting upgrade path, and will yield a # richer description set. index_dir = Path(self._serverdir) / self.CHANNEL / self.DEVICE index_path = index_dir / 'index.json' copy('api.index_02.json', index_dir, 'index.json') sign(index_path, 'device-signing.gpg') setup_index('api.index_02.json', self._serverdir, 'device-signing.gpg') # Get the descriptions. update = Mediator().check_for_update() self.assertTrue(update.is_available) self.assertEqual(update.size, 180009) self.assertEqual(len(update.descriptions), 3) # The first contains the descriptions for the full update. self.assertEqual(update.descriptions[0], { 'description': 'Full B', 'description-en': 'The full B', }) # The first delta. self.assertEqual(update.descriptions[1], { 'description': 'Delta B.1', 'description-en_US': 'This is the delta B.1', 'description-xx': 'XX This is the delta B.1', 'description-yy': 'YY This is the delta B.1', 'description-yy_ZZ': 'YY-ZZ This is the delta B.1', }) # The second delta. self.assertEqual(update.descriptions[2], { 'description': 'Delta B.2', 'description-xx': 'Oh delta, my delta', 'description-xx_CC': 'This hyar is the delta B.2', }) @configuration def test_download(self): # After checking that an update is available, complete the update, but # don't reboot. self._setup_server_keyrings() mediator = Mediator() self.assertTrue(mediator.check_for_update()) # Make sure a reboot did not get issued. with patch('systemimage.apply.Reboot.apply') as mock: mediator.download() # The update was not applied. self.assertFalse(mock.called) # But the command file did get written, and all the files are present. path = Path(config.updater.cache_partition) / 'ubuntu_command' with path.open('r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, """\ load_keyring image-master.tar.xz image-master.tar.xz.asc load_keyring image-signing.tar.xz image-signing.tar.xz.asc load_keyring device-signing.tar.xz device-signing.tar.xz.asc format system mount system update 6.txt 6.txt.asc update 7.txt 7.txt.asc update 5.txt 5.txt.asc unmount system """) self.assertEqual(set(os.listdir(config.updater.cache_partition)), set([ '5.txt', '5.txt.asc', '6.txt', '6.txt.asc', '7.txt', '7.txt.asc', 'device-signing.tar.xz', 'device-signing.tar.xz.asc', 'image-master.tar.xz', 'image-master.tar.xz.asc', 'image-signing.tar.xz', 'image-signing.tar.xz.asc', 'ubuntu_command', ])) # And the blacklist keyring is available too. self.assertEqual(set(os.listdir(config.updater.data_partition)), set([ 'blacklist.tar.xz', 'blacklist.tar.xz.asc', ])) @configuration def test_apply(self): # Run the intermediate steps, applying the update at the end. self._setup_server_keyrings() mediator = Mediator() # Mock to check the state of reboot. 
with patch('systemimage.apply.Reboot.apply') as mock: mediator.check_for_update() mediator.download() self.assertFalse(mock.called) mediator.apply() self.assertTrue(mock.called) @configuration def test_factory_reset(self): mediator = Mediator() with patch('systemimage.apply.Reboot.apply') as mock: mediator.factory_reset() self.assertTrue(mock.called) path = Path(config.updater.cache_partition) / 'ubuntu_command' with path.open('r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, dedent("""\ format data """)) @configuration def test_production_reset(self): mediator = Mediator() with patch('systemimage.apply.Reboot.apply') as mock: mediator.production_reset() self.assertTrue(mock.called) path = Path(config.updater.cache_partition) / 'ubuntu_command' with path.open('r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, dedent("""\ format data enable factory_wipe """)) @configuration def test_cancel(self): # When we get to the step of downloading the files, cancel it. self._setup_server_keyrings() mediator = Mediator() mediator.check_for_update() mediator.cancel() self.assertRaises(Canceled, mediator.download) @configuration def test_callback(self): # When downloading, we get callbacks. self._setup_server_keyrings() received_bytes = 0 total_bytes = 0 def callback(received, total): nonlocal received_bytes, total_bytes received_bytes = received total_bytes = total mediator = Mediator(callback) mediator.check_for_update() # Checking for updates does not trigger the callback. self.assertEqual(received_bytes, 0) self.assertEqual(total_bytes, 0) mediator.download() # We don't know exactly how many bytes got downloaded, but we know # some did. self.assertNotEqual(received_bytes, 0) self.assertNotEqual(total_bytes, 0) from systemimage.testing.controller import USING_PYCURL @unittest.skipIf(os.getuid() == 0, 'Test cannot succeed when run as root') @unittest.skipUnless(USING_PYCURL, 'LP: #1411866') @configuration def test_state_machine_exceptions(self, config): # An exception in the state machine captures the exception and returns # an error string in the Update instance. self._setup_server_keyrings() with chmod(config.updater.cache_partition, 0): update = Mediator().check_for_update() # There's no winning path, but there is an error. self.assertFalse(update.is_available) self.assertIn('Permission denied', update.error) class TestAPIVersionDetail(ServerTestBase): INDEX_FILE = 'api.index_03.json' CHANNEL_FILE = 'api.channels_01.json' CHANNEL = 'stable' DEVICE = 'nexus7' @configuration def test_update_available_version(self): # An update is available. What's the target version number? self._setup_server_keyrings() update = Mediator().check_for_update() self.assertEqual(update.version_detail, 'ubuntu=101,raw-device=201,version=301') @configuration def test_no_update_available_version(self): # No update is available, so the target version number is zero. self._setup_server_keyrings() touch_build(1600) update = Mediator().check_for_update() self.assertFalse(update.is_available) self.assertEqual(update.version_detail, '') ./systemimage/tests/test_config.py0000644000015600001650000004245012701500553017447 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Test the configuration parser.""" __all__ = [ 'TestConfiguration', ] import os import sys import stat import shutil import logging import unittest from contextlib import ExitStack, contextmanager from datetime import timedelta from subprocess import CalledProcessError, check_output from systemimage.apply import Reboot from systemimage.config import Configuration from systemimage.device import SystemProperty from systemimage.scores import WeightedScorer from systemimage.testing.helpers import configuration, data_path, touch_build from unittest.mock import patch @contextmanager def _patch_device_hook(side_effect=FileNotFoundError): # The device hook has two things that generally need patching. The first # is the logging output, which is just noise for testing purposes, so # silence it. The second is that the `getprop` command may actually exist # on the test system, and we want a consistent environment (i.e. the # assumption that the command does not exist). with ExitStack() as resources: resources.enter_context(patch('systemimage.device.logging.getLogger')) resources.enter_context( patch('systemimage.device.check_output', side_effect=side_effect)) yield class TestConfiguration(unittest.TestCase): def test_defaults(self): config = Configuration() # [service] self.assertEqual(config.service.base, 'system-image.ubuntu.com') self.assertEqual(config.http_base, 'http://system-image.ubuntu.com') self.assertEqual(config.https_base, 'https://system-image.ubuntu.com') self.assertEqual(config.service.channel, 'daily') self.assertEqual(config.service.build_number, 0) # [system] self.assertEqual(config.system.tempdir, '/tmp') self.assertEqual(config.system.logfile, '/var/log/system-image/client.log') self.assertEqual(config.system.loglevel, (logging.INFO, logging.ERROR)) self.assertEqual(config.system.settings_db, '/var/lib/system-image/settings.db') # [hooks] self.assertEqual(config.hooks.device, SystemProperty) self.assertEqual(config.hooks.scorer, WeightedScorer) self.assertEqual(config.hooks.apply, Reboot) # [gpg] self.assertEqual(config.gpg.archive_master, '/usr/share/system-image/archive-master.tar.xz') self.assertEqual( config.gpg.image_master, '/var/lib/system-image/keyrings/image-master.tar.xz') self.assertEqual( config.gpg.image_signing, '/var/lib/system-image/keyrings/image-signing.tar.xz') self.assertEqual( config.gpg.device_signing, '/var/lib/system-image/keyrings/device-signing.tar.xz') # [updater] self.assertEqual(config.updater.cache_partition, '/android/cache/recovery') self.assertEqual(config.updater.data_partition, '/var/lib/system-image') # [dbus] self.assertEqual(config.dbus.lifetime.total_seconds(), 600) @configuration('config.config_01.ini') def test_basic_config_d(self, config): # Read a basic config.d directory and check that the various attributes # and values are correct.
        #
        # [service]
        self.assertEqual(config.service.base, 'phablet.example.com')
        self.assertEqual(config.http_base, 'http://phablet.example.com')
        self.assertEqual(config.https_base, 'https://phablet.example.com')
        self.assertEqual(config.service.channel, 'stable')
        self.assertEqual(config.service.build_number, 0)
        # [system]
        self.assertEqual(config.system.tempdir, '/tmp')
        self.assertEqual(config.system.logfile,
                         '/var/log/system-image/client.log')
        self.assertEqual(config.system.loglevel,
                         (logging.ERROR, logging.ERROR))
        self.assertEqual(config.system.settings_db,
                         '/var/lib/phablet/settings.db')
        self.assertEqual(config.system.timeout, timedelta(seconds=10))
        # [hooks]
        self.assertEqual(config.hooks.device, SystemProperty)
        self.assertEqual(config.hooks.scorer, WeightedScorer)
        self.assertEqual(config.hooks.apply, Reboot)
        # [gpg]
        self.assertEqual(config.gpg.archive_master,
                         '/usr/share/phablet/archive-master.tar.xz')
        self.assertEqual(config.gpg.image_master,
                         '/etc/phablet/image-master.tar.xz')
        self.assertEqual(config.gpg.image_signing,
                         '/var/lib/phablet/image-signing.tar.xz')
        self.assertEqual(config.gpg.device_signing,
                         '/var/lib/phablet/device-signing.tar.xz')
        # [updater]
        self.assertEqual(config.updater.cache_partition[-14:],
                         '/android/cache')
        self.assertEqual(config.updater.data_partition[-20:],
                         '/lib/phablet/updater')
        # [dbus]
        self.assertEqual(config.dbus.lifetime.total_seconds(), 120)

    @configuration
    def test_should_have_reloaded(self, config_d):
        # If a configuration is already loaded, it cannot be loaded again.
        # Use .reload() instead.
        config = Configuration(config_d)
        self.assertRaises(RuntimeError, config.load, config_d)

    @configuration
    def test_ignore_some_files(self, config_d):
        # Any file that doesn't follow the NN_whatever.ini format isn't
        # loaded.  (See the aside below for a sketch of this rule.)
        path_1 = os.path.join(config_d, 'dummy_file')
        with open(path_1, 'w', encoding='utf-8') as fp:
            print('ignore me', file=fp)
        path_2 = os.path.join(config_d, 'nounderscore.ini')
        with open(path_2, 'w', encoding='utf-8') as fp:
            print('ignore me', file=fp)
        path_3 = os.path.join(config_d, 'XX_almost.ini')
        with open(path_3, 'w', encoding='utf-8') as fp:
            print('ignore me', file=fp)
        config = Configuration(config_d)
        self.assertNotIn('dummy_file', config.ini_files)
        self.assertNotIn('nounderscore.ini', config.ini_files)
        self.assertNotIn('XX_almost.ini', config.ini_files)

    @configuration('config.config_02.ini')
    def test_special_dbus_logging_level(self, config):
        # Read a config.ini that has a loglevel value with an explicit dbus
        # logging level.
        self.assertEqual(config.system.loglevel,
                         (logging.CRITICAL, logging.DEBUG))

    @configuration('config.config_03.ini')
    def test_nonstandard_ports(self, config):
        # This ini file has non-standard http and https ports.
        self.assertEqual(config.service.base, 'phablet.example.com')
        self.assertEqual(config.http_base,
                         'http://phablet.example.com:8080')
        self.assertEqual(config.https_base,
                         'https://phablet.example.com:80443')

    @configuration('config.config_05.ini')
    def test_disabled_http_port(self, config):
        # This ini file has http port disabled and non-standard https port.
        self.assertEqual(config.service.base, 'phablet.example.com')
        self.assertEqual(config.http_base,
                         'https://phablet.example.com:80443')
        self.assertEqual(config.https_base,
                         'https://phablet.example.com:80443')

    @configuration('config.config_06.ini')
    def test_disabled_https_port(self, config):
        # This ini file has https port disabled and standard http port.
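# Aside (not part of the original test suite): the NN_whatever.ini naming
# rule exercised in test_ignore_some_files above is easy to state directly.
# This is a minimal, self-contained sketch of such a config.d scan, assuming
# the pattern is exactly two digits, an underscore, and an .ini suffix (the
# cases the tests exercise); it is an illustration, not the project's actual
# Configuration implementation.
import os
import re
from configparser import ConfigParser

_INI_CRE = re.compile(r'^[0-9]{2}_\S+\.ini$')

def load_config_d(config_d):
    # Keep only files named like NN_whatever.ini; 'dummy_file',
    # 'nounderscore.ini', and 'XX_almost.ini' are all skipped.
    names = sorted(name for name in os.listdir(config_d)
                   if _INI_CRE.match(name))
    parser = ConfigParser()
    # read() merges files in order, so later (higher-numbered) files
    # override earlier ones, matching test_later_files_override below.
    parser.read(os.path.join(config_d, name) for name in names)
    return parser
# End of aside; the test above continues below.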
        self.assertEqual(config.service.base, 'phablet.example.com')
        self.assertEqual(config.http_base, 'http://phablet.example.com')
        self.assertEqual(config.https_base, 'http://phablet.example.com')

    @configuration
    def test_both_ports_disabled(self, config_d):
        # This ini file has both http and https ports disabled.
        shutil.copy(data_path('config.config_07.ini'),
                    os.path.join(config_d, '01_override.ini'))
        config = Configuration()
        with self.assertRaises(ValueError) as cm:
            config.load(config_d)
        self.assertEqual(cm.exception.args[0],
                         'Cannot disable both http and https ports')

    @configuration
    def test_negative_port_number(self, config_d):
        # This ini file has a negative port number.
        shutil.copy(data_path('config.config_08.ini'),
                    os.path.join(config_d, '01_override.ini'))
        with self.assertRaises(ValueError) as cm:
            Configuration(config_d)
        self.assertEqual(cm.exception.args[0], '-1')

    @configuration
    def test_get_build_number(self, config):
        # The current build number is stored in a file specified in the
        # configuration file.
        touch_build(1500)
        config.reload()
        self.assertEqual(config.build_number, 1500)

    @configuration
    def test_get_build_number_after_reload(self, config):
        # After a reload, the build number gets updated.
        self.assertEqual(config.build_number, 0)
        touch_build(801)
        config.reload()
        self.assertEqual(config.build_number, 801)

    @configuration
    def test_get_build_number_missing(self, config):
        # The build file is missing, so the build number defaults to 0.
        self.assertEqual(config.build_number, 0)

    @configuration
    def test_get_device_name(self, config):
        # The device name as we'd expect it to work on a real image.
        with patch('systemimage.device.check_output', return_value='nexus7'):
            self.assertEqual(config.device, 'nexus7')
            # Get it again to test out the cache.
            self.assertEqual(config.device, 'nexus7')

    @configuration
    def test_get_device_name_fallback(self, config):
        # Fallback for testing on non-images.
        with _patch_device_hook(side_effect=CalledProcessError(1, 'ignore')):
            self.assertEqual(config.device, '?')

    @configuration
    def test_service_device(self, config_d):
        # A configuration file could have a [service]device variable, which
        # takes precedence.
        shutil.copy(data_path('config.config_11.ini'),
                    os.path.join(config_d, '01_override.ini'))
        with patch('systemimage.device.check_output', return_value='nexus9'):
            config = Configuration(config_d)
            # This gets the [service]device value from the configuration
            # file, not the output of the hook.
            self.assertEqual(config.device, 'nexus8')

    @configuration
    def test_device_no_getprop_fallback(self, config):
        # Like above, but a FileNotFoundError occurs instead.
        with _patch_device_hook():
            self.assertEqual(config.device, '?')

    @configuration
    def test_get_channel(self, config):
        self.assertEqual(config.channel, 'stable')

    @configuration
    def test_overrides(self, config):
        self.assertEqual(config.build_number, 0)
        self.assertEqual(config.device, 'nexus7')
        self.assertEqual(config.channel, 'stable')
        config.build_number = 20250801
        config.device = 'phablet'
        config.channel = 'daily-proposed'
        self.assertEqual(config.build_number, 20250801)
        self.assertEqual(config.device, 'phablet')
        self.assertEqual(config.channel, 'daily-proposed')

    @configuration
    def test_build_number_cli_override(self, config):
        # When setting the build number, e.g. --build on the cli, we have an
        # additional value we can check.  Normally we only care what the
        # build number is, but in one specific case we care whether it was
        # overridden on the command line.  When a channel alias switch is
        # happening, we normally set the build number to 0 to force a full
        # update.  However the user can override this on the cli by setting
        # --build, which takes precedence.
        self.assertEqual(config.build_number, 0)
        self.assertFalse(config.build_number_override)
        config.build_number = 108
        self.assertEqual(config.build_number, 108)
        self.assertTrue(config.build_number_override)

    @configuration
    def test_bad_override(self, config):
        with self.assertRaises(ValueError) as cm:
            # Looks like an int, but isn't.
            config.build_number = '20150801'
        self.assertEqual(str(cm.exception), 'integer is required, got: str')

    @configuration
    def test_reset_build_number(self, config):
        old_build = config.build_number
        self.assertEqual(old_build, 0)
        config.build_number = 20990000
        self.assertEqual(config.build_number, 20990000)
        del config.build_number
        self.assertEqual(config.build_number, 0)
        config.build_number = 21000000
        self.assertEqual(config.build_number, 21000000)

    @configuration('00.ini', 'config.config_09.ini')
    def test_later_files_override(self, config):
        # This value comes from the 00.ini file.
        self.assertEqual(config.system.timeout, timedelta(seconds=1))
        # These get overridden in the second ini file.
        self.assertEqual(config.service.base, 'systum-imaje.ubuntu.com')
        self.assertEqual(config.dbus.lifetime, timedelta(hours=1))

    @configuration
    def test_tempdir(self, config):
        # config.tempdir is randomly created.
        self.assertEqual(config.tempdir[-26:-8], '/tmp/system-image-')
        self.assertEqual(stat.filemode(os.stat(config.tempdir).st_mode),
                         'drwx--S---')

    def test_tempdir_cleanup(self):
        # config.tempdir gets cleaned up when the process exits gracefully.
        #
        # To test this, we invoke Python in a subprocess and ask it to print
        # config.tempdir, letting that process exit normally.  Then check
        # that the directory has been removed.  Note of course that
        # *ungraceful* exits won't invoke the atexit handlers and thus won't
        # clean up the directory.  Be sure [system]tempdir is on a tmpfs and
        # you'll be fine.
        command = [
            sys.executable,
            '-c',
            """from systemimage.config import config; import stat, os; \
print(stat.filemode(os.stat(config.tempdir).st_mode), \
config.tempdir)"""
            ]
        stdout = check_output(command, universal_newlines=True)
        self.assertEqual(stdout[:29], 'drwx--S--- /tmp/system-image-')
        self.assertFalse(os.path.exists(stdout.split()[1]))

    @configuration('config.config_01.ini')
    def test_constructor(self, config_d):
        # Configuration constructor takes an optional directory argument.
        config = Configuration(config_d)
        self.assertEqual(config.service.base, 'phablet.example.com')
        # Passing in a non-directory is not allowed.
        self.assertRaises(TypeError, Configuration,
                          data_path('config.config_01.ini'))

    @configuration
    def test_phased_percentage(self, config):
        # By default, the phased percentage override is None.
        self.assertIsNone(config.phase_override)

    @configuration
    def test_phased_percentage_override(self, config):
        # The phased percentage for the device can be overridden.
        self.assertIsNone(config.phase_override)
        config.phase_override = 33
        self.assertEqual(config.phase_override, 33)
        # It can also be reset.
        del config.phase_override
        self.assertIsNone(config.phase_override)

    @configuration
    def test_phased_percentage_override_int(self, config):
        # When overriding the phased percentage, the new value must be an
        # int.  (A sketch of the behavior this test and test_crazy_phase
        # exercise follows as an aside.)
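# Aside (not part of the original test suite): the behavior exercised by
# test_phased_percentage_override_int and test_crazy_phase below is
# consistent with an int-only setter that clamps to the 0-100 range.  This
# is a minimal sketch with a hypothetical class name (_PhaseExample); the
# real Configuration property may validate more strictly.
class _PhaseExample:
    def __init__(self):
        self._phase_override = None

    @property
    def phase_override(self):
        return self._phase_override

    @phase_override.setter
    def phase_override(self, value):
        # Non-integer values such as '!' raise a ValueError...
        percentage = int(value)
        # ...and out-of-range values are clamped: -100 -> 0, 108 -> 100.
        self._phase_override = max(0, min(100, percentage))

    @phase_override.deleter
    def phase_override(self):
        # Deleting the override resets it to None.
        self._phase_override = None
# End of aside; the test above continues below.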
self.assertRaises(ValueError, setattr, config, 'phase_override', '!') @configuration def test_crazy_phase(self, config): config.phase_override = -100 self.assertEqual(config.phase_override, 0) config.phase_override = 108 self.assertEqual(config.phase_override, 100) config.phase_override = 0 self.assertEqual(config.phase_override, 0) config.phase_override = 100 self.assertEqual(config.phase_override, 100) @configuration('config.config_10.ini') def test_missing_stanza_okay(self, config): # config_09.ini does not contain a [system] section, so that gets set # to the built-in default values. self.assertEqual(config.system.logfile, '/var/log/system-image/client.log') @configuration def test_user_agent(self, config): # The User-Agent string contains the device, channel, and build. config.device = 'geddyboard' config.channel = 'devel-trio' config.build_number = 2112 self.assertEqual( config.user_agent, 'Ubuntu System Image Upgrade Client: ' 'device=geddyboard;channel=devel-trio;build=2112') ./systemimage/tests/__init__.py0000644000015600001650000000000012701500553016663 0ustar jenkinsjenkins./systemimage/tests/test_bag.py0000644000015600001650000001267712701500553016743 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Test the Bag class.""" __all__ = [ 'TestBag', ] import pickle import unittest from operator import setitem from systemimage.bag import Bag class TestBag(unittest.TestCase): def test_simple(self): # Initialize a bag; its attributes are the keywords of the ctor. bag = Bag(a=1, b=2, c=3) self.assertEqual(bag.a, 1) self.assertEqual(bag.b, 2) self.assertEqual(bag.c, 3) def test_dash_translation(self): # Dashes in keys get turned into underscore in attributes. bag = Bag(**{'a-b': 1, 'c-d': 2, 'e-f': 3}) self.assertEqual(bag.a_b, 1) self.assertEqual(bag.c_d, 2) self.assertEqual(bag.e_f, 3) def test_dash_literal_access(self): # For keys with dashes, the original name is preserved in getitem. bag = Bag(**{'a-b': 1, 'c-d': 2, 'e-f': 3}) self.assertEqual(bag['a-b'], 1) self.assertEqual(bag['c-d'], 2) self.assertEqual(bag['e-f'], 3) def test_keyword_translation(self): # Python keywords get a trailing underscore. bag = Bag(**{'global': 1, 'with': 2, 'import': 3}) self.assertEqual(bag.global_, 1) self.assertEqual(bag.with_, 2) self.assertEqual(bag.import_, 3) def test_repr(self): # The repr of a bag includes its translated keys. bag = Bag(**{'a-b': 1, 'global': 2, 'foo': 3}) self.assertEqual(repr(bag), '') def test_original(self): # There's a magical attribute containing the original ctor arguments. source = {'a-b': 1, 'global': 2, 'foo': 3} bag = Bag(**source) self.assertEqual(bag.__original__, source) def test_add_key(self): # We can add new keys/attributes via setitem. bag = Bag(a=1, b=2, c=3) bag['d'] = bag.b + bag.c self.assertEqual(bag.d, 5) def test_add_existing_key(self): # A key set in the original ctor cannot be changed. 
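# Aside (not part of the original test suite): the key-to-attribute
# translation that the Bag tests above exercise can be written down as a
# single helper.  This is a sketch grounded only in the behavior the tests
# show (dashes become underscores, Python keywords gain a trailing
# underscore); it is not systemimage's actual Bag implementation.
import keyword

def bag_attribute_name(key):
    # 'a-b' -> 'a_b'; 'global' -> 'global_'; 'foo' -> 'foo'
    attr = key.replace('-', '_')
    if keyword.iskeyword(attr):
        attr += '_'
    return attr

assert bag_attribute_name('a-b') == 'a_b'
assert bag_attribute_name('global') == 'global_'
assert bag_attribute_name('foo') == 'foo'
# End of aside; the test above continues below.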
bag = Bag(a=1, b=2, c=3) self.assertRaises(ValueError, setitem, bag, 'b', 5) self.assertEqual(bag.b, 2) def test_add_new_key(self): # A key added by setitem can be changed. bag = Bag(a=1, b=2, c=3) bag['d'] = 4 bag['d'] = 5 self.assertEqual(bag.d, 5) def test_pickle(self): # Bags can be pickled and unpickled. bag = Bag(a=1, b=2, c=3) pck = pickle.dumps(bag) new_bag = pickle.loads(pck) self.assertEqual(new_bag.a, 1) self.assertEqual(new_bag.b, 2) self.assertEqual(new_bag.c, 3) def test_update(self): # Bags can be updated, similar to dicts. bag = Bag(a=1, b=2, c=3) bag.update(b=7, d=9) self.assertEqual(bag.a, 1) self.assertEqual(bag.b, 7) self.assertEqual(bag.c, 3) self.assertEqual(bag.d, 9) def test_converters(self): # The Bag ctor accepts a mapping of type converter functions. bag = Bag(converters=dict(a=int, b=int), a='1', b='2', c='3') self.assertEqual(bag.a, 1) self.assertEqual(bag.b, 2) self.assertEqual(bag.c, '3') def test_converters_error(self): # Type converter function errors get propagated. converters = dict(a=int, b=int) keywords = dict(a='1', b='foo', c=3) self.assertRaises(ValueError, Bag, converters=converters, **keywords) def test_update_converters(self): # The update method also accepts converters. bag = Bag(a=1, b=2, c=3) bag.update(converters=dict(d=int), d='4', e='5') self.assertEqual(bag.d, 4) self.assertEqual(bag.e, '5') def test_update_converter_overrides(self): # Converters in the update method permanently override ctor converters. converters = dict(a=int, b=int) bag = Bag(converters=converters, a='1', b='2') self.assertEqual(bag.a, 1) self.assertEqual(bag.b, 2) new_converters = dict(a=str) bag.update(converters=new_converters, a='3', b='4') self.assertEqual(bag.a, '3') self.assertEqual(bag.b, 4) bag.update(a='5', b='6') self.assertEqual(bag.a, '5') self.assertEqual(bag.b, 6) def test_keys(self): bag = Bag(c=1, b=2, a=3) self.assertEqual(sorted(bag.keys()), ['a', 'b', 'c']) def test_iter(self): # Iteration is over the available keys. bag = Bag(c=1, b=2, a=3) self.assertEqual(sorted(bag, reverse=True), ['c', 'b', 'a']) def test_get(self): # You can get a single key. If missing, None or a supplied default is # returned. bag = Bag(c=1, b=2, a=3) self.assertEqual(bag.get('b'), 2) self.assertIsNone(bag.get('missing')) missing = object() self.assertIs(bag.get('missing', missing), missing) ./systemimage/tests/test_dbus.py0000644000015600001650000030475412701500553017147 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
"""Test the SystemImage dbus service.""" __all__ = [ 'TestDBusApply', 'TestDBusCheckForUpdate', 'TestDBusCheckForUpdateToUnwritablePartition', 'TestDBusCheckForUpdateWithBrokenIndex', 'TestDBusDownload', 'TestDBusDownloadBigFiles', 'TestDBusFactoryReset', 'TestDBusGSMDownloads', 'TestDBusGSMNoDownloads', 'TestDBusGetSet', 'TestDBusInfo', 'TestDBusMiscellaneous', 'TestDBusMockCrashers', 'TestDBusMockFailApply', 'TestDBusMockFailPause', 'TestDBusMockFailResume', 'TestDBusMockNoUpdate', 'TestDBusMockUpdateAutoSuccess', 'TestDBusMockUpdateManualSuccess', 'TestDBusMultipleChecksInFlight', 'TestDBusPauseResume', 'TestDBusProductionReset', 'TestDBusProgress', 'TestDBusRegressions', 'TestDBusUseCache', 'TestLiveDBusInfo', ] import os import sys import dbus import json import time import shutil import dbusmock import tempfile import unittest import subprocess from contextlib import ExitStack, suppress from collections import namedtuple from datetime import datetime, timedelta from dbus.exceptions import DBusException from functools import partial from pathlib import Path from textwrap import dedent from systemimage.config import Configuration from systemimage.helpers import MiB, safe_remove from systemimage.reactor import Reactor from systemimage.settings import Settings from systemimage.testing.controller import USING_PYCURL, stop_downloader from systemimage.testing.helpers import ( copy, data_path, find_dbus_process, make_http_server, setup_index, setup_keyring_txz, setup_keyrings, sign, terminate_service, touch_build, wait_for_service, write_bytes) from systemimage.testing.nose import SystemImagePlugin # Precomputed SHA256 hash for 750MiB of b'x'. HASH750 = '5fdddb486eeb1aa4dbdada48424418fce5f753844544b6970e4a25879d6d6f52' # Use a namedtuple for more convenient argument unpacking. 
UASRecord = namedtuple('UASRecord', 'is_available downloading available_version update_size ' 'last_update_date error_reason') def capture_dbus_calls(function): def inner(self, *args, **kws): with ExitStack() as resources: fd, filename = tempfile.mkstemp('.log') os.close(fd) resources.callback(os.remove, filename) old_filename, old_level = self.iface.DebugDBusTo(filename, 'debug') try: result = function(self, *args, **kws) finally: self.iface.DebugDBusTo('', old_level) with open(filename, 'r', encoding='utf-8') as fp: print('\nvvvvv', function.__name__, 'dbus calls vvvvv', file=sys.stderr) sys.stderr.write(fp.read()) print('^^^^^', function.__name__, 'dbus calls ^^^^^', file=sys.stderr) return result return inner def tweak_checksums(checksum): index_path = os.path.join( SystemImagePlugin.controller.serverdir, 'stable', 'nexus7', 'index.json') with open(index_path, 'r', encoding='utf-8') as fp: index = json.load(fp) for i in range(3): index['images'][0]['files'][i]['checksum'] = checksum with open(index_path, 'w', encoding='utf-8') as fp: json.dump(index, fp) sign(index_path, 'device-signing.gpg') class SignalCapturingReactor(Reactor): def __init__(self, *signals): super().__init__(dbus.SystemBus()) for signal in signals: self.react_to(signal) self.signals = [] def _default(self, signal, path, *args, **kws): self.signals.append(args) self.quit() def _do_UpdateAvailableStatus(self, signal, path, *args): self.signals.append(UASRecord(*args)) self.quit() def run(self, method=None, timeout=None): if method is not None: self.schedule(method) super().run(timeout=timeout) class AutoDownloadCancelingReactor(Reactor): def __init__(self, iface): super().__init__(dbus.SystemBus()) self._iface = iface self.got_update_available_status = False self.got_update_failed = False self.react_to('UpdateAvailableStatus') self.react_to('UpdateFailed') def _do_UpdateAvailableStatus(self, signal, path, *args, **kws): self.got_update_available_status = True self._iface.CancelUpdate() def _do_UpdateFailed(self, signal, path, *args, **kws): self.got_update_failed = True self.quit() class MiscellaneousCancelingReactor(Reactor): def __init__(self, iface): super().__init__(dbus.SystemBus()) self._iface = iface self.update_failures = [] self.react_to('UpdateProgress') self.react_to('UpdateFailed') def _do_UpdateProgress(self, signal, path, *args, **kws): self._iface.CancelUpdate() def _do_UpdateFailed(self, signal, path, *args, **kws): self.update_failures.append(args) self.quit() class ProgressRecordingReactor(Reactor): def __init__(self): super().__init__(dbus.SystemBus()) self.react_to('UpdateDownloaded') self.react_to('UpdateProgress') self.progress = [] def _do_UpdateDownloaded(self, signal, path, *args, **kws): self.quit() def _do_UpdateProgress(self, signal, path, *args, **kws): self.progress.append(args) class PausingReactor(Reactor): def __init__(self, iface): super().__init__(dbus.SystemBus()) self._iface = iface self.pause_progress = 0 self.paused = False self.percentage = 0 self.react_to('UpdateProgress') self.react_to('UpdatePaused') def _do_UpdateProgress(self, signal, path, percentage, eta): if self.pause_progress == 0 and percentage > 0: self._iface.PauseDownload() self.pause_progress = percentage def _do_UpdatePaused(self, signal, path, percentage): self.paused = True self.percentage = percentage self.quit() class DoubleCheckingReactor(Reactor): def __init__(self, iface): super().__init__(dbus.SystemBus()) self.iface = iface self.uas_signals = [] self.react_to('UpdateAvailableStatus') 
self.react_to('UpdateDownloaded') self.schedule(self.iface.CheckForUpdate) def _do_UpdateAvailableStatus(self, signal, path, *args): # We'll keep doing this until we get the UpdateDownloaded signal. self.uas_signals.append(UASRecord(*args)) self.schedule(self.iface.CheckForUpdate) def _do_UpdateDownloaded(self, *args, **kws): self.quit() class DoubleFiringReactor(Reactor): def __init__(self, iface, wait_count=2): super().__init__(dbus.SystemBus()) self.iface = iface self.wait_count = wait_count self.uas_signals = [] self.react_to('UpdateAvailableStatus') def _do_UpdateAvailableStatus(self, signal, path, *args): self.uas_signals.append(UASRecord(*args)) if len(self.uas_signals) >= self.wait_count: self.quit() def run(self): self.schedule(self.iface.CheckForUpdate, milliseconds=50) self.schedule(self.iface.CheckForUpdate, milliseconds=55) super().run() class ManualUpdateReactor(Reactor): def __init__(self, iface): super().__init__(dbus.SystemBus()) self.iface = iface self.applied = False self.react_to('UpdateAvailableStatus') self.react_to('UpdateProgress') self.react_to('UpdateDownloaded') self.react_to('Applied') self.react_to('UpdateFailed') self.iface.CheckForUpdate() def _do_UpdateAvailableStatus(self, signal, path, *args, **kws): # When the update is available, start the download. self.iface.DownloadUpdate() def _do_UpdateProgress(self, signal, path, *args, **kws): # Once the download is in progress, initiate another check. Only do # this on the first progress signal. if args == (0, 0): self.iface.CheckForUpdate() def _do_UpdateDownloaded(self, signal, path, *args, **kws): # The update successfully downloaded, so apply the update now. self.iface.ApplyUpdate() def _do_UpdateFailed(self, signal, path, *args, **kws): # Before LP: #1287919 was fixed, this signal would have been sent. self.applied = False self.quit() def _do_Applied(self, signal, path, *args, **kws): # The update was applied. self.applied = True self.quit() class AppliedNoRebootingReactor(Reactor): def __init__(self, iface): super().__init__(dbus.SystemBus()) self.iface = iface # Values here are (received, flag) self.applied = (False, False) self.rebooting = (False, False) self.react_to('Applied') self.react_to('Rebooting') self.react_to('UpdateDownloaded') self.schedule(self.iface.CheckForUpdate) def _do_UpdateDownloaded(self, signal, path, *args, **kws): # The update successfully downloaded, so apply the update now. self.iface.ApplyUpdate() def _do_Applied(self, signal, path, *args): self.applied = (True, args[0]) self.quit() def _do_Rebooting(self, signal, path, *args): self.rebooting = (True, args[0]) class StartedReactor(Reactor): def __init__(self): super().__init__(dbus.SystemBus()) self.got_started = 0 self.react_to('DownloadStarted') self.react_to('UpdateDownloaded') def _do_DownloadStarted(self, *args, **kws): self.got_started += 1 def _do_UpdateDownloaded(self, *args, **kws): self.quit() class _TestBase(unittest.TestCase): """Base class for all DBus testing.""" # For unittest's assertMultiLineEqual(). maxDiff = None # Override this to start the DBus server in a different testing mode. mode = None @classmethod def setUpClass(cls): super().setUpClass() SystemImagePlugin.controller.set_mode( cert_pem='cert.pem', service_mode=cls.mode) @classmethod def tearDownClass(cls): SystemImagePlugin.controller.stop_children() # Clear out the temporary directory. 
config = Configuration(SystemImagePlugin.controller.ini_path) try: shutil.rmtree(config.system.tempdir) except FileNotFoundError: pass super().tearDownClass() def setUp(self): super().setUp() self.system_bus = dbus.SystemBus() service = self.system_bus.get_object( 'com.canonical.SystemImage', '/Service') self.iface = dbus.Interface(service, 'com.canonical.SystemImage') def tearDown(self): self.reset_service() super().tearDown() def reset_service(self): self.iface.Reset() def download_manually(self): self.iface.SetSetting('auto_download', '0') def download_on_wifi(self): self.iface.SetSetting('auto_download', '1') def download_always(self): self.iface.SetSetting('auto_download', '2') class _LiveTesting(_TestBase): mode = 'live' @classmethod def setUpClass(cls): super().setUpClass() cls._resources = ExitStack() # Set up the http/https servers that the dbus client will talk to. # Start up both an HTTPS and HTTP server. The data files are vended # over the latter, everything else, over the former. serverdir = SystemImagePlugin.controller.serverdir try: cls._resources.push( make_http_server(serverdir, 8943, 'cert.pem', 'key.pem')) cls._resources.push(make_http_server(serverdir, 8980)) # Set up the server files. copy('dbus.channels_01.json', serverdir, 'channels.json') sign(os.path.join(serverdir, 'channels.json'), 'image-signing.gpg') # Only the archive-master key is pre-loaded. All the other keys # are downloaded and there will be both a blacklist and device # keyring. The four signed keyring tar.xz files and their # signatures end up in the proper location after the state machine # runs to completion. config = Configuration(SystemImagePlugin.controller.ini_path) setup_keyrings('archive-master', use_config=config) setup_keyring_txz( 'spare.gpg', 'image-master.gpg', dict(type='blacklist'), os.path.join(serverdir, 'gpg', 'blacklist.tar.xz')) setup_keyring_txz( 'image-master.gpg', 'archive-master.gpg', dict(type='image-master'), os.path.join(serverdir, 'gpg', 'image-master.tar.xz')) setup_keyring_txz( 'image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), os.path.join(serverdir, 'gpg', 'image-signing.tar.xz')) setup_keyring_txz( 'device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), os.path.join(serverdir, 'stable', 'nexus7', 'device-signing.tar.xz')) except: cls._resources.close() raise @classmethod def tearDownClass(cls): cls._resources.close() super().tearDownClass() def setUp(self): super().setUp() self._prepare_index('dbus.index_01.json') # We need a configuration file that agrees with the dbus client. self.config = Configuration(SystemImagePlugin.controller.ini_path) # For testing reboot preparation. self.command_file = os.path.join( self.config.updater.cache_partition, 'ubuntu_command') # For testing the reboot command without actually rebooting. self.reboot_log = os.path.join( self.config.updater.cache_partition, 'reboot.log') def tearDown(self): # Consume the UpdateFailed that results from the cancellation. reactor = SignalCapturingReactor('TornDown') reactor.run(self.iface.TearDown, timeout=15) # Clear out any previously downloaded data files. for updater_dir in (self.config.updater.cache_partition, self.config.updater.data_partition): try: all_files = os.listdir(updater_dir) except FileNotFoundError: # The directory itself may not exist. pass for filename in all_files: safe_remove(os.path.join(updater_dir, filename)) # Since the controller re-uses the same config_d directory, clear out # any touched config files that aren't the default. 
for ini_file in os.listdir(self.config.config_d): if ini_file != '00_defaults.ini': safe_remove(os.path.join(self.config.config_d, ini_file)) safe_remove(self.reboot_log) super().tearDown() def _prepare_index(self, index_file, write_callback=None): serverdir = SystemImagePlugin.controller.serverdir index_path = os.path.join(serverdir, 'stable', 'nexus7', 'index.json') head, tail = os.path.split(index_path) copy(index_file, head, tail) sign(index_path, 'device-signing.gpg') setup_index(index_file, serverdir, 'device-signing.gpg', write_callback) class TestDBusCheckForUpdate(_LiveTesting): """Test the SystemImage dbus service.""" def test_update_available(self): # There is an update available. self.download_manually() reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) # There's one boolean argument to the result. signal = reactor.signals[0] self.assertTrue(signal.is_available, msg=signal.error_reason) self.assertFalse(signal.downloading) self.assertEqual(signal.available_version, '1600') self.assertEqual(signal.update_size, 314572800) def test_update_available_auto_download(self): # Automatically download the available update. self.download_always() timestamp = int(datetime(2022, 8, 1, 10, 11, 12).timestamp()) touch_build(1701, timestamp, self.config) reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) # There's one boolean argument to the result. signal = reactor.signals[0] self.assertTrue(signal.is_available, msg=signal.error_reason) self.assertTrue(signal.downloading) self.assertEqual(signal.available_version, '1600') self.assertEqual(signal.update_size, 314572800) # This is the first update applied. self.assertEqual(signal.last_update_date, '2022-08-01 10:11:12') self.assertEqual(signal.error_reason, '') def test_no_update_available(self): # Our device is newer than the version that's available. timestamp = int(datetime(2022, 8, 1, 10, 11, 12).timestamp()) touch_build(1701, timestamp, self.config) self.iface.Reset() reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) signal = reactor.signals[0] self.assertFalse(signal.is_available) # No update has been previously applied. self.assertEqual(signal.last_update_date, '2022-08-01 10:11:12') # All other values are undefined. def test_last_update_date(self): # Pretend the device got a previous update. Now, there's no update # available, but the date of the last update is provided in the # signal. timestamp = int(datetime(2022, 1, 20, 12, 1, 45).timestamp()) touch_build(1701, timestamp, self.config) self.iface.Reset() # Fake that there was a previous update. reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) signal = reactor.signals[0] self.assertFalse(signal.is_available) # No update has been previously applied. self.assertEqual(signal.last_update_date, '2022-01-20 12:01:45') # All other values are undefined. def test_check_for_update_twice(self): # Issue two CheckForUpdate calls immediate after each other. self.download_always() reactor = SignalCapturingReactor('UpdateAvailableStatus') def two_calls(): self.iface.CheckForUpdate() self.iface.CheckForUpdate() reactor.run(two_calls) self.assertEqual(len(reactor.signals), 1) # There's one boolean argument to the result. 
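# Aside (not part of the original test suite): the UpdateAvailableStatus
# signal captured above carries six positional arguments, which the
# UASRecord namedtuple defined earlier in this file makes readable.  The
# namedtuple is repeated here so the sketch is self-contained; the example
# values are hypothetical, mirroring test_update_available above.
from collections import namedtuple

UASRecord = namedtuple(
    'UASRecord',
    'is_available downloading available_version update_size '
    'last_update_date error_reason')

signal_args = (True, False, '1600', 314572800, '2022-08-01 10:11:12', '')
record = UASRecord(*signal_args)
assert record.is_available and not record.downloading
# 314572800 bytes is exactly 300 * 1024 * 1024, i.e. 300 MiB.
assert record.update_size == 300 * 1024 * 1024
# End of aside; the test above continues below.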
signal = reactor.signals[0] self.assertTrue(signal.is_available, msg=signal.error_reason) self.assertTrue(signal.downloading) class TestDBusDownload(_LiveTesting): def test_auto_download(self): # When always auto-downloading, and there is an update available, the # update gets automatically downloaded. First, we'll get an # UpdateAvailableStatus signal, followed by a bunch of UpdateProgress # signals (which for this test, we'll ignore), and finally an # UpdateDownloaded signal. self.download_always() self.assertFalse(os.path.exists(self.command_file)) reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) # There's one boolean argument to the result. signal = reactor.signals[0] self.assertTrue(signal.is_available) self.assertTrue(signal.downloading) # Now, wait for the UpdateDownloaded signal. reactor = SignalCapturingReactor('UpdateDownloaded') reactor.run() self.assertEqual(len(reactor.signals), 1) with open(self.command_file, 'r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, """\ load_keyring image-master.tar.xz image-master.tar.xz.asc load_keyring image-signing.tar.xz image-signing.tar.xz.asc load_keyring device-signing.tar.xz device-signing.tar.xz.asc format system mount system update 6.txt 6.txt.asc update 7.txt 7.txt.asc update 5.txt 5.txt.asc unmount system """) def test_nothing_to_auto_download(self): # We're auto-downloading, but there's no update available. self.download_always() touch_build(1701, use_config=self.config) self.iface.Reset() self.assertFalse(os.path.exists(self.command_file)) reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) # There's one boolean argument to the result. signal = reactor.signals[0] self.assertFalse(signal.is_available) self.assertFalse(signal.downloading) # Now, wait for the UpdateDownloaded signal, which isn't coming. reactor = SignalCapturingReactor('UpdateDownloaded') reactor.run(timeout=15) self.assertEqual(len(reactor.signals), 0) self.assertFalse(os.path.exists(self.command_file)) def test_manual_download(self): # When manually downloading, and there is an update available, the # update does not get downloaded until we explicitly ask it to be. self.download_manually() self.assertFalse(os.path.exists(self.command_file)) reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) # There's one boolean argument to the result. signal = reactor.signals[0] self.assertTrue(signal.is_available) # This is false because we're in manual download mode. self.assertFalse(signal.downloading) self.assertFalse(os.path.exists(self.command_file)) # No UpdateDownloaded signal is coming. reactor = SignalCapturingReactor('UpdateDownloaded') reactor.run(timeout=15) self.assertEqual(len(reactor.signals), 0) self.assertFalse(os.path.exists(self.command_file)) # Now we download manually and wait again for the signal. 
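# Aside (not part of the original test suite): the download_manually(),
# download_on_wifi(), and download_always() helpers defined in _TestBase
# earlier in this file all boil down to one string-valued setting.  A
# sketch, with constant and function names that are illustrative only:
AUTO_DOWNLOAD_MANUAL = '0'   # download only on an explicit DownloadUpdate()
AUTO_DOWNLOAD_WIFI = '1'     # download automatically when on wifi
AUTO_DOWNLOAD_ALWAYS = '2'   # download automatically on any connection

def set_download_mode(iface, mode):
    # e.g. set_download_mode(self.iface, AUTO_DOWNLOAD_MANUAL)
    assert mode in (AUTO_DOWNLOAD_MANUAL,
                    AUTO_DOWNLOAD_WIFI,
                    AUTO_DOWNLOAD_ALWAYS)
    iface.SetSetting('auto_download', mode)
# End of aside; the test above continues below.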
reactor = SignalCapturingReactor('UpdateDownloaded') reactor.run(self.iface.DownloadUpdate) with open(self.command_file, 'r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, """\ load_keyring image-master.tar.xz image-master.tar.xz.asc load_keyring image-signing.tar.xz image-signing.tar.xz.asc load_keyring device-signing.tar.xz device-signing.tar.xz.asc format system mount system update 6.txt 6.txt.asc update 7.txt 7.txt.asc update 5.txt 5.txt.asc unmount system """) def test_nothing_to_manually_download(self): # We're manually downloading, but there's no update available. self.download_manually() touch_build(1701, use_config=self.config) self.iface.Reset() self.assertFalse(os.path.exists(self.command_file)) reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) # There's one boolean argument to the result. signal = reactor.signals[0] self.assertFalse(signal.is_available) # This is false because we're in manual download mode. self.assertFalse(signal.downloading) # No UpdateDownloaded signal is coming reactor = SignalCapturingReactor('UpdateDownloaded') reactor.run(timeout=15) self.assertEqual(len(reactor.signals), 0) self.assertFalse(os.path.exists(self.command_file)) # Now we download manually, but no signal is coming. reactor = SignalCapturingReactor('UpdateDownloaded') reactor.run(self.iface.DownloadUpdate, timeout=15) self.assertEqual(len(reactor.signals), 0) self.assertFalse(os.path.exists(self.command_file)) def test_update_failed_signal(self): # A signal is issued when the update failed. self.download_manually() reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) # Cause the update to fail by deleting a file from the server. os.remove(os.path.join(SystemImagePlugin.controller.serverdir, '4/5/6.txt.asc')) reactor = SignalCapturingReactor('UpdateFailed') reactor.run(self.iface.DownloadUpdate) self.assertEqual(len(reactor.signals), 1) failure_count, last_reason = reactor.signals[0] self.assertEqual(failure_count, 1) self.assertEqual(last_reason[:17], 'FileNotFoundError') def test_duplicate_destinations(self): # A faulty index.json might specify that two different urls yield the # same local destination file. This is a bug on the server and the # client cannot perform an update. self.download_manually() self._prepare_index('dbus.index_03.json') reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) signal = reactor.signals[0] self.assertTrue(signal.is_available) self.assertFalse(signal.downloading) reactor = SignalCapturingReactor('UpdateFailed') reactor.run(self.iface.DownloadUpdate) self.assertEqual(len(reactor.signals), 1) failure_count, last_reason = reactor.signals[0] self.assertEqual(failure_count, 1) # Don't count on a specific error message. self.assertEqual(last_reason[:25], 'DuplicateDestinationError') def test_started(self): # A DownloadStarted signal is sent when the download has started. # This either comes proxied through UDM or gets sent by the built-in # cURL downloader. self.download_always() reactor = StartedReactor() reactor.schedule(self.iface.CheckForUpdate) reactor.run(timeout=60) # Exactly one DownloadStarted signal was received. self.assertEqual(reactor.got_started, 1) class TestDBusDownloadBigFiles(_LiveTesting): # If the update contains several very large files, ensure that they can be # successfully downloaded. 
    # With the PyCURL downloader, this will ensure that the minimum transfer
    # rate error isn't triggered.
    def test_download_big_files(self):
        # Start by creating some big files which will take a while to
        # download.
        def write_callback(dst):
            # Write a 750 MiB sized file.
            write_bytes(dst, 750)
        self._prepare_index('dbus.index_04.json', write_callback)
        tweak_checksums(HASH750)
        # Do the download.
        self.download_always()
        reactor = SignalCapturingReactor('UpdateDownloaded')
        reactor.run(self.iface.CheckForUpdate, timeout=1200)
        self.assertEqual(len(reactor.signals), 1)
        with open(self.command_file, 'r', encoding='utf-8') as fp:
            command = fp.read()
        self.assertMultiLineEqual(command, """\
load_keyring image-master.tar.xz image-master.tar.xz.asc
load_keyring image-signing.tar.xz image-signing.tar.xz.asc
load_keyring device-signing.tar.xz device-signing.tar.xz.asc
format system
mount system
update 6.txt 6.txt.asc
update 7.txt 7.txt.asc
update 5.txt 5.txt.asc
unmount system
""")


class TestDBusApply(_LiveTesting):
    def setUp(self):
        super().setUp()
        self.download_always()

    def test_reboot(self):
        # Apply the update, which reboots the device.
        self.assertFalse(os.path.exists(self.reboot_log))
        reactor = SignalCapturingReactor('UpdateDownloaded')
        reactor.run(self.iface.CheckForUpdate)
        reactor = SignalCapturingReactor('Rebooting')
        reactor.run(self.iface.ApplyUpdate)
        self.assertEqual(len(reactor.signals), 1)
        self.assertTrue(reactor.signals[0])
        with open(self.reboot_log, encoding='utf-8') as fp:
            reboot = fp.read()
        self.assertEqual(reboot, '/sbin/reboot -f recovery')

    def test_applied(self):
        # Apply the update; we'll get the Applied signal.
        self.assertFalse(os.path.exists(self.reboot_log))
        reactor = SignalCapturingReactor('UpdateDownloaded')
        reactor.run(self.iface.CheckForUpdate)
        reactor = SignalCapturingReactor('Applied')
        reactor.run(self.iface.ApplyUpdate)
        self.assertEqual(len(reactor.signals), 1)
        self.assertTrue(reactor.signals[0])

    def test_applied_no_reboot(self):
        # Apply the update, but do not reboot.
        ini_path = os.path.join(
            SystemImagePlugin.controller.ini_path, '12_noreboot.ini')
        shutil.copy(data_path('state.config_01.ini'), ini_path)
        self.iface.Reset()
        reactor = AppliedNoRebootingReactor(self.iface)
        reactor.run()
        # We should have gotten only one signal, the Applied.
        received, flag = reactor.applied
        self.assertTrue(received)
        self.assertTrue(flag)
        received, flag = reactor.rebooting
        self.assertFalse(received)

    def test_applied_no_update(self):
        # There's no update to reboot to.
        touch_build(1701, use_config=self.config)
        self.iface.Reset()
        reactor = SignalCapturingReactor('UpdateAvailableStatus')
        reactor.run(self.iface.CheckForUpdate)
        reactor = SignalCapturingReactor('Applied')
        reactor.run(self.iface.ApplyUpdate)
        self.assertEqual(len(reactor.signals), 1)
        self.assertFalse(reactor.signals[0][0])

    def test_reboot_after_update_failed(self):
        # Cause the update to fail by deleting a file from the server.
        self.download_manually()
        reactor = SignalCapturingReactor('UpdateAvailableStatus')
        reactor.run(self.iface.CheckForUpdate)
        os.remove(os.path.join(SystemImagePlugin.controller.serverdir,
                               '4/5/6.txt.asc'))
        reactor = SignalCapturingReactor('UpdateFailed')
        reactor.run(self.iface.DownloadUpdate)
        self.assertEqual(len(reactor.signals), 1)
        failure_count, reason = reactor.signals[0]
        self.assertEqual(failure_count, 1)
        self.assertNotEqual(reason, '')
        # The reboot fails.
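# Aside (not part of the original test suite): the ubuntu_command files
# asserted above all follow the same shape: keyring loads first, then a
# recovery script that formats and mounts the system partition, applies each
# update file together with its detached .asc signature, and unmounts.  A
# minimal sketch of a writer producing that shape; this is an illustration,
# not the client's actual code, and write_ubuntu_command is a hypothetical
# name.
def write_ubuntu_command(path, update_files):
    # update_files is an ordered list such as ['6.txt', '7.txt', '5.txt'].
    lines = [
        'load_keyring image-master.tar.xz image-master.tar.xz.asc',
        'load_keyring image-signing.tar.xz image-signing.tar.xz.asc',
        'load_keyring device-signing.tar.xz device-signing.tar.xz.asc',
        'format system',
        'mount system',
        ]
    lines.extend('update {0} {0}.asc'.format(name) for name in update_files)
    lines.append('unmount system')
    with open(path, 'w', encoding='utf-8') as fp:
        fp.write('\n'.join(lines) + '\n')
# End of aside; the test above continues below.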
reactor = SignalCapturingReactor('Applied') reactor.run(self.iface.ApplyUpdate) self.assertEqual(len(reactor.signals), 1) self.assertFalse(reactor.signals[0][0]) def test_applied_after_update_failed(self): # Cause the update to fail by deleting a file from the server. self.download_manually() reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) os.remove(os.path.join(SystemImagePlugin.controller.serverdir, '4/5/6.txt.asc')) reactor = SignalCapturingReactor('UpdateFailed') reactor.run(self.iface.DownloadUpdate) self.assertEqual(len(reactor.signals), 1) failure_count, reason = reactor.signals[0] self.assertEqual(failure_count, 1) self.assertNotEqual(reason, '') # Applying the update fails. reactor = SignalCapturingReactor('Applied') reactor.run(self.iface.ApplyUpdate) self.assertEqual(len(reactor.signals), 1) self.assertFalse(reactor.signals[0][0]) def test_auto_download_cancel(self): # While automatically downloading, cancel the update. self.download_always() reactor = AutoDownloadCancelingReactor(self.iface) reactor.schedule(self.iface.CheckForUpdate) reactor.run() self.assertTrue(reactor.got_update_available_status) self.assertTrue(reactor.got_update_failed) def test_exit(self): # There is a D-Bus method to exit the server immediately. proc = find_dbus_process(SystemImagePlugin.controller.ini_path) self.iface.Exit() proc.wait() self.assertRaises(DBusException, self.iface.Information) # Re-establish a new connection. bus = dbus.SystemBus() service = bus.get_object('com.canonical.SystemImage', '/Service') self.iface = dbus.Interface(service, 'com.canonical.SystemImage') # There's no update to apply. reactor = SignalCapturingReactor('Applied') reactor.run(self.iface.ApplyUpdate) self.assertEqual(len(reactor.signals), 1) self.assertFalse(reactor.signals[0][0]) def test_cancel_while_not_downloading(self): # If we call CancelUpdate() when we're not downloading anything, no # UpdateFailed signal is sent. self.download_manually() reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) # Since we're downloading manually, no signal will be sent. reactor = SignalCapturingReactor('UpdateFailed') reactor.run(self.iface.CancelUpdate, timeout=15) self.assertEqual(len(reactor.signals), 0) def test_cancel_manual(self): # While manually downloading, cancel the update. self.download_manually() # The downloads can be canceled when there is an update available. reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) # Cancel future operations. However, since no download is in # progress, we will not get a signal. reactor = SignalCapturingReactor('UpdateFailed') reactor.run(self.iface.CancelUpdate, timeout=15) self.assertEqual(len(reactor.signals), 0) self.assertFalse(os.path.exists(self.command_file)) # Try to download the update again, though this will fail again. reactor = SignalCapturingReactor('UpdateFailed') reactor.run(self.iface.DownloadUpdate) self.assertEqual(len(reactor.signals), 1) failure_count, reason = reactor.signals[0] self.assertEqual(failure_count, 1) self.assertNotEqual(reason, '') self.assertFalse(os.path.exists(self.command_file)) # The next check resets the failure count and succeeds. 
reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) signal = reactor.signals[0] self.assertTrue(signal.is_available) self.assertFalse(signal.downloading) # And now we can successfully download the update. reactor = SignalCapturingReactor('UpdateDownloaded') reactor.run(self.iface.DownloadUpdate) self.assertEqual(len(reactor.signals), 1) class MockReactor(Reactor): def __init__(self, iface): super().__init__(dbus.SystemBus()) self._iface = iface self.timeout = 120 self.react_to('UpdateProgress') self.pause_at_percentage = None self.cancel_at_percentage = None self.pause_start = None self.pause_end = None self.progress = [] self.react_to('UpdateDownloaded') self.downloaded = False self.react_to('UpdateAvailableStatus') self.status = None self.auto_download = True self.react_to('UpdateFailed') self.failed = [] self.react_to('UpdatePaused') self.pauses = [] self.pauses_should_quit = True def _resume(self): self.pause_end = time.time() self._iface.DownloadUpdate() return False def _do_UpdateProgress(self, signal, path, percentage, eta): self.progress.append((percentage, eta)) if percentage == self.pause_at_percentage: self.pause_start = time.time() self._iface.PauseDownload() # Wait 5 seconds, then resume the download. self.schedule(self._resume, 5000) elif percentage == self.cancel_at_percentage: self._iface.CancelUpdate() def _do_UpdateDownloaded(self, *args, **kws): self.downloaded = True self.quit() def _do_UpdateAvailableStatus(self, signal, path, *args): self.status = UASRecord(*args) if not self.auto_download: # The download must be started manually. self.quit() def _do_UpdateFailed(self, signal, path, *args, **kws): self.failed.append(args) self.quit() def _do_UpdatePaused(self, signal, path, percentage): self.pauses.append(percentage) if self.pauses_should_quit: self.quit() class TestDBusMockUpdateAutoSuccess(_TestBase): mode = 'update-auto-success' def test_scenario_1(self): # Start the ball rolling. reactor = MockReactor(self.iface) reactor.schedule(self.iface.CheckForUpdate) reactor.run() self.assertTrue(reactor.status.is_available) self.assertTrue(reactor.status.downloading) self.assertEqual(reactor.status.available_version, '42') self.assertEqual(reactor.status.update_size, 1337 * MiB) self.assertEqual(reactor.status.last_update_date, '1983-09-13T12:13:14') self.assertEqual(reactor.status.error_reason, '') # We should have gotten 100 UpdateProgress signals, where each # increments the percentage by 1 and decrements the eta by 0.5. self.assertEqual(len(reactor.progress), 100) for i in range(100): percentage, eta = reactor.progress[i] self.assertEqual(percentage, i) self.assertEqual(eta, 50 - (i * 0.5)) self.assertTrue(reactor.downloaded) reactor = SignalCapturingReactor('Applied') reactor.run(self.iface.ApplyUpdate) self.assertEqual(len(reactor.signals), 1) self.assertTrue(reactor.signals[0]) def test_scenario_2(self): # Like scenario 1, but with PauseDownload called during the downloads. reactor = MockReactor(self.iface) reactor.pauses_should_quit = False reactor.pause_at_percentage = 35 reactor.schedule(self.iface.CheckForUpdate) reactor.run() # We got a pause signal. self.assertEqual(len(reactor.pauses), 1) self.assertEqual(reactor.pauses[0], 35) # Make sure that we still got 100 progress reports. self.assertEqual(len(reactor.progress), 100) # And we still completed successfully. self.assertTrue(reactor.downloaded) # And that we paused successfully. 
        # We can't be exact about the amount of time we paused, but it
        # should always be at least 4 seconds.
        self.assertGreater(reactor.pause_end - reactor.pause_start, 4)

    def test_scenario_3(self):
        # Like scenario 2, but PauseDownload is called when not downloading,
        # so it is a no-op.  The test service waits 3 seconds after a
        # CheckForUpdate before it begins downloading, so let's issue a
        # no-op PauseDownload after 1 second.
        reactor = MockReactor(self.iface)
        reactor.schedule(self.iface.PauseDownload, 1000)
        reactor.schedule(self.iface.CheckForUpdate)
        reactor.run()
        self.assertEqual(len(reactor.pauses), 0)
        self.assertIsNone(reactor.pause_start)
        self.assertIsNone(reactor.pause_end)

    def test_scenario_4(self):
        # If DownloadUpdate is called when not paused, downloading, or
        # update-checked, it is a no-op.
        self.iface.DownloadUpdate()
        # Only run for 15 seconds, but still, we'll never see an
        # UpdateAvailableStatus or UpdateDownloaded.
        reactor = MockReactor(self.iface)
        reactor.timeout = 15
        reactor.run()
        self.assertIsNone(reactor.status)
        self.assertFalse(reactor.downloaded)

    def test_scenario_5(self):
        # In this scenario, we cancel the download midway through.  This
        # will result in an UpdateFailed signal.
        reactor = MockReactor(self.iface)
        reactor.cancel_at_percentage = 27
        reactor.schedule(self.iface.CheckForUpdate)
        reactor.run()
        # Our failed signal will tell us we got one consecutive failure and
        # the reason is that we canceled (but don't depend on the exact
        # content of the last_reason, just that it's not the empty string).
        self.assertEqual(len(reactor.failed), 1)
        failure_count, reason = reactor.failed[0]
        self.assertEqual(failure_count, 1)
        self.assertNotEqual(reason, '')
        # We also didn't download anything.
        self.assertFalse(reactor.downloaded)

    def test_scenario_6(self):
        # Like scenario 5, but after a cancel, CheckForUpdate will restart
        # things again.
        reactor = MockReactor(self.iface)
        reactor.cancel_at_percentage = 13
        reactor.schedule(self.iface.CheckForUpdate)
        reactor.run()
        # Our failed signal will tell us we got one consecutive failure and
        # the reason is that we canceled (but don't depend on the exact
        # content of the last_reason, just that it's not the empty string).
        self.assertEqual(len(reactor.failed), 1)
        failure_count, reason = reactor.failed[0]
        self.assertEqual(failure_count, 1)
        self.assertNotEqual(reason, '')
        # We also didn't download anything.
        self.assertFalse(reactor.downloaded)
        # Now, restart the download.
        reactor = MockReactor(self.iface)
        reactor.schedule(self.iface.CheckForUpdate)
        reactor.run()
        # This time, we've downloaded everything.
        self.assertTrue(reactor.downloaded)
        self.assertEqual(len(reactor.failed), 0)


class TestDBusMockUpdateManualSuccess(_TestBase):
    mode = 'update-manual-success'

    def test_scenario_1(self):
        # Like scenario 1 for auto-downloading except that the download must
        # be started explicitly.
        reactor = MockReactor(self.iface)
        reactor.schedule(self.iface.CheckForUpdate)
        reactor.auto_download = False
        reactor.run()
        self.assertTrue(reactor.status.is_available)
        self.assertFalse(reactor.status.downloading)
        self.assertEqual(reactor.status.available_version, '42')
        self.assertEqual(reactor.status.update_size, 1337 * MiB)
        self.assertEqual(reactor.status.last_update_date,
                         '1983-09-13T12:13:14')
        self.assertEqual(reactor.status.error_reason, '')
        # There should be no progress yet.
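# Aside (not part of the original test suite): the mock service's progress
# schedule asserted in these scenarios is simple enough to write down
# directly: one hundred UpdateProgress signals, with the percentage counting
# up from 0 and the eta counting down from 50 in half-second steps.  A
# self-contained sketch of the expected (percentage, eta) sequence:
expected_progress = [(i, 50 - (i * 0.5)) for i in range(100)]
assert expected_progress[0] == (0, 50.0)
assert expected_progress[99] == (99, 0.5)
assert len(expected_progress) == 100
# End of aside; the test above continues below.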
self.assertEqual(len(reactor.progress), 0) reactor = MockReactor(self.iface) reactor.schedule(self.iface.DownloadUpdate) reactor.auto_download = False reactor.run() # We should have gotten 100 UpdateProgress signals, where each # increments the percentage by 1 and decrements the eta by 0.5. self.assertEqual(len(reactor.progress), 100) for i in range(100): percentage, eta = reactor.progress[i] self.assertEqual(percentage, i) self.assertEqual(eta, 50 - (i * 0.5)) self.assertTrue(reactor.downloaded) reactor = SignalCapturingReactor('Applied') reactor.run(self.iface.ApplyUpdate) self.assertEqual(len(reactor.signals), 1) self.assertTrue(reactor.signals[0]) def test_second_uas_signal_is_still_downloading(self): # LP: #1273354 claims that if you "trigger the download, close system # settings, and reopen it, the signal UpdateAvailableStatus will send # downloading==false, instead of true". reactor = MockReactor(self.iface) reactor.schedule(self.iface.CheckForUpdate) reactor.auto_download = False reactor.run() self.assertTrue(reactor.status.is_available) self.assertFalse(reactor.status.downloading) # Now trigger the download, but ignore any signals that come from it. self.iface.DownloadUpdate() # Simulate closing and re-opening system settings by creating a new # reactor and issuing another check. reactor = MockReactor(self.iface) reactor.schedule(self.iface.CheckForUpdate) reactor.auto_download = False reactor.run() self.assertTrue(reactor.status.is_available) self.assertTrue(reactor.status.downloading) class TestDBusMockUpdateFailed(_TestBase): mode = 'update-failed' def test_scenario_1(self): # The server is already in failure mode. A CheckForUpdate() restarts # the check, which returns information about the new update. It # auto-starts, but this fails. reactor = MockReactor(self.iface) reactor.schedule(self.iface.CheckForUpdate) reactor.run() self.assertTrue(reactor.status.is_available) self.assertFalse(reactor.status.downloading) self.assertEqual(reactor.status.available_version, '42') self.assertEqual(reactor.status.update_size, 1337 * MiB) self.assertEqual(reactor.status.last_update_date, '1983-09-13T12:13:14') self.assertEqual(reactor.status.error_reason, 'You need some network for downloading') self.assertEqual(len(reactor.failed), 1) failure_count, reason = reactor.failed[0] self.assertEqual(failure_count, 2) self.assertEqual(reason, 'You need some network for downloading') def test_scenario_2(self): # The server starts out in a failure mode. When we ask it to download # an update, because it's not already downloading and the failure mode # has not been reset, we get an UpdateFailed signal. self.iface.CheckForUpdate() reactor = SignalCapturingReactor('UpdateFailed') reactor.run(self.iface.DownloadUpdate, timeout=10) self.assertEqual(len(reactor.signals), 1) failure_count, last_error = reactor.signals[0] # The failure_count will be three because: # 1) it gets set to 1 in the mock's constructor. # 2) the mock's CheckForUpdate() bumps it to two. # 3) the mock's superclass's DownloadUpdate bumps it to three after it # checks to see if downloading is paused (it's not), and if the # download is available (it is, though mocked). # # The code in #3 that terminates with bumping the failure count is the # bit we're really trying to test here. An UpdateFailed signal gets # sent (the only one in this test, as seen above) and it contains the # current failure count as accounted above, and the mock's last error. 
self.assertEqual(failure_count, 3) self.assertEqual(last_error, 'mock service failed') class TestDBusMockFailApply(_TestBase): mode = 'fail-apply' def test_scenario_1(self): # The update has been downloaded, client sends CheckForUpdate and # receives a response. The update is downloaded successfully. An # error occurs when we try to apply the update. reactor = MockReactor(self.iface) reactor.schedule(self.iface.CheckForUpdate) reactor.run() self.assertTrue(reactor.status.is_available) self.assertFalse(reactor.status.downloading) self.assertEqual(reactor.status.available_version, '42') self.assertEqual(reactor.status.update_size, 1337 * MiB) self.assertEqual(reactor.status.last_update_date, '1983-09-13T12:13:14') self.assertEqual(reactor.status.error_reason, '') self.assertTrue(reactor.downloaded) reactor = SignalCapturingReactor('Applied') reactor.run(self.iface.ApplyUpdate) self.assertEqual(len(reactor.signals), 1) self.assertFalse(bool(reactor.signals[0][0])) class TestDBusMockFailResume(_TestBase): mode = 'fail-resume' def test_scenario_1(self): # The server download is paused at 42%. A CheckForUpdate is issued # and gets a response. An UpdatePaused signal is sent. A problem # occurs that prevents resuming. reactor = MockReactor(self.iface) reactor.schedule(self.iface.CheckForUpdate) reactor.run() self.assertTrue(reactor.status.is_available) self.assertFalse(reactor.status.downloading) self.assertEqual(reactor.status.available_version, '42') self.assertEqual(reactor.status.update_size, 1337 * MiB) self.assertEqual(reactor.status.last_update_date, '1983-09-13T12:13:14') self.assertEqual(reactor.status.error_reason, '') # The download is already paused. self.assertEqual(len(reactor.pauses), 1) self.assertEqual(reactor.pauses[0], 42) # We try to resume the download, but that fails. self.assertEqual(len(reactor.failed), 0) reactor = MockReactor(self.iface) reactor.schedule(self.iface.DownloadUpdate) reactor.run() self.assertEqual(len(reactor.failed), 1) failure_count, reason = reactor.failed[0] self.assertEqual(failure_count, 9) self.assertEqual(reason, 'You need some network for downloading') class TestDBusMockFailPause(_TestBase): mode = 'fail-pause' def test_scenario_1(self): # The server is downloading, currently at 10% with no known ETA. The # client tries to pause the download but is unable to do so. reactor = MockReactor(self.iface) reactor.schedule(self.iface.CheckForUpdate) # Only run the loop for a few seconds, since otherwise there's no # natural way to pause the download. reactor.timeout = 5 reactor.run() self.assertTrue(reactor.status.is_available) self.assertTrue(reactor.status.downloading) self.assertEqual(reactor.status.available_version, '42') self.assertEqual(reactor.status.update_size, 1337 * MiB) self.assertEqual(reactor.status.last_update_date, '1983-09-13T12:13:14') self.assertEqual(reactor.status.error_reason, '') self.assertEqual(len(reactor.progress), 1) percentage, eta = reactor.progress[0] self.assertEqual(percentage, 10) self.assertEqual(eta, 0) reason = self.iface.PauseDownload() self.assertEqual(reason, 'no no, not now') class TestDBusMockNoUpdate(_TestBase): mode = 'no-update' def test_scenario_1(self): # No update is available. 
reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) signal = reactor.signals[0] self.assertFalse(signal.is_available) self.assertFalse(signal.downloading) self.assertEqual(signal.last_update_date, '1983-09-13T12:13:14') # All the other status variables can be ignored. def test_lp_1215946(self): reactor = MockReactor(self.iface) reactor.auto_download = False # no-update mock sends UpdateFailed before UpdateAvailableStatus. reactor.schedule(self.iface.CheckForUpdate) reactor.run() self.assertEqual(len(reactor.failed), 0) self.assertIsNotNone(reactor.status) class TestDBusRegressions(_LiveTesting): """Test that various regressions have been fixed.""" def test_lp_1205398(self): # Reset state after cancel. self.download_manually() # This test requires that the download take more than 50ms, since # that's the quickest we can issue the cancel, so make one of the # files huge. serverdir = SystemImagePlugin.controller.serverdir index_path = os.path.join(serverdir, 'stable', 'nexus7', 'index.json') file_path = os.path.join(serverdir, '5', '6', '7.txt') # This index file has a 5/6/7.txt checksum equal to the one we're # going to create below. setup_index('dbus.index_02.json', serverdir, 'device-signing.gpg') head, tail = os.path.split(index_path) copy('dbus.index_02.json', head, tail) sign(index_path, 'device-signing.gpg') write_bytes(file_path, 50) sign(file_path, 'device-signing.gpg') # An update is available. reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) signal = reactor.signals[0] self.assertTrue(signal.is_available, msg=signal.error_reason) self.assertFalse(signal.downloading) self.assertFalse(os.path.exists(self.command_file)) # Arrange for the download to be canceled after it starts. reactor = SignalCapturingReactor('UpdateFailed') reactor.schedule(self.iface.CancelUpdate) reactor.run(self.iface.DownloadUpdate) self.assertEqual(len(reactor.signals), 1) failure_count, reason = reactor.signals[0] self.assertNotEqual(reason, '') self.assertFalse(os.path.exists(self.command_file)) # There's still an update available though, so check again. reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) signal = reactor.signals[0] self.assertTrue(signal.is_available) self.assertFalse(signal.downloading) # Now we'll let the download proceed to completion. reactor = SignalCapturingReactor('UpdateDownloaded') reactor.run(self.iface.DownloadUpdate) self.assertEqual(len(reactor.signals), 1) # And now there is a command file for the update. self.assertTrue(os.path.exists(self.command_file)) def test_lp_1365646(self): # After an automatic download is complete, we got three DownloadUpdate # calls with no intervening CheckForUpdate. This causes a crash since # an unlocked checking lock was released. self.download_always() # Do a normal automatic download. reactor = SignalCapturingReactor('UpdateDownloaded') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) # Now, just do a manual DownloadUpdate. We should get an almost # immediate UpdateDownloaded in response. Nothing actually gets # downloaded, but the files in the cache are still valid. The bug # referenced by this method would cause s-i-d to crash, so as long as # the process still exists after the signal is received, the bug is # fixed. 
The crash doesn't actually affect any client behavior! But # the traceback does show up in the crash reporter. process = find_dbus_process(SystemImagePlugin.controller.ini_path) reactor = SignalCapturingReactor('UpdateDownloaded') reactor.run(self.iface.DownloadUpdate) self.assertEqual(len(reactor.signals), 1) self.assertTrue(process.is_running()) class TestDBusGetSet(_TestBase): """Test the DBus client's key/value settings.""" mode = 'live' def test_set_get_basic(self): # get/set a random key. self.iface.SetSetting('name', 'ant') self.assertEqual(self.iface.GetSetting('name'), 'ant') def test_set_get_change(self): # get/set a random key, then change it. self.iface.SetSetting('name', 'ant') self.assertEqual(self.iface.GetSetting('name'), 'ant') self.iface.SetSetting('name', 'bee') self.assertEqual(self.iface.GetSetting('name'), 'bee') def test_get_before_set(self): # Getting a key that doesn't exist returns the empty string. self.assertEqual(self.iface.GetSetting('thing'), '') self.iface.SetSetting('thing', 'one') self.assertEqual(self.iface.GetSetting('thing'), 'one') def test_setting_persists(self): # Set a key, restart the dbus server, and the key's value persists. self.iface.SetSetting('permanent', 'waves') self.assertEqual(self.iface.GetSetting('permanent'), 'waves') terminate_service() self.assertRaises(DBusException, self.iface.GetSetting, 'permanent') # Re-establish a new connection. bus = dbus.SystemBus() service = bus.get_object('com.canonical.SystemImage', '/Service') self.iface = dbus.Interface(service, 'com.canonical.SystemImage') self.assertEqual(self.iface.GetSetting('permanent'), 'waves') def test_setting_min_battery_good(self): # min_battery has special semantics. self.iface.SetSetting('min_battery', '30') self.assertEqual(self.iface.GetSetting('min_battery'), '30') def test_setting_min_battery_bad(self): # min_battery requires the string representation of a percentage. self.iface.SetSetting('min_battery', 'standby') self.assertEqual(self.iface.GetSetting('min_battery'), '') self.iface.SetSetting('min_battery', '30') self.assertEqual(self.iface.GetSetting('min_battery'), '30') self.iface.SetSetting('min_battery', 'foo') self.assertEqual(self.iface.GetSetting('min_battery'), '30') self.iface.SetSetting('min_battery', '-10') self.assertEqual(self.iface.GetSetting('min_battery'), '30') self.iface.SetSetting('min_battery', '100') self.assertEqual(self.iface.GetSetting('min_battery'), '100') self.iface.SetSetting('min_battery', '101') self.assertEqual(self.iface.GetSetting('min_battery'), '100') self.iface.SetSetting('min_battery', 'standby') self.assertEqual(self.iface.GetSetting('min_battery'), '100') def test_setting_auto_download_good(self): # auto_download has special semantics. self.iface.SetSetting('auto_download', '0') self.assertEqual(self.iface.GetSetting('auto_download'), '0') self.iface.SetSetting('auto_download', '1') self.assertEqual(self.iface.GetSetting('auto_download'), '1') self.iface.SetSetting('auto_download', '2') self.assertEqual(self.iface.GetSetting('auto_download'), '2') def test_setting_auto_download_bad(self): # auto_download requires an integer between 0 and 2. Don't forget # that it gets pre-populated when the database is created.
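# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original test suite): the special
# setting semantics exercised by the tests above can be modeled as small
# validators in which a bad value is ignored and the previously stored value
# is retained.  Both function names below are hypothetical.
def validate_min_battery(candidate, current):
    """Return candidate if it is a percentage string '0'..'100', else current."""
    try:
        value = int(candidate)
    except ValueError:
        return current
    return candidate if 0 <= value <= 100 else current

def validate_auto_download(candidate, current):
    """Return candidate if it is one of '0', '1', '2', else current."""
    return candidate if candidate in ('0', '1', '2') else current

# Mirroring the assertions above:
assert validate_min_battery('standby', '30') == '30'
assert validate_min_battery('101', '100') == '100'
assert validate_auto_download('3', '0') == '0'
# ---------------------------------------------------------------------------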
self.iface.SetSetting('auto_download', 'standby') self.assertEqual(self.iface.GetSetting('auto_download'), '1') self.iface.SetSetting('auto_download', '-1') self.assertEqual(self.iface.GetSetting('auto_download'), '1') self.iface.SetSetting('auto_download', '0') self.assertEqual(self.iface.GetSetting('auto_download'), '0') self.iface.SetSetting('auto_download', '3') self.assertEqual(self.iface.GetSetting('auto_download'), '0') self.iface.SetSetting('auto_download', '2') self.assertEqual(self.iface.GetSetting('auto_download'), '2') def test_prepopulated_settings(self): # Some settings are pre-populated. self.assertEqual(self.iface.GetSetting('auto_download'), '1') def test_setting_changed_signal(self): reactor = SignalCapturingReactor('SettingChanged') reactor.run(partial(self.iface.SetSetting, 'foo', 'yes')) self.assertEqual(len(reactor.signals), 1) key, new_value = reactor.signals[0] self.assertEqual(key, 'foo') self.assertEqual(new_value, 'yes') # The value did not change. reactor = SignalCapturingReactor('SettingChanged') reactor.run(partial(self.iface.SetSetting, 'foo', 'yes'), timeout=15) self.assertEqual(len(reactor.signals), 0) # This is the default value, so nothing changes. reactor = SignalCapturingReactor('SettingChanged') reactor.run(partial(self.iface.SetSetting, 'auto_download', '1'), timeout=15) self.assertEqual(len(reactor.signals), 0) # This is a bogus value, so nothing changes. reactor = SignalCapturingReactor('SettingChanged') reactor.run(partial(self.iface.SetSetting, 'min_battery', '200'), timeout=15) self.assertEqual(len(reactor.signals), 0) # Change back. reactor = SignalCapturingReactor('SettingChanged') reactor.run(partial(self.iface.SetSetting, 'auto_download', '0')) self.assertEqual(len(reactor.signals), 1) key, new_value = reactor.signals[0] self.assertEqual(key, 'auto_download') self.assertEqual(new_value, '0') # Change back. reactor = SignalCapturingReactor('SettingChanged') reactor.run(partial(self.iface.SetSetting, 'min_battery', '30')) self.assertEqual(len(reactor.signals), 1) key, new_value = reactor.signals[0] self.assertEqual(key, 'min_battery') self.assertEqual(new_value, '30') class TestDBusInfo(_TestBase): mode = 'more-info' def test_information(self): # .Information() with some version details. response = self.iface.Information() self.assertEqual( sorted(str(key) for key in response), [ 'channel_name', 'current_build_number', 'device_name', 'last_check_date', 'last_update_date', 'target_build_number', 'version_detail', ]) self.assertEqual(response['current_build_number'], '45') self.assertEqual(response['target_build_number'], '53') self.assertEqual(response['device_name'], 'nexus11') self.assertEqual(response['channel_name'], 'daily-proposed') self.assertEqual(response['last_update_date'], '2099-08-01 04:45:45') self.assertEqual(response['version_detail'], 'ubuntu=123,mako=456,custom=789') self.assertEqual(response['last_check_date'], '2099-08-01 04:45:00') class TestLiveDBusInfo(_LiveTesting): def test_information_before_check_no_details(self): # .Information() where there are no version details, and no previous # CheckForUpdate() call was made. 
timestamp = int(datetime(2022, 8, 1, 4, 45, 45).timestamp()) touch_build(45, timestamp, self.config) self.iface.Reset() response = self.iface.Information() self.assertEqual(response['current_build_number'], '45') self.assertEqual(response['device_name'], 'nexus7') self.assertEqual(response['channel_name'], 'stable') self.assertEqual(response['last_update_date'], '2022-08-01 04:45:45') self.assertEqual(response['version_detail'], '') self.assertEqual(response['last_check_date'], '') self.assertEqual(response['target_build_number'], '-1') def test_information_no_details(self): # .Information() where there are no version details, but a previous # CheckForUpdate() call was made. timestamp = int(datetime(2022, 8, 1, 4, 45, 45).timestamp()) touch_build(45, timestamp, self.config) self.iface.Reset() reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) # Before we get the information, let's poke a known value into the # settings database. Before we do that, make sure that the database # already has a value in it. config = Configuration(SystemImagePlugin.controller.ini_path) settings = Settings(config) real_last_check_date = settings.get('last_check_date') # We can't really test the last check date against anything in a # robust way. E.g. what if we just happen to be at 12:59:59 on # December 31st? Let's at least make sure it has a sane format. self.assertRegex(real_last_check_date, r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}') settings.set('last_check_date', '2055-08-01 21:12:00') response = self.iface.Information() self.assertEqual(response['current_build_number'], '45') self.assertEqual(response['device_name'], 'nexus7') self.assertEqual(response['channel_name'], 'stable') self.assertEqual(response['last_update_date'], '2022-08-01 04:45:45') self.assertEqual(response['version_detail'], '') self.assertEqual(response['last_check_date'], '2055-08-01 21:12:00') self.assertEqual(response['target_build_number'], '1600') def test_information(self): # .Information() where there are version details, and a previous # CheckForUpdate() call was made. timestamp = int(datetime(2022, 8, 1, 4, 45, 45).timestamp()) touch_build(45, timestamp, use_config=self.config) ini_path = Path(SystemImagePlugin.controller.ini_path) override_ini = ini_path / '03_override.ini' with override_ini.open('w', encoding='utf-8') as fp: print("""\ [service] version_detail: ubuntu=222,mako=333,custom=444 """, file=fp) self.iface.Reset() # Set last_update_date. reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) # Before we get the information, let's poke a known value into the # settings database. Before we do that, make sure that the database # already has a value in it. config = Configuration(SystemImagePlugin.controller.ini_path) settings = Settings(config) real_last_check_date = settings.get('last_check_date') # We can't really test the last check date against anything in a # robust way. E.g. what if we just happen to be at 12:59:59 on # December 31st? Let's at least make sure it has a sane format.
self.assertRegex(real_last_check_date, r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}') settings.set('last_check_date', '2055-08-01 21:12:01') response = self.iface.Information() self.assertEqual(response['current_build_number'], '45') self.assertEqual(response['device_name'], 'nexus7') self.assertEqual(response['channel_name'], 'stable') self.assertEqual(response['last_update_date'], '2022-08-01 04:45:45') self.assertEqual(response['version_detail'], 'ubuntu=222,mako=333,custom=444') # We poked a known value into the settings database above, so the # returned last check date can be checked against that exact value. self.assertRegex(response['last_check_date'], '2055-08-01 21:12:01') self.assertEqual(response['target_build_number'], '1600') def test_information_no_update_available(self): # .Information() where we know that no update is available, gives us a # target build number equal to the current build number. touch_build(1701, use_config=self.config) self.iface.Reset() reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) signal = reactor.signals[0] self.assertEqual(signal.available_version, '') response = self.iface.Information() self.assertEqual(response['target_build_number'], '1701') def test_information_workflow(self): # At first, .Information() won't know whether there is an update # available or not. Then we check, and it tells us there is one. touch_build(45, use_config=self.config) response = self.iface.Information() self.assertEqual(response['target_build_number'], '-1') reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) signal = reactor.signals[0] self.assertEqual(signal.available_version, '1600') response = self.iface.Information() self.assertEqual(response['target_build_number'], '1600') def test_target_version_detail_before_check(self): # Before we do a CheckForUpdate, there is no target version detail. timestamp = int(datetime(2022, 8, 1, 4, 45, 45).timestamp()) touch_build(45, timestamp, self.config) self.iface.Reset() response = self.iface.Information() self.assertEqual(response['version_detail'], '') self.assertEqual(response['target_version_detail'], '') def test_target_version_detail_after_check_no_update_available(self): # After a CheckForUpdate, if there is no update available, the target # version detail is the same as the version detail. ini_path = Path(SystemImagePlugin.controller.ini_path) override_ini = ini_path / '03_override.ini' with override_ini.open('w', encoding='utf-8') as fp: print("""\ [service] version_detail: ubuntu=401,mako=501,custom=601 """, file=fp) timestamp = int(datetime(2022, 8, 1, 4, 45, 45).timestamp()) touch_build(1700, timestamp, use_config=self.config) self.iface.Reset() reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) response = self.iface.Information() self.assertEqual(response['version_detail'], 'ubuntu=401,mako=501,custom=601') self.assertEqual(response['target_version_detail'], 'ubuntu=401,mako=501,custom=601') def test_target_version_detail_after_check_update_available(self): # After a CheckForUpdate, if there is an update available, the target # version detail is the new update.
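# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original test suite): the
# version_detail values asserted above ('ubuntu=401,mako=501,custom=601')
# are comma-separated key=value pairs.  A hypothetical helper to unpack
# them might look like this.
def parse_version_detail(detail):
    """Map 'ubuntu=401,mako=501,custom=601' to a {component: version} dict."""
    if not detail:
        return {}
    return dict(item.split('=', 1) for item in detail.split(','))

assert parse_version_detail('ubuntu=401,mako=501,custom=601') == {
    'ubuntu': '401', 'mako': '501', 'custom': '601'}
assert parse_version_detail('') == {}
# ---------------------------------------------------------------------------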
ini_path = Path(SystemImagePlugin.controller.ini_path) override_ini = ini_path / '03_override.ini' with override_ini.open('w', encoding='utf-8') as fp: print("""\ [service] version_detail: ubuntu=401,mako=501,custom=601 """, file=fp) timestamp = int(datetime(2022, 8, 1, 4, 45, 45).timestamp()) touch_build(45, timestamp, use_config=self.config) # This index.json file is exactly like the test's default # dbus.index_01.json file except that it has version_detail keys in # the image sections. self._prepare_index('dbus.index_06.json') self.iface.Reset() reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) response = self.iface.Information() self.assertEqual(response['version_detail'], 'ubuntu=401,mako=501,custom=601') self.assertEqual(response['target_version_detail'], 'ubuntu=402,mako=502,custom=602') class TestDBusFactoryReset(_LiveTesting): def test_factory_reset(self): # A factory reset is applied. command_file = os.path.join( self.config.updater.cache_partition, 'ubuntu_command') self.assertFalse(os.path.exists(self.reboot_log)) self.assertFalse(os.path.exists(command_file)) reactor = SignalCapturingReactor('Rebooting') reactor.run(self.iface.FactoryReset) self.assertEqual(len(reactor.signals), 1) self.assertTrue(reactor.signals[0]) with open(self.reboot_log, encoding='utf-8') as fp: reboot = fp.read() self.assertEqual(reboot, '/sbin/reboot -f recovery') with open(command_file, encoding='utf-8') as fp: command = fp.read() self.assertEqual(command, 'format data\n') class TestDBusProductionReset(_LiveTesting): def test_production_reset(self): # A production factory reset is applied. command_file = os.path.join( self.config.updater.cache_partition, 'ubuntu_command') self.assertFalse(os.path.exists(self.reboot_log)) self.assertFalse(os.path.exists(command_file)) reactor = SignalCapturingReactor('Rebooting') reactor.run(self.iface.ProductionReset) self.assertEqual(len(reactor.signals), 1) self.assertTrue(reactor.signals[0]) with open(self.reboot_log, encoding='utf-8') as fp: reboot = fp.read() self.assertEqual(reboot, '/sbin/reboot -f recovery') with open(command_file, encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, dedent("""\ format data enable factory_wipe """)) class TestDBusProgress(_LiveTesting): def test_progress(self): self.download_manually() touch_build(0, use_config=self.config) reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) # Start the download and watch the progress meters. reactor = ProgressRecordingReactor() reactor.schedule(self.iface.DownloadUpdate) reactor.run() # The only progress signals we can count on are the first and last # ones. All will have an eta of 0, since that value is not calculable # right now. The first progress will have percentage 0 and the last # will have percentage 100. self.assertGreaterEqual(len(reactor.progress), 2) percentage, eta = reactor.progress[0] self.assertEqual(percentage, 0) self.assertEqual(eta, 0) percentage, eta = reactor.progress[-1] self.assertEqual(percentage, 100) self.assertEqual(eta, 0) class TestDBusPauseResume(_LiveTesting): def setUp(self): super().setUp() # We have to hack the files to be rather large so that the download # doesn't complete before we get a chance to pause it. Of course, # this breaks the signatures because we're changing the file contents # after the .asc files have been written.
We do have to update the # checksums in the index.json file, and then resign the index. for path in ('3/4/5.txt', '4/5/6.txt', '5/6/7.txt'): full_path = os.path.join( SystemImagePlugin.controller.serverdir, path) write_bytes(full_path, 750) tweak_checksums('') #@capture_dbus_calls def test_pause(self): # Set up some extra D-Bus debugging. self.download_manually() touch_build(0, use_config=self.config) reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) # There must be an update available. self.assertTrue(reactor.signals[0].is_available) # We're ready to start downloading. We schedule a pause to happen in # a little bit and then ensure that we get the proper signal. reactor = PausingReactor(self.iface) reactor.schedule(self.iface.DownloadUpdate) reactor.run(timeout=15) self.assertTrue(reactor.paused) # There's a race condition between issuing the PauseDownload() call to # u-d-m and it reacting to send us a `paused` signal. The best we can # know is that the pause percentage is in the range (0:100) and that # it's greater than the percentage at which we issued the pause. Even # this is partly timing related, so we've hopefully tuned the file # size to be big enough to trigger the expected behavior. There's no # other way to control the live u-d-m process. self.assertGreater(reactor.percentage, 0) self.assertLessEqual(reactor.percentage, 100) self.assertGreaterEqual(reactor.percentage, reactor.pause_progress) # Now let's resume the download. Because we intentionally corrupted # the downloaded files, we'll get an UpdateFailed signal instead of # the successful UpdateDownloaded signal. reactor = SignalCapturingReactor('UpdateFailed') reactor.run(self.iface.DownloadUpdate, timeout=300) self.assertEqual(len(reactor.signals), 1) # The error message will include lots of details on the SignatureError # that results. The key thing is that it's 5.txt that is the first # file to fail its signature check. failure_count, last_error = reactor.signals[0] self.assertEqual(failure_count, 1) check_next = False for line in last_error.splitlines(): line = line.strip() if check_next: self.assertEqual(os.path.basename(line), '5.txt') break if line.startswith('data path:'): check_next = True else: raise AssertionError('Did not find expected error output') def test_must_be_downloading_to_pause(self): # You get an error string if you try to pause the download but no # download is in progress. error_message = self.iface.PauseDownload() self.assertEqual(error_message, 'not downloading') class TestDBusUseCache(_LiveTesting): # See LP: #1217098 def test_use_cache(self): # We run the D-Bus service once through to download all the relevant # files. Then we kill the service before performing the reboot, and # try to do another download. The second one should only try to # download the ancillary files (i.e. channels.json, index.json, # keyrings), but not the data files. self.download_always() touch_build(0, use_config=self.config) reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) # There's one boolean argument to the result. signal = reactor.signals[0] self.assertTrue(signal.is_available, msg=signal.error_reason) self.assertTrue(signal.downloading) # Now, wait for the UpdateDownloaded signal. 
reactor = SignalCapturingReactor('UpdateDownloaded') reactor.run() self.assertEqual(len(reactor.signals), 1) config = Configuration(SystemImagePlugin.controller.ini_path) self.assertEqual(set(os.listdir(config.updater.cache_partition)), set(( '5.txt', '5.txt.asc', '6.txt', '6.txt.asc', '7.txt', '7.txt.asc', 'device-signing.tar.xz', 'device-signing.tar.xz.asc', 'image-master.tar.xz', 'image-master.tar.xz.asc', 'image-signing.tar.xz', 'image-signing.tar.xz.asc', 'ubuntu_command', ))) # To prove that the data files are not downloaded again, let's # actually remove them from the server. for dirpath, dirnames, filenames in os.walk( SystemImagePlugin.controller.serverdir): for filename in filenames: if filename.endswith('.txt') or filename.endswith('.txt.asc'): os.remove(os.path.join(dirpath, filename)) # As extra proof, get the mtime in nanoseconds for the .txt and # .txt.asc files. mtimes = {} for filename in os.listdir(config.updater.cache_partition): path = os.path.join(config.updater.cache_partition, filename) if filename.endswith('.txt') or filename.endswith('.txt.asc'): mtimes[filename] = os.stat(path).st_mtime_ns # Don't issue the reboot. Instead, kill the service, which throws away # all state, but does not delete the cached files. Re-establish a new # connection. terminate_service() bus = dbus.SystemBus() service = bus.get_object('com.canonical.SystemImage', '/Service') self.iface = dbus.Interface(service, 'com.canonical.SystemImage') # Now, if we just apply the update, it will succeed, since it knows # that the cached files are valid. reactor = SignalCapturingReactor('Applied') reactor.run(self.iface.ApplyUpdate) self.assertEqual(len(reactor.signals), 1) self.assertTrue(reactor.signals[0]) self.assertEqual(set(os.listdir(config.updater.cache_partition)), set(( '5.txt', '5.txt.asc', '6.txt', '6.txt.asc', '7.txt', '7.txt.asc', 'device-signing.tar.xz', 'device-signing.tar.xz.asc', 'image-master.tar.xz', 'image-master.tar.xz.asc', 'image-signing.tar.xz', 'image-signing.tar.xz.asc', # This file exists because reboot is mocked out # in the dbus tests to just write to a log file. 'reboot.log', 'ubuntu_command', ))) for filename in os.listdir(config.updater.cache_partition): path = os.path.join(config.updater.cache_partition, filename) if filename.endswith('.txt') or filename.endswith('.txt.asc'): self.assertEqual(mtimes[filename], os.stat(path).st_mtime_ns) # Make sure the ubuntu_command file has the full update. path = os.path.join(config.updater.cache_partition, 'ubuntu_command') with open(path, 'r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, """\ load_keyring image-master.tar.xz image-master.tar.xz.asc load_keyring image-signing.tar.xz image-signing.tar.xz.asc load_keyring device-signing.tar.xz device-signing.tar.xz.asc format system mount system update 6.txt 6.txt.asc update 7.txt 7.txt.asc update 5.txt 5.txt.asc unmount system """) class TestDBusMultipleChecksInFlight(_LiveTesting): def test_multiple_check_for_updates(self): # Log analysis of LP: #1277589 appears to show the following scenario, # reproduced in this test case: # # * Automatic updates are enabled. # * No image signing or image master keys are present. # * A full update is checked. # - A new image master key and image signing key is downloaded. # - Update is available # # Start by creating some big files which will take a while to # download. def write_callback(dst): # Write a 100 MiB sized file. 
write_bytes(dst, 100) self._prepare_index('dbus.index_04.json', write_callback) timestamp = int(datetime(2022, 8, 1, 10, 11, 12).timestamp()) touch_build(0, timestamp, self.config) # Create a reactor that will exit when the UpdateDownloaded signal is # received. We're going to issue a CheckForUpdate with automatic # updates enabled. As soon as we receive the UpdateAvailableStatus # signal, we'll immediately issue *another* CheckForUpdate, which # should run while the auto-download is working. # # As per LP: #1284217, we will get a second UpdateAvailableStatus # signal, since the status is available even while the original # request is being downloaded. reactor = DoubleCheckingReactor(self.iface) reactor.run() # We need to have received at least 2 signals, but due to timing # issues it could possibly be more. self.assertGreater(len(reactor.uas_signals), 1) # All received signals should have the same information. for signal in reactor.uas_signals: self.assertTrue(signal.is_available) self.assertTrue(signal.downloading) self.assertEqual(signal.available_version, '1600') self.assertEqual(signal.update_size, 314572800) self.assertEqual(signal.last_update_date, '2022-08-01 10:11:12') self.assertEqual(signal.error_reason, '') def test_multiple_check_for_updates_with_manual_downloading(self): # Log analysis of LP: #1287919 (a refinement of LP: #1277589 with # manual downloading enabled) shows that it's possible to enter the # checking phase while a download of the data files is still running. # When manually downloading, this will start another check, and as # part of that check, the blacklist and other files will be deleted # (in anticipation of them being re-downloaded). When the data files # are downloaded, the state machine that just did the data download # may find its files deleted out from underneath it by the state # machine doing the checking. self.download_manually() # Start by creating some big files which will take a while to # download. def write_callback(dst): # Write a 100 MiB sized file. write_bytes(dst, 100) self._prepare_index('dbus.index_04.json', write_callback) touch_build(0, use_config=self.config) # Create a reactor that implements the following test plan: # * Set the device to download manually. # * Flash to an older revision # * Open System Settings and wait for it to say Updates available # * Click on About this phone # * Click on Check for Update and wait for it to say Install 1 update # * Click on Install 1 update and while the files are downloading, # swipe up from the bottom and click Back # * Click on Check for Update again # * Wait for the Update System overlay to come up, and then install # the update, and reboot reactor = ManualUpdateReactor(self.iface) reactor.run() self.assertTrue(reactor.applied) def test_schedule_lots_of_checks(self): # There is a checking lock in the D-Bus layer. If that lock cannot be # acquired *and* the results of a previous check have already been # cached, then the cached results are returned. self.download_manually() reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) # At this point, we now have a cached update status. Although this is # timing dependent, schedule two more CheckForUpdates right after each # other. The second one should get caught by the checking lock. 
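# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original test suite): the checking
# lock behavior described above can be modeled with a non-blocking acquire.
# When a check is already in flight, a previously cached status is returned
# rather than starting a second check.  All names here are hypothetical.
import threading

_checking_lock = threading.Lock()
_cached_status = None

def check_for_update(do_check):
    """Run do_check() unless a check is in flight; then reuse the cache."""
    global _cached_status
    if not _checking_lock.acquire(blocking=False):
        # Another check holds the lock; return the previously cached
        # result (None if no check has completed yet).
        return _cached_status
    try:
        _cached_status = do_check()
        return _cached_status
    finally:
        _checking_lock.release()
# ---------------------------------------------------------------------------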
reactor = DoubleFiringReactor(self.iface) reactor.run() self.assertEqual(reactor.uas_signals[0], reactor.uas_signals[1]) @unittest.skipIf(os.getuid() == 0, 'Test cannot succeed when run as root') @unittest.skipUnless(USING_PYCURL, 'LP: #1411866') class TestDBusCheckForUpdateToUnwritablePartition(_LiveTesting): @classmethod def setUpClass(cls): super().setUpClass() # Put cache_partition in an unwritable directory. config = Configuration(SystemImagePlugin.controller.ini_path) cache_partition = config.updater.cache_partition cls.bad_path = Path(cache_partition) / 'unwritable' cls.bad_path.mkdir(mode=0, parents=True) # Write a .ini file to override the cache partition. cls.override = os.path.join(config.config_d, '10_override.ini') with open(cls.override, 'w', encoding='utf-8') as fp: print("""\ [updater] cache_partition: {} """.format(cls.bad_path), file=fp) @classmethod def tearDownClass(cls): safe_remove(cls.override) shutil.rmtree(str(cls.bad_path)) super().tearDownClass() def setUp(self): # wait_for_service() must be called before the upcall to setUp(), # otherwise self will have an iface attribute pointing to a defunct # proxy. wait_for_service(restart=True) super().setUp() def tearDown(self): self.bad_path.chmod(0o777) super().tearDown() def test_check_for_update_error(self): # CheckForUpdate sees an error, in this case because the destination # directory for downloads is not writable. We'll get an # UpdateAvailableStatus with an error string. reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) self.assertIn('Permission denied', reactor.signals[0].error_reason) class TestDBusCheckForUpdateWithBrokenIndex(_LiveTesting): def test_bad_index_file_crashes_hash(self): # LP: #1222910. A broken index.json file contained an image with type # == 'delta' but no base field. This breaks the hash calculation of # that image and causes the check-for-update to fail. self._prepare_index('dbus.index_05.json') reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) self.assertEqual( reactor.signals[0].error_reason, "'Image' object has no attribute 'base'") class TestDBusMockCrashers(_TestBase): """Tests error handling in methods and signals.""" mode = 'crasher' def reset_service(self): # No-op this so we don't get the tear down .Reset() call messing with # our expected results. pass def test_method_good_path(self): # This tests a wrapped method that does not traceback. process = find_dbus_process(SystemImagePlugin.controller.ini_path) self.iface.Okay() self.assertTrue(process.is_running()) def test_method_crasher(self): # When this method tracebacks, a log will be written and the process # exited. There's no good way to test that the log was written, but # it's easy to test that the process exits. process = find_dbus_process(SystemImagePlugin.controller.ini_path) with suppress(DBusException): self.iface.Crash() process.wait(5) self.assertFalse(process.is_running()) def test_signal_crasher(self): # Here, it's the signal that tracebacks. reactor = SignalCapturingReactor('SignalCrash') process = find_dbus_process(SystemImagePlugin.controller.ini_path) def safe_run(): with suppress(DBusException): self.iface.CrashSignal() reactor.run(safe_run, timeout=5) # The signal never made it.
self.assertEqual(len(reactor.signals), 0) process.wait(5) self.assertFalse(process.is_running()) def test_crash_after_signal(self): # Here, the method tracebacks, but not until after it sends the # signal, which we should still receive. reactor = SignalCapturingReactor('SignalOkay') process = find_dbus_process(SystemImagePlugin.controller.ini_path) def safe_run(): with suppress(DBusException): self.iface.CrashAfterSignal() reactor.run(safe_run, timeout=15) # The signal made it. self.assertEqual(len(reactor.signals), 1) # But the process didn't. process.wait(5) self.assertFalse(process.is_running()) class TestDBusMiscellaneous(_LiveTesting): """Various other random tests to improve coverage.""" def test_lone_cancel(self): # Canceling an update while none is in progress will trigger an # ignored exception when the checking lock, which is not acquired, is # attempted to be released. That's fine. Note too that since no # download is in progress, *no* UpdateFailed signal will be received. reactor = SignalCapturingReactor('UpdateFailed') reactor.run(self.iface.CancelUpdate, timeout=5) self.assertEqual(len(reactor.signals), 0) def test_cancel_while_downloading(self): # Wait until we're actually downloading data files, then cancel the # update. This tests another code coverage path. self.download_always() reactor = MiscellaneousCancelingReactor(self.iface) reactor.schedule(self.iface.CheckForUpdate) reactor.run() self.assertEqual(len(reactor.update_failures), 1) failure = reactor.update_failures[0] # Failure count. self.assertEqual(failure[0], 1) self.assertEqual(failure[1], 'Canceled') @unittest.skipIf(USING_PYCURL, 'UDM-only tests') class TestDBusGSMDownloads(_LiveTesting): def _mock_udm(self): # Stop the actual UDM downloader, create our mock, and let it # "perform" the download of the update. stop_downloader(SystemImagePlugin.controller) # Remove UDM's .service file from the temporary directory so that it # won't get D-Bus activated. This should let the mock service win. os.remove(os.path.join( SystemImagePlugin.controller.tmpdir, 'com.canonical.applications.Downloader.service')) # And restart dbus-daemon. wait_for_service() # Create the mock UDM. argv = [sys.executable, '-m', 'dbusmock', '--system', 'com.canonical.applications.Downloader', '/', 'com.canonical.applications.DownloadManager'] self.server = subprocess.Popen( argv, stdout=subprocess.PIPE, env=os.environ) bus = dbus.SystemBus() until = datetime.now() + timedelta(seconds=60) while datetime.now() < until: try: p = dbus.Interface( bus.get_object( 'com.canonical.applications.Downloader', '/'), dbus_interface=dbus.INTROSPECTABLE_IFACE) p.Introspect() break except dbus.exceptions.DBusException as e: if '.UnknownInterface' in str(e): break time.sleep(0.1) # Shut down the mock UDM when this test completes. def terminate(): self.server.terminate() self.server.wait() self.addCleanup(terminate) # Start filling out the UDM mock, but only enough to complete the # test. Remember, all we care about is that we can flip the GSM flag # while a download is paused. We have to assume that the real UDM # does the right thing in this case, so we're just ensuring that # system-image handles the situation correctly. # # See https://wiki.ubuntu.com/DownloadService self.udm = dbus.Interface( bus.get_object('com.canonical.applications.Downloader', '/'), dbusmock.MOCK_IFACE) # On the primary entry point, we only care about being able to create # a group download. This is the object that UDM's # createDownloadGroup() method will return. 
We hard code its object # path because we really don't care about the details. self.group = self.udm.AddObject( '/group1', 'com.canonical.applications.GroupDownload', # No properties. {}, [ # We only care about a few methods on the group download object. # First up is the method that si calls to flip the GSM flag in # UDM. We'll simulate the resuming of a paused (due to being on # wifi-only) UDM by emitting the `started` and `finished` signals # that si expects. This will actually cause si to fail an # internal assertion because the files it requested to be # downloaded won't be present. But we don't really care about # that since we're only making sure that the resumption of the # paused download works. ('allowGSMDownload', 'b', '', 'self.EmitSignal("", "started", "b", (False,)); ' 'self.EmitSignal("", "finished", "ao", ([objects["/group1"]],))' ), # UDM's start() gets called by si's DownloadUpdate(), but it # doesn't take any arguments, return any values, or have any # side-effects. ('start', '', '', ''), # Similarly, UDM's cancel() gets called by the si test framework # when the test completes. ('cancel', '', '', ''), ]) # Here's the mock of UDM's createDownloadGroup() method. The only # thing we care about is that the object created above is returned. self.udm.AddMethod( '', 'createDownloadGroup', # https://wiki.ubuntu.com/DownloadService/DownloadManager 'a(sss)sba{sv}a{ss}', 'o', 'ret = objects["/group1"]') # Because of the way the UDMDownloadManager works, we have to mock # UDM's getAllDownloads() method to also return the object created # above. See UDMDownloadManager.allow_gsm() for details. self.udm.AddMethod( '', 'getAllDownloads', '', 'ao', 'ret = [objects["/group1"]]') def test_allow_gsm_download(self): self.download_manually() # Check for update available. Use the real UDM so that we get all the # keyrings and JSON files that define the update. Because we're # downloading manually, the data files won't be downloaded yet. reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) signal = reactor.signals[0] self.assertTrue(signal.is_available, msg=signal.error_reason) # Set up the mock and attempt to start the download. This will fail # to get a 'started' signal because we're not on wifi. self.download_on_wifi() self._mock_udm() reactor = StartedReactor() reactor.schedule(self.iface.DownloadUpdate) reactor.run(timeout=10) self.assertEqual(reactor.got_started, 0) # Now tell UDM that GSM downloads are okay and watch again for the # simulated started signal. We do however need a new reactor since # the old one won't respond to the subsequent signals. reactor = StartedReactor() reactor.schedule(self.iface.ForceAllowGSMDownload) reactor.run(timeout=10) self.assertEqual(reactor.got_started, 1) @unittest.skipIf(USING_PYCURL, 'UDM-only tests') class TestDBusGSMNoDownloads(_LiveTesting): def test_force_gsm_noops_when_no_download_is_in_progress(self): self.download_on_wifi() reactor = StartedReactor() reactor.schedule(self.iface.ForceAllowGSMDownload) reactor.run(timeout=10) self.assertEqual(reactor.got_started, 0) def test_force_gsm_noops_when_download_is_manual(self): self.download_manually() reactor = SignalCapturingReactor('UpdateAvailableStatus') reactor.run(self.iface.CheckForUpdate) self.assertEqual(len(reactor.signals), 1) signal = reactor.signals[0] self.assertTrue(signal.is_available, msg=signal.error_reason) # Don't start the download. 
reactor = StartedReactor() reactor.schedule(self.iface.ForceAllowGSMDownload) reactor.run(timeout=10) self.assertEqual(reactor.got_started, 0) ./systemimage/tests/test_main.py0000644000015600001650000022150212701500553017123 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Test the main entry point.""" __all__ = [ 'TestCLIDuplicateDestinations', 'TestCLIFactoryReset', 'TestCLIFilters', 'TestCLIListChannels', 'TestCLIMain', 'TestCLIMainDryRun', 'TestCLIMainDryRunAliases', 'TestCLIMaximumImage', 'TestCLINoReboot', 'TestCLIProductionReset', 'TestCLIProgress', 'TestCLISettings', 'TestCLISignatures', 'TestDBusMain', 'TestDBusMainNoConfigD', ] import os import sys import dbus import json import stat import time import shutil import unittest import subprocess from contextlib import ExitStack, contextmanager from datetime import datetime from functools import partial from io import StringIO from pathlib import Path from systemimage.config import Configuration, config from systemimage.helpers import safe_remove from systemimage.main import main as cli_main from systemimage.settings import Settings from systemimage.testing.controller import USING_PYCURL from systemimage.testing.helpers import ( ServerTestBase, chmod, configuration, copy, data_path, find_dbus_process, sign, temporary_directory, terminate_service, touch_build, wait_for_service) from systemimage.testing.nose import SystemImagePlugin from textwrap import dedent from unittest.mock import MagicMock, patch SPACE = ' ' TIMESTAMP = datetime(2013, 8, 1, 12, 11, 10).timestamp() @contextmanager def umask(new_mask): old_mask = None try: old_mask = os.umask(new_mask) yield finally: if old_mask is not None: os.umask(old_mask) def machine_id(mid): with ExitStack() as resources: tempdir = resources.enter_context(temporary_directory()) path = os.path.join(tempdir, 'machine-id') with open(path, 'w', encoding='utf-8') as fp: print(mid, file=fp) resources.enter_context( patch('systemimage.helpers.UNIQUE_MACHINE_ID_FILES', [path])) return resources.pop_all() def capture_print(fp): return patch('builtins.print', partial(print, file=fp)) def argv(*args): args = list(args) args.insert(0, 'argv0') with ExitStack() as resources: resources.enter_context(patch('systemimage.main.sys.argv', args)) # We need a fresh global Configuration object to mimic what the # command line script would see. resources.enter_context( patch('systemimage.config._config', Configuration())) return resources.pop_all() class TestCLIMain(unittest.TestCase): def setUp(self): super().setUp() self._resources = ExitStack() try: self._stdout = StringIO() self._stderr = StringIO() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. self._resources.enter_context(capture_print(self._stdout)) # Patch argparse's stderr to capture its error messages.
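# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original file): machine_id() and argv()
# above both end with resources.pop_all().  That call moves the registered
# context managers onto a new ExitStack, so they are NOT unwound when the
# `with` block exits; the caller closes the returned stack later (here via
# self._resources and addCleanup).  A minimal standalone demonstration, with
# hypothetical names:
from contextlib import ExitStack
from unittest.mock import patch

def patched_argv(*args):
    with ExitStack() as resources:
        resources.enter_context(patch('sys.argv', ['argv0', *args]))
        # Without pop_all(), the patch would be undone right here when the
        # `with` block exits; pop_all() transfers ownership to the caller.
        return resources.pop_all()

stack = patched_argv('--info')
try:
    import sys
    assert sys.argv == ['argv0', '--info']
finally:
    stack.close()
# ---------------------------------------------------------------------------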
self._resources.enter_context( patch('argparse._sys.stderr', self._stderr)) self._resources.push( machine_id('feedfacebeefbacafeedfacebeefbaca')) except: self._resources.close() raise self.addCleanup(self._resources.close) def test_config_directory_good_path(self): # The default configuration directory exists. self._resources.enter_context(argv('--info')) # Patch default configuration directory. tempdir = self._resources.enter_context(temporary_directory()) copy('main.config_01.ini', tempdir, '00_config.ini') self._resources.enter_context( patch('systemimage.main.DEFAULT_CONFIG_D', tempdir)) # Mock out the initialize() call so that the main() doesn't try to # create a log file in a non-existent system directory. self._resources.enter_context(patch('systemimage.main.initialize')) cli_main() self.assertEqual(config.config_d, tempdir) self.assertEqual(config.channel, 'special') def test_missing_default_config_directory(self): # The default configuration directory is missing. self._resources.enter_context(argv()) # Patch default configuration directory. self._resources.enter_context( patch('systemimage.main.DEFAULT_CONFIG_D', '/does/not/exist')) with self.assertRaises(SystemExit) as cm: cli_main() self.assertEqual(cm.exception.code, 2) self.assertEqual( self._stderr.getvalue().splitlines()[-1], 'Configuration directory not found: /does/not/exist') def test_missing_explicit_config_directory(self): # An explicit configuration directory given with -C is missing. self._resources.enter_context(argv('-C', '/does/not/exist')) with self.assertRaises(SystemExit) as cm: cli_main() self.assertEqual(cm.exception.code, 2) self.assertEqual( self._stderr.getvalue().splitlines()[-1], 'Configuration directory not found: /does/not/exist') def test_ensure_directories_exist(self): # The temporary and var directories are created if they don't exist. dir_1 = self._resources.enter_context(temporary_directory()) dir_2 = self._resources.enter_context(temporary_directory()) # Create a configuration file with directories that point to # non-existent locations. config_ini = os.path.join(dir_1, '00_config.ini') with open(data_path('00.ini'), encoding='utf-8') as fp: template = fp.read() # These paths look something like they would on the real system. tmpdir = os.path.join(dir_2, 'tmp', 'system-image') vardir = os.path.join(dir_2, 'var', 'lib', 'system-image') configuration = template.format(tmpdir=tmpdir, vardir=vardir) with open(config_ini, 'wt', encoding='utf-8') as fp: fp.write(configuration) # Invoking main() creates the directories. self._resources.enter_context(argv('-C', dir_1, '--info')) self.assertFalse(os.path.exists(tmpdir)) cli_main() self.assertTrue(os.path.exists(tmpdir)) def test_permissions(self): # LP: #1235975 - Various directories and files have unsafe # permissions. dir_1 = self._resources.enter_context(temporary_directory()) dir_2 = self._resources.enter_context(temporary_directory()) # Create a configuration file with directories that point to # non-existent locations. config_ini = os.path.join(dir_1, '00_config.ini') with open(data_path('00.ini'), encoding='utf-8') as fp: template = fp.read() # These paths look something like they would on the real system. tmpdir = os.path.join(dir_2, 'tmp', 'system-image') vardir = os.path.join(dir_2, 'var', 'lib', 'system-image') configuration = template.format(tmpdir=tmpdir, vardir=vardir) with open(config_ini, 'w', encoding='utf-8') as fp: fp.write(configuration) # Invoking main() creates the directories. 
config = Configuration(dir_1) self.assertFalse(os.path.exists(config.system.tempdir)) self.assertFalse(os.path.exists(config.system.logfile)) self._resources.enter_context(argv('-C', dir_1, '--info')) cli_main() mode = os.stat(config.system.tempdir).st_mode self.assertEqual(stat.filemode(mode), 'drwx--S---') mode = os.stat(os.path.dirname(config.system.logfile)).st_mode self.assertEqual(stat.filemode(mode), 'drwx--S---') mode = os.stat(config.system.logfile).st_mode self.assertEqual(stat.filemode(mode), '-rw-------') @configuration def test_info(self, config_d): # -i/--info gives information about the device, including the current # build number, channel, and device name. touch_build(1701, TIMESTAMP) self._resources.enter_context(argv('-C', config_d, '--info')) cli_main() self.assertEqual(self._stdout.getvalue(), dedent("""\ current build number: 1701 device name: nexus7 channel: stable last update: 2013-08-01 12:11:10 """)) @configuration def test_info_last_update_timestamps(self, config_d): # --info's last update date uses the latest mtime of the files in the # config.d directory. copy('main.config_02.ini', config_d, '00_config.ini') copy('main.config_02.ini', config_d, '01_config.ini') copy('main.config_02.ini', config_d, '02_config.ini') # Give the default ini file an even earlier timestamp. timestamp_0 = int(datetime(2010, 11, 8, 2, 3, 4).timestamp()) touch_build(1701, timestamp_0) # Make the 01 ini file the latest. timestamp_1 = int(datetime(2011, 1, 8, 2, 3, 4).timestamp()) os.utime(os.path.join(config_d, '00_config.ini'), (timestamp_1, timestamp_1)) os.utime(os.path.join(config_d, '02_config.ini'), (timestamp_1, timestamp_1)) timestamp_2 = int(datetime(2011, 8, 1, 5, 6, 7).timestamp()) os.utime(os.path.join(config_d, '01_config.ini'), (timestamp_2, timestamp_2)) self._resources.enter_context(argv('-C', config_d, '--info')) cli_main() self.assertEqual(self._stdout.getvalue(), dedent("""\ current build number: 1701 device name: nexus7 channel: proposed last update: 2011-08-01 05:06:07 """)) @configuration def test_build_number(self, config_d): # -b/--build overrides the build number. touch_build(1701, TIMESTAMP) # Use --build to override the default build number. self._resources.enter_context( argv('-C', config_d, '--build', '20250801', '--info')) cli_main() self.assertEqual(self._stdout.getvalue(), dedent("""\ current build number: 20250801 device name: nexus7 channel: stable last update: 2013-08-01 12:11:10 """)) @configuration def test_device_name(self, config_d): # -d/--device overrides the device type. touch_build(1701, TIMESTAMP) self._resources.enter_context( argv('-C', config_d, '--device', 'phablet', '--info')) cli_main() self.assertEqual(self._stdout.getvalue(), dedent("""\ current build number: 1701 device name: phablet channel: stable last update: 2013-08-01 12:11:10 """)) @configuration def test_channel_name(self, config_d): # -c/--channel overrides the channel. 
touch_build(1701, TIMESTAMP) self._resources.enter_context( argv('-C', config_d, '--channel', 'daily-proposed', '--info')) cli_main() self.assertEqual(self._stdout.getvalue(), dedent("""\ current build number: 1701 device name: nexus7 channel: daily-proposed last update: 2013-08-01 12:11:10 """)) @configuration def test_channel_name_with_alias(self, config_d): # When the current channel has an alias, this is reflected in the # output for --info copy('main.config_03.ini', config_d, '01_config.ini') touch_build(300, TIMESTAMP) self._resources.enter_context(argv('-C', config_d, '--info')) cli_main() self.assertEqual(self._stdout.getvalue(), dedent("""\ current build number: 300 device name: nexus7 channel: daily alias: saucy last update: 2013-08-01 12:11:10 """)) @configuration def test_all_overrides(self, config_d): # Use -b -d and -c together. touch_build(1701, TIMESTAMP) # Use --build to override the default build number. self._resources.enter_context( argv('-C', config_d, '-b', '20250801', '-c', 'daily-proposed', '-d', 'phablet', '--info')) cli_main() self.assertEqual(self._stdout.getvalue(), dedent("""\ current build number: 20250801 device name: phablet channel: daily-proposed last update: 2013-08-01 12:11:10 """)) @configuration def test_bad_build_number_override(self, config_d): # -b/--build requires an integer. self._resources.enter_context(argv('-C', config_d, '--build', 'bogus')) with self.assertRaises(SystemExit) as cm: cli_main() self.assertEqual(cm.exception.code, 2) self.assertEqual( self._stderr.getvalue().splitlines()[-1], 'system-image-cli: error: -b/--build requires an integer: bogus') @configuration def test_switch_channel(self, config_d): # `system-image-cli --switch <channel>` is a convenience equivalent to # `system-image-cli -b 0 --channel <channel>`. touch_build(801, TIMESTAMP) self._resources.enter_context( argv('-C', config_d, '--switch', 'utopic-proposed', '--info')) cli_main() self.assertEqual(self._stdout.getvalue(), dedent("""\ current build number: 0 device name: nexus7 channel: utopic-proposed last update: 2013-08-01 12:11:10 """)) @configuration def test_switch_channel_with_overrides(self, config_d): # The use of --switch is a convenience only, and if -b and/or -c is # given explicitly, they override the convenience. touch_build(801, TIMESTAMP) self._resources.enter_context( argv('-C', config_d, '--switch', 'utopic-proposed', '-b', '1', '-c', 'utopic', '--info')) cli_main() self.assertEqual(self._stdout.getvalue(), dedent("""\ current build number: 1 device name: nexus7 channel: utopic last update: 2013-08-01 12:11:10 """)) @configuration def test_log_file(self, config): # Test that the system log file gets created and written. self.assertFalse(os.path.exists(config.system.logfile)) class FakeState: def __init__(self): self.downloader = MagicMock() def __iter__(self): return self def __next__(self): raise StopIteration self._resources.enter_context(argv('-C', config.config_d)) self._resources.enter_context( patch('systemimage.main.State', FakeState)) cli_main() self.assertTrue(os.path.exists(config.system.logfile)) with open(config.system.logfile, encoding='utf-8') as fp: logged = fp.readlines() # Ignore any leading timestamp and the trailing newline.
self.assertRegex( logged[0], r'\[systemimage\] [^(]+ \(\d+\) ' r'running state machine \[stable/nexus7\]\n') self.assertRegex( logged[1], r'\[systemimage\] [^(]+ \(\d+\) ' r'state machine finished\n') @unittest.skipIf(os.getuid() == 0, 'Test cannot succeed when run as root') @configuration def test_log_file_permission_denied(self, config): # LP: #1301995 - some tests are run as non-root, meaning they don't # have access to the system log file. Use a fallback in that case. # Set the log file to read-only. system_log = Path(config.system.logfile) system_log.touch(0o444, exist_ok=False) # Mock the fallback cache directory location for testability. tmpdir = self._resources.enter_context(temporary_directory()) self._resources.enter_context( patch('systemimage.logging.xdg_cache_home', tmpdir)) self._resources.enter_context(argv('-C', config.config_d, '--dry-run')) cli_main() # There should now be nothing in the system log file, and something in # the fallback log file. self.assertEqual(system_log.stat().st_size, 0) fallback = Path(tmpdir) / 'system-image' / 'client.log' self.assertGreater(fallback.stat().st_size, 0) # The log file also has the expected permissions. self.assertEqual(stat.filemode(fallback.stat().st_mode), '-rw-------') @configuration def test_bad_filter_type(self, config_d): # --filter option where value is not `full` or `delta` is an error. self._resources.enter_context( argv('-C', config_d, '--filter', 'bogus')) with self.assertRaises(SystemExit) as cm: cli_main() self.assertEqual(cm.exception.code, 2) self.assertEqual( self._stderr.getvalue().splitlines()[-1], 'system-image-cli: error: Bad filter type: bogus') @configuration def test_version_detail(self, config_d): # --info where a config file has [service]version_detail. copy('main.config_04.ini', config_d, '01_config.ini') touch_build(1933, TIMESTAMP) self._resources.enter_context(argv('-C', config_d, '-i')) cli_main() self.assertEqual(self._stdout.getvalue(), dedent("""\ current build number: 1933 device name: nexus7 channel: proposed last update: 2013-08-01 12:11:10 version ubuntu: 123 version mako: 456 version custom: 789 """)) @configuration def test_no_version_detail(self, config_d): # --info where there is no [service]version_detail setting. copy('main.config_02.ini', config_d, '01_config.ini') touch_build(1933, TIMESTAMP) self._resources.enter_context(argv('-C', config_d, '-i')) cli_main() self.assertEqual(self._stdout.getvalue(), dedent("""\ current build number: 1933 device name: nexus7 channel: proposed last update: 2013-08-01 12:11:10 """)) @configuration def test_state_machine_exceptions(self, config): # If an exception happens during the state machine run, the error is # logged and main exits with code 1. self._resources.enter_context(argv('-C', config.config_d)) # Making the cache directory unwritable is a good way to trigger a # crash. Be sure to set it back though! with chmod(config.updater.cache_partition, 0): exit_code = cli_main() self.assertEqual(exit_code, 1) @configuration def test_state_machine_exceptions_dry_run(self, config): # Like above, but doing only a --dry-run. self._resources.enter_context(argv('-C', config.config_d, '--dry-run')) with chmod(config.updater.cache_partition, 0): exit_code = cli_main() self.assertEqual(exit_code, 1) class TestCLIMainDryRun(ServerTestBase): INDEX_FILE = 'main.index_01.json' CHANNEL_FILE = 'main.channels_01.json' CHANNEL = 'stable' DEVICE = 'nexus7' @configuration def test_dry_run(self, config_d): # `system-image-cli --dry-run` prints the winning upgrade path. 
self._setup_server_keyrings() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. capture = StringIO() with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) resources.enter_context(argv('-C', config_d, '--dry-run')) cli_main() self.assertEqual( capture.getvalue(), """\ Upgrade path is 1200:1201:1304 Target phase: 12% """) @configuration def test_dry_run_no_update(self, config_d): # `system-image-cli --dry-run` when there are no updates available. self._setup_server_keyrings() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. capture = StringIO() # Set up the build number. touch_build(1701) with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.enter_context(argv('-C', config_d, '--dry-run')) cli_main() self.assertEqual(capture.getvalue(), 'Already up-to-date\n') @configuration def test_dry_run_bad_channel(self, config_d): # `system-image-cli --dry-run --channel <bad-channel>` should say it's # already up-to-date. self._setup_server_keyrings() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. capture = StringIO() with ExitStack() as resources: resources.enter_context(capture_print(capture)) # Use --channel to override the default channel. resources.enter_context( argv('-C', config_d, '--channel', 'daily-proposed', '--dry-run')) cli_main() self.assertEqual(capture.getvalue(), 'Already up-to-date\n') @configuration def test_percentage(self, config_d): # --percentage overrides the device's target percentage. self._setup_server_keyrings() capture = StringIO() with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) resources.enter_context(argv('-C', config_d, '--dry-run')) cli_main() self.assertEqual( capture.getvalue(), """\ Upgrade path is 1200:1201:1304 Target phase: 12% """) capture = StringIO() with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) resources.enter_context( argv('-C', config_d, '--dry-run', '--percentage', '81')) cli_main() self.assertEqual( capture.getvalue(), """\ Upgrade path is 1200:1201:1304 Target phase: 81% """) @configuration def test_p(self, config_d): # -p overrides the device's target percentage. self._setup_server_keyrings() capture = StringIO() with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) resources.enter_context(argv('-C', config_d, '--dry-run')) cli_main() self.assertEqual( capture.getvalue(), """\ Upgrade path is 1200:1201:1304 Target phase: 12% """) capture = StringIO() with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) resources.enter_context( argv('-C', config_d, '--dry-run', '-p', '81')) cli_main() self.assertEqual( capture.getvalue(), """\ Upgrade path is 1200:1201:1304 Target phase: 81% """) @configuration def test_crazy_p(self, config_d): # --percentage/-p value is floored at 0% and capped at 100%.
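# In other words, judging by the expectations below, the effective
# percentage behaves something like max(0, min(100, value)).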
self._setup_server_keyrings() capture = StringIO() with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) resources.enter_context( argv('-C', config_d, '--dry-run', '-p', '10000')) cli_main() self.assertEqual( capture.getvalue(), """\ Upgrade path is 1200:1201:1304 Target phase: 100% """) capture = StringIO() with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) resources.enter_context( argv('-C', config_d, '--dry-run', '-p', '-10')) cli_main() self.assertEqual( capture.getvalue(), """\ Upgrade path is 1200:1201:1304 Target phase: 0% """) class TestCLIMainDryRunAliases(ServerTestBase): INDEX_FILE = 'main.index_02.json' CHANNEL_FILE = 'main.channels_02.json' CHANNEL = 'daily' DEVICE = 'manta' @configuration def test_dry_run_with_channel_alias_switch(self, config_d): # `system-image-cli --dry-run` where the channel alias the device was # on got switched should include this information. self._setup_server_keyrings() copy('main.config_05.ini', config_d, '01_config.ini') capture = StringIO() # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.enter_context(argv('-C', config_d, '--dry-run')) # Patch the machine id. resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) resources.enter_context( patch('systemimage.device.check_output', return_value='manta')) cli_main() self.assertEqual( capture.getvalue(), """\ Upgrade path is 200:201:304 (saucy -> tubular) Target phase: 25% """) class TestCLIListChannels(ServerTestBase): INDEX_FILE = 'main.index_02.json' CHANNEL_FILE = 'main.channels_02.json' CHANNEL = 'daily' DEVICE = 'manta' @configuration def test_list_channels(self, config_d): # `system-image-cli --list-channels` shows all available channels, # including aliases. self._setup_server_keyrings() copy('main.config_05.ini', config_d, '01_config.ini') capture = StringIO() self._resources.enter_context(capture_print(capture)) self._resources.enter_context(argv('-C', config_d, '--list-channels')) # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): cli_main() self.assertMultiLineEqual(capture.getvalue(), dedent("""\ Available channels: daily (alias for: tubular) saucy tubular """)) @configuration def test_list_channels_exception(self, config_d): # If an exception occurs while getting the list of channels, we get a # non-zero exit status. self._setup_server_keyrings() copy('main.config_05.ini', config_d, '01_config.ini') capture = StringIO() self._resources.enter_context(capture_print(capture)) self._resources.enter_context(argv('-C', config_d, '--list-channels')) # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. 
with ExitStack() as more: more.enter_context( patch('systemimage.device.check_output', return_value='manta')) more.enter_context( patch('systemimage.state.State._get_channel', side_effect=RuntimeError)) status = cli_main() self.assertEqual(status, 1) class TestCLIFilters(ServerTestBase): INDEX_FILE = 'main.index_03.json' CHANNEL_FILE = 'main.channels_03.json' CHANNEL = 'stable' DEVICE = 'nexus7' maxDiff = None @configuration def test_filter_full(self, config_d): # With --filter=full, only full updates will be considered. self._setup_server_keyrings() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. capture = StringIO() # Set up the build number. touch_build(100) with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.enter_context( argv('-C', config_d, '--dry-run', '--filter', 'full')) cli_main() self.assertMultiLineEqual(capture.getvalue(), 'Already up-to-date\n') @configuration def test_filter_delta(self, config_d): # With --filter=delta, only delta updates will be considered. self._setup_server_keyrings() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. capture = StringIO() # Set up the build number. touch_build(100) with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.enter_context( argv('-C', config_d, '--dry-run', '--filter', 'delta')) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) cli_main() self.assertMultiLineEqual(capture.getvalue(), """\ Upgrade path is 1600 Target phase: 80% """) class TestCLIMaximumImage(ServerTestBase): INDEX_FILE = 'main.index_02.json' CHANNEL_FILE = 'main.channels_03.json' CHANNEL = 'stable' DEVICE = 'nexus7' maxDiff = None @configuration def test_no_maximage(self, config_d): # With no --maximage we get the full upgrade path. self._setup_server_keyrings() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. capture = StringIO() # Set up the build number. touch_build(100) with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.enter_context(argv('-C', config_d, '--dry-run')) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) cli_main() self.assertMultiLineEqual(capture.getvalue(), """\ Upgrade path is 200:201:304 Target phase: 44% """) @configuration def test_maximage_inexact(self, config_d): # With --maximage the winning path is capped. self._setup_server_keyrings() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. capture = StringIO() # Set up the build number. touch_build(100) with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.enter_context( argv('-C', config_d, '--dry-run', '--maximage', '205')) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) cli_main() self.assertMultiLineEqual(capture.getvalue(), """\ Upgrade path is 200:201 Target phase: 18% """) @configuration def test_maximage_exact(self, config_d): # With --maximage the winning path is capped. self._setup_server_keyrings() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. capture = StringIO() # Set up the build number. 
touch_build(100) with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.enter_context( argv('-C', config_d, '--dry-run', '--maximage', '201')) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) cli_main() self.assertMultiLineEqual(capture.getvalue(), """\ Upgrade path is 200:201 Target phase: 18% """) @configuration def test_maximage_too_high(self, config_d): # With --maximage set above the highest winning image, there is no # effective cap. self._setup_server_keyrings() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. capture = StringIO() # Set up the build number. touch_build(100) with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.enter_context( argv('-C', config_d, '--dry-run', '--maximage', '500')) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) cli_main() self.assertMultiLineEqual(capture.getvalue(), """\ Upgrade path is 200:201:304 Target phase: 44% """) @configuration def test_maximage_lower_bound(self, config_d): # With --maximage set at the lower bound, we still get an upgrade. self._setup_server_keyrings() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. capture = StringIO() # Set up the build number. touch_build(100) with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.enter_context( argv('-C', config_d, '--dry-run', '--maximage', '200')) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) cli_main() self.assertMultiLineEqual(capture.getvalue(), """\ Upgrade path is 200 Target phase: 1% """) @configuration def test_maximage_0(self, config_d): # With --maximage set at zero, we get no upgrade path. self._setup_server_keyrings() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. capture = StringIO() # Set up the build number. touch_build(100) with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.enter_context( argv('-C', config_d, '--dry-run', '--maximage', '0')) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) cli_main() self.assertMultiLineEqual(capture.getvalue(), 'Already up-to-date\n') @configuration def test_maximage_negative(self, config_d): # With --maximage negative, we also get no upgrade path. self._setup_server_keyrings() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. capture = StringIO() # Set up the build number. touch_build(100) with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.enter_context( argv('-C', config_d, '--dry-run', '--maximage', '-100')) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) cli_main() self.assertMultiLineEqual(capture.getvalue(), 'Already up-to-date\n') @configuration def test_maximage_m(self, config_d): # -m is a shortcut for --maximage. self._setup_server_keyrings() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. capture = StringIO() # Set up the build number.
touch_build(100) with ExitStack() as resources: resources.enter_context(capture_print(capture)) resources.enter_context( argv('-C', config_d, '--dry-run', '-m', '204')) resources.push(machine_id('0000000000000000aaaaaaaaaaaaaaaa')) cli_main() self.assertMultiLineEqual(capture.getvalue(), """\ Upgrade path is 200:201 Target phase: 18% """) class TestCLIDuplicateDestinations(ServerTestBase): INDEX_FILE = 'main.index_04.json' CHANNEL_FILE = 'main.channels_03.json' CHANNEL = 'stable' DEVICE = 'nexus7' @configuration def test_duplicate_destinations(self, config_d): # main.index_04.json has the bug we saw in the wild in LP: #1250181. # There, the server erroneously included a data file twice in two # different images. This shouldn't happen; it indicates a server # problem. The client must refuse to upgrade in this case, by raising # an exception. self._setup_server_keyrings() with ExitStack() as resources: resources.enter_context(argv('-C', config_d)) exit_code = cli_main() self.assertEqual(exit_code, 1) # 2013-11-12 BAW: IWBNI we could assert something about the log # output, since that contains a display of the duplicate destination # paths and the URLs that map to them, but that's difficult for # several reasons, including that we can't really mock the log # instance (it's a local variable to main(), and the output will # contain stack traces and random paths). I bet we could hack # something in with doctest.OutputChecker.check_output(), but I'm not # sure it's worth it. class TestCLINoReboot(ServerTestBase): INDEX_FILE = 'main.index_05.json' CHANNEL_FILE = 'main.channels_02.json' CHANNEL = 'daily' DEVICE = 'manta' @configuration def test_no_apply(self, config_d): # `system-image-cli --no-apply` downloads everything but does not # apply the update. self._setup_server_keyrings() capture = StringIO() self._resources.enter_context(capture_print(capture)) self._resources.enter_context( argv('-C', config_d, '--no-apply', '-b', 0, '-c', 'daily')) mock = self._resources.enter_context( patch('systemimage.apply.Reboot.apply')) # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): cli_main() # The reboot method was never called. self.assertFalse(mock.called) # All the expected files should be downloaded. self.assertEqual(set(os.listdir(config.updater.data_partition)), set([ 'blacklist.tar.xz', 'blacklist.tar.xz.asc', ])) self.assertEqual(set(os.listdir(config.updater.cache_partition)), set([ '5.txt', '5.txt.asc', '6.txt', '6.txt.asc', '7.txt', '7.txt.asc', 'device-signing.tar.xz', 'device-signing.tar.xz.asc', 'image-master.tar.xz', 'image-master.tar.xz.asc', 'image-signing.tar.xz', 'image-signing.tar.xz.asc', 'ubuntu_command', ])) path = os.path.join(config.updater.cache_partition, 'ubuntu_command') with open(path, 'r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, """\ load_keyring image-master.tar.xz image-master.tar.xz.asc load_keyring image-signing.tar.xz image-signing.tar.xz.asc load_keyring device-signing.tar.xz device-signing.tar.xz.asc format system mount system update 6.txt 6.txt.asc update 7.txt 7.txt.asc update 5.txt 5.txt.asc unmount system """) @configuration def test_g(self, config_d): # `system-image-cli -g` downloads everything but does not reboot into # recovery.
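# Note that this test is otherwise identical to test_no_apply above,
# so -g evidently behaves here as a synonym for --no-apply.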
self._setup_server_keyrings() capture = StringIO() self._resources.enter_context(capture_print(capture)) self._resources.enter_context( argv('-C', config_d, '-g', '-b', 0, '-c', 'daily')) mock = self._resources.enter_context( patch('systemimage.apply.Reboot.apply')) # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): cli_main() # The reboot method was never called. self.assertFalse(mock.called) # All the expected files should be downloaded. self.assertEqual(set(os.listdir(config.updater.data_partition)), set([ 'blacklist.tar.xz', 'blacklist.tar.xz.asc', ])) self.assertEqual(set(os.listdir(config.updater.cache_partition)), set([ '5.txt', '5.txt.asc', '6.txt', '6.txt.asc', '7.txt', '7.txt.asc', 'device-signing.tar.xz', 'device-signing.tar.xz.asc', 'image-master.tar.xz', 'image-master.tar.xz.asc', 'image-signing.tar.xz', 'image-signing.tar.xz.asc', 'ubuntu_command', ])) path = os.path.join(config.updater.cache_partition, 'ubuntu_command') with open(path, 'r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, """\ load_keyring image-master.tar.xz image-master.tar.xz.asc load_keyring image-signing.tar.xz image-signing.tar.xz.asc load_keyring device-signing.tar.xz device-signing.tar.xz.asc format system mount system update 6.txt 6.txt.asc update 7.txt 7.txt.asc update 5.txt 5.txt.asc unmount system """) @configuration def test_rerun_after_no_reboot_reboots(self, config_d): # Running system-image-cli again after a `system-image-cli -g` does # not download anything the second time, but does issue a reboot. self._setup_server_keyrings() capture = StringIO() self._resources.enter_context(capture_print(capture)) mock = self._resources.enter_context( patch('systemimage.apply.Reboot.apply')) self._resources.enter_context( argv('-C', config_d, '-g', '-b', 0, '-c', 'daily')) # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): cli_main() # The reboot method was never called. self.assertFalse(mock.called) # To prove nothing gets downloaded the second time, actually delete # the data files from the server. shutil.rmtree(os.path.join(self._serverdir, '3')) shutil.rmtree(os.path.join(self._serverdir, '4')) shutil.rmtree(os.path.join(self._serverdir, '5')) # Run main again without the -g flag; this time we reboot. with ExitStack() as stack: stack.enter_context(argv('-C', config_d, '-b', 0, '-c', 'daily')) stack.enter_context( patch('systemimage.device.check_output', return_value='manta')) cli_main() # This time, the reboot method was called. self.assertTrue(mock.called) class TestCLIFactoryReset(unittest.TestCase): """Test the --factory-reset option for factory resets.""" @configuration def test_factory_reset(self, config_d): # system-image-cli --factory-reset capture = StringIO() with ExitStack() as resources: resources.enter_context(capture_print(capture)) mock = resources.enter_context( patch('systemimage.apply.Reboot.apply')) resources.enter_context(argv('-C', config_d, '--factory-reset')) cli_main() # A reboot was issued.
self.assertTrue(mock.called) path = os.path.join(config.updater.cache_partition, 'ubuntu_command') with open(path, 'r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, dedent("""\ format data """)) class TestCLIProductionReset(unittest.TestCase): """Test the --production-reset option for production factory resets.""" @configuration def test_production_reset(self, config_d): # system-image-cli --production-reset capture = StringIO() with ExitStack() as resources: resources.enter_context(capture_print(capture)) mock = resources.enter_context( patch('systemimage.apply.Reboot.apply')) resources.enter_context(argv('-C', config_d, '--production-reset')) cli_main() # A reboot was issued. self.assertTrue(mock.called) path = os.path.join(config.updater.cache_partition, 'ubuntu_command') with open(path, 'r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, dedent("""\ format data enable factory_wipe """)) class TestCLISettings(unittest.TestCase): """Test settings command line options.""" def setUp(self): super().setUp() self._resources = ExitStack() try: self._stdout = StringIO() self._stderr = StringIO() # We patch builtin print() rather than sys.stdout because the # latter can mess with pdb output should we need to trace through # the code. self._resources.enter_context(capture_print(self._stdout)) # Patch argparse's stderr to capture its error messages. self._resources.enter_context( patch('argparse._sys.stderr', self._stderr)) except: self._resources.close() raise self.addCleanup(self._resources.close) @configuration def test_show_settings(self, config_d): # `system-image-cli --show-settings` shows all the keys and values in # sorted order by alphanumeric key name. settings = Settings() settings.set('peart', 'neil') settings.set('lee', 'geddy') settings.set('lifeson', 'alex') self._resources.enter_context(argv('-C', config_d, '--show-settings')) cli_main() self.assertMultiLineEqual(self._stdout.getvalue(), dedent("""\ lee=geddy lifeson=alex peart=neil """)) @configuration def test_get_key(self, config_d): # `system-image-cli --get key` prints the key's value. settings = Settings() settings.set('ant', 'aunt') self._resources.enter_context(argv('-C', config_d, '--get', 'ant')) cli_main() self.assertMultiLineEqual(self._stdout.getvalue(), dedent("""\ aunt """)) @configuration def test_get_keys(self, config_d): # `--get key` can be used multiple times. settings = Settings() settings.set('s', 'saucy') settings.set('t', 'trusty') settings.set('u', 'utopic') self._resources.enter_context( argv('-C', config_d, '--get', 's', '--get', 'u', '--get', 't')) cli_main() self.assertMultiLineEqual(self._stdout.getvalue(), dedent("""\ saucy utopic trusty """)) @configuration def test_get_missing_key(self, config_d): # Since by definition a missing key has a default value, you can get # missing keys. Note that `auto_download` is the one weirdo. self._resources.enter_context( argv('-C', config_d, '--get', 'missing', '--get', 'auto_download')) cli_main() # This produces a blank line, since `missing` returns the empty # string. For better readability, don't indent the results. self.assertMultiLineEqual(self._stdout.getvalue(), """\ 1 """) @configuration def test_set_key(self, config_d): # `system-image-cli --set key=value` sets a key/value pair. 
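# For example, a roughly equivalent manual invocation (the config
# directory path is hypothetical) would be:
#
#   $ system-image-cli -C /etc/system-image/config.d --set bass=4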
self._resources.enter_context(argv('-C', config_d, '--set', 'bass=4')) cli_main() self.assertEqual(Settings().get('bass'), '4') @configuration def test_change_key(self, config_d): # `--set key=value` changes an existing key's value. settings = Settings() settings.set('a', 'ant') settings.set('b', 'bee') settings.set('c', 'cat') self._resources.enter_context(argv('-C', config_d, '--set', 'b=bat')) cli_main() self.assertEqual(settings.get('a'), 'ant') self.assertEqual(settings.get('b'), 'bat') self.assertEqual(settings.get('c'), 'cat') @configuration def test_set_keys(self, config_d): # `--set key=value` can be used multiple times. self._resources.enter_context( argv('-C', config_d, '--set', 'a=ant', '--set', 'b=bee', '--set', 'c=cat')) cli_main() settings = Settings() self.assertEqual(settings.get('a'), 'ant') self.assertEqual(settings.get('b'), 'bee') self.assertEqual(settings.get('c'), 'cat') @configuration def test_del_key(self, config_d): # `system-image-cli --del key` removes a key from the database. settings = Settings() settings.set('ant', 'insect') settings.set('bee', 'insect') settings.set('cat', 'mammal') self._resources.enter_context(argv('-C', config_d, '--del', 'bee')) cli_main() settings = Settings() self.assertEqual(settings.get('ant'), 'insect') self.assertEqual(settings.get('cat'), 'mammal') # When the key is missing, the empty string is the default. self.assertEqual(settings.get('bee'), '') @configuration def test_del_keys(self, config_d): # `--del key` can be used multiple times. settings = Settings() settings.set('ant', 'insect') settings.set('bee', 'insect') settings.set('cat', 'mammal') self._resources.enter_context( argv('-C', config_d, '--del', 'bee', '--del', 'cat')) cli_main() settings = Settings() self.assertEqual(settings.get('ant'), 'insect') # When the key is missing, the empty string is the default. self.assertEqual(settings.get('cat'), '') self.assertEqual(settings.get('bee'), '') @configuration def test_del_missing_key(self, config_d): # When asked to delete a key that's not in the database, nothing # much happens. self._resources.enter_context(argv('-C', config_d, '--del', 'missing')) cli_main() self.assertEqual(Settings().get('missing'), '') @configuration def test_mix_and_match(self, config_d): # Because argument order is not preserved, and any semantics for # mixing and matching database arguments would be arbitrary, it is not # allowed to mix them. capture = StringIO() self._resources.enter_context(capture_print(capture)) self._resources.enter_context( argv('-C', config_d, '--set', 'c=cat', '--del', 'bee', '--get', 'dog')) with self.assertRaises(SystemExit) as cm: cli_main() self.assertEqual(cm.exception.code, 2) self.assertEqual( self._stderr.getvalue().splitlines()[-1], 'system-image-cli: error: Cannot mix and match settings arguments') class TestDBusMain(unittest.TestCase): def setUp(self): super().setUp() self._resources = ExitStack() try: SystemImagePlugin.controller.set_mode() config_d = SystemImagePlugin.controller.ini_path override = os.path.join(config_d, '06_override.ini') self._resources.callback(safe_remove, override) with open(override, 'w', encoding='utf-8') as fp: print('[dbus]\nlifetime: 3s\n', file=fp) # The testing framework will have caused system-image-dbus to be # started by now. The tests below assume it is not yet running, so # let's be sure to stop it. 
terminate_service() except: self._resources.close() raise self.addCleanup(terminate_service) self.addCleanup(self._resources.close) def _activate(self): # Re-start and reload the D-Bus service. wait_for_service() bus = dbus.SystemBus() service = bus.get_object('com.canonical.SystemImage', '/Service') self._iface = dbus.Interface(service, 'com.canonical.SystemImage') return self._iface.Information() def test_service_exits(self): # The dbus service automatically exits after a set amount of time. config_d = SystemImagePlugin.controller.ini_path # Nothing has been spawned yet. self.assertIsNone(find_dbus_process(config_d)) self._activate() process = find_dbus_process(config_d) self.assertTrue(process.is_running()) # Now wait for the process to self-terminate. If this times out # before the process exits, a TimeoutExpired exception will be # raised. Let this propagate up as a test failure. process.wait(timeout=6) self.assertFalse(process.is_running()) def test_service_keepalive(self): # Proactively calling methods on the service keeps it alive. config_d = SystemImagePlugin.controller.ini_path self.assertIsNone(find_dbus_process(config_d)) self._activate() process = find_dbus_process(config_d) self.assertTrue(process.is_running()) # Normally the process would exit after 3 seconds, but we'll keep it # alive for a bit. for i in range(3): self._iface.Information() time.sleep(2) self.assertTrue(process.is_running()) def test_config_override(self): # Other ini files can override the build number and channel. config_d = SystemImagePlugin.controller.ini_path copy('main.config_07.ini', config_d, '07_override.ini') info = self._activate() # The build number. self.assertEqual(info['current_build_number'], '33') # The channel self.assertEqual(info['channel_name'], 'saucy') def test_temp_directory(self): # The temporary directory gets created if it doesn't exist. config_d = SystemImagePlugin.controller.ini_path config = Configuration(config_d) # The temporary directory may have already been created via the # .set_mode() call in the setUp(). That invokes a 'stopper' for the # -dbus process, which has the perverse effect of first D-Bus # activating the process, and thus creating the temporary directory # before calling .Exit(). However, due to timing issues, it's # possible we get here before the process was ever started, and thus # the daemon won't be killed. Conditionally deleting it now will # allow re-activation to re-create the directory. try: shutil.rmtree(config.system.tempdir) except FileNotFoundError: pass self.assertFalse(os.path.exists(config.system.tempdir)) self._activate() self.assertTrue(os.path.exists(config.system.tempdir)) def test_permissions(self): # LP: #1235975 - The created tempdir had unsafe permissions. config = Configuration(SystemImagePlugin.controller.ini_path) # See above. try: shutil.rmtree(config.system.tempdir) except FileNotFoundError: pass safe_remove(config.system.logfile) self._activate() mode = os.stat(config.system.tempdir).st_mode self.assertEqual(stat.filemode(mode), 'drwx--S---') mode = os.stat(os.path.dirname(config.system.logfile)).st_mode self.assertEqual(stat.filemode(mode), 'drwx--S---') mode = os.stat(config.system.logfile).st_mode self.assertEqual(stat.filemode(mode), '-rw-------') def test_single_instance(self): # Only one instance of the system-image-dbus service is allowed to # remain active on a single system bus. 
config_d = SystemImagePlugin.controller.ini_path self.assertIsNone(find_dbus_process(config_d)) self._activate() proc = find_dbus_process(config_d) # Attempt to start a second process on the same system bus. env = dict( DBUS_SYSTEM_BUS_ADDRESS=os.environ['DBUS_SYSTEM_BUS_ADDRESS']) coverage_env = os.environ.get('COVERAGE_PROCESS_START') if coverage_env is not None: env['COVERAGE_PROCESS_START'] = coverage_env args = (sys.executable, '-m', 'systemimage.testing.service', '-C', config_d) second = subprocess.Popen(args, universal_newlines=True, env=env) # Allow a TimeoutExpired exception to fail the test. try: code = second.wait(timeout=10) except subprocess.TimeoutExpired: second.kill() second.communicate() raise self.assertNotEqual(second.pid, proc.pid) self.assertEqual(code, 2) class TestDBusMainNoConfigD(unittest.TestCase): def test_start_with_missing_config_d(self): # Trying to start the D-Bus service with a configuration directory # that doesn't exist yields an error. terminate_service() wait_for_service(reload=False) # Try to start a new process with a bogus configuration directory. env = dict( DBUS_SYSTEM_BUS_ADDRESS=os.environ['DBUS_SYSTEM_BUS_ADDRESS']) coverage_env = os.environ.get('COVERAGE_PROCESS_START') if coverage_env is not None: env['COVERAGE_PROCESS_START'] = coverage_env args = (sys.executable, '-m', 'systemimage.testing.service', '-C', '/does/not/exist') with temporary_directory() as tempdir: stdout_path = os.path.join(tempdir, 'stdout') stderr_path = os.path.join(tempdir, 'stderr') with ExitStack() as files: stdout = files.enter_context( open(stdout_path, 'w', encoding='utf-8')) stderr = files.enter_context( open(stderr_path, 'w', encoding='utf-8')) try: subprocess.check_call(args, universal_newlines=True, env=env, stdout=stdout, stderr=stderr) except subprocess.CalledProcessError as error: self.assertNotEqual(error.returncode, 0) with open(stdout_path, 'r', encoding='utf-8') as fp: stdout = fp.read() with open(stderr_path, 'r', encoding='utf-8') as fp: stderr = fp.readlines() self.assertEqual(stdout, '') self.assertEqual( stderr[-1], 'Configuration directory not found: .load() requires a ' 'directory: /does/not/exist\n') class TestCLISignatures(ServerTestBase): INDEX_FILE = 'main.index_01.json' CHANNEL_FILE = 'main.channels_01.json' CHANNEL = 'stable' DEVICE = 'nexus7' @configuration def test_update_attempt_with_bad_signatures(self, config_d): # Let's say the index.json file has a bad signature. The update # should refuse to apply. self._setup_server_keyrings() # Sign the index.json file with the wrong (i.e. bad) key. index_path = os.path.join( self._serverdir, self.CHANNEL, self.DEVICE, 'index.json') sign(index_path, 'spare.gpg') stdout = StringIO() with ExitStack() as resources: resources.enter_context(capture_print(stdout)) # Patch the machine id. resources.push(machine_id('feedfacebeefbacafeedfacebeefbaca')) resources.enter_context(argv('-C', config_d, '--dry-run')) # Now that the index.json on the server is signed with the wrong # keyring, try to upgrade. code = cli_main() # The upgrade failed because of the signature. self.assertEqual(code, 1) with open(config.system.logfile, encoding='utf-8') as fp: logged = fp.readlines() # Slog through the log output and look for evidence that the upgrade # failed because of the faulty signature on the index.json file. # Then assert on those clues, but get rid of the trailing newlines.
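# The loop below assumes the relevant log lines are shaped roughly
# like this, with each path label followed by the path itself on the
# next line (the paths shown are illustrative):
#
#   systemimage.gpg.SignatureError: ...
#       sig path: ...
#   /tmp/.../index.json.asc
#       data path: ...
#   /tmp/.../index.json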
exception_found = False data_path = sig_path = None i = 0 while i < len(logged): line = logged[i][:-1] i += 1 if line.startswith('systemimage.gpg.SignatureError'): # There should only be one of these lines. self.assertFalse(exception_found) exception_found = True elif line.strip().startswith('sig path'): sig_path = logged[i][:-1] i += 1 elif line.strip().startswith('data path'): data_path = logged[i][:-1] i += 1 # Check the clues. self.assertTrue(exception_found) self.assertTrue(sig_path.endswith('index.json.asc'), repr(sig_path)) self.assertTrue(data_path.endswith('index.json'), repr(data_path)) @configuration def test_update_attempt_with_bad_signatures_overridden(self, config_d): # Let's say the index.json file has a bad signature. Normally, the # update should refuse to apply, but we override the GPG checks so it # will succeed. self._setup_server_keyrings() # Sign the index.json file with the wrong (i.e. bad) key. index_path = os.path.join( self._serverdir, self.CHANNEL, self.DEVICE, 'index.json') sign(index_path, 'spare.gpg') stdout = StringIO() stderr = StringIO() with ExitStack() as resources: resources.enter_context(capture_print(stdout)) resources.enter_context( patch('systemimage.main.sys.stderr', stderr)) # Patch the machine id. resources.push(machine_id('feedfacebeefbacafeedfacebeefbaca')) resources.enter_context( argv('-C', config_d, '--dry-run', '--skip-gpg-verification')) # Now that the index.json on the server is signed with the wrong # keyring, try to upgrade. code = cli_main() # The upgrade succeeded because the GPG checks were skipped. self.assertEqual(code, 0) self.assertEqual(stdout.getvalue(), """\ Upgrade path is 1200:1201:1304 Target phase: 64% """) # And we get the scary warning on the console. self.assertMultiLineEqual(stderr.getvalue(), """\ WARNING: All GPG signature verifications have been disabled. Your upgrades are INSECURE. """) class TestCLIProgress(ServerTestBase): INDEX_FILE = 'main.index_01.json' CHANNEL_FILE = 'main.channels_01.json' CHANNEL = 'stable' DEVICE = 'nexus7' def setUp(self): super().setUp() # Use a private name to avoid conflicts with the superclass attribute. self.__resources = ExitStack() try: self._stdout = StringIO() self._stderr = StringIO() # Some output uses print() and others use sys.stdout.write(). Be # sure to capture them both to the same object. self.__resources.enter_context(capture_print(self._stdout)) self.__resources.enter_context( patch('systemimage.main.sys.stdout', self._stdout)) self.__resources.enter_context( patch('systemimage.main.sys.stderr', self._stderr)) except: self.__resources.close() raise self.addCleanup(self.__resources.close) @configuration def test_dots_progress(self, config_d): # --progress=dots prints a bunch of dots to stderr. self._setup_server_keyrings() with ExitStack() as resources: resources.enter_context( patch('systemimage.main.LINE_LENGTH', 10)) resources.enter_context( argv('-C', config_d, '-b', '0', '--no-apply', '--progress', 'dots')) cli_main() # There should be some dots in the stderr. self.assertGreater(self._stderr.getvalue().count('.'), 2) @configuration def test_json_progress(self, config_d): # --progress=json prints some JSON to stdout. self._setup_server_keyrings() with argv('-C', config_d, '-b', '0', '--no-apply', '--progress', 'json'): cli_main() # stdout is now filled with JSON goodness. We can't assert too much # about the contents though.
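# Each line of stdout holds one JSON record; judging by the
# assertions below, a progress record looks something like this
# (the numbers are invented for illustration):
#
#   {"type": "progress", "now": 104857, "total": 314572}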
line_count = 0 for line in self._stdout.getvalue().splitlines(): line_count += 1 record = json.loads(line) self.assertEqual(record['type'], 'progress') self.assertIn('now', record) self.assertIn('total', record) self.assertGreater(line_count, 4) @configuration def test_logfile_progress(self, config_d): # --progress=logfile dumps some messages to the log file. self._setup_server_keyrings() log_mock = MagicMock() from systemimage.main import _LogfileProgress class Testable(_LogfileProgress): def __init__(self, log): super().__init__(log) self._log = log_mock with ExitStack() as resources: resources.enter_context( patch('systemimage.main._LogfileProgress', Testable)) resources.enter_context( argv('-C', config_d, '-b', '0', '--no-apply', '--progress', 'logfile')) cli_main() self.assertGreater(log_mock.debug.call_count, 4) positional, keyword = log_mock.debug.call_args self.assertTrue(positional[0].startswith('received: ')) @configuration def test_all_progress(self, config_d): # We can have more than one --progress flag. self._setup_server_keyrings() log_mock = MagicMock() from systemimage.main import _LogfileProgress class Testable(_LogfileProgress): def __init__(self, log): super().__init__(log) self._log = log_mock with ExitStack() as resources: resources.enter_context( patch('systemimage.main.LINE_LENGTH', 10)) resources.enter_context( patch('systemimage.main._LogfileProgress', Testable)) resources.enter_context( argv('-C', config_d, '-b', '0', '--no-apply', '--progress', 'dots', '--progress', 'json', '--progress', 'logfile')) cli_main() self.assertGreater(self._stderr.getvalue().count('.'), 2) line_count = 0 for line in self._stdout.getvalue().splitlines(): line_count += 1 record = json.loads(line) self.assertEqual(record['type'], 'progress') self.assertIn('now', record) self.assertIn('total', record) self.assertGreater(line_count, 4) self.assertGreater(log_mock.debug.call_count, 4) positional, keyword = log_mock.debug.call_args self.assertTrue(positional[0].startswith('received: ')) @configuration def test_bad_progress(self, config_d): # An unknown progress type results in an error. with ExitStack() as resources: resources.enter_context( argv('-C', config_d, '-b', '0', '--no-apply', '--progress', 'not-a-meter')) cm = resources.enter_context(self.assertRaises(SystemExit)) cli_main() self.assertEqual(cm.exception.code, 2) self.assertEqual( self._stderr.getvalue().splitlines()[-1], 'system-image-cli: error: Unknown progress meter: not-a-meter') @configuration def test_json_progress_errors(self, config): # When an error occurs in the state machine, --progress=json should # produce some client-consumable output. LP: #1463061 self._setup_server_keyrings() with ExitStack() as resources: resources.enter_context( argv('-C', config.config_d, '-b', '0', '--no-apply', '--progress', 'json')) # It's maybe not the best thing to hook into a private # implementation function in order to cause the state machine to # fail, but it's expedient and works with both downloaders. resources.enter_context( patch('systemimage.state._copy_if_missing', side_effect=RuntimeError('Bad things!'))) exit_code = cli_main() self.assertEqual(exit_code, 1) # stdout is now filled with JSON progress. The last line should be # the error record. lines = self._stdout.getvalue().splitlines() record = json.loads(lines[-1]) self.assertEqual(record['type'], 'error', lines) self.assertEqual(record['msg'], 'Bad things!') @configuration def test_no_json_progress_errors(self, config): # Like above, but without --progress=json. 
self._setup_server_keyrings() with ExitStack() as resources: resources.enter_context( argv('-C', config.config_d, '-b', '0', '--no-apply')) # It's maybe not the best thing to hook into a private # implementation function in order to cause the state machine to # fail, but it's expedient and works with both downloaders. resources.enter_context( patch('systemimage.state._copy_if_missing', side_effect=RuntimeError)) exit_code = cli_main() self.assertEqual(exit_code, 1) # Without --progress=json, no progress records are written to # stdout at all. lines = self._stdout.getvalue().splitlines() self.assertEqual(len(lines), 0) @unittest.skipIf(USING_PYCURL, 'UDM-only tests') class TestCLIGSMOverride(ServerTestBase): INDEX_FILE = 'main.index_05.json' CHANNEL_FILE = 'main.channels_02.json' CHANNEL = 'daily' DEVICE = 'manta' @configuration def test_no_gsm_override(self, config_d): # Without --override-gsm, the normal auto_download setting rules. self._setup_server_keyrings() Settings().set('auto_download', '1') with ExitStack() as resources: resources.enter_context(argv('-C', config_d, '-b', '0')) mock = resources.enter_context( patch('systemimage.udm.UDMDownloadManager._set_gsm')) exit_code = cli_main() self.assertEqual(exit_code, 0) # The last time the mock was called was for the download of the data # files. Here, the first argument that the method was called with is # the interface, but the second argument is the flag we care about. # It's called as a keyword argument, so dig this out of the mock's # call args. args, kws = mock.call_args self.assertFalse(kws['allow_gsm']) @configuration def test_gsm_override(self, config_d): # --override-gsm overrides any local setting for auto_download. self._setup_server_keyrings() Settings().set('auto_download', '1') with ExitStack() as resources: resources.enter_context( argv('-C', config_d, '-b', '0', '--override-gsm')) mock = resources.enter_context( patch('systemimage.udm.UDMDownloadManager._set_gsm')) exit_code = cli_main() self.assertEqual(exit_code, 0) # The last time the mock was called was for the download of the data # files. Here, the first argument that the method was called with is # the interface, but the second argument is the flag we care about. # It's called as a keyword argument, so dig this out of the mock's # call args. args, kws = mock.call_args self.assertTrue(kws['allow_gsm']) ./systemimage/tests/test_index.py0000644000015600001650000002575412701500553017315 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test channel/device index parsing.""" __all__ = [ 'TestIndex', 'TestDownloadIndex', ] import os import unittest from contextlib import ExitStack from datetime import datetime, timezone from systemimage.gpg import SignatureError from systemimage.helpers import temporary_directory from systemimage.state import State from systemimage.testing.helpers import ( configuration, copy, get_index, make_http_server, makedirs, setup_keyring_txz, setup_keyrings, sign) from systemimage.testing.nose import SystemImagePlugin class TestIndex(unittest.TestCase): @classmethod def setUpClass(self): SystemImagePlugin.controller.set_mode(cert_pem='cert.pem') def test_index_global(self): index = get_index('index.index_02.json') self.assertEqual( index.global_.generated_at, datetime(2013, 4, 29, 18, 45, 27, tzinfo=timezone.utc)) def test_index_image_count(self): index = get_index('index.index_02.json') self.assertEqual(len(index.images), 0) index = get_index('index.index_03.json') self.assertEqual(len(index.images), 2) def test_image_20130300_full(self): index = get_index('index.index_05.json') image = index.images[0] self.assertEqual( image.descriptions, {'description': 'Some kind of daily build'}) self.assertEqual(image.type, 'full') self.assertEqual(image.version, 20130300) self.assertTrue(image.bootme) self.assertEqual(len(image.files), 3) # The first file is the device dependent image. The second is the # device independent file, and the third is the version zip. dev, ind, ver = image.files self.assertEqual(dev.path, '/sprint/nexus7/nexus7-20130300.full.zip') self.assertEqual(dev.signature, '/sprint/nexus7/nexus7-20130300.full.zip.asc') self.assertEqual(dev.checksum, 'abcdef0') self.assertEqual(dev.order, 0) self.assertEqual(dev.size, 0) # Let's not check the whole file, just a few useful bits. self.assertEqual(ind.checksum, 'abcdef1') self.assertEqual(ind.order, 0) self.assertEqual(ver.checksum, 'abcdef2') self.assertEqual(ver.order, 1) def test_image_20130500_minversion(self): # Some full images have a minimum version older than which they refuse # to upgrade from. index = get_index('index.index_05.json') image = index.images[5] self.assertEqual(image.type, 'full') self.assertEqual(image.version, 20130500) self.assertTrue(image.bootme) self.assertEqual(image.minversion, 20130100) def test_image_descriptions(self): # Image descriptions can come in a variety of locales. index = get_index('index.index_01.json') self.assertEqual(index.images[0].descriptions, { 'description': 'Full A'}) self.assertEqual(index.images[3].descriptions, { 'description': 'Full B', 'description-en': 'The full B', }) self.assertEqual(index.images[4].descriptions, { 'description': 'Delta B.1', 'description-en_US': 'This is the delta B.1', 'description-xx': 'XX This is the delta B.1', 'description-yy': 'YY This is the delta B.1', 'description-yy_ZZ': 'YY-ZZ This is the delta B.1', }) # The second delta. self.assertEqual(index.images[5].descriptions, { 'description': 'Delta B.2', 'description-xx': 'Oh delta, my delta', 'description-xx_CC': 'This hyar is the delta B.2', }) class TestDownloadIndex(unittest.TestCase): maxDiff = None @classmethod def setUpClass(self): SystemImagePlugin.controller.set_mode(cert_pem='cert.pem') def setUp(self): # Start the HTTPS server running. Vend it out of a temporary # directory which we load up with the right files. 
self._stack = ExitStack() try: self._serverdir = self._stack.enter_context(temporary_directory()) self._stack.push(make_http_server( self._serverdir, 8943, 'cert.pem', 'key.pem')) except: self._stack.close() raise def tearDown(self): self._stack.close() def _copysign(self, src, dst, keyring): server_dst = os.path.join(self._serverdir, dst) makedirs(os.path.dirname(server_dst)) copy(src, self._serverdir, dst) sign(server_dst, keyring) @configuration def test_load_index_good_path(self): # Load the index.json pointed to by the channels.json. All signatures # validate correctly and there is no device keyring or blacklist. self._copysign( 'index.channels_05.json', 'channels.json', 'image-signing.gpg') # index.index_04.json path B will win, with no bootme flags. self._copysign( 'index.index_04.json', 'stable/nexus7/index.json', 'image-signing.gpg') setup_keyrings() state = State() state.run_thru('get_index') self.assertEqual( state.index.global_.generated_at, datetime(2013, 4, 29, 18, 45, 27, tzinfo=timezone.utc)) self.assertEqual( state.index.images[0].files[1].checksum, 'bcd') @configuration def test_load_index_with_device_keyring(self): # Here, the index.json file is signed with a device keyring. self._copysign( 'index.channels_02.json', 'channels.json', 'image-signing.gpg') # index.index_04.json path B will win, with no bootme flags. self._copysign( 'index.index_04.json', 'stable/nexus7/index.json', 'device-signing.gpg') setup_keyrings() setup_keyring_txz( 'device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), os.path.join(self._serverdir, 'stable', 'nexus7', 'device.tar.xz')) state = State() state.run_thru('get_index') self.assertEqual( state.index.global_.generated_at, datetime(2013, 4, 29, 18, 45, 27, tzinfo=timezone.utc)) self.assertEqual( state.index.images[0].files[1].checksum, 'bcd') @configuration def test_load_index_with_device_keyring_and_signing_key(self): # Here, the index.json file is signed with the image signing keyring, # even though there is a device key. That's fine. self._copysign( 'index.channels_02.json', 'channels.json', 'image-signing.gpg') # index.index_04.json path B will win, with no bootme flags. self._copysign( 'index.index_04.json', 'stable/nexus7/index.json', 'image-signing.gpg') setup_keyrings() setup_keyring_txz( 'device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), os.path.join(self._serverdir, 'stable', 'nexus7', 'device.tar.xz')) state = State() state.run_thru('get_index') self.assertEqual( state.index.global_.generated_at, datetime(2013, 4, 29, 18, 45, 27, tzinfo=timezone.utc)) self.assertEqual( state.index.images[0].files[1].checksum, 'bcd') @configuration def test_load_index_with_bad_keyring(self): # Here, the index.json file is signed with a defective device keyring. self._copysign( 'index.channels_02.json', 'channels.json', 'image-signing.gpg') # This will be signed by a keyring that is not the device keyring. self._copysign( 'index.index_04.json', 'stable/nexus7/index.json', 'spare.gpg') setup_keyrings() setup_keyring_txz( 'device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), os.path.join(self._serverdir, 'stable', 'nexus7', 'device.tar.xz')) state = State() state.run_until('get_index') self.assertRaises(SignatureError, next, state) @configuration def test_load_index_with_blacklist(self): # Here, we've blacklisted the device key. self._copysign( 'index.channels_02.json', 'channels.json', 'image-signing.gpg') # The index will be signed by the device keyring, which we then # blacklist.
self._copysign( 'index.index_04.json', 'stable/nexus7/index.json', 'device-signing.gpg') setup_keyrings() setup_keyring_txz( 'device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), os.path.join(self._serverdir, 'stable', 'nexus7', 'device.tar.xz')) setup_keyring_txz( 'device-signing.gpg', 'image-master.gpg', dict(type='blacklist'), os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz')) state = State() state.run_until('get_index') self.assertRaises(SignatureError, next, state) @configuration def test_missing_channel(self): # The system's channel does not exist. self._copysign( 'index.channels_03.json', 'channels.json', 'image-signing.gpg') # index.index_04.json path B will win, with no bootme flags. self._copysign( 'index.index_04.json', 'stable/nexus7/index.json', 'image-signing.gpg') setup_keyrings() # Our channel (stable) isn't in the channels.json file, so there's # nothing to do. Running the state machine to its conclusion leaves # us with no index file. state = State() list(state) # There really is nothing left to do. self.assertIsNone(state.index) @configuration def test_missing_device(self): # The system's device does not exist. self._copysign( 'index.channels_04.json', 'channels.json', 'image-signing.gpg') # index.index_04.json path B will win, with no bootme flags. self._copysign( 'index.index_04.json', 'stable/nexus7/index.json', 'image-signing.gpg') setup_keyrings() # Our device (nexus7) isn't in the channels.json file, so there's # nothing to do. Running the state machine to its conclusion leaves # us with no index file. state = State() list(state) # There really is nothing left to do. self.assertIsNone(state.index) ./systemimage/tests/test_scores.py0000644000015600001650000002164412701500553017502 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . __all__ = [ 'TestPhasedUpdates', 'TestVersionDetail', 'TestWeightedScorer', ] import unittest from systemimage.candidates import get_candidates from systemimage.scores import WeightedScorer from systemimage.testing.helpers import descriptions, get_index from unittest.mock import patch class TestWeightedScorer(unittest.TestCase): def setUp(self): self.scorer = WeightedScorer() def test_choose_no_candidates(self): # If there are no candidates, then there is no path to upgrade. self.assertEqual(self.scorer.choose([], 'devel'), []) def test_score_no_candidates(self): self.assertEqual(self.scorer.score([]), []) def test_one_path(self): index = get_index('scores.index_02.json') candidates = get_candidates(index, 600) # There's only one path. scores = self.scorer.score(candidates) # The score is 200 for the two extra bootme flags. self.assertEqual(scores, [200]) # And we upgrade to the only path available. winner = self.scorer.choose(candidates, 'devel') # There are two images in the winning path. 
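# Taken together with test_three_paths below, the expected scores
# suggest a weighting of roughly 100 points per extra reboot plus one
# point per extra MiB downloaded, with a very large penalty for paths
# that do not end at the highest available version. That is an
# inference from the test data here, not the scorer's literal formula.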
self.assertEqual(len(winner), 2) self.assertEqual([image.version for image in winner], [1300, 1301]) def test_three_paths(self): # - Path A requires three extra reboots, is the smallest total # download and leaves you at the highest available version. # Score: 300 # # - Path B requires one extra reboot, but is 100MiB bigger and leaves # you at the highest available version. Score: 200 # # - Path C requires no extra reboots, but is 400MiB bigger and leaves # you at 1303 instead of the highest 1304. For that reason, it gets # a huge score making it impossible to win. # # Path B wins. index = get_index('scores.index_03.json') candidates = get_candidates(index, 600) # There are three paths. The scores are as above. scores = self.scorer.score(candidates) self.assertEqual(scores, [300, 200, 9401]) winner = self.scorer.choose(candidates, 'devel') self.assertEqual(len(winner), 3) self.assertEqual([image.version for image in winner], [1200, 1201, 1304]) self.assertEqual(descriptions(winner), ['Full B', 'Delta B.1', 'Delta B.2']) def test_tied_candidates(self): # LP: #1206866 - TypeError when two candidate paths scored equal. # # index_04.json was captured from real data causing the traceback. index = get_index('scores.index_04.json') candidates = get_candidates(index, 1) path = self.scorer.choose(candidates, 'devel') self.assertEqual(len(path), 1) self.assertEqual(path[0].version, 1800) class TestPhasedUpdates(unittest.TestCase): def setUp(self): self.scorer = WeightedScorer() def test_inside_phase_gets_update(self): # When the final image on an update path has a phase percentage higher # than the device percentage, the candidate path is okay. In this # case, the `Full B` has phase of 50%. index = get_index('scores.index_05.json') candidates = get_candidates(index, 100) with patch('systemimage.scores.phased_percentage', return_value=22): winner = self.scorer.choose(candidates, 'devel') descriptions = [] for image in winner: descriptions.extend(image.descriptions.values()) self.assertEqual(descriptions, ['Full B', 'Delta B.1', 'Delta B.2']) def test_outside_phase_gets_update(self): # When the final image on an update path has a phase percentage lower # than the device percentage, the scorer falls back to the next # candidate path. index = get_index('scores.index_05.json') candidates = get_candidates(index, 100) with patch('systemimage.scores.phased_percentage', return_value=66): winner = self.scorer.choose(candidates, 'devel') self.assertEqual(descriptions(winner), ['Full A', 'Delta A.1', 'Delta A.2']) def test_equal_phase_gets_update(self): # When the final image on an update path has a phase percentage exactly # equal to the device percentage, the candidate path is okay. In this # case, the `Full B` has phase of 50%. index = get_index('scores.index_05.json') candidates = get_candidates(index, 100) with patch('systemimage.scores.phased_percentage', return_value=50): winner = self.scorer.choose(candidates, 'devel') self.assertEqual(descriptions(winner), ['Full B', 'Delta B.1', 'Delta B.2']) def test_pulled_update(self): # When the final image on an update path has a phase percentage of # zero, then regardless of the device's percentage, the candidate path # is not okay. In this case, the `Full B` has phase of 0%. 
index = get_index('scores.index_01.json') candidates = get_candidates(index, 100) with patch('systemimage.scores.phased_percentage', return_value=0): winner = self.scorer.choose(candidates, 'devel') self.assertEqual(descriptions(winner), ['Full A', 'Delta A.1', 'Delta A.2']) def test_pulled_update_insanely_negative_randint(self): # When the final image on an update path has a phase percentage of # zero, then regardless of the device's percentage (even if randint # returned some insane value), the candidate path is not okay. In this # case, the `Full B` has phase of 0%. index = get_index('scores.index_01.json') candidates = get_candidates(index, 100) with patch('systemimage.scores.phased_percentage', return_value=-100): winner = self.scorer.choose(candidates, 'devel') self.assertEqual(descriptions(winner), ['Full A', 'Delta A.1', 'Delta A.2']) def test_pulled_update_insanely_positive_randint(self): # When the final image on an update path has a phase percentage of # zero, then regardless of the device's percentage (even if randint # returned some insane value), the candidate path is not okay. In this # case, the `Full B` has phase of 0%; moreover, a device percentage # above 100 disqualifies every other path as well, so there is no # winner at all. index = get_index('scores.index_01.json') candidates = get_candidates(index, 100) with patch('systemimage.scores.phased_percentage', return_value=1000): winner = self.scorer.choose(candidates, 'devel') self.assertEqual(len(winner), 0) class TestVersionDetail(unittest.TestCase): def setUp(self): self.scorer = WeightedScorer() def test_version_detail(self): # The index.json file has three paths for updates, but only one is # selected. The winning path lands on an image with a version_detail # key. index = get_index('scores.index_06.json') candidates = get_candidates(index, 600) scores = self.scorer.score(candidates) self.assertEqual(scores, [300, 200, 9401]) winner = self.scorer.choose(candidates, 'devel') self.assertEqual(len(winner), 3) self.assertEqual([image.version for image in winner], [1200, 1201, 1304]) self.assertEqual(descriptions(winner), ['Full B', 'Delta B.1', 'Delta B.2']) self.assertEqual(winner[-1].version_detail, "ubuntu=105,raw-device=205,version=305") def test_no_version_detail(self): # The index.json file has three paths for updates, but only one is # selected. The winning path lands on an image without a # version_detail key.
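# (Contrast with test_version_detail above: when the winning image lacks the # key entirely, version_detail evidently defaults to the empty string rather # than raising, as the final assertion below shows.)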
index = get_index('scores.index_07.json') candidates = get_candidates(index, 600) scores = self.scorer.score(candidates) self.assertEqual(scores, [300, 200, 9401]) winner = self.scorer.choose(candidates, 'devel') self.assertEqual(len(winner), 3) self.assertEqual([image.version for image in winner], [1200, 1201, 1304]) self.assertEqual(descriptions(winner), ['Full B', 'Delta B.1', 'Delta B.2']) self.assertEqual(winner[-1].version_detail, '') ./systemimage/tests/data/0000755000015600001650000000000012701500553015475 5ustar jenkinsjenkins./systemimage/tests/data/main.config_07.ini0000644000015600001650000000013312701500553020671 0ustar jenkinsjenkins[service] base: localhost http_port: 8980 https_port: 8943 channel: saucy build_number: 33 ./systemimage/tests/data/channel.channels_04.json0000644000015600001650000000276212701500553022104 0ustar jenkinsjenkins{ "daily": { "alias": "saucy", "devices": { "grouper": { "index": "/daily/grouper/index.json" }, "maguro": { "index": "/daily/maguro/index.json" }, "mako": { "index": "/daily/mako/index.json" }, "manta": { "index": "/daily/manta/index.json" } } }, "saucy": { "devices": { "grouper": { "index": "/saucy/grouper/index.json" }, "maguro": { "index": "/saucy/maguro/index.json" }, "mako": { "index": "/saucy/mako/index.json" }, "manta": { "index": "/saucy/manta/index.json", "keyring": { "path": "/saucy/manta/device-signing.tar.xz", "signature": "/saucy/manta/device-signing.tar.xz.asc" } } } }, "saucy-proposed": { "hidden": true, "devices": { "grouper": { "index": "/saucy-proposed/grouper/index.json" }, "maguro": { "index": "/saucy-proposed/maguro/index.json" }, "mako": { "index": "/saucy-proposed/mako/index.json" }, "manta": { "index": "/saucy-proposed/manta/index.json" } } } } ./systemimage/tests/data/main.channels_02.json0000644000015600001650000000351512701500553021413 0ustar jenkinsjenkins{ "daily": { "alias": "tubular", "devices": { "grouper": { "index": "/daily/grouper/index.json" }, "maguro": { "index": "/daily/maguro/index.json" }, "mako": { "index": "/daily/mako/index.json" }, "manta": { "index": "/daily/manta/index.json", "keyring": { "path": "/daily/manta/device-signing.tar.xz", "signature": "/daily/manta/device-signing.tar.xz.asc" } } } }, "saucy": { "devices": { "grouper": { "index": "/saucy/grouper/index.json" }, "maguro": { "index": "/saucy/maguro/index.json" }, "mako": { "index": "/saucy/mako/index.json" }, "manta": { "index": "/saucy/manta/index.json", "keyring": { "path": "/saucy/manta/device-signing.tar.xz", "signature": "/saucy/manta/device-signing.tar.xz.asc" } } } }, "tubular": { "hidden": true, "devices": { "grouper": { "index": "/tubular/grouper/index.json" }, "maguro": { "index": "/tubular/maguro/index.json" }, "mako": { "index": "/tubular/mako/index.json" }, "manta": { "index": "/tubular/manta/index.json", "keyring": { "path": "/tubular/manta/device-signing.tar.xz", "signature": "/tubular/manta/device-signing.tar.xz.asc" } } } } } ./systemimage/tests/data/winner.index_02.json0000644000015600001650000001723312701500553021307 0ustar jenkinsjenkins{ "global": { "generated_at": "Mon Apr 29 18:45:27 UTC 2013" }, "images": [ { "bootme": true, "description": "Full A", "files": [ { "checksum": "abc", "order": 1, "path": "/a/b/c.txt", "signature": "/a/b/c.txt.asc", "size": 104857600 }, { "checksum": "bcd", "order": 1, "path": "/b/c/d.txt", "signature": "/b/c/d.txt.asc", "size": 104857600 }, { "checksum": "cde", "order": 1, "path": "/c/d/e.txt", "signature": "/c/d/e.txt.asc", "size": 104857600 } ], "type": "full", "version": 1300 }, 
{ "base": 1300, "bootme": true, "description": "Delta A.1", "files": [ { "checksum": "def", "order": 1, "path": "/d/e/f.txt", "signature": "/d/e/f.txt.asc", "size": 104857600 }, { "checksum": "ef0", "order": 1, "path": "/e/f/0.txt", "signature": "/e/f/0.txt.asc", "size": 104857600 }, { "checksum": "f01", "order": 1, "path": "/f/e/1.txt", "signature": "/f/e/1.txt.asc", "size": 104857600 } ], "type": "delta", "version": 1301 }, { "base": 1301, "bootme": true, "description": "Delta A.2", "files": [ { "checksum": "012", "order": 1, "path": "/0/1/2.txt", "signature": "/0/1/2.txt.asc", "size": 104857600 }, { "checksum": "123", "order": 1, "path": "/1/2/3.txt", "signature": "/1/2/3.txt.asc", "size": 104857600 }, { "checksum": "234", "order": 1, "path": "/2/3/4.txt", "signature": "/2/3/4.txt.asc", "size": 104857600 } ], "type": "delta", "version": 1304 }, { "description": "Full B", "files": [ { "checksum": "da70dfa4d9f95ac979f921e8e623358236313f334afcd06cddf8a5621cf6a1e9", "order": 1, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 104857600 }, { "checksum": "b3a8e0e1f9ab1bfe3a36f231f676f78bb30a519d2b21e6c530c0eee8ebb4a5d0", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 104857600 }, { "checksum": "97a6d21df7c51e8289ac1a8c026aaac143e15aa1957f54f42e30d8f8a85c3a55", "order": 1, "path": "/5/6/7.txt", "signature": "/5/6/7.txt.asc", "size": 104857600 } ], "type": "full", "version": 1200 }, { "base": 1200, "description": "Delta B.1", "files": [ { "checksum": "cebe3d9d614ba5c19f633566104315854a11353a333bf96f16b5afa0e90abdc4", "order": 1, "path": "/6/7/8.txt", "signature": "/6/7/8.txt.asc", "size": 104857600 }, { "checksum": "35a9e381b1a27567549b5f8a6f783c167ebf809f1c4d6a9e367240484d8ce281", "order": 1, "path": "/7/8/9.txt", "signature": "/7/8/9.txt.asc", "size": 104857600 }, { "checksum": "6bd6c3f7808391e8b74f5c2d58810809eda5c134aaa7f1b27ddf4b445c421ac5", "order": 1, "path": "/8/9/a.txt", "signature": "/8/9/a.txt.asc", "size": 104857600 } ], "type": "delta", "version": 1201 }, { "base": 1201, "description": "Delta B.2", "files": [ { "checksum": "8c43d75d5b9f1aa9fc3fabb6b60b6c06553324352399a33febce95a1b588d1d6", "order": 1, "path": "/9/a/b.txt", "signature": "/9/a/b.txt.asc", "size": 104857600 }, { "checksum": "20e796c128096d229ba89bf412a53c3151d170a409c2c8c1dd8e414087b7ffae", "order": 1, "path": "/f/e/d.txt", "signature": "/f/e/d.txt.asc", "size": 104857600 }, { "checksum": "278238e8bafa4709c77aa723e168101acd6ee1fb9fcc1b6eca4762e5c7dad768", "order": 1, "path": "/e/d/c.txt", "signature": "/e/d/c.txt.asc", "size": 209715200 } ], "type": "delta", "version": 1304 }, { "description": "Full C", "files": [ { "checksum": "dcb", "order": 1, "path": "/d/c/b.txt", "signature": "/d/c/b.txt.asc", "size": 104857600 }, { "checksum": "cba", "order": 1, "path": "/c/b/a.txt", "signature": "/c/b/a.txt.asc", "size": 104857600 }, { "checksum": "ba9", "order": 1, "path": "/b/a/9.txt", "signature": "/b/a/9.txt.asc", "size": 104857600 } ], "type": "full", "version": 1100 }, { "base": 1100, "description": "Delta C.1", "files": [ { "checksum": "a98", "order": 1, "path": "/a/9/8.txt", "signature": "/a/9/8.txt.asc", "size": 104857600 }, { "checksum": "987", "order": 1, "path": "/9/8/7.txt", "signature": "/9/8/7.txt.asc", "size": 104857600 }, { "checksum": "876", "order": 1, "path": "/8/7/6.txt", "signature": "/8/7/6.txt.asc", "size": 838860800 } ], "type": "delta", "version": 1303 } ] } ./systemimage/tests/data/dbus.index_01.json0000644000015600001650000000223412701500553020734 0ustar 
jenkinsjenkins{ "global": { "generated_at": "Thu Aug 01 08:01:00 UTC 2013" }, "images": [ { "description": "Full", "files": [ { "checksum": "da70dfa4d9f95ac979f921e8e623358236313f334afcd06cddf8a5621cf6a1e9", "order": 3, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 104857600 }, { "checksum": "b3a8e0e1f9ab1bfe3a36f231f676f78bb30a519d2b21e6c530c0eee8ebb4a5d0", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 104857600 }, { "checksum": "97a6d21df7c51e8289ac1a8c026aaac143e15aa1957f54f42e30d8f8a85c3a55", "order": 2, "path": "/5/6/7.txt", "signature": "/5/6/7.txt.asc", "size": 104857600 } ], "type": "full", "version": 1600, "bootme": true } ] } ./systemimage/tests/data/config.config_10.ini0000644000015600001650000000121112701500553021202 0ustar jenkinsjenkins# Bogus configuration file missing the [system] stanza. [service] base: phablet.example.com # Negative ports are not allowed. http_port: 80 https_port: disabled channel: stable build_number: 0 [gpg] archive_master: /etc/phablet/archive-master.tar.xz image_master: /etc/phablet/image-master.tar.xz image_signing: /var/lib/phablet/image-signing.tar.xz device_signing: /var/lib/phablet/device-signing.tar.xz [updater] cache_partition: {tmpdir}/android/cache data_partition: {vardir}/lib/phablet/updater [hooks] device: systemimage.device.SystemProperty scorer: systemimage.scores.WeightedScorer apply: systemimage.apply.Reboot [dbus] lifetime: 3s ./systemimage/tests/data/state.index_06.json0000644000015600001650000000226412701500553021127 0ustar jenkinsjenkins{ "global": { "generated_at": "Mon Apr 29 18:45:27 UTC 2013" }, "images": [ { "base": 1, "description": "Delta", "files": [ { "checksum": "da70dfa4d9f95ac979f921e8e623358236313f334afcd06cddf8a5621cf6a1e9", "order": 3, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 104857600 }, { "checksum": "b3a8e0e1f9ab1bfe3a36f231f676f78bb30a519d2b21e6c530c0eee8ebb4a5d0", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 104857600 }, { "checksum": "97a6d21df7c51e8289ac1a8c026aaac143e15aa1957f54f42e30d8f8a85c3a55", "order": 2, "path": "/5/6/7.txt", "signature": "/5/6/7.txt.asc", "size": 104857600 } ], "type": "delta", "version": 600, "bootme": true } ] } ./systemimage/tests/data/index.index_02.json0000644000015600001650000000014312701500553021104 0ustar jenkinsjenkins{ "global": { "generated_at": "Mon Apr 29 18:45:27 UTC 2013" }, "images": [] } ./systemimage/tests/data/image-master.gpg0000644000015600001650000000224012701500553020545 0ustar jenkinsjenkins QZhFYmC++G B꒙@3MM GwAqf5]T` mH.[gD0ʭY+|#dRI;)-%R8"QZh   ;Ư2f?WER0e^~}(Dm{9=VRtP`~X`K*M4 ýޭ zoy΃CJ] oDms)NgJnRiT" -[xW=IƘ8@14'@UDMP!I2A0!h @e'.lHzTo=w#\̱i͒q*yЭzyzP$ӫsGCh: WJL=oZ GU¥9m/;*@P5}M$ꦄPm4KXε\Ftqtˇ3Z8;!2~yA[q~_4_ з5vlEHB rb@щ㋼'2 ^3'U֑9Vxa3p~hyF ('>)pE=[fDT7_kX]VK8z Q`tɾG})p5ZFFUP"].H{_N}n#M"Qʨ)#\`DVH˃(;8"Q[   ifOˆrZFo K;i.j?Y*n{je'lanmdx4.8E<;wX~fD;;v|:ʋ^[&ޅwV!XؿC예X 9N7yBPBǬ\RD4Qޝ=a:(mLb޹7yVMsK t7PoDHnt+QJHAS n"pS}9}¢Nnw;|vёǪ- 4qGkE USixg %iӥPi2yBKц៩^w1vtްowliimLZ]`HHM v/1m5؉sW,;0+5o<JtvڈeĈ?xOC19y~CqpX7F 7+l` H'jV% 7S1PώU&)r8"Q*   RkpD4,à[ 5(r0*Kk-OuΧ,m}p<9-8sT*`pRэ"fra{s0%qRen]$:COZDh鋾e7y!8~d{z[ @sQ>RY@:FDˡ9 XM"_>{[,ӦN sOwtG]8D"1k$ǸP@-ce4'Ƭ(Jp lR'l;3#UŻjureEiZn'i?rXD霶S Gz!kSv K#F](΢ͺ 3TiR uT;#|nL\&gdtíZ) .63ǣԮh ~ ('Myj%3f'\n'Pd Fٽ*8Ճ7d'QgRg$xR٨ژH+Z*gӈB}+C"myˆSL$7}xWV09k LI2 S~9ګB8"QY   ~b^1F /… qpܮv.gb肎}rAfQHeYm9w53.XY&Re-vx0=V:Ն2*g^ac_";BX9Jt&)P %4QšY!Ufs+σvJ Yi+N@G6Su6[0` #]um;VѸj9h=US𔡹9/t;D4=8_t' !ÈKS3C&8{"T_QTCP%ѹ9V#6ۖynh6@@:ߙ~lAy!Yg]!oAō=;5㖍/5͢YQYٵ 
./systemimage/tests/data/state.index_03.json0000644000015600001650000000223412701500553021121 0ustar jenkinsjenkins{ "global": { "generated_at": "Thu Aug 01 08:01:00 UTC 2013" }, "images": [ { "description": "Full", "files": [ { "checksum": "da70dfa4d9f95ac979f921e8e623358236313f334afcd06cddf8a5621cf6a1e9", "order": 3, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 104857600 }, { "checksum": "b3a8e0e1f9ab1bfe3a36f231f676f78bb30a519d2b21e6c530c0eee8ebb4a5d0", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 104857600 }, { "checksum": "97a6d21df7c51e8289ac1a8c026aaac143e15aa1957f54f42e30d8f8a85c3a55", "order": 2, "path": "/5/6/7.txt", "signature": "/5/6/7.txt.asc", "size": 104857600 } ], "type": "full", "version": 1600, "bootme": true } ] } ./systemimage/tests/data/config.config_04.ini0000644000015600001650000000146512701500553021220 0ustar jenkinsjenkins# TEMPLATE configuration file for specifying relatively static information # about the upgrade resolution process. # # This is used by the DBus tests. [service] base: localhost http_port: 8980 https_port: 8943 channel: stable build_number: 0 [system] timeout: 1s tempdir: {tmpdir}/tmp logfile: {logfile} loglevel: {loglevel} settings_db: {vardir}/settings.db [gpg] archive_master: {vardir}/etc/archive-master.tar.xz image_master: {vardir}/keyrings/image-master.tar.xz image_signing: {vardir}/keyrings/image-signing.tar.xz device_signing: {vardir}/keyrings/device-signing.tar.xz [updater] cache_partition: {vardir}/android/cache data_partition: {vardir}/ubuntu/cache [hooks] device: systemimage.testing.demo.TestingDevice scorer: systemimage.scores.WeightedScorer apply: systemimage.apply.Reboot [dbus] lifetime: 5m ./systemimage/tests/data/state.channels_07.json0000644000015600001650000000107212701500553021610 0ustar jenkinsjenkins{ "daily": { "devices": { "nexus7": { "index": "/daily/nexus7/index.json", "keyring": { "path": "/daily/nexus7/device-keyring.tar.xz", "signature": "/daily/nexus7/device-keyring.tar.xz.asc" } }, "nexus4":{ "index": "/daily/nexus4/index.json" } } }, "stable": { "devices": { "nexus7":{ "index": "/stable/nexus7/index.json" } } } } ./systemimage/tests/data/candidates.index_02.json0000644000015600001650000000101012701500553022076 0ustar jenkinsjenkins{ "global": { "generated_at": "Mon Apr 29 18:45:27 UTC 2013" }, "images": [ { "bootme": true, "description": "New full build 1", "files": [], "minversion": 600, "type": "full", "version": 1300 }, { "bootme": true, "description": "New full build 2", "files": [], "minversion": 1100, "type": "full", "version": 1400 } ] } ./systemimage/tests/data/expired_cert.pem0000644000015600001650000000236512701500553020663 0ustar jenkinsjenkins-----BEGIN CERTIFICATE----- MIIDfTCCAmWgAwIBAgIJAL6MaQcCVQhBMA0GCSqGSIb3DQEBBQUAMFUxCzAJBgNV BAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMR0wGwYDVQQKDBRJbWFnZSBCYXNl ZCBVcGdyYWRlcjESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTEzMDUwMTIwMDUzM1oX DTEzMDUwMjIwMDUzM1owVTELMAkGA1UEBhMCVVMxEzARBgNVBAgMClNvbWUtU3Rh dGUxHTAbBgNVBAoMFEltYWdlIEJhc2VkIFVwZ3JhZGVyMRIwEAYDVQQDDAlsb2Nh bGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDRPjLNbEBSuHhy NhiyhHO71RTe0Lcm5GE3tZa9bymADrqnjNm5RhsP85SFy/kQ0e2LdYadA3+XrHm/ +vXtMe7HWQPu8q+OMiMpW8knHmi2QUCqMqImoqe5Nvms93mosoKoNe2d5XzwhKyA DjIlW2o72OPZg5ShVZBfc5TG+2Nf5CIdagzamyOY6zfpbhfks9PiMKtHeDp05BD0 LMsPaS947tYFVzQ3RCzKNc9JNqh43TXiywQPOvBGJnTtYDydRTUh6t2nA8c+az1+ dSNrLh+v5+iY0z20x34ZY25bpaeIZhbrkn0T968yh1HE01niJWpz/1e6+oHxXvTv 2EVgLoh1AgMBAAGjUDBOMB0GA1UdDgQWBBQLbxWu2KYhdixmIgvV5POZYlntojAf
BgNVHSMEGDAWgBQLbxWu2KYhdixmIgvV5POZYlntojAMBgNVHRMEBTADAQH/MA0G CSqGSIb3DQEBBQUAA4IBAQCLbGZ7gIU26FJp/OAbSVa0tT5pPCBNHjMNIfp/Smep +waPVjSzkHZCYYknP9A9/P4d+idvpTyURRkQECiiUjYzeSgF1dR3kvnsXnCJnugG GVeZpMnRlktb+dMcwprQhgChVyWDrMR6VpcMkoNC+yJ4XhbN0jhlooEfmnDAOI0j 5QJmhR+dzfH+xMfFlu41i/budHdwvRqU/G9eNA0i6jn1GI8LHiTRe3tPF4wDpljd 0/QR47QdiXLUsE9vDljjAI0KfXYo7KyjvdmyD3xX2QVNORlVv2GpzyREiyXnx7RB OmQZVDLtaFwmJSQdciZGCSm9bSLNPENAZIDpf1GKXsEx -----END CERTIFICATE----- ./systemimage/tests/data/cert.pem0000644000015600001650000000236512701500553017143 0ustar jenkinsjenkins-----BEGIN CERTIFICATE----- MIIDfTCCAmWgAwIBAgIJAKSjaMNTvxe2MA0GCSqGSIb3DQEBBQUAMFUxCzAJBgNV BAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMR0wGwYDVQQKDBRJbWFnZSBCYXNl ZCBVcGdyYWRlczESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTEzMDUxMzE5NDEyMFoX DTIzMDUxMTE5NDEyMFowVTELMAkGA1UEBhMCVVMxEzARBgNVBAgMClNvbWUtU3Rh dGUxHTAbBgNVBAoMFEltYWdlIEJhc2VkIFVwZ3JhZGVzMRIwEAYDVQQDDAlsb2Nh bGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQClg10agVIHs/fy Lh1b7pAIN7c+uWwRk664OmVBJXA/wPEvC1G/FtDL0QnNRWyNHZBFwippXMgyslHi JFHDOTONqUH4JucsrsqYsZrk6oWJXLIZ2b+v0kAJlJ4AvCdPo0aJo1egze1ZxCtM RuJN3yiZo6q5mYKYPOBgNcZFp2WGnxJduj7p85cp101u7WbY9i/p+idcBRs/ylhZ vd8SzXWH3dmsci+6auLTD3TLYnldneNZEnjNzxPuRAl3o8+VeFblM8/qIOurBxAM R5UPrCy0XdDELXq4Fea07qVzdzchBrcY1HykF824VI3hNiYRLOXHrJIYE/TfVwKM n6+ACfgDAgMBAAGjUDBOMB0GA1UdDgQWBBSClon/pvRAiAr75kZhRnklweC/8jAf BgNVHSMEGDAWgBSClon/pvRAiAr75kZhRnklweC/8jAMBgNVHRMEBTADAQH/MA0G CSqGSIb3DQEBBQUAA4IBAQCBpXr9SgJmTTP4V73Lt+MlbCt1TGSBqZ5jKCWrEy9p vRJhGTeQyqofAXRMD1RcFJZEonDZMVIYb+Ml25XtHbBiZUX2r1h/dfUyq8v5ZTHo MLs6tUUojWjQ1TQ4iRruSdAflA5A7Lsy6yEvDPreQRxi+IPmVyX/655Iv+GUZn0g I4trcNYoIxd+Dr+PQjF8JH/ikljt8om7gYf9OvVW+/kKjhkkBoZTVO9MMwta3USp 2uT7G/C8+90pcNe1ywB74KbzxEcd4zGZDFP+mxNfZb5XWfHgEB0Lf2aasx+fl0PD e+wiDfOJ8XUhG6WUfmvSVpGS0+bQgZCdarltDQiildF4 -----END CERTIFICATE----- ./systemimage/tests/data/scores.index_01.json0000644000015600001650000001621512701500553021301 0ustar jenkinsjenkins{ "global": { "generated_at": "Mon Apr 29 18:45:27 UTC 2013" }, "images": [ { "bootme": true, "description": "Full A", "files": [ { "checksum": "abc", "order": 1, "path": "/a/b/c.txt", "signature": "/a/b/c.txt.asc", "size": 104857600 }, { "checksum": "bcd", "order": 1, "path": "/b/c/d.txt", "signature": "/b/c/d.txt.asc", "size": 104857600 }, { "checksum": "cde", "order": 1, "path": "/c/d/e.txt", "signature": "/c/d/e.txt.asc", "size": 104857600 } ], "type": "full", "version": 300 }, { "base": 300, "bootme": true, "description": "Delta A.1", "files": [ { "checksum": "def", "order": 1, "path": "/d/e/f.txt", "signature": "/d/e/f.txt.asc", "size": 104857600 }, { "checksum": "ef0", "order": 1, "path": "/e/f/0.txt", "signature": "/e/f/0.txt.asc", "size": 104857600 }, { "checksum": "f01", "order": 1, "path": "/f/e/1.txt", "signature": "/f/e/1.txt.asc", "size": 104857600 } ], "type": "delta", "version": 301 }, { "base": 301, "bootme": true, "description": "Delta A.2", "files": [ { "checksum": "012", "order": 1, "path": "/0/1/2.txt", "signature": "/0/1/2.txt.asc", "size": 104857600 }, { "checksum": "123", "order": 1, "path": "/1/2/3.txt", "signature": "/1/2/3.txt.asc", "size": 104857600 }, { "checksum": "234", "order": 1, "path": "/2/3/4.txt", "signature": "/2/3/4.txt.asc", "size": 104857600 } ], "type": "delta", "version": 304 }, { "description": "Full B", "files": [ { "checksum": "345", "order": 1, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 104857600 }, { "checksum": "456", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 104857600 }, { 
"checksum": "567", "order": 1, "path": "/5/6/7.txt", "signature": "/5/6/7.txt.asc", "size": 104857600 } ], "type": "full", "version": 200 }, { "base": 200, "description": "Delta B.1", "files": [ { "checksum": "678", "order": 1, "path": "/6/7/8.txt", "signature": "/6/7/8.txt.asc", "size": 104857600 }, { "checksum": "789", "order": 1, "path": "/7/8/9.txt", "signature": "/7/8/9.txt.asc", "size": 104857600 }, { "checksum": "89a", "order": 1, "path": "/8/9/a.txt", "signature": "/8/9/a.txt.asc", "size": 104857600 } ], "type": "delta", "version": 201 }, { "base": 201, "description": "Delta B.2", "files": [ { "checksum": "9ab", "order": 1, "path": "/9/a/b.txt", "signature": "/9/a/b.txt.asc", "size": 104857600 }, { "checksum": "fed", "order": 1, "path": "/f/e/d.txt", "signature": "/f/e/d.txt.asc", "size": 104857600 }, { "checksum": "edc", "order": 1, "path": "/e/d/c.txt", "signature": "/e/d/c.txt.asc", "size": 209715200 } ], "type": "delta", "version": 304, "phased-percentage": 0 }, { "description": "Full C", "files": [ { "checksum": "dcb", "order": 1, "path": "/d/c/b.txt", "signature": "/d/c/b.txt.asc", "size": 104857600 }, { "checksum": "cba", "order": 1, "path": "/c/b/a.txt", "signature": "/c/b/a.txt.asc", "size": 104857600 }, { "checksum": "ba9", "order": 1, "path": "/b/a/9.txt", "signature": "/b/a/9.txt.asc", "size": 104857600 } ], "type": "full", "version": 100 }, { "base": 100, "description": "Delta C.1", "files": [ { "checksum": "a98", "order": 1, "path": "/a/9/8.txt", "signature": "/a/9/8.txt.asc", "size": 104857600 }, { "checksum": "987", "order": 1, "path": "/9/8/7.txt", "signature": "/9/8/7.txt.asc", "size": 104857600 }, { "checksum": "876", "order": 1, "path": "/8/7/6.txt", "signature": "/8/7/6.txt.asc", "size": 838860800 } ], "type": "delta", "version": 303 } ] } ./systemimage/tests/data/bad_key.pem0000644000015600001650000000325012701500553017576 0ustar jenkinsjenkins-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDbDw62FuVhi2+f FOT7KJor/P3r9hPcNa7CevG7kujOjCReWnzcxcpcACzvFEN1csqXlRh6iwLVCHug 87PfhPa6Qx4DUnC5dvzw698pLGY+ehEnD4nTruHDLSF/94qdyUoho8wmyGZQIvwN e6YL5KzjKD/6rTFKn+5nDM5la+BAuEuMhnRRUTeA+a1pD3syOdWZgXl+tWRRp4Tm CCfHfokHDrLEgWXNcw+3Gc8YN/VfgrJQRhHp0p18pX6s0QqX6R4dSaIbmCa24E8G EftDydUbNRVX16sInclx0p3ldb9/en2dhzjKMpD1/TUZ9KaQ4NK1ykJnNCFGWJxY sEwtnebfAgMBAAECggEAEELo46mFc51HNvb1HJPiBfvs2ZRV2xX9GCDKSxNXxuVu Wba76COipvu4WjMV4uUXlvMRcx9YoXnUu5j7tE8JhcAvvS1kgtALbNMFqOfL2z1y goTLxXQI/FUgVICKwLXNk/u1zvc3aSF/tnyEUgOytdYqEbDDpPeEWhAATlUYu5UP h9vvdTp0bKrNXGmNXzLk/cEGr0X+l+wpDDWv0uiPgL53dDk0mD5ZK/qNBDKenYrf LmYTsA+w/cDuFVKIrQ70Nu4OyJze3uc/1+BsMhzr8Arf8SzUzorTPliTgq4vX4N+ k45areYfieS5Zw1db3I/e73wm4r1rV6gM4OmtSD0qQKBgQD6ebsMoVT/Iry5+iW0 C1igt/PWm0qmoVGFbjcQNcUENNuftwSQVxB1KMyRVd5t1Jry4eK50b8brxqNrbsf 6nfCLQlFPTapcLMypTY69kJSB0FQ7vH6DSdibqg3vkGO9+kJg8AkDo/B7SwwDWJs w1lqO0xj8190nScHJBF6cDuLIwKBgQDf4/ADLFUq5Apu+bs4TLM0veAfX5DzXclz Dm5tJuJiLjwdYfY9T+wh7o9H0q5qpTCkXrGItVb2ztzGdy+zcnlyyDFUC7Sazmew 642AE8xWWih+eWEj/dmNGc1SCduFUUVBANbNXapqbDQJUroWMEMcLc0LeG55LjP5 LXjDR9jfFQKBgFYbdyRWaiPzyzGcP03wAJ3v31HNvRIfryjRxPg7o+lYSpBTmKwO YkgEqJ7mCDt1tXb+FLWQ18QqpFREf2nvbxA/70nwS1RgvBoTFdubQR12BQxPuLwf vYRnSkkvjNYbf++XbXx3zQ/7+w7h6w488gZFMHPD/PLX+8zGp4OSBcZFAoGBAMku v1t6mMVDyQRTr6I1ecro294VN+EX449Z5mx6xm5G8YnNn0EU8jDDd0iozkXSTlVF Ke6YVJ7O36APNXPgZ7V2oqzNH+sDkjVuVOZD5BesSDrtwxZeYwDhsJgJd5LipoJ0 yJAdnKaouFGwXrrVf1hw55FpFIwbsmQ0MhH4G67JAoGBAMUNYhdEVHrSANOSfUgK eQ+0smuanLcH1DLaZCf1W4jrEcwWhLzIT175mwd9QhyW1st7BeBhmrb/GLs3Khrm 
8QgaVvDuM1sIW22tqnCJBy1Z+5qwfyTcieXzTuMT8N1CqC7oArvrpccLg8ljC5r3 nEGmZjwQEp7vpTYfkZLPGkM6 -----END PRIVATE KEY----- ./systemimage/tests/data/config.config_08.ini0000644000015600001650000000146212701500553021221 0ustar jenkinsjenkins# Configuration file for specifying relatively static information about the # upgrade resolution process. [service] base: phablet.example.com # Negative ports are not allowed. http_port: 80 https_port: -1 channel: stable build_number: 0 [system] timeout: 10s tempdir: /tmp logfile: /var/log/system-image/client.log loglevel: error settings_db: /var/lib/phablet/settings.db [gpg] archive_master: /etc/phablet/archive-master.tar.xz image_master: /etc/phablet/image-master.tar.xz image_signing: /var/lib/phablet/image-signing.tar.xz device_signing: /var/lib/phablet/device-signing.tar.xz [updater] cache_partition: /android/cache data_partition: /var/lib/phablet/updater [hooks] device: systemimage.device.SystemProperty scorer: systemimage.scores.WeightedScorer apply: systemimage.apply.Reboot [dbus] lifetime: 3s ./systemimage/tests/data/candidates.index_12.json0000644000015600001650000000653212701500553022105 0ustar jenkinsjenkins{ "global": { "generated_at": "Mon Apr 29 18:45:27 UTC 2013" }, "images": [ { "base": 100, "description": "Delta A", "files": [ { "checksum": "da70dfa4d9f95ac979f921e8e623358236313f334afcd06cddf8a5621cf6a1e9", "order": 3, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 104857600 }, { "checksum": "b3a8e0e1f9ab1bfe3a36f231f676f78bb30a519d2b21e6c530c0eee8ebb4a5d0", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 104857600 }, { "checksum": "97a6d21df7c51e8289ac1a8c026aaac143e15aa1957f54f42e30d8f8a85c3a55", "order": 2, "path": "/5/6/7.txt", "signature": "/5/6/7.txt.asc", "size": 104857600 } ], "type": "delta", "version": 200, "bootme": true }, { "base": 200, "description": "Delta B", "files": [ { "checksum": "da70dfa4d9f95ac979f921e8e623358236313f334afcd06cddf8a5621cf6a1e9", "order": 3, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 104857600 }, { "checksum": "b3a8e0e1f9ab1bfe3a36f231f676f78bb30a519d2b21e6c530c0eee8ebb4a5d0", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 104857600 }, { "checksum": "97a6d21df7c51e8289ac1a8c026aaac143e15aa1957f54f42e30d8f8a85c3a55", "order": 2, "path": "/5/6/7.txt", "signature": "/5/6/7.txt.asc", "size": 104857600 } ], "type": "delta", "version": 300, "bootme": true }, { "base": 300, "description": "Delta C", "files": [ { "checksum": "da70dfa4d9f95ac979f921e8e623358236313f334afcd06cddf8a5621cf6a1e9", "order": 3, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 104857600 }, { "checksum": "b3a8e0e1f9ab1bfe3a36f231f676f78bb30a519d2b21e6c530c0eee8ebb4a5d0", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 104857600 }, { "checksum": "97a6d21df7c51e8289ac1a8c026aaac143e15aa1957f54f42e30d8f8a85c3a55", "order": 2, "path": "/5/6/7.txt", "signature": "/5/6/7.txt.asc", "size": 104857600 } ], "type": "delta", "version": 400, "bootme": true } ] } ./systemimage/tests/data/00.ini0000644000015600001650000000147412701500553016423 0ustar jenkinsjenkins# TEMPLATE configuration file for specifying relatively static information # about the upgrade resolution process. # # This is used by the command line tests. 
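# # (The {tmpdir} and {vardir} tokens below are not literal paths; like the # {logfile} and {loglevel} tokens in config.config_04.ini above, they are # presumably substituted by the test harness before the file is written out.)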
[service] base: localhost http_port: 8980 https_port: 8943 channel: stable build_number: 0 [system] timeout: 1s tempdir: {tmpdir}/tmp logfile: {tmpdir}/client.log loglevel: info settings_db: {vardir}/settings.db [gpg] archive_master: {vardir}/etc/archive-master.tar.xz image_master: {vardir}/keyrings/image-master.tar.xz image_signing: {vardir}/keyrings/image-signing.tar.xz device_signing: {vardir}/keyrings/device-signing.tar.xz [updater] cache_partition: {vardir}/android/cache data_partition: {vardir}/ubuntu/cache [hooks] device: systemimage.device.SystemProperty scorer: systemimage.scores.WeightedScorer apply: systemimage.apply.Reboot [dbus] lifetime: 2m ./systemimage/tests/data/nasty_cert.pem0000644000015600001650000000240512701500553020354 0ustar jenkinsjenkins-----BEGIN CERTIFICATE----- MIIDiTCCAnGgAwIBAgIJALS35E1u3JqeMA0GCSqGSIb3DQEBBQUAMFsxCzAJBgNV BAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMR0wGwYDVQQKDBRJbWFnZSBCYXNl ZCBVcGdyYWRlcjEYMBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tMB4XDTEzMDUxNDIx MzczOFoXDTIzMDUxMjIxMzczOFowWzELMAkGA1UEBhMCVVMxEzARBgNVBAgMClNv bWUtU3RhdGUxHTAbBgNVBAoMFEltYWdlIEJhc2VkIFVwZ3JhZGVyMRgwFgYDVQQD DA93d3cuZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB AQDUSmVCIHUanrTM018h8kYAc+IAXv8EO8y1B8eO41ojd2OOq1tlhATBdgSQYvhr 0593EUHkrgu/7ouHiAcuXYR8OyliQEwlSYWLcKAL5QVrff1cTpyBWCy+mHbAPHmK ZK2qkLtjM7MoE1v8v+MNyXC5xodkcvlDpsci99fOAW1JbokgcQGrPFq5fRhZCk78 mAkTYN4KV0GNIf5ubNQPrspha2DP73WxTWgv6HOKH3IcnO3rXBGWo1ywnam8vtVH 6OexphTMssdhhYxyay4cW5Jk8atvbM8xc+NGisk2KZOzL6mXJ/i8qS+3uDJoRlz2 uGNRmN3qdCoeGk6/quY9BlQNAgMBAAGjUDBOMB0GA1UdDgQWBBT9RsLjGzSYEyzu qFR+KG9F8RKYhTAfBgNVHSMEGDAWgBT9RsLjGzSYEyzuqFR+KG9F8RKYhTAMBgNV HRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQA/ZPCrw6RLJtzUMZbTDjrDd0Wg UflA3K4BTXe8pJOInkwcZG/7cTs0TWpNVIXmBHxzmvWgLd8aFWTfWx1gzOyl8q4t CIBVsHYsqlLsIfR1vojhDDC37+9mL/lbKq6lYs45tEiDfMuRZRxK06U5tDQouEep DP1QSt3JOUwidGp0Xs+WsIh5WDZNr9A6BibOV14/S1e5KQ53zKEUmuw9LgmEnZ9b LT/DS/syhKlgTfG1iZZFqsxAWGajaohcJ1PCSFedkdUDyPsszDBlZ8OFwIZKmle7 YKM6KV5uPrvTi5jAyjo/dfCCTqlrGbi0hSnMKR7uuwdx+f4yiIsLhW46Q/ra -----END CERTIFICATE----- ./systemimage/tests/data/candidates.index_06.json0000644000015600001650000000122412701500553022101 0ustar jenkinsjenkins{ "global": { "generated_at": "Mon Apr 29 18:45:27 UTC 2013" }, "images": [ { "description": "Full 1", "files": [], "type": "full", "version": 1300, "bootme": true }, { "base": 1300, "description": "Delta 1", "files": [], "type": "delta", "version": 1301, "bootme": true }, { "base": 1301, "description": "Delta 2", "files": [], "type": "delta", "version": 1302, "bootme": true } ] } ./systemimage/tests/data/candidates.index_01.json0000644000015600001650000000014312701500553022073 0ustar jenkinsjenkins{ "global": { "generated_at": "Mon Apr 29 18:45:27 UTC 2013" }, "images": [] } ./systemimage/tests/data/winner.channels_02.json0000644000015600001650000000050112701500553021761 0ustar jenkinsjenkins{ "stable": { "devices": { "nexus7":{ "index": "/stable/nexus7/index.json", "keyring": { "path": "stable/nexus7/device.tar.xz", "signature": "stable/nexus7/device.tar.xz.asc" } } } } } ./systemimage/tests/data/main.config_01.ini0000644000015600001650000000144412701500553020671 0ustar jenkinsjenkins# Configuration file for specifying relatively static information about the # upgrade resolution process. 
[service] base: phablet.example.com http_port: 80 https_port: 443 channel: special build_number: 0 [system] timeout: 10s tempdir: /tmp logfile: /var/log/system-image/client.log loglevel: error settings_db: /var/lib/phablet/settings.db [gpg] archive_master: /usr/share/phablet/archive-master.tar.xz image_master: /etc/phablet/image-master.tar.xz image_signing: /var/lib/phablet/image-signing.tar.xz device_signing: /var/lib/phablet/device-signing.tar.xz [updater] cache_partition: {tmpdir}/android/cache data_partition: {vardir}/lib/phablet/updater [hooks] device: systemimage.device.SystemProperty scorer: systemimage.scores.WeightedScorer apply: systemimage.apply.Reboot [dbus] lifetime: 2m ./systemimage/tests/data/state.index_04.json0000644000015600001650000000226712701500553021130 0ustar jenkinsjenkins{ "global": { "generated_at": "Mon Apr 29 18:45:27 UTC 2013" }, "images": [ { "base": 100, "description": "Delta", "files": [ { "checksum": "da70dfa4d9f95ac979f921e8e623358236313f334afcd06cddf8a5621cf6a1e9", "order": 3, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 104857600 }, { "checksum": "b3a8e0e1f9ab1bfe3a36f231f676f78bb30a519d2b21e6c530c0eee8ebb4a5d0", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 104857600 }, { "checksum": "97a6d21df7c51e8289ac1a8c026aaac143e15aa1957f54f42e30d8f8a85c3a55", "order": 2, "path": "/5/6/7.txt", "signature": "/5/6/7.txt.asc", "size": 104857600 } ], "type": "delta", "version": 1600, "bootme": true } ] } ./systemimage/tests/data/index.channels_02.json0000644000015600001650000000050112701500553021566 0ustar jenkinsjenkins{ "stable": { "devices": { "nexus7":{ "index": "/stable/nexus7/index.json", "keyring": { "path": "stable/nexus7/device.tar.xz", "signature": "stable/nexus7/device.tar.xz.asc" } } } } } ./systemimage/tests/data/scores.index_04.json0000644000015600001650000020223512701500553021303 0ustar jenkinsjenkins{ "images": [ { "type": "full", "descriptions": { "description": "20130726.1" }, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130726.1.full.tar.xz.asc", "size": 263581532, "path": "/daily/ubuntu/ubuntu-20130726.1.full.tar.xz", "checksum": "edc23d02ee2ba48b45e2cf1e1a736eb1436b47a76a94f5b9375d26a9e5fc4e4c" }, { "order": 1, "signature": "/daily/grouper/grouper-20130726.full.tar.xz.asc", "size": 29476584, "path": "/daily/grouper/grouper-20130726.full.tar.xz", "checksum": "7d6698264774c2e663c3f3237baa9d5d14790edf133e959ce4abcd1f58b533b8" }, { "order": 2, "signature": "/daily/grouper/version-20130725.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130725.tar.xz", "checksum": "c62bbb25efa17e9a75361da0cb045e714be4ba11e2ed84c51821049d73bcd594" } ], "version": 1725 }, { "type": "delta", "descriptions": { "description": "20130727" }, "version": 1726, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130727.delta-20130726.1.tar.xz.asc", "size": 15364620, "path": "/daily/ubuntu/ubuntu-20130727.delta-20130726.1.tar.xz", "checksum": "8d002d126e5149d08c0f97ac16f3416cb5ed9c246e7d063366c65a00c14c6581" }, { "order": 1, "signature": "/daily/grouper/grouper-20130727.delta-20130726.tar.xz.asc", "size": 27281492, "path": "/daily/grouper/grouper-20130727.delta-20130726.tar.xz", "checksum": "34eb0d19455e27040fd8e7505125a100c2c9af434b92546015efeb88953ca99a" }, { "order": 2, "signature": "/daily/grouper/version-20130726.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130726.tar.xz", "checksum": "e03476af725db9a468d094e560e4c6910222b7f3a6d3b707d146eafe67114df6" } ], "base": 1725 }, { 
"type": "delta", "descriptions": { "description": "20130728" }, "version": 1727, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130728.delta-20130727.tar.xz.asc", "size": 16213432, "path": "/daily/ubuntu/ubuntu-20130728.delta-20130727.tar.xz", "checksum": "8764073b29fdafaf45574aa442a8810d965efc05e5975de41f8379a9389545de" }, { "order": 1, "signature": "/daily/grouper/grouper-20130728.delta-20130727.tar.xz.asc", "size": 27281596, "path": "/daily/grouper/grouper-20130728.delta-20130727.tar.xz", "checksum": "15811cd7634e5d40386f6160187410d393bc2f1d3ac72e9179c70e8606de617d" }, { "order": 2, "signature": "/daily/grouper/version-20130727.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130727.tar.xz", "checksum": "a12a2217700984fd167c7c831700320e760c041f10e9aa4a6f01e38ae8d2cf7d" } ], "base": 1726 }, { "type": "delta", "descriptions": { "description": "20130729" }, "version": 1728, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130729.delta-20130728.tar.xz.asc", "size": 15368380, "path": "/daily/ubuntu/ubuntu-20130729.delta-20130728.tar.xz", "checksum": "0860c60161a45c30a57b7fa5a975a1f8ad74cd66a91c0a799f409a569f37af0d" }, { "order": 1, "signature": "/daily/grouper/grouper-20130729.delta-20130728.tar.xz.asc", "size": 27281856, "path": "/daily/grouper/grouper-20130729.delta-20130728.tar.xz", "checksum": "11ea5647a442666167f9bf886740bce6ad67f4aa6be8e6365e3414a54077c08f" }, { "order": 2, "signature": "/daily/grouper/version-20130728.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130728.tar.xz", "checksum": "50c68a5a04f6d463b776b431b136eb2b7b08e4e326df5ab958e778d3fb72360c" } ], "base": 1727 }, { "type": "delta", "descriptions": { "description": "20130730" }, "version": 1729, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.delta-20130729.tar.xz.asc", "size": 21963560, "path": "/daily/ubuntu/ubuntu-20130730.delta-20130729.tar.xz", "checksum": "5641b9668034b51e9ac0ca9aa7cf93255f501a792a7430703718fb0ae6185a69" }, { "order": 1, "signature": "/daily/grouper/grouper-20130730.delta-20130729.tar.xz.asc", "size": 27281332, "path": "/daily/grouper/grouper-20130730.delta-20130729.tar.xz", "checksum": "32ed374c7098989fe8e6a00bda22eb7bf1686325ef0b5aa99b9bd77aeec66d01" }, { "order": 2, "signature": "/daily/grouper/version-20130729.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130729.tar.xz", "checksum": "ffae1bb0654c1716141e05bf5f3c30afcc4c3db91df302635f0a06ce800c8cb4" } ], "base": 1728 }, { "type": "delta", "descriptions": { "description": "20130730.1" }, "version": 1730, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.1.delta-20130730.tar.xz.asc", "size": 16208916, "path": "/daily/ubuntu/ubuntu-20130730.1.delta-20130730.tar.xz", "checksum": "f4700a9d45daaafccdcea9e4c33829fbeacf48f6a57617936099036b33dc5fff" }, { "order": 1, "signature": "/daily/grouper/version-20130730.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130730.tar.xz", "checksum": "c7073504c85ef6b3c3ba551c9ca50417489f34536f0779cea771ac1b74e5fd21" } ], "base": 1729 }, { "type": "delta", "descriptions": { "description": "20130730.2" }, "version": 1731, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.2.delta-20130730.1.tar.xz.asc", "size": 11856888, "path": "/daily/ubuntu/ubuntu-20130730.2.delta-20130730.1.tar.xz", "checksum": "d8be295e3d0feed604353ea28373866217f8d69465647f97028237688144b02b" }, { "order": 1, "signature": "/daily/grouper/version-20130731.tar.xz.asc", "size": 196, "path": 
"/daily/grouper/version-20130731.tar.xz", "checksum": "69ef2059734bee22fe9e46761a32851a04f59471afc0234d653ddaa7e341b8e6" } ], "base": 1730 }, { "type": "delta", "descriptions": { "description": "20130731" }, "version": 1732, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz.asc", "size": 194004, "path": "/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz", "checksum": "a6f7671d8329ce47521d1b86ec13ac536c36c56ab8e5eded7d9198590536c7bf" }, { "order": 1, "signature": "/daily/grouper/version-20130732.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130732.tar.xz", "checksum": "d99d58e71042286886250d94f8d25002d392f635e72be72b3e842bfb0611ef8c" } ], "base": 1731 }, { "type": "delta", "descriptions": { "description": "20130731.3" }, "version": 1733, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz.asc", "size": 14993132, "path": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz", "checksum": "002c10476be7a4db9d2fda3b9583eb67c2873b82c5f95483f6df7b253d7a8171" }, { "order": 1, "signature": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz.asc", "size": 27282040, "path": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz", "checksum": "199bd9ec51808f48bc81d5fd460cd8591a47b7911ac78eb7af1d7536963d79ff" }, { "order": 2, "signature": "/daily/grouper/version-20130733.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130733.tar.xz", "checksum": "b97a22325987c3bf4b1769c10464c93f17c072c31b5b324a06e9861aed532ee9" } ], "base": 1732 }, { "type": "delta", "descriptions": { "description": "20130801" }, "version": 1800, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz.asc", "size": 18625000, "path": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz", "checksum": "f81a98769b92a9ba2c61b75ee83aa7fa7417767788f518553ae195e0a846768f" }, { "order": 1, "signature": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz.asc", "size": 27285408, "path": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz", "checksum": "b255325b494bc700109ca1e3f7eb8e2e2d3f0e590d938ee9a9438746c8eb9fe0" }, { "order": 2, "signature": "/daily/grouper/version-20130800.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130800.tar.xz", "checksum": "f38ce3721013182d5d810e52c690d319aa5de5da62b98ff217fa02a3c4cf0741" } ], "base": 1733 }, { "type": "full", "descriptions": { "description": "20130727" }, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130727.full.tar.xz.asc", "size": 263541776, "path": "/daily/ubuntu/ubuntu-20130727.full.tar.xz", "checksum": "278776baa51ed542285427aab5196526924f63829b9f1a3818393a279f7f57c7" }, { "order": 1, "signature": "/daily/grouper/grouper-20130727.full.tar.xz.asc", "size": 29478256, "path": "/daily/grouper/grouper-20130727.full.tar.xz", "checksum": "6dbd61f0f80f4d23b36e86f27f663ec44a40d2beee6d3a86d934112c8286cf7b" }, { "order": 2, "signature": "/daily/grouper/version-20130726.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130726.tar.xz", "checksum": "e03476af725db9a468d094e560e4c6910222b7f3a6d3b707d146eafe67114df6" } ], "version": 1726 }, { "type": "delta", "descriptions": { "description": "20130728" }, "version": 1727, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130728.delta-20130727.tar.xz.asc", "size": 16213432, "path": "/daily/ubuntu/ubuntu-20130728.delta-20130727.tar.xz", "checksum": "8764073b29fdafaf45574aa442a8810d965efc05e5975de41f8379a9389545de" }, { "order": 1, "signature": 
"/daily/grouper/grouper-20130728.delta-20130727.tar.xz.asc", "size": 27281596, "path": "/daily/grouper/grouper-20130728.delta-20130727.tar.xz", "checksum": "15811cd7634e5d40386f6160187410d393bc2f1d3ac72e9179c70e8606de617d" }, { "order": 2, "signature": "/daily/grouper/version-20130727.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130727.tar.xz", "checksum": "a12a2217700984fd167c7c831700320e760c041f10e9aa4a6f01e38ae8d2cf7d" } ], "base": 1726 }, { "type": "delta", "descriptions": { "description": "20130729" }, "version": 1728, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130729.delta-20130728.tar.xz.asc", "size": 15368380, "path": "/daily/ubuntu/ubuntu-20130729.delta-20130728.tar.xz", "checksum": "0860c60161a45c30a57b7fa5a975a1f8ad74cd66a91c0a799f409a569f37af0d" }, { "order": 1, "signature": "/daily/grouper/grouper-20130729.delta-20130728.tar.xz.asc", "size": 27281856, "path": "/daily/grouper/grouper-20130729.delta-20130728.tar.xz", "checksum": "11ea5647a442666167f9bf886740bce6ad67f4aa6be8e6365e3414a54077c08f" }, { "order": 2, "signature": "/daily/grouper/version-20130728.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130728.tar.xz", "checksum": "50c68a5a04f6d463b776b431b136eb2b7b08e4e326df5ab958e778d3fb72360c" } ], "base": 1727 }, { "type": "delta", "descriptions": { "description": "20130730" }, "version": 1729, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.delta-20130729.tar.xz.asc", "size": 21963560, "path": "/daily/ubuntu/ubuntu-20130730.delta-20130729.tar.xz", "checksum": "5641b9668034b51e9ac0ca9aa7cf93255f501a792a7430703718fb0ae6185a69" }, { "order": 1, "signature": "/daily/grouper/grouper-20130730.delta-20130729.tar.xz.asc", "size": 27281332, "path": "/daily/grouper/grouper-20130730.delta-20130729.tar.xz", "checksum": "32ed374c7098989fe8e6a00bda22eb7bf1686325ef0b5aa99b9bd77aeec66d01" }, { "order": 2, "signature": "/daily/grouper/version-20130729.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130729.tar.xz", "checksum": "ffae1bb0654c1716141e05bf5f3c30afcc4c3db91df302635f0a06ce800c8cb4" } ], "base": 1728 }, { "type": "delta", "descriptions": { "description": "20130730.1" }, "version": 1730, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.1.delta-20130730.tar.xz.asc", "size": 16208916, "path": "/daily/ubuntu/ubuntu-20130730.1.delta-20130730.tar.xz", "checksum": "f4700a9d45daaafccdcea9e4c33829fbeacf48f6a57617936099036b33dc5fff" }, { "order": 1, "signature": "/daily/grouper/version-20130730.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130730.tar.xz", "checksum": "c7073504c85ef6b3c3ba551c9ca50417489f34536f0779cea771ac1b74e5fd21" } ], "base": 1729 }, { "type": "delta", "descriptions": { "description": "20130730.2" }, "version": 1731, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.2.delta-20130730.1.tar.xz.asc", "size": 11856888, "path": "/daily/ubuntu/ubuntu-20130730.2.delta-20130730.1.tar.xz", "checksum": "d8be295e3d0feed604353ea28373866217f8d69465647f97028237688144b02b" }, { "order": 1, "signature": "/daily/grouper/version-20130731.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130731.tar.xz", "checksum": "69ef2059734bee22fe9e46761a32851a04f59471afc0234d653ddaa7e341b8e6" } ], "base": 1730 }, { "type": "delta", "descriptions": { "description": "20130731" }, "version": 1732, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz.asc", "size": 194004, "path": 
"/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz", "checksum": "a6f7671d8329ce47521d1b86ec13ac536c36c56ab8e5eded7d9198590536c7bf" }, { "order": 1, "signature": "/daily/grouper/version-20130732.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130732.tar.xz", "checksum": "d99d58e71042286886250d94f8d25002d392f635e72be72b3e842bfb0611ef8c" } ], "base": 1731 }, { "type": "delta", "descriptions": { "description": "20130731.3" }, "version": 1733, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz.asc", "size": 14993132, "path": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz", "checksum": "002c10476be7a4db9d2fda3b9583eb67c2873b82c5f95483f6df7b253d7a8171" }, { "order": 1, "signature": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz.asc", "size": 27282040, "path": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz", "checksum": "199bd9ec51808f48bc81d5fd460cd8591a47b7911ac78eb7af1d7536963d79ff" }, { "order": 2, "signature": "/daily/grouper/version-20130733.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130733.tar.xz", "checksum": "b97a22325987c3bf4b1769c10464c93f17c072c31b5b324a06e9861aed532ee9" } ], "base": 1732 }, { "type": "delta", "descriptions": { "description": "20130801" }, "version": 1800, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz.asc", "size": 18625000, "path": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz", "checksum": "f81a98769b92a9ba2c61b75ee83aa7fa7417767788f518553ae195e0a846768f" }, { "order": 1, "signature": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz.asc", "size": 27285408, "path": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz", "checksum": "b255325b494bc700109ca1e3f7eb8e2e2d3f0e590d938ee9a9438746c8eb9fe0" }, { "order": 2, "signature": "/daily/grouper/version-20130800.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-1800.tar.xz", "checksum": "f38ce3721013182d5d810e52c690d319aa5de5da62b98ff217fa02a3c4cf0741" } ], "base": 1733 }, { "type": "full", "descriptions": { "description": "20130730" }, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.full.tar.xz.asc", "size": 263677940, "path": "/daily/ubuntu/ubuntu-20130730.full.tar.xz", "checksum": "da85183c4db5e7f4ab2396c37bc504ee12e77d8b4571a49d5d2decf38aa40615" }, { "order": 1, "signature": "/daily/grouper/grouper-20130730.full.tar.xz.asc", "size": 29475508, "path": "/daily/grouper/grouper-20130730.full.tar.xz", "checksum": "9cc07362d569c575af47717dfe2ff4a48e6a59f8189690c4208428ebe00c86ce" }, { "order": 2, "signature": "/daily/grouper/version-20130729.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130729.tar.xz", "checksum": "ffae1bb0654c1716141e05bf5f3c30afcc4c3db91df302635f0a06ce800c8cb4" } ], "version": 1729 }, { "type": "delta", "descriptions": { "description": "20130730.1" }, "version": 1730, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.1.delta-20130730.tar.xz.asc", "size": 16208916, "path": "/daily/ubuntu/ubuntu-20130730.1.delta-20130730.tar.xz", "checksum": "f4700a9d45daaafccdcea9e4c33829fbeacf48f6a57617936099036b33dc5fff" }, { "order": 1, "signature": "/daily/grouper/version-20130730.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130730.tar.xz", "checksum": "c7073504c85ef6b3c3ba551c9ca50417489f34536f0779cea771ac1b74e5fd21" } ], "base": 1729 }, { "type": "delta", "descriptions": { "description": "20130730.2" }, "version": 1731, "files": [ { "order": 0, "signature": 
"/daily/ubuntu/ubuntu-20130730.2.delta-20130730.1.tar.xz.asc", "size": 11856888, "path": "/daily/ubuntu/ubuntu-20130730.2.delta-20130730.1.tar.xz", "checksum": "d8be295e3d0feed604353ea28373866217f8d69465647f97028237688144b02b" }, { "order": 1, "signature": "/daily/grouper/version-20130731.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130731.tar.xz", "checksum": "69ef2059734bee22fe9e46761a32851a04f59471afc0234d653ddaa7e341b8e6" } ], "base": 1730 }, { "type": "delta", "descriptions": { "description": "20130731" }, "version": 1732, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz.asc", "size": 194004, "path": "/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz", "checksum": "a6f7671d8329ce47521d1b86ec13ac536c36c56ab8e5eded7d9198590536c7bf" }, { "order": 1, "signature": "/daily/grouper/version-20130732.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130732.tar.xz", "checksum": "d99d58e71042286886250d94f8d25002d392f635e72be72b3e842bfb0611ef8c" } ], "base": 1731 }, { "type": "delta", "descriptions": { "description": "20130731.3" }, "version": 1733, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz.asc", "size": 14993132, "path": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz", "checksum": "002c10476be7a4db9d2fda3b9583eb67c2873b82c5f95483f6df7b253d7a8171" }, { "order": 1, "signature": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz.asc", "size": 27282040, "path": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz", "checksum": "199bd9ec51808f48bc81d5fd460cd8591a47b7911ac78eb7af1d7536963d79ff" }, { "order": 2, "signature": "/daily/grouper/version-20130733.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130733.tar.xz", "checksum": "b97a22325987c3bf4b1769c10464c93f17c072c31b5b324a06e9861aed532ee9" } ], "base": 1732 }, { "type": "delta", "descriptions": { "description": "20130801" }, "version": 1800, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz.asc", "size": 18625000, "path": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz", "checksum": "f81a98769b92a9ba2c61b75ee83aa7fa7417767788f518553ae195e0a846768f" }, { "order": 1, "signature": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz.asc", "size": 27285408, "path": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz", "checksum": "b255325b494bc700109ca1e3f7eb8e2e2d3f0e590d938ee9a9438746c8eb9fe0" }, { "order": 2, "signature": "/daily/grouper/version-20130800.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130800.tar.xz", "checksum": "f38ce3721013182d5d810e52c690d319aa5de5da62b98ff217fa02a3c4cf0741" } ], "base": 1733 }, { "type": "full", "descriptions": { "description": "20130728" }, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130728.full.tar.xz.asc", "size": 263627440, "path": "/daily/ubuntu/ubuntu-20130728.full.tar.xz", "checksum": "e312b28c234d939063b33c3ad88539e4d76cca97c79fb3578021a57ca9bee6af" }, { "order": 1, "signature": "/daily/grouper/grouper-20130728.full.tar.xz.asc", "size": 29475000, "path": "/daily/grouper/grouper-20130728.full.tar.xz", "checksum": "5c772c4dc523e403ca7b6fe8edc90fd4f3270c6d8831cd114de56ba8528d402d" }, { "order": 2, "signature": "/daily/grouper/version-20130727.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130727.tar.xz", "checksum": "a12a2217700984fd167c7c831700320e760c041f10e9aa4a6f01e38ae8d2cf7d" } ], "version": 1727 }, { "type": "delta", "descriptions": { 
"description": "20130729" }, "version": 1728, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130729.delta-20130728.tar.xz.asc", "size": 15368380, "path": "/daily/ubuntu/ubuntu-20130729.delta-20130728.tar.xz", "checksum": "0860c60161a45c30a57b7fa5a975a1f8ad74cd66a91c0a799f409a569f37af0d" }, { "order": 1, "signature": "/daily/grouper/grouper-20130729.delta-20130728.tar.xz.asc", "size": 27281856, "path": "/daily/grouper/grouper-20130729.delta-20130728.tar.xz", "checksum": "11ea5647a442666167f9bf886740bce6ad67f4aa6be8e6365e3414a54077c08f" }, { "order": 2, "signature": "/daily/grouper/version-20130728.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130728.tar.xz", "checksum": "50c68a5a04f6d463b776b431b136eb2b7b08e4e326df5ab958e778d3fb72360c" } ], "base": 1727 }, { "type": "delta", "descriptions": { "description": "20130730" }, "version": 1729, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.delta-20130729.tar.xz.asc", "size": 21963560, "path": "/daily/ubuntu/ubuntu-20130730.delta-20130729.tar.xz", "checksum": "5641b9668034b51e9ac0ca9aa7cf93255f501a792a7430703718fb0ae6185a69" }, { "order": 1, "signature": "/daily/grouper/grouper-20130730.delta-20130729.tar.xz.asc", "size": 27281332, "path": "/daily/grouper/grouper-20130730.delta-20130729.tar.xz", "checksum": "32ed374c7098989fe8e6a00bda22eb7bf1686325ef0b5aa99b9bd77aeec66d01" }, { "order": 2, "signature": "/daily/grouper/version-20130729.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130729.tar.xz", "checksum": "ffae1bb0654c1716141e05bf5f3c30afcc4c3db91df302635f0a06ce800c8cb4" } ], "base": 1728 }, { "type": "delta", "descriptions": { "description": "20130730.1" }, "version": 1730, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.1.delta-20130730.tar.xz.asc", "size": 16208916, "path": "/daily/ubuntu/ubuntu-20130730.1.delta-20130730.tar.xz", "checksum": "f4700a9d45daaafccdcea9e4c33829fbeacf48f6a57617936099036b33dc5fff" }, { "order": 1, "signature": "/daily/grouper/version-20130730.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130730.tar.xz", "checksum": "c7073504c85ef6b3c3ba551c9ca50417489f34536f0779cea771ac1b74e5fd21" } ], "base": 1729 }, { "type": "delta", "descriptions": { "description": "20130730.2" }, "version": 1731, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.2.delta-20130730.1.tar.xz.asc", "size": 11856888, "path": "/daily/ubuntu/ubuntu-20130730.2.delta-20130730.1.tar.xz", "checksum": "d8be295e3d0feed604353ea28373866217f8d69465647f97028237688144b02b" }, { "order": 1, "signature": "/daily/grouper/version-20130731.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130731.tar.xz", "checksum": "69ef2059734bee22fe9e46761a32851a04f59471afc0234d653ddaa7e341b8e6" } ], "base": 1730 }, { "type": "delta", "descriptions": { "description": "20130731" }, "version": 1732, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz.asc", "size": 194004, "path": "/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz", "checksum": "a6f7671d8329ce47521d1b86ec13ac536c36c56ab8e5eded7d9198590536c7bf" }, { "order": 1, "signature": "/daily/grouper/version-20130732.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130732.tar.xz", "checksum": "d99d58e71042286886250d94f8d25002d392f635e72be72b3e842bfb0611ef8c" } ], "base": 1731 }, { "type": "delta", "descriptions": { "description": "20130731.3" }, "version": 1733, "files": [ { "order": 0, "signature": 
"/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz.asc", "size": 14993132, "path": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz", "checksum": "002c10476be7a4db9d2fda3b9583eb67c2873b82c5f95483f6df7b253d7a8171" }, { "order": 1, "signature": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz.asc", "size": 27282040, "path": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz", "checksum": "199bd9ec51808f48bc81d5fd460cd8591a47b7911ac78eb7af1d7536963d79ff" }, { "order": 2, "signature": "/daily/grouper/version-20130733.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130733.tar.xz", "checksum": "b97a22325987c3bf4b1769c10464c93f17c072c31b5b324a06e9861aed532ee9" } ], "base": 1732 }, { "type": "delta", "descriptions": { "description": "20130801" }, "version": 1800, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz.asc", "size": 18625000, "path": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz", "checksum": "f81a98769b92a9ba2c61b75ee83aa7fa7417767788f518553ae195e0a846768f" }, { "order": 1, "signature": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz.asc", "size": 27285408, "path": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz", "checksum": "b255325b494bc700109ca1e3f7eb8e2e2d3f0e590d938ee9a9438746c8eb9fe0" }, { "order": 2, "signature": "/daily/grouper/version-20130800.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130800.tar.xz", "checksum": "f38ce3721013182d5d810e52c690d319aa5de5da62b98ff217fa02a3c4cf0741" } ], "base": 1733 }, { "type": "full", "descriptions": { "description": "20130729" }, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130729.full.tar.xz.asc", "size": 263605800, "path": "/daily/ubuntu/ubuntu-20130729.full.tar.xz", "checksum": "f094af5462bcb74b9f99c9fa4c5753fba22b2ed20386f3593d93b6d22bada2af" }, { "order": 1, "signature": "/daily/grouper/grouper-20130729.full.tar.xz.asc", "size": 29480448, "path": "/daily/grouper/grouper-20130729.full.tar.xz", "checksum": "18b87c7b33620b62adb3a52f74b7c7e3076e9851980a61bb27a74aca74843c34" }, { "order": 2, "signature": "/daily/grouper/version-20130728.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130728.tar.xz", "checksum": "50c68a5a04f6d463b776b431b136eb2b7b08e4e326df5ab958e778d3fb72360c" } ], "version": 1728 }, { "type": "delta", "descriptions": { "description": "20130730" }, "version": 1729, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.delta-20130729.tar.xz.asc", "size": 21963560, "path": "/daily/ubuntu/ubuntu-20130730.delta-20130729.tar.xz", "checksum": "5641b9668034b51e9ac0ca9aa7cf93255f501a792a7430703718fb0ae6185a69" }, { "order": 1, "signature": "/daily/grouper/grouper-20130730.delta-20130729.tar.xz.asc", "size": 27281332, "path": "/daily/grouper/grouper-20130730.delta-20130729.tar.xz", "checksum": "32ed374c7098989fe8e6a00bda22eb7bf1686325ef0b5aa99b9bd77aeec66d01" }, { "order": 2, "signature": "/daily/grouper/version-20130729.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130729.tar.xz", "checksum": "ffae1bb0654c1716141e05bf5f3c30afcc4c3db91df302635f0a06ce800c8cb4" } ], "base": 1728 }, { "type": "delta", "descriptions": { "description": "20130730.1" }, "version": 1730, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.1.delta-20130730.tar.xz.asc", "size": 16208916, "path": "/daily/ubuntu/ubuntu-20130730.1.delta-20130730.tar.xz", "checksum": "f4700a9d45daaafccdcea9e4c33829fbeacf48f6a57617936099036b33dc5fff" }, { "order": 1, "signature": 
"/daily/grouper/version-20130730.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130730.tar.xz", "checksum": "c7073504c85ef6b3c3ba551c9ca50417489f34536f0779cea771ac1b74e5fd21" } ], "base": 1729 }, { "type": "delta", "descriptions": { "description": "20130730.2" }, "version": 1731, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.2.delta-20130730.1.tar.xz.asc", "size": 11856888, "path": "/daily/ubuntu/ubuntu-20130730.2.delta-20130730.1.tar.xz", "checksum": "d8be295e3d0feed604353ea28373866217f8d69465647f97028237688144b02b" }, { "order": 1, "signature": "/daily/grouper/version-20130731.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130731.tar.xz", "checksum": "69ef2059734bee22fe9e46761a32851a04f59471afc0234d653ddaa7e341b8e6" } ], "base": 1730 }, { "type": "delta", "descriptions": { "description": "20130731" }, "version": 1732, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz.asc", "size": 194004, "path": "/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz", "checksum": "a6f7671d8329ce47521d1b86ec13ac536c36c56ab8e5eded7d9198590536c7bf" }, { "order": 1, "signature": "/daily/grouper/version-20130732.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130732.tar.xz", "checksum": "d99d58e71042286886250d94f8d25002d392f635e72be72b3e842bfb0611ef8c" } ], "base": 1731 }, { "type": "delta", "descriptions": { "description": "20130731.3" }, "version": 1733, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz.asc", "size": 14993132, "path": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz", "checksum": "002c10476be7a4db9d2fda3b9583eb67c2873b82c5f95483f6df7b253d7a8171" }, { "order": 1, "signature": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz.asc", "size": 27282040, "path": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz", "checksum": "199bd9ec51808f48bc81d5fd460cd8591a47b7911ac78eb7af1d7536963d79ff" }, { "order": 2, "signature": "/daily/grouper/version-20130733.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130733.tar.xz", "checksum": "b97a22325987c3bf4b1769c10464c93f17c072c31b5b324a06e9861aed532ee9" } ], "base": 1732 }, { "type": "delta", "descriptions": { "description": "20130801" }, "version": 1800, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz.asc", "size": 18625000, "path": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz", "checksum": "f81a98769b92a9ba2c61b75ee83aa7fa7417767788f518553ae195e0a846768f" }, { "order": 1, "signature": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz.asc", "size": 27285408, "path": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz", "checksum": "b255325b494bc700109ca1e3f7eb8e2e2d3f0e590d938ee9a9438746c8eb9fe0" }, { "order": 2, "signature": "/daily/grouper/version-20130800.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130800.tar.xz", "checksum": "f38ce3721013182d5d810e52c690d319aa5de5da62b98ff217fa02a3c4cf0741" } ], "base": 1733 }, { "type": "full", "descriptions": { "description": "20130801" }, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130801.full.tar.xz.asc", "size": 264000016, "path": "/daily/ubuntu/ubuntu-20130801.full.tar.xz", "checksum": "963c3bf9367450ab17c47f670f91db1d15f0ef6bdcf194481b5f458379129d7e" }, { "order": 1, "signature": "/daily/grouper/grouper-20130801.full.tar.xz.asc", "size": 29471212, "path": "/daily/grouper/grouper-20130801.full.tar.xz", "checksum": 
"8ab7f9a566546cab1b1dded4bd6a2243cd3617f60b0b43272dbb362bf78ba46f" }, { "order": 2, "signature": "/daily/grouper/version-20130800.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130800.tar.xz", "checksum": "f38ce3721013182d5d810e52c690d319aa5de5da62b98ff217fa02a3c4cf0741" } ], "version": 1800 }, { "type": "full", "descriptions": { "description": "20130731.3" }, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.3.full.tar.xz.asc", "size": 264081876, "path": "/daily/ubuntu/ubuntu-20130731.3.full.tar.xz", "checksum": "7c9ce28bb751e25e80e5cee540ef1bdc929e97801528a14f066516f3ae2343d1" }, { "order": 1, "signature": "/daily/grouper/grouper-20130731.3.full.tar.xz.asc", "size": 29476056, "path": "/daily/grouper/grouper-20130731.3.full.tar.xz", "checksum": "5ec8aee85a4c895cfd7b2e9e460c67e2f2500ce9250ea4c209097690a73beeb2" }, { "order": 2, "signature": "/daily/grouper/version-20130733.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130733.tar.xz", "checksum": "b97a22325987c3bf4b1769c10464c93f17c072c31b5b324a06e9861aed532ee9" } ], "version": 1733 }, { "type": "delta", "descriptions": { "description": "20130801" }, "version": 1800, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz.asc", "size": 18625000, "path": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz", "checksum": "f81a98769b92a9ba2c61b75ee83aa7fa7417767788f518553ae195e0a846768f" }, { "order": 1, "signature": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz.asc", "size": 27285408, "path": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz", "checksum": "b255325b494bc700109ca1e3f7eb8e2e2d3f0e590d938ee9a9438746c8eb9fe0" }, { "order": 2, "signature": "/daily/grouper/version-20130800.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130800.tar.xz", "checksum": "f38ce3721013182d5d810e52c690d319aa5de5da62b98ff217fa02a3c4cf0741" } ], "base": 1733 }, { "type": "full", "descriptions": { "description": "20130730.2" }, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.2.full.tar.xz.asc", "size": 264025988, "path": "/daily/ubuntu/ubuntu-20130730.2.full.tar.xz", "checksum": "26cf16b1458f5ec33a88567ce7b43f5d58669ec7e469a0922839cefa1f5e92a8" }, { "order": 1, "signature": "/daily/grouper/grouper-20130730.full.tar.xz.asc", "size": 29475508, "path": "/daily/grouper/grouper-20130730.full.tar.xz", "checksum": "9cc07362d569c575af47717dfe2ff4a48e6a59f8189690c4208428ebe00c86ce" }, { "order": 2, "signature": "/daily/grouper/version-20130731.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130731.tar.xz", "checksum": "69ef2059734bee22fe9e46761a32851a04f59471afc0234d653ddaa7e341b8e6" } ], "version": 1731 }, { "type": "delta", "descriptions": { "description": "20130731" }, "version": 1732, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz.asc", "size": 194004, "path": "/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz", "checksum": "a6f7671d8329ce47521d1b86ec13ac536c36c56ab8e5eded7d9198590536c7bf" }, { "order": 1, "signature": "/daily/grouper/version-20130732.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130732.tar.xz", "checksum": "d99d58e71042286886250d94f8d25002d392f635e72be72b3e842bfb0611ef8c" } ], "base": 1731 }, { "type": "delta", "descriptions": { "description": "20130731.3" }, "version": 1733, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz.asc", "size": 14993132, "path": 
"/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz", "checksum": "002c10476be7a4db9d2fda3b9583eb67c2873b82c5f95483f6df7b253d7a8171" }, { "order": 1, "signature": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz.asc", "size": 27282040, "path": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz", "checksum": "199bd9ec51808f48bc81d5fd460cd8591a47b7911ac78eb7af1d7536963d79ff" }, { "order": 2, "signature": "/daily/grouper/version-20130733.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130733.tar.xz", "checksum": "b97a22325987c3bf4b1769c10464c93f17c072c31b5b324a06e9861aed532ee9" } ], "base": 1732 }, { "type": "delta", "descriptions": { "description": "20130801" }, "version": 1800, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz.asc", "size": 18625000, "path": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz", "checksum": "f81a98769b92a9ba2c61b75ee83aa7fa7417767788f518553ae195e0a846768f" }, { "order": 1, "signature": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz.asc", "size": 27285408, "path": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz", "checksum": "b255325b494bc700109ca1e3f7eb8e2e2d3f0e590d938ee9a9438746c8eb9fe0" }, { "order": 2, "signature": "/daily/grouper/version-20130800.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130800.tar.xz", "checksum": "f38ce3721013182d5d810e52c690d319aa5de5da62b98ff217fa02a3c4cf0741" } ], "base": 1733 }, { "type": "full", "descriptions": { "description": "20130731" }, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.full.tar.xz.asc", "size": 264008228, "path": "/daily/ubuntu/ubuntu-20130731.full.tar.xz", "checksum": "8d4dd8ae84e09ad73a2d566ec1ea5a83fa61206d487a85013308bd4cc33ddc7d" }, { "order": 1, "signature": "/daily/grouper/grouper-20130730.full.tar.xz.asc", "size": 29475508, "path": "/daily/grouper/grouper-20130730.full.tar.xz", "checksum": "9cc07362d569c575af47717dfe2ff4a48e6a59f8189690c4208428ebe00c86ce" }, { "order": 2, "signature": "/daily/grouper/version-20130732.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130732.tar.xz", "checksum": "d99d58e71042286886250d94f8d25002d392f635e72be72b3e842bfb0611ef8c" } ], "version": 1732 }, { "type": "delta", "descriptions": { "description": "20130731.3" }, "version": 1733, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz.asc", "size": 14993132, "path": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz", "checksum": "002c10476be7a4db9d2fda3b9583eb67c2873b82c5f95483f6df7b253d7a8171" }, { "order": 1, "signature": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz.asc", "size": 27282040, "path": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz", "checksum": "199bd9ec51808f48bc81d5fd460cd8591a47b7911ac78eb7af1d7536963d79ff" }, { "order": 2, "signature": "/daily/grouper/version-20130733.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130733.tar.xz", "checksum": "b97a22325987c3bf4b1769c10464c93f17c072c31b5b324a06e9861aed532ee9" } ], "base": 1732 }, { "type": "delta", "descriptions": { "description": "20130801" }, "version": 1800, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz.asc", "size": 18625000, "path": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz", "checksum": "f81a98769b92a9ba2c61b75ee83aa7fa7417767788f518553ae195e0a846768f" }, { "order": 1, "signature": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz.asc", "size": 27285408, "path": 
"/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz", "checksum": "b255325b494bc700109ca1e3f7eb8e2e2d3f0e590d938ee9a9438746c8eb9fe0" }, { "order": 2, "signature": "/daily/grouper/version-20130800.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130800.tar.xz", "checksum": "f38ce3721013182d5d810e52c690d319aa5de5da62b98ff217fa02a3c4cf0741" } ], "base": 1733 }, { "type": "full", "descriptions": { "description": "20130730.1" }, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.1.full.tar.xz.asc", "size": 264017228, "path": "/daily/ubuntu/ubuntu-20130730.1.full.tar.xz", "checksum": "c87fbde0007c2bd556005e4f5877d2e718da2855aa56321db5e38c86bf88a3bd" }, { "order": 1, "signature": "/daily/grouper/grouper-20130730.full.tar.xz.asc", "size": 29475508, "path": "/daily/grouper/grouper-20130730.full.tar.xz", "checksum": "9cc07362d569c575af47717dfe2ff4a48e6a59f8189690c4208428ebe00c86ce" }, { "order": 2, "signature": "/daily/grouper/version-20130730.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130730.tar.xz", "checksum": "c7073504c85ef6b3c3ba551c9ca50417489f34536f0779cea771ac1b74e5fd21" } ], "version": 1730 }, { "type": "delta", "descriptions": { "description": "20130730.2" }, "version": 1731, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130730.2.delta-20130730.1.tar.xz.asc", "size": 11856888, "path": "/daily/ubuntu/ubuntu-20130730.2.delta-20130730.1.tar.xz", "checksum": "d8be295e3d0feed604353ea28373866217f8d69465647f97028237688144b02b" }, { "order": 1, "signature": "/daily/grouper/version-20130731.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130731.tar.xz", "checksum": "69ef2059734bee22fe9e46761a32851a04f59471afc0234d653ddaa7e341b8e6" } ], "base": 1730 }, { "type": "delta", "descriptions": { "description": "20130731" }, "version": 1732, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz.asc", "size": 194004, "path": "/daily/ubuntu/ubuntu-20130731.delta-20130730.2.tar.xz", "checksum": "a6f7671d8329ce47521d1b86ec13ac536c36c56ab8e5eded7d9198590536c7bf" }, { "order": 1, "signature": "/daily/grouper/version-20130732.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130732.tar.xz", "checksum": "d99d58e71042286886250d94f8d25002d392f635e72be72b3e842bfb0611ef8c" } ], "base": 1731 }, { "type": "delta", "descriptions": { "description": "20130731.3" }, "version": 1733, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz.asc", "size": 14993132, "path": "/daily/ubuntu/ubuntu-20130731.3.delta-20130731.tar.xz", "checksum": "002c10476be7a4db9d2fda3b9583eb67c2873b82c5f95483f6df7b253d7a8171" }, { "order": 1, "signature": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz.asc", "size": 27282040, "path": "/daily/grouper/grouper-20130731.3.delta-20130730.tar.xz", "checksum": "199bd9ec51808f48bc81d5fd460cd8591a47b7911ac78eb7af1d7536963d79ff" }, { "order": 2, "signature": "/daily/grouper/version-20130733.tar.xz.asc", "size": 196, "path": "/daily/grouper/version-20130733.tar.xz", "checksum": "b97a22325987c3bf4b1769c10464c93f17c072c31b5b324a06e9861aed532ee9" } ], "base": 1732 }, { "type": "delta", "descriptions": { "description": "20130801" }, "version": 1800, "files": [ { "order": 0, "signature": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz.asc", "size": 18625000, "path": "/daily/ubuntu/ubuntu-20130801.delta-20130731.3.tar.xz", "checksum": "f81a98769b92a9ba2c61b75ee83aa7fa7417767788f518553ae195e0a846768f" }, { "order": 1, "signature": 
"/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz.asc", "size": 27285408, "path": "/daily/grouper/grouper-20130801.delta-20130731.3.tar.xz", "checksum": "b255325b494bc700109ca1e3f7eb8e2e2d3f0e590d938ee9a9438746c8eb9fe0" }, { "order": 2, "signature": "/daily/grouper/version-20130800.tar.xz.asc", "size": 192, "path": "/daily/grouper/version-20130800.tar.xz", "checksum": "f38ce3721013182d5d810e52c690d319aa5de5da62b98ff217fa02a3c4cf0741" } ], "base": 1733 } ], "global": { "generated_at": "Thu Aug 01 11:30:01 UTC 2013" } } ./systemimage/tests/data/state.channels_05.json0000644000015600001650000000363712701500553021617 0ustar jenkinsjenkins{ "13.10": { "devices": { "grouper": { "index": "/13.10/grouper/index.json" }, "maguro": { "index": "/13.10/maguro/index.json" }, "mako": { "index": "/13.10/mako/index.json" }, "manta": { "index": "/13.10/manta/index.json" } } }, "13.10-proposed": { "devices": { "grouper": { "index": "/13.10-proposed/grouper/index.json" }, "maguro": { "index": "/13.10-proposed/maguro/index.json" }, "mako": { "index": "/13.10-proposed/mako/index.json" }, "manta": { "index": "/13.10-proposed/manta/index.json" } } }, "14.04": { "devices": { "grouper": { "index": "/14.04/grouper/index.json" }, "maguro": { "index": "/14.04/maguro/index.json" }, "mako": { "index": "/14.04/mako/index.json" }, "manta": { "index": "/14.04/manta/index.json" } } }, "14.04-proposed": { "devices": { "grouper": { "index": "/14.04-proposed/grouper/index.json", "keyring": { "path": "/14.04-proposed/grouper/device-signing.tar.xz", "signature": "/14.04-proposed/grouper/device-signing.tar.xz.asc" } }, "maguro": { "index": "/14.04-proposed/maguro/index.json" }, "mako": { "index": "/14.04-proposed/mako/index.json" }, "manta": { "index": "/14.04-proposed/manta/index.json" } } } } ./systemimage/tests/data/dbus-system.conf.in0000644000015600001650000000131412701500553021227 0ustar jenkinsjenkins system unix:tmpdir={tmpdir} {tmpdir} ./systemimage/tests/data/config.config_03.ini0000644000015600001650000000146612701500553021220 0ustar jenkinsjenkins# Configuration file for specifying relatively static information about the # upgrade resolution process. 
[service] base: phablet.example.com # Non-standard ports http_port: 8080 https_port: 80443 channel: stable build_number: 0 [system] timeout: 10s tempdir: /tmp logfile: /var/log/system-image/client.log loglevel: error settings_db: /var/lib/phablet/settings.db [gpg] archive_master: /etc/phablet/archive-master.tar.xz image_master: /etc/phablet/image-master.tar.xz image_signing: /var/lib/phablet/image-signing.tar.xz device_signing: /var/lib/phablet/device-signing.tar.xz [updater] cache_partition: {tmpdir}/android/cache data_partition: {vardir}/lib/phablet/updater [hooks] device: systemimage.device.SystemProperty scorer: systemimage.scores.WeightedScorer apply: systemimage.apply.Reboot [dbus] lifetime: 3s ./systemimage/tests/data/config.config_09.ini0000644000015600001650000000017612701500553021223 0ustar jenkinsjenkins[service] base: systum-imaje.ubuntu.com http_port: 88 https_port: 89 channel: proposed build_number: 833 [dbus] lifetime: 1h ./systemimage/tests/data/index.index_04.json0000644000015600001650000001616612701500553021122 0ustar jenkinsjenkins{ "global": { "generated_at": "Mon Apr 29 18:45:27 UTC 2013" }, "images": [ { "bootme": true, "description": "Full A", "files": [ { "checksum": "abc", "order": 1, "path": "/a/b/c.txt", "signature": "/a/b/c.txt.asc", "size": 104857600 }, { "checksum": "bcd", "order": 1, "path": "/b/c/d.txt", "signature": "/b/c/d.txt.asc", "size": 104857600 }, { "checksum": "cde", "order": 1, "path": "/c/d/e.txt", "signature": "/c/d/e.txt.asc", "size": 104857600 } ], "type": "full", "version": 1300 }, { "base": 1300, "bootme": true, "description": "Delta A.1", "files": [ { "checksum": "def", "order": 1, "path": "/d/e/f.txt", "signature": "/d/e/f.txt.asc", "size": 104857600 }, { "checksum": "ef0", "order": 1, "path": "/e/f/0.txt", "signature": "/e/f/0.txt.asc", "size": 104857600 }, { "checksum": "f01", "order": 1, "path": "/f/e/1.txt", "signature": "/f/e/1.txt.asc", "size": 104857600 } ], "type": "delta", "version": 1301 }, { "base": 1301, "bootme": true, "description": "Delta A.2", "files": [ { "checksum": "012", "order": 1, "path": "/0/1/2.txt", "signature": "/0/1/2.txt.asc", "size": 104857600 }, { "checksum": "123", "order": 1, "path": "/1/2/3.txt", "signature": "/1/2/3.txt.asc", "size": 104857600 }, { "checksum": "234", "order": 1, "path": "/2/3/4.txt", "signature": "/2/3/4.txt.asc", "size": 104857600 } ], "type": "delta", "version": 1304 }, { "description": "Full B", "files": [ { "checksum": "345", "order": 1, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 104857600 }, { "checksum": "456", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 104857600 }, { "checksum": "567", "order": 1, "path": "/5/6/7.txt", "signature": "/5/6/7.txt.asc", "size": 104857600 } ], "type": "full", "version": 1200 }, { "base": 1200, "description": "Delta B.1", "files": [ { "checksum": "678", "order": 1, "path": "/6/7/8.txt", "signature": "/6/7/8.txt.asc", "size": 104857600 }, { "checksum": "789", "order": 1, "path": "/7/8/9.txt", "signature": "/7/8/9.txt.asc", "size": 104857600 }, { "checksum": "89a", "order": 1, "path": "/8/9/a.txt", "signature": "/8/9/a.txt.asc", "size": 104857600 } ], "type": "delta", "version": 1201 }, { "base": 1201, "description": "Delta B.2", "files": [ { "checksum": "9ab", "order": 1, "path": "/9/a/b.txt", "signature": "/9/a/b.txt.asc", "size": 104857600 }, { "checksum": "fed", "order": 1, "path": "/f/e/d.txt", "signature": "/f/e/d.txt.asc", "size": 104857600 }, { "checksum": "edc", "order": 1, "path": "/e/d/c.txt", 
"signature": "/e/d/c.txt.asc", "size": 209715200 } ], "type": "delta", "version": 1304 }, { "description": "Full C", "files": [ { "checksum": "dcb", "order": 1, "path": "/d/c/b.txt", "signature": "/d/c/b.txt.asc", "size": 104857600 }, { "checksum": "cba", "order": 1, "path": "/c/b/a.txt", "signature": "/c/b/a.txt.asc", "size": 104857600 }, { "checksum": "ba9", "order": 1, "path": "/b/a/9.txt", "signature": "/b/a/9.txt.asc", "size": 104857600 } ], "type": "full", "version": 1100 }, { "base": 1100, "description": "Delta C.1", "files": [ { "checksum": "a98", "order": 1, "path": "/a/9/8.txt", "signature": "/a/9/8.txt.asc", "size": 104857600 }, { "checksum": "987", "order": 1, "path": "/9/8/7.txt", "signature": "/9/8/7.txt.asc", "size": 104857600 }, { "checksum": "876", "order": 1, "path": "/8/7/6.txt", "signature": "/8/7/6.txt.asc", "size": 838860800 } ], "type": "delta", "version": 1303 } ] } ./systemimage/tests/data/dbus.channels_01.json0000644000015600001650000000052412701500553021420 0ustar jenkinsjenkins{ "stable": { "devices": { "nexus7": { "index": "/stable/nexus7/index.json", "keyring": { "path": "/stable/nexus7/device-signing.tar.xz", "signature": "/stable/nexus7/device-signing.tar.xz.asc" } } } } } ./systemimage/tests/data/com.canonical.applications.Downloader.service.in0000644000015600001650000000011212701500553026744 0ustar jenkinsjenkins[D-BUS Service] Name=com.canonical.applications.Downloader Exec={command} ./systemimage/tests/data/gpg.channels_01.json0000644000015600001650000000107212701500553021237 0ustar jenkinsjenkins{ "daily": { "devices": { "nexus7": { "index": "/daily/nexus7/index.json", "keyring": { "path": "/daily/nexus7/device-keyring.tar.xz", "signature": "/daily/nexus7/device-keyring.tar.xz.asc" } }, "nexus4":{ "index": "/daily/nexus4/index.json" } } }, "stable": { "devices": { "nexus7":{ "index": "/stable/nexus7/index.json" } } } } ./systemimage/tests/data/state.index_08.json0000644000015600001650000000223412701500553021126 0ustar jenkinsjenkins{ "global": { "generated_at": "Thu Aug 01 08:01:00 UTC 2013" }, "images": [ { "description": "Full", "files": [ { "checksum": "da70dfa4d9f95ac979f921e8e623358236313f334afcd06cddf8a5621cf6a1e9", "order": 3, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 104857600 }, { "checksum": "b3a8e0e1f9ab1bfe3a36f231f676f78bb30a519d2b21e6c530c0eee8ebb4a5d0", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 104857600 }, { "checksum": "97a6d21df7c51e8289ac1a8c026aaac143e15aa1957f54f42e30d8f8a85c3a55", "order": 2, "path": "/5/6/5.txt", "signature": "/5/6/5.txt.asc", "size": 104857600 } ], "type": "full", "version": 1600, "bootme": true } ] } ./systemimage/tests/data/channel.channels_03.json0000644000015600001650000000363712701500553022105 0ustar jenkinsjenkins{ "13.10": { "devices": { "grouper": { "index": "/13.10/grouper/index.json" }, "maguro": { "index": "/13.10/maguro/index.json" }, "mako": { "index": "/13.10/mako/index.json" }, "manta": { "index": "/13.10/manta/index.json" } } }, "13.10-proposed": { "devices": { "grouper": { "index": "/13.10-proposed/grouper/index.json" }, "maguro": { "index": "/13.10-proposed/maguro/index.json" }, "mako": { "index": "/13.10-proposed/mako/index.json" }, "manta": { "index": "/13.10-proposed/manta/index.json" } } }, "14.04": { "devices": { "grouper": { "index": "/14.04/grouper/index.json" }, "maguro": { "index": "/14.04/maguro/index.json" }, "mako": { "index": "/14.04/mako/index.json" }, "manta": { "index": "/14.04/manta/index.json" } } }, "14.04-proposed": { 
"devices": { "grouper": { "index": "/14.04-proposed/grouper/index.json", "keyring": { "path": "/14.04-proposed/grouper/device-signing.tar.xz", "signature": "/14.04-proposed/grouper/device-signing.tar.xz.asc" } }, "maguro": { "index": "/14.04-proposed/maguro/index.json" }, "mako": { "index": "/14.04-proposed/mako/index.json" }, "manta": { "index": "/14.04-proposed/manta/index.json" } } } } ./systemimage/tests/data/com.canonical.SystemImage.service.in0000644000015600001650000000007612701500553024421 0ustar jenkinsjenkins[D-BUS Service] Name=com.canonical.SystemImage Exec={command} ./systemimage/tests/data/config.config_02.ini0000644000015600001650000000144612701500553021215 0ustar jenkinsjenkins# Configuration file for specifying relatively static information about the # upgrade resolution process. [service] base: phablet.example.com http_port: 80 https_port: 443 channel: stable build_number: 0 [system] timeout: 10s tempdir: /tmp logfile: /var/log/system-image/client.log loglevel: critical:debug settings_db: /var/lib/phablet/settings.db [gpg] archive_master: /etc/phablet/archive-master.tar.xz image_master: /etc/phablet/image-master.tar.xz image_signing: /var/lib/phablet/image-signing.tar.xz device_signing: /var/lib/phablet/device-signing.tar.xz [updater] cache_partition: {tmpdir}/android/cache data_partition: {vardir}/lib/phablet/updater [hooks] device: systemimage.device.SystemProperty scorer: systemimage.scores.WeightedScorer apply: systemimage.apply.Reboot [dbus] lifetime: 2m ./systemimage/tests/data/bad_cert.pem0000644000015600001650000000236512701500553017751 0ustar jenkinsjenkins-----BEGIN CERTIFICATE----- MIIDezCCAmOgAwIBAgIJAPQ/j6faumV6MA0GCSqGSIb3DQEBBQUAMFQxCzAJBgNV BAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMRwwGgYDVQQKDBNJbWFnZSBCYXNl ZCBVcGRhdGVzMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTMwNTEzMjIwODExWhcN MjMwNTExMjIwODExWjBUMQswCQYDVQQGEwJVUzETMBEGA1UECAwKU29tZS1TdGF0 ZTEcMBoGA1UECgwTSW1hZ2UgQmFzZWQgVXBkYXRlczESMBAGA1UEAwwJbG9jYWxo b3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2w8OthblYYtvnxTk +yiaK/z96/YT3DWuwnrxu5LozowkXlp83MXKXAAs7xRDdXLKl5UYeosC1Qh7oPOz 34T2ukMeA1JwuXb88OvfKSxmPnoRJw+J067hwy0hf/eKnclKIaPMJshmUCL8DXum C+Ss4yg/+q0xSp/uZwzOZWvgQLhLjIZ0UVE3gPmtaQ97MjnVmYF5frVkUaeE5ggn x36JBw6yxIFlzXMPtxnPGDf1X4KyUEYR6dKdfKV+rNEKl+keHUmiG5gmtuBPBhH7 Q8nVGzUVV9erCJ3JcdKd5XW/f3p9nYc4yjKQ9f01GfSmkODStcpCZzQhRlicWLBM LZ3m3wIDAQABo1AwTjAdBgNVHQ4EFgQUWNYFPW/h1vsiUtc7EpSqh8sfrpowHwYD VR0jBBgwFoAUWNYFPW/h1vsiUtc7EpSqh8sfrpowDAYDVR0TBAUwAwEB/zANBgkq hkiG9w0BAQUFAAOCAQEAbwbWBubZxS/D9VGfbOnZ9z/ZvqREZ6TD8wPfDywGUdHP W036tMSZM9ORRvPmbHTRRspiASH66zfh3e3k0i0dT9JJnbxpsgLw/GnomwbrvqEX NuZTTwYCRSZ8gFgRYgDYQxQKk60Nb/9CmNGef0KDVbO0mKGPudSuENTwuviG/FH8 hdqPPq63UrEe79FC+yCe0bFZDv4+ZYH3GsaB7CRw7HQAePBHSJPqFzwlw69X4bhi TzC14gNpWsTLeGutODE+pCzwpr1l7CfwIOmCu/N92hmyBDT4GFYClJVL9YCbJHq1 OXHUstNghnd1SppFlteS3Id6mBrHQ4E5gFfP7xKBXg== -----END CERTIFICATE----- ./systemimage/tests/data/image-signing.gpg0000644000015600001650000000223112701500553020710 0ustar jenkinsjenkins Q[]*Y,fuG~~s@lySXVF/![]\})K ,)u ڀ.lSj7+^u#Ƕ>,]P0f%.DHÜ䤓V.He5~% ?'JHd@i+̭:Pux*j=O Zd6S$Tך5׀:W5cyɋOVfHR&}£ 1Wlƈc:܂HH׹qF4hԅl!0 ? 
./systemimage/tests/data/main.index_01.json0000644000015600001650000002000412701500553020716 0ustar jenkinsjenkins{ "global": { "generated_at": "Mon Apr 29 18:45:27 UTC 2013" }, "images": [ { "bootme": true, "description": "Full A", "files": [ { "checksum": "abc", "order": 1, "path": "/a/b/c.txt", "signature": "/a/b/c.txt.asc", "size": 104857600 }, { "checksum": "bcd", "order": 1, "path": "/b/c/d.txt", "signature": "/b/c/d.txt.asc", "size": 104857600 }, { "checksum": "cde", "order": 1, "path": "/c/d/e.txt", "signature": "/c/d/e.txt.asc", "size": 104857600 } ], "type": "full", "version": 1300 }, { "base": 1300, "bootme": true, "description": "Delta A.1", "files": [ { "checksum": "def", "order": 1, "path": "/d/e/f.txt", "signature": "/d/e/f.txt.asc", "size": 104857600 }, { "checksum": "ef0", "order": 1, "path": "/e/f/0.txt", "signature": "/e/f/0.txt.asc", "size": 104857600 }, { "checksum": "f01", "order": 1, "path": "/f/e/1.txt", "signature": "/f/e/1.txt.asc", "size": 104857600 } ], "type": "delta", "version": 1301 }, { "base": 1301, "bootme": true, "description": "Delta A.2", "files": [ { "checksum": "012", "order": 1, "path": "/0/1/2.txt", "signature": "/0/1/2.txt.asc", "size": 104857600 }, { "checksum": "123", "order": 1, "path": "/1/2/3.txt", "signature": "/1/2/3.txt.asc", "size": 104857600 }, { "checksum": "234", "order": 1, "path": "/2/3/4.txt", "signature": "/2/3/4.txt.asc", "size": 104857600 } ], "type": "delta", "version": 1304 }, { "description": "Full B", "description-en": "The full B", "files": [ { "checksum": "da70dfa4d9f95ac979f921e8e623358236313f334afcd06cddf8a5621cf6a1e9", "order": 1, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 10000 }, { "checksum": "b3a8e0e1f9ab1bfe3a36f231f676f78bb30a519d2b21e6c530c0eee8ebb4a5d0", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 10001 }, { "checksum": "97a6d21df7c51e8289ac1a8c026aaac143e15aa1957f54f42e30d8f8a85c3a55", "order": 1, "path": "/5/6/7.txt", "signature": "/5/6/7.txt.asc", "size": 10002 } ], "type": "full", "version": 1200 }, { "base": 1200, "description": "Delta B.1", "description-en_US": "This is the delta B.1", "description-xx": "XX This is the delta B.1", "description-yy": "YY This is the delta B.1", "description-yy_ZZ": "YY-ZZ This is the delta B.1", "files": [ { "checksum": "cebe3d9d614ba5c19f633566104315854a11353a333bf96f16b5afa0e90abdc4", "order": 1, "path": "/6/7/8.txt", "signature": "/6/7/8.txt.asc", "size": 20000 }, { "checksum": "35a9e381b1a27567549b5f8a6f783c167ebf809f1c4d6a9e367240484d8ce281", "order": 1, "path": "/7/8/9.txt", "signature": "/7/8/9.txt.asc", "size": 20001 }, { "checksum": "6bd6c3f7808391e8b74f5c2d58810809eda5c134aaa7f1b27ddf4b445c421ac5", "order": 1, "path": "/8/9/a.txt", "signature": "/8/9/a.txt.asc", "size": 20002 } ], "type": "delta", "version": 1201 }, { "base": 1201, "description": "Delta B.2", "description-xx": "Oh delta, my delta", "description-xx_CC": "This hyar is the delta B.2", "files": [ { "checksum": "8c43d75d5b9f1aa9fc3fabb6b60b6c06553324352399a33febce95a1b588d1d6", "order": 1, "path": "/9/a/b.txt", "signature": "/9/a/b.txt.asc", "size": 30000 }, { "checksum": "20e796c128096d229ba89bf412a53c3151d170a409c2c8c1dd8e414087b7ffae", "order": 1, "path": "/f/e/d.txt", "signature": "/f/e/d.txt.asc", "size": 30001 }, { "checksum":
"278238e8bafa4709c77aa723e168101acd6ee1fb9fcc1b6eca4762e5c7dad768", "order": 1, "path": "/e/d/c.txt", "signature": "/e/d/c.txt.asc", "size": 30002 } ], "type": "delta", "version": 1304 }, { "description": "Full C", "files": [ { "checksum": "dcb", "order": 1, "path": "/d/c/b.txt", "signature": "/d/c/b.txt.asc", "size": 104857600 }, { "checksum": "cba", "order": 1, "path": "/c/b/a.txt", "signature": "/c/b/a.txt.asc", "size": 104857600 }, { "checksum": "ba9", "order": 1, "path": "/b/a/9.txt", "signature": "/b/a/9.txt.asc", "size": 104857600 } ], "type": "full", "version": 1100 }, { "base": 1100, "description": "Delta C.1", "files": [ { "checksum": "a98", "order": 1, "path": "/a/9/8.txt", "signature": "/a/9/8.txt.asc", "size": 104857600 }, { "checksum": "987", "order": 1, "path": "/9/8/7.txt", "signature": "/9/8/7.txt.asc", "size": 104857600 }, { "checksum": "876", "order": 1, "path": "/8/7/6.txt", "signature": "/8/7/6.txt.asc", "size": 838860800 } ], "type": "delta", "version": 1303 } ] } ./systemimage/tests/data/main.index_05.json0000644000015600001650000000223412701500553020727 0ustar jenkinsjenkins{ "global": { "generated_at": "Thu Aug 01 08:01:00 UTC 2013" }, "images": [ { "description": "Full", "files": [ { "checksum": "da70dfa4d9f95ac979f921e8e623358236313f334afcd06cddf8a5621cf6a1e9", "order": 3, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 104857600 }, { "checksum": "b3a8e0e1f9ab1bfe3a36f231f676f78bb30a519d2b21e6c530c0eee8ebb4a5d0", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 104857600 }, { "checksum": "97a6d21df7c51e8289ac1a8c026aaac143e15aa1957f54f42e30d8f8a85c3a55", "order": 2, "path": "/5/6/7.txt", "signature": "/5/6/7.txt.asc", "size": 104857600 } ], "type": "full", "version": 1600, "bootme": true } ] } ./systemimage/tests/data/channel.channels_05.json0000644000015600001650000000107212701500553022076 0ustar jenkinsjenkins{ "daily": { "devices": { "nexus7": { "index": "/daily/nexus7/index.json", "keyring": { "path": "/daily/nexus7/device-keyring.tar.xz", "signature": "/daily/nexus7/device-keyring.tar.xz.asc" } }, "nexus4":{ "index": "/daily/nexus4/index.json" } } }, "stable": { "devices": { "nexus7":{ "index": "/stable/nexus7/index.json" } } } } ./systemimage/tests/data/state.channels_03.json0000644000015600001650000000022412701500553021602 0ustar jenkinsjenkins{ "stable": { "devices": { "nexus7": { "index": "/stable/nexus7/index.json" } } } } ./systemimage/tests/data/candidates.index_03.json0000644000015600001650000000071112701500553022076 0ustar jenkinsjenkins{ "global": { "generated_at": "Mon Apr 29 18:45:27 UTC 2013" }, "images": [ { "description": "New full build 1", "files": [], "type": "full", "version": 1300, "bootme": true }, { "description": "New full build 2", "files": [], "type": "full", "version": 1400, "bootme": true } ] } ./systemimage/tests/data/candidates.index_13.json0000644000015600001650000001615112701500553022104 0ustar jenkinsjenkins{ "global": { "generated_at": "Mon Apr 29 18:45:27 UTC 2013" }, "images": [ { "bootme": true, "description": "Full A", "files": [ { "checksum": "abc", "order": 1, "path": "/a/b/c.txt", "signature": "/a/b/c.txt.asc", "size": 104857600 }, { "checksum": "bcd", "order": 1, "path": "/b/c/d.txt", "signature": "/b/c/d.txt.asc", "size": 104857600 }, { "checksum": "cde", "order": 1, "path": "/c/d/e.txt", "signature": "/c/d/e.txt.asc", "size": 104857600 } ], "type": "full", "version": 300 }, { "base": 300, "bootme": true, "description": "Delta A.1", "files": [ { "checksum": "def", "order": 1, 
"path": "/d/e/f.txt", "signature": "/d/e/f.txt.asc", "size": 104857600 }, { "checksum": "ef0", "order": 1, "path": "/e/f/0.txt", "signature": "/e/f/0.txt.asc", "size": 104857600 }, { "checksum": "f01", "order": 1, "path": "/f/e/1.txt", "signature": "/f/e/1.txt.asc", "size": 104857600 } ], "type": "delta", "version": 301 }, { "base": 301, "bootme": true, "description": "Delta A.2", "files": [ { "checksum": "012", "order": 1, "path": "/0/1/2.txt", "signature": "/0/1/2.txt.asc", "size": 104857600 }, { "checksum": "123", "order": 1, "path": "/1/2/3.txt", "signature": "/1/2/3.txt.asc", "size": 104857600 }, { "checksum": "234", "order": 1, "path": "/2/3/4.txt", "signature": "/2/3/4.txt.asc", "size": 104857600 } ], "type": "delta", "version": 304 }, { "description": "Full B", "files": [ { "checksum": "345", "order": 1, "path": "/3/4/5.txt", "signature": "/3/4/5.txt.asc", "size": 104857600 }, { "checksum": "456", "order": 1, "path": "/4/5/6.txt", "signature": "/4/5/6.txt.asc", "size": 104857600 }, { "checksum": "567", "order": 1, "path": "/5/6/7.txt", "signature": "/5/6/7.txt.asc", "size": 104857600 } ], "type": "full", "version": 200 }, { "base": 200, "description": "Delta B.1", "files": [ { "checksum": "678", "order": 1, "path": "/6/7/8.txt", "signature": "/6/7/8.txt.asc", "size": 104857600 }, { "checksum": "789", "order": 1, "path": "/7/8/9.txt", "signature": "/7/8/9.txt.asc", "size": 104857600 }, { "checksum": "89a", "order": 1, "path": "/8/9/a.txt", "signature": "/8/9/a.txt.asc", "size": 104857600 } ], "type": "delta", "version": 201 }, { "base": 201, "description": "Delta B.2", "files": [ { "checksum": "9ab", "order": 1, "path": "/9/a/b.txt", "signature": "/9/a/b.txt.asc", "size": 104857600 }, { "checksum": "fed", "order": 1, "path": "/f/e/d.txt", "signature": "/f/e/d.txt.asc", "size": 104857600 }, { "checksum": "edc", "order": 1, "path": "/e/d/c.txt", "signature": "/e/d/c.txt.asc", "size": 209715200 } ], "type": "delta", "version": 304 }, { "description": "Full C", "files": [ { "checksum": "dcb", "order": 1, "path": "/d/c/b.txt", "signature": "/d/c/b.txt.asc", "size": 104857600 }, { "checksum": "cba", "order": 1, "path": "/c/b/a.txt", "signature": "/c/b/a.txt.asc", "size": 104857600 }, { "checksum": "ba9", "order": 1, "path": "/b/a/9.txt", "signature": "/b/a/9.txt.asc", "size": 104857600 } ], "type": "full", "version": 100 }, { "base": 100, "description": "Delta C.1", "files": [ { "checksum": "a98", "order": 1, "path": "/a/9/8.txt", "signature": "/a/9/8.txt.asc", "size": 104857600 }, { "checksum": "987", "order": 1, "path": "/9/8/7.txt", "signature": "/9/8/7.txt.asc", "size": 104857600 }, { "checksum": "876", "order": 1, "path": "/8/7/6.txt", "signature": "/8/7/6.txt.asc", "size": 838860800 } ], "type": "delta", "version": 303 } ] } ./systemimage/tests/data/main.config_04.ini0000644000015600001650000000023112701500553020665 0ustar jenkinsjenkins[service] base: systum-imaje.ubuntu.com http_port: 88 https_port: 89 channel: proposed build_number: 1833 version_detail: ubuntu=123,mako=456,custom=789 ./systemimage/tests/data/master-secring.gpg0000644000015600001650000003004512701500553021121 0ustar jenkinsjenkinsQY͖}울 H&PH sm+w8>"1k$ǸP@-ce4'Ƭ(Jp lR'l;3#UŻjureEiZn'i?rXD霶S Gz!kSv K#F](΢ͺ 3TiR uT;#|nL\&gdtíZ) .63ǣԮh ~ ('Myj%3f'\n'Pd Fٽ*8Ճ7d'QgRg$xR٨ژH+Z*gӈB}+C"myˆSL$7}xWV09k LI2 S~9ګBRV|4h@nE !;LR\Zw@;qoD~r!|htkMѝXڶ8$ء;Z@~Q C>F?Saέ@JkgK~pL8R[>hi3yE[HWxOƘχݖQ"ȱe@WJϟʒliHk>J0J' X=h4f_N*+BA>k=7jdiZ2:hοAvLh14j\7rDÿqP};n]yP6$),3J NJ5Mj];YlXI*9D!>Ĭzf5R!"Q0vꐉ"Ar0g` O!mJې VJՉ3L0pN܏1Lަa%KҼQS;ŠֳD,!j= Kȓ\u{ a켎~ 
[unprintable binary data omitted: OpenPGP keyring fixtures; recoverable embedded UIDs: "Ubuntu Archive Master Signing Key (TEST)", "Ubuntu System Image Signing Key (TEST)", "Acme Phones, LLC Image Signing Key (TEST)", "Spare Key (TEST)"; the tar headers of the fixture files following master-secring.gpg, and the opening copyright lines of the next file, systemimage/tests/test_settings.py, are not recoverable from this span]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Test persistent settings."""

__all__ = [
    'TestSettings',
    ]


import os
import unittest

from contextlib import ExitStack
from pathlib import Path
from systemimage.helpers import temporary_directory
from systemimage.settings import Settings
from systemimage.testing.helpers import chmod, configuration
from unittest.mock import patch


class TestSettings(unittest.TestCase):
    @configuration
    def test_creation(self, config):
        self.assertFalse(os.path.exists(config.system.settings_db))
        settings = Settings()
        self.assertTrue(os.path.exists(config.system.settings_db))
        del settings
        # The file still exists.
        self.assertTrue(os.path.exists(config.system.settings_db))
        Settings()
        self.assertTrue(os.path.exists(config.system.settings_db))

    @configuration
    def test_get_set(self):
        settings = Settings()
        settings.set('permanent', 'waves')
        self.assertEqual(settings.get('permanent'), 'waves')

    @configuration
    def test_delete(self):
        # Keys can be deleted.
        settings = Settings()
        settings.set('moving', 'pictures')
        self.assertEqual(settings.get('moving'), 'pictures')
        settings.delete('moving')
        # The empty string is the default.
        self.assertEqual(settings.get('moving'), '')

    @configuration
    def test_delete_missing(self):
        # Nothing much happens if you ask to delete a missing key.
        settings = Settings()
        settings.delete('missing')
        self.assertEqual(settings.get('missing'), '')

    @configuration
    def test_update(self):
        settings = Settings()
        settings.set('animal', 'ant')
        self.assertEqual(settings.get('animal'), 'ant')
        settings.set('animal', 'bee')
        self.assertEqual(settings.get('animal'), 'bee')

    @configuration
    def test_get_before_set(self):
        settings = Settings()
        self.assertEqual(settings.get('nothing'), '')

    @configuration
    def test_persistence(self):
        settings = Settings()
        settings.set('animal', 'cat')
        del settings
        self.assertEqual(Settings().get('animal'), 'cat')

    @configuration
    def test_prepopulated(self):
        # Some keys are pre-populated with default values.
        self.assertEqual(Settings().get('auto_download'), '1')

    @configuration
    def test_iterate(self):
        # Iterate over all keys.
        settings = Settings()
        settings.set('a', 'ant')
        settings.set('b', 'bee')
        settings.set('c', 'cat')
        keyval = list(settings)
        keyval.sort()
        self.assertEqual(keyval, [('a', 'ant'), ('b', 'bee'), ('c', 'cat')])

    @unittest.skipIf(os.getuid() == 0, 'Test cannot succeed when run as root')
    @configuration
    def test_settings_db_permission_denied(self, config):
        # LP: #1349478 - some tests are run as non-root, meaning they don't
        # have write permission to /var/lib/system-image. This is where
        # settings.db gets created, but if the process can't create files
        # there, we get a sqlite3 exception.
        db_file = Path(config.system.settings_db)
        self.assertFalse(db_file.exists())
        with ExitStack() as resources:
            resources.enter_context(chmod(str(db_file.parent), 0o555))
            # With no fallback, this will fail.
            with patch('systemimage.settings.Settings._check_fallback',
                       side_effect=RuntimeError):
                self.assertRaises(RuntimeError, Settings)
            # Now, set the XDG cache directory to a temporary directory, allow
            # the fallback to work and try again.
            tmpdir = resources.enter_context(temporary_directory())
            resources.enter_context(
                patch('systemimage.settings.xdg_cache_home', tmpdir))
            settings = Settings()
            settings.set('bar', 'baz')
            self.assertEqual(Settings().get('bar'), 'baz')
            # The settings.db file still doesn't exist because it got
            # created in a different place.
            self.assertFalse(db_file.exists())
./systemimage/tests/test_winner.py0000644000015600001650000003572312701500553017505 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd.
# Author: Barry Warsaw

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Test downloading the candidate winner files."""

__all__ = [
    'TestWinnerDownloads',
    ]


import os
import unittest

from contextlib import ExitStack
from systemimage.candidates import get_candidates
from systemimage.config import config
from systemimage.gpg import SignatureError
from systemimage.helpers import temporary_directory
from systemimage.state import State
from systemimage.testing.helpers import (
    configuration, copy, make_http_server, setup_index, setup_keyring_txz,
    setup_keyrings, sign, touch_build)
from systemimage.testing.nose import SystemImagePlugin


class TestWinnerDownloads(unittest.TestCase):
    """Test full end-to-end downloads through index.json."""

    @classmethod
    def setUpClass(self):
        SystemImagePlugin.controller.set_mode(cert_pem='cert.pem')

    def setUp(self):
        # Start both an HTTP and an HTTPS server running. The former is for
        # the zip files and the latter is for everything else. Vend them out
        # of a temporary directory which we load up with the right files.
        self._stack = ExitStack()
        try:
            self._serverdir = self._stack.enter_context(temporary_directory())
            copy('winner.channels_01.json', self._serverdir, 'channels.json')
            sign(os.path.join(self._serverdir, 'channels.json'),
                 'image-signing.gpg')
            # Path B will win, with no bootme flags.
            self._indexpath = os.path.join('stable', 'nexus7', 'index.json')
            copy('winner.index_02.json', self._serverdir, self._indexpath)
            sign(os.path.join(self._serverdir, self._indexpath),
                 'image-signing.gpg')
            # Create every file in path B. The file contents will be the
            # checksum value. We need to create the signatures on the fly.
            setup_index('winner.index_02.json', self._serverdir,
                        'image-signing.gpg')
            self._stack.push(
                make_http_server(self._serverdir, 8943, 'cert.pem', 'key.pem'))
            self._stack.push(make_http_server(self._serverdir, 8980))
        except:
            self._stack.close()
            raise

    def tearDown(self):
        self._stack.close()

    @configuration
    def test_calculate_candidates(self):
        # Calculate the candidate paths.
        setup_keyrings()
        state = State()
        # Run the state machine until we get an index file.
        state.run_until('calculate_winner')
        candidates = get_candidates(state.index, 100)
        # There are three candidate upgrade paths.
        self.assertEqual(len(candidates), 3)
        descriptions = []
        for image in candidates[0]:
            # There's only one description per image so order doesn't matter.
            descriptions.extend(image.descriptions.values())
        self.assertEqual(descriptions, ['Full A', 'Delta A.1', 'Delta A.2'])
        descriptions = []
        for image in candidates[1]:
            # There's only one description per image so order doesn't matter.
            descriptions.extend(image.descriptions.values())
        self.assertEqual(descriptions, ['Full B', 'Delta B.1', 'Delta B.2'])
        descriptions = []
        for image in candidates[2]:
            # There's only one description per image so order doesn't matter.
            descriptions.extend(image.descriptions.values())
        self.assertEqual(descriptions, ['Full C', 'Delta C.1'])
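
    # An illustrative sketch, not part of the original suite: the index
    # fixtures in this archive encode each upgrade path as one "full" image
    # plus "delta" images chained through their "base"/"version" fields,
    # which is what the three-path assertion above reflects. A minimal path
    # walker over a parsed index.json dictionary (``index`` is assumed to be
    # a plain dict, not the real systemimage Index object) might read:
    @staticmethod
    def _sketch_candidate_paths(index):
        images = index['images']
        # Map each delta's base version to the deltas that apply on top.
        by_base = {}
        for image in images:
            if image['type'] == 'delta':
                by_base.setdefault(image['base'], []).append(image)
        paths = []
        for full in (i for i in images if i['type'] == 'full'):
            path = [full]
            # Follow the delta chain from the full image's version.
            while path[-1]['version'] in by_base:
                # Real code must score and branch over multiple deltas per
                # base; this sketch just takes the first one.
                path.append(by_base[path[-1]['version']][0])
            paths.append(path)
        return paths
    # Applied to the candidates.index_13.json fixture above, this yields
    # exactly three paths: A (300, 301, 304), B (200, 201, 304) and
    # C (100, 303).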
    @configuration
    def test_calculate_winner(self):
        # Calculate the winning upgrade path.
        setup_keyrings()
        state = State()
        touch_build(100)
        # Run the state machine long enough to get the candidates and winner.
        state.run_thru('calculate_winner')
        # There are three candidate upgrade paths.
        descriptions = []
        for image in state.winner:
            # There's only one description per image so order doesn't matter.
            descriptions.extend(image.descriptions.values())
        self.assertEqual(descriptions, ['Full B', 'Delta B.1', 'Delta B.2'])

    @configuration
    def test_download_winners(self):
        # Check that all the winning path's files are downloaded.
        setup_keyrings()
        state = State()
        touch_build(100)
        # Run the state machine until we download the files.
        state.run_thru('download_files')
        # The B path files contain their checksums.
        def assert_file_contains(filename, contents):
            path = os.path.join(config.updater.cache_partition, filename)
            with open(path, encoding='utf-8') as fp:
                self.assertEqual(fp.read(), contents)
        assert_file_contains('5.txt', '345')
        assert_file_contains('6.txt', '456')
        assert_file_contains('7.txt', '567')
        # Delta B.1 files.
        assert_file_contains('8.txt', '678')
        assert_file_contains('9.txt', '789')
        assert_file_contains('a.txt', '89a')
        # Delta B.2 files.
        assert_file_contains('b.txt', '9ab')
        assert_file_contains('d.txt', 'fed')
        assert_file_contains('c.txt', 'edc')

    @configuration
    def test_download_winners_overwrite(self):
        # Check that all the winning path's files are downloaded, even if
        # those files already exist in their destination paths.
        setup_keyrings()
        state = State()
        touch_build(100)
        # Run the state machine until we download the files.
        for basename in '56789abcd':
            base = os.path.join(config.updater.cache_partition, basename)
            path = base + '.txt'
            with open(path, 'w', encoding='utf-8') as fp:
                print('stale', file=fp)
        state.run_thru('download_files')
        # The B path files contain their checksums.
        def assert_file_contains(filename, contents):
            path = os.path.join(config.updater.cache_partition, filename)
            with open(path, encoding='utf-8') as fp:
                self.assertEqual(fp.read(), contents)
        assert_file_contains('5.txt', '345')
        assert_file_contains('6.txt', '456')
        assert_file_contains('7.txt', '567')
        # Delta B.1 files.
        assert_file_contains('8.txt', '678')
        assert_file_contains('9.txt', '789')
        assert_file_contains('a.txt', '89a')
        # Delta B.2 files.
        assert_file_contains('b.txt', '9ab')
        assert_file_contains('d.txt', 'fed')
        assert_file_contains('c.txt', 'edc')
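
    # An illustrative sketch, not part of the original suite: the 64-hex
    # "checksum" fields in the index fixtures are SHA-256 digests of the
    # file contents, which is how the bad-checksums test further below can
    # detect a corrupted download. A stand-alone check over one member of
    # an image's "files" list (``entry`` is assumed to be that dict) could
    # look like this:
    @staticmethod
    def _sketch_checksum_ok(cache_dir, entry):
        import hashlib
        path = os.path.join(cache_dir, os.path.basename(entry['path']))
        sha = hashlib.sha256()
        with open(path, 'rb') as fp:
            # Read in chunks so large image tarballs need not fit in memory.
            for chunk in iter(lambda: fp.read(1 << 20), b''):
                sha.update(chunk)
        return sha.hexdigest() == entry['checksum']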
    @configuration
    def test_download_winners_signed_by_device_key(self):
        # Check that all the winning path's files are downloaded, even when
        # they are signed by the device key instead of the image signing
        # master.
        setup_keyrings()
        # To set up the device signing key, we need to load this channels.json
        # file and copy the device keyring to the server.
        copy('winner.channels_02.json', self._serverdir, 'channels.json')
        sign(os.path.join(self._serverdir, 'channels.json'),
             'image-signing.gpg')
        setup_keyring_txz(
            'device-signing.gpg', 'image-signing.gpg',
            dict(type='device-signing'),
            os.path.join(self._serverdir, 'stable', 'nexus7', 'device.tar.xz'))
        # The index.json file and all the downloadable files must now be
        # signed with the device key.
        sign(os.path.join(self._serverdir, self._indexpath),
             'device-signing.gpg')
        setup_index('winner.index_02.json', self._serverdir,
                    'device-signing.gpg')
        touch_build(100)
        # Run the state machine until we download the files.
        state = State()
        state.run_thru('download_files')
        # The B path files contain their checksums.
        def assert_file_contains(filename, contents):
            path = os.path.join(config.updater.cache_partition, filename)
            with open(path, encoding='utf-8') as fp:
                self.assertEqual(fp.read(), contents)
        assert_file_contains('5.txt', '345')
        assert_file_contains('6.txt', '456')
        assert_file_contains('7.txt', '567')
        # Delta B.1 files.
        assert_file_contains('8.txt', '678')
        assert_file_contains('9.txt', '789')
        assert_file_contains('a.txt', '89a')
        # Delta B.2 files.
        assert_file_contains('b.txt', '9ab')
        assert_file_contains('d.txt', 'fed')
        assert_file_contains('c.txt', 'edc')

    @configuration
    def test_download_winners_signed_by_signing_key_with_device_key(self):
        # Check that all the winning path's files are downloaded, even when
        # the index is signed by the device key but the files themselves are
        # signed by the image signing key.
        setup_keyrings()
        # To set up the device signing key, we need to load this channels.json
        # file and copy the device keyring to the server.
        copy('winner.channels_02.json', self._serverdir, 'channels.json')
        sign(os.path.join(self._serverdir, 'channels.json'),
             'image-signing.gpg')
        setup_keyring_txz(
            'device-signing.gpg', 'image-signing.gpg',
            dict(type='device-signing'),
            os.path.join(self._serverdir, 'stable', 'nexus7', 'device.tar.xz'))
        sign(os.path.join(self._serverdir, self._indexpath),
             'device-signing.gpg')
        # All the downloadable files are now signed with the image signing
        # key.
        setup_index('winner.index_02.json', self._serverdir,
                    'image-signing.gpg')
        touch_build(100)
        # Run the state machine until we download the files.
        state = State()
        state.run_thru('download_files')
        # The B path files contain their checksums.
        def assert_file_contains(filename, contents):
            path = os.path.join(config.updater.cache_partition, filename)
            with open(path, encoding='utf-8') as fp:
                self.assertEqual(fp.read(), contents)
        assert_file_contains('5.txt', '345')
        assert_file_contains('6.txt', '456')
        assert_file_contains('7.txt', '567')
        # Delta B.1 files.
        assert_file_contains('8.txt', '678')
        assert_file_contains('9.txt', '789')
        assert_file_contains('a.txt', '89a')
        # Delta B.2 files.
        assert_file_contains('b.txt', '9ab')
        assert_file_contains('d.txt', 'fed')
        assert_file_contains('c.txt', 'edc')

    @configuration
    def test_download_winners_bad_checksums(self):
        # Similar to the various good paths, except because the checksums are
        # wrong in this index.json file, we'll get an error when downloading.
        copy('winner.index_01.json', self._serverdir, self._indexpath)
        sign(os.path.join(self._serverdir, self._indexpath),
             'image-signing.gpg')
        setup_index('winner.index_01.json', self._serverdir,
                    'image-signing.gpg')
        setup_keyrings()
        state = State()
        touch_build(100)
        # Run the state machine until we're prepped to download.
        state.run_until('download_files')
        # Now try to download the files and get the error.
        with self.assertRaises(FileNotFoundError) as cm:
            next(state)
        self.assertIn('HASH ERROR', str(cm.exception))
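
    # An illustrative sketch, not part of the original suite: every
    # downloadable file in these fixtures is paired with a detached ``.asc``
    # signature, and the tests below exercise what happens when one is
    # missing or was made with the wrong key. With the python-gnupg library,
    # a detached-signature check looks roughly like this; ``keyring_dir`` is
    # an assumed GNUPGHOME directory already holding the trusted keyring.
    @staticmethod
    def _sketch_verify_detached(keyring_dir, data_path, asc_path):
        import gnupg
        gpg = gnupg.GPG(gnupghome=keyring_dir)
        with open(asc_path, 'rb') as sig_fp:
            # verify_file() takes the signature stream plus the path to the
            # signed data file.
            verified = gpg.verify_file(sig_fp, data_path)
        return bool(verified)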
    @configuration
    def test_download_winners_signed_by_wrong_key(self):
        # There is a device key, but the image files are signed by the image
        # signing key, which according to the spec means the files are not
        # signed correctly.
        setup_keyrings()
        # To set up the device signing key, we need to load this channels.json
        # file and copy the device keyring to the server.
        copy('winner.channels_02.json', self._serverdir, 'channels.json')
        sign(os.path.join(self._serverdir, 'channels.json'),
             'image-signing.gpg')
        setup_keyring_txz(
            'device-signing.gpg', 'image-signing.gpg',
            dict(type='device-signing'),
            os.path.join(self._serverdir, 'stable', 'nexus7', 'device.tar.xz'))
        sign(os.path.join(self._serverdir, self._indexpath),
             'device-signing.gpg')
        # All the downloadable files are now signed with a bogus key.
        setup_index('winner.index_02.json', self._serverdir, 'spare.gpg')
        touch_build(100)
        # Run the state machine until just before we download the files.
        state = State()
        state.run_until('download_files')
        # The next state transition will fail because of the bad signature.
        self.assertRaises(SignatureError, next, state)
        # There are no downloaded files.
        txtfiles = set(filename for filename in os.listdir(config.tempdir)
                       if os.path.splitext(filename)[1] == '.txt')
        self.assertEqual(len(txtfiles), 0)

    @configuration
    def test_no_download_winners_with_missing_signature(self):
        # If one of the download files is missing a signature, none of the
        # files get downloaded and get_files() fails.
        setup_keyrings()
        state = State()
        touch_build(100)
        # Remove a signature.
        os.remove(os.path.join(self._serverdir, '6/7/8.txt.asc'))
        # Run the state machine to calculate the winning path.
        state.run_until('download_files')
        # The next state transition will fail because of the missing
        # signature.
        self.assertRaises(FileNotFoundError, next, state)
        # There are no downloaded files.
        txtfiles = set(filename for filename in os.listdir(config.tempdir)
                       if os.path.splitext(filename)[1] == '.txt')
        self.assertEqual(len(txtfiles), 0, txtfiles)

    @configuration
    def test_no_download_winners_with_bad_signature(self):
        # If one of the download files has a bad signature, none of the
        # downloaded files are available.
        setup_keyrings()
        state = State()
        touch_build(100)
        # Break a signature.
        sign(os.path.join(self._serverdir, '6', '7', '8.txt'), 'spare.gpg')
        # Run the state machine to calculate the winning path.
        state.run_until('download_files')
        # The next state transition will fail because of the bad signature.
        self.assertRaises(SignatureError, next, state)
        # There are no downloaded files.
        txtfiles = set(filename for filename in os.listdir(config.tempdir)
                       if os.path.splitext(filename)[1] == '.txt')
        self.assertEqual(len(txtfiles), 0)
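
# An illustrative sketch, not part of the archive: the State object these
# tests drive is an iterator whose steps are named transitions. As the
# calls above show, run_until() stops just before a named transition,
# run_thru() stops just after it, and next() advances one step, raising
# (for example SignatureError) when a transition fails. A stripped-down
# model of that protocol, using only step names that appear in these tests:
class _SketchStateMachine:
    def __init__(self):
        self._steps = ['cleanup', 'get_channel', 'calculate_winner',
                       'download_files']
        self._position = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self._position >= len(self._steps):
            raise StopIteration
        step = self._steps[self._position]
        self._position += 1
        # A real transition does its work here and may raise
        # SignatureError, ChecksumError, or FileNotFoundError.
        return step

    def run_until(self, name):
        # Advance while the upcoming transition is not `name`.
        while (self._position < len(self._steps)
                and self._steps[self._position] != name):
            next(self)

    def run_thru(self, name):
        # Advance through (and including) `name`.
        self.run_until(name)
        if self._position < len(self._steps):
            next(self)

# Usage mirroring the tests: machine.run_until('download_files') leaves the
# download step pending, so a subsequent next(machine) is what actually
# performs it and surfaces any error.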
"""Test the state machine.""" __all__ = [ 'TestCachedFiles', 'TestChannelAlias', 'TestCommandFileDelta', 'TestCommandFileFull', 'TestDailyProposed', 'TestFileOrder', 'TestKeyringDoubleChecks', 'TestMaximumImage', 'TestMiscellaneous', 'TestPhasedUpdates', 'TestState', 'TestStateDuplicateDestinations', 'TestStateNewChannelsFormat', 'TestUpdateApplied', ] import os import shutil import hashlib import unittest from contextlib import ExitStack from datetime import datetime, timedelta, timezone from functools import partial from subprocess import CalledProcessError from systemimage.candidates import version_filter from systemimage.config import config from systemimage.download import DuplicateDestinationError from systemimage.gpg import Context, SignatureError from systemimage.helpers import calculate_signature from systemimage.state import ChecksumError, State from systemimage.testing.demo import DemoDevice from systemimage.testing.helpers import ( ServerTestBase, configuration, copy, data_path, descriptions, get_index, make_http_server, setup_keyring_txz, setup_keyrings, sign, temporary_directory, touch_build) from systemimage.testing.nose import SystemImagePlugin from unittest.mock import call, patch BAD_SIGNATURE = 'f' * 64 class TestState(unittest.TestCase): """Test various state transitions.""" @classmethod def setUpClass(self): SystemImagePlugin.controller.set_mode(cert_pem='cert.pem') def setUp(self): self._stack = ExitStack() self._state = State() try: self._serverdir = self._stack.enter_context(temporary_directory()) self._stack.push(make_http_server( self._serverdir, 8943, 'cert.pem', 'key.pem')) copy('state.channels_07.json', self._serverdir, 'channels.json') self._channels_path = os.path.join( self._serverdir, 'channels.json') except: self._stack.close() raise def tearDown(self): self._stack.close() @configuration def test_cleanup(self): # All residual files from the data partitions are removed. The cache # partition is not touched (that clean up happens later). wopen = partial(open, mode='w', encoding='utf-8') cache_partition = config.updater.cache_partition data_partition = config.updater.data_partition with wopen(os.path.join(cache_partition, 'log')) as fp: print('logger keeper', file=fp) with wopen(os.path.join(cache_partition, 'last_log')) as fp: print('logger keeper', file=fp) with wopen(os.path.join(cache_partition, 'xxx.txt')) as fp: print('xxx', file=fp) with wopen(os.path.join(cache_partition, 'yyy.txt')) as fp: print('yyy', file=fp) with wopen(os.path.join(data_partition, 'log')) as fp: print('stale log', file=fp) with wopen(os.path.join(data_partition, 'last_log')) as fp: print('stale log', file=fp) with wopen(os.path.join(data_partition, 'blacklist.tar.xz')) as fp: print('black list', file=fp) with wopen(os.path.join(data_partition, 'blacklist.tar.xz.asc')) as fp: print('black list', file=fp) with wopen(os.path.join(data_partition, 'keyring.tar.xz')) as fp: print('black list', file=fp) with wopen(os.path.join(data_partition, 'keyring.tar.xz.asc')) as fp: print('black list', file=fp) # Here are all the files before we start up the state machine. self.assertEqual(len(os.listdir(cache_partition)), 4) self.assertEqual(len(os.listdir(data_partition)), 6) # Clean up step. State().run_thru('cleanup') # The blacklist and keyring files are removed from the data partition. 
contents = os.listdir(data_partition) self.assertEqual(len(contents), 2) self.assertNotIn('blacklist.tar.xz', contents) self.assertNotIn('blacklist.tar.xz.asc', contents) self.assertNotIn('keyring.tar.xz', contents) self.assertNotIn('keyring.tar.xz.asc', contents) # None of the files in the cache partition are removed. self.assertEqual(set(os.listdir(cache_partition)), set(['log', 'last_log', 'xxx.txt', 'yyy.txt'])) @configuration def test_cleanup_no_partition(self): # If one or more of the partitions doesn't exist, no big deal. # # The cache partition doesn't exist. os.rename(config.updater.cache_partition, config.updater.cache_partition + '.aside') State().run_thru('cleanup') # The data partition doesn't exist. os.rename(config.updater.cache_partition + '.aside', config.updater.cache_partition) os.rename(config.updater.data_partition, config.updater.data_partition + '.aside') State().run_thru('cleanup') # Neither partition exists. os.rename(config.updater.cache_partition, config.updater.cache_partition + '.aside') State().run_thru('cleanup') @configuration def test_first_signature_fails_get_new_image_signing_key(self): # The first time we check the channels.json file, the signature fails, # because it's blacklisted. Everything works out in the end though # because a new system image signing key is downloaded. # # Start by signing the channels file with a blacklisted key. sign(self._channels_path, 'spare.gpg') setup_keyrings() # Make the spare keyring the image signing key, which would normally # make the channels.json signature good, except that we're going to # blacklist it. setup_keyring_txz( 'spare.gpg', 'image-master.gpg', dict(type='image-signing'), os.path.join(config.gpg.image_signing)) # Blacklist the spare keyring. setup_keyring_txz( 'spare.gpg', 'image-master.gpg', dict(type='blacklist'), os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz')) # Here's the new image signing key. setup_keyring_txz( 'image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), os.path.join(self._serverdir, 'gpg', 'image-signing.tar.xz')) # Run through the state machine twice so that we get the blacklist and # the channels.json file. Since the channels.json file will not be # signed correctly, new state transitions will be added to re-acquire a # new image signing key. state = State() state.run_thru('get_channel') # Where we would expect a channels object, there is none. self.assertIsNone(state.channels) # Just to prove that the image signing key is going to change, let's # calculate the current one's checksum. with open(config.gpg.image_signing, 'rb') as fp: checksum = hashlib.md5(fp.read()).digest() next(state) # Now we have a new image signing key. with open(config.gpg.image_signing, 'rb') as fp: self.assertNotEqual(checksum, hashlib.md5(fp.read()).digest()) # Let's re-sign the channels.json file with the new image signing # key. Then step the state machine once more and we should get a # valid channels object. sign(self._channels_path, 'image-signing.gpg') next(state) self.assertEqual(state.channels.stable.devices.nexus7.index, '/stable/nexus7/index.json') @configuration def test_first_signature_fails_get_bad_image_signing_key(self): # The first time we check the channels.json file, the signature fails. # We try to get the new image signing key, but it is bogus. setup_keyrings() # Start by signing the channels file with a blacklisted key. sign(self._channels_path, 'spare.gpg') # Make the new image signing key bogus by not signing it with the # image master key.
setup_keyring_txz( 'image-signing.gpg', 'spare.gpg', dict(type='image-signing'), os.path.join(self._serverdir, 'gpg', 'image-signing.tar.xz')) # Run through the state machine twice so that we get the blacklist and # the channels.json file. Since the channels.json file will not be # signed correctly, new state transitions will be added to re-acquire a # new image signing key. state = State() state.run_thru('get_channel') # Where we would expect a channels object, there is none. self.assertIsNone(state.channels) # Just to prove that the image signing key is not going to change, # let's calculate the current one's checksum. with open(config.gpg.image_signing, 'rb') as fp: checksum = hashlib.md5(fp.read()).digest() # The next state transition will attempt to get the new image signing # key, but that will fail because it is not signed correctly. self.assertRaises(SignatureError, next, state) # And the old image signing key hasn't changed. with open(config.gpg.image_signing, 'rb') as fp: self.assertEqual(checksum, hashlib.md5(fp.read()).digest()) @configuration def test_bad_system_image_master_exposed_by_blacklist(self): # The blacklist is signed by the image master key. If the blacklist's # signature is bad, the state machine will attempt to download a new # image master key. setup_keyrings() # Start by creating a blacklist signed by a bogus key, along with a # new image master key. setup_keyring_txz( 'spare.gpg', 'spare.gpg', dict(type='blacklist'), os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz')) setup_keyring_txz( 'spare.gpg', 'archive-master.gpg', dict(type='image-master'), os.path.join(self._serverdir, 'gpg', 'image-master.tar.xz')) # Run the state machine long enough to grab the blacklist. This # should fail with a signature error (internally). There will be no # blacklist. state = State() state.run_thru('get_blacklist_1') self.assertIsNone(state.blacklist) # Just to prove that the system image master key is going to change, # let's calculate the current one's checksum. with open(config.gpg.image_master, 'rb') as fp: checksum = hashlib.md5(fp.read()).digest() # The next state transition should get us a new image master. state.run_until('get_blacklist_2') # Now we have a new system image master key. with open(config.gpg.image_master, 'rb') as fp: self.assertNotEqual(checksum, hashlib.md5(fp.read()).digest()) # Now the blacklist file's signature should be good. state.run_thru('get_blacklist_2') self.assertEqual(os.path.basename(state.blacklist), 'blacklist.tar.xz') @configuration def test_bad_system_image_master_new_one_is_no_better(self): # The blacklist is signed by the system image master key. If the # blacklist's signature is bad, the state machine will attempt to # download a new system image master key. In this case, the signature # on the new system image master key is bogus. setup_keyrings() # Start by creating a blacklist signed by a bogus key, along with a # new image master key. setup_keyring_txz( 'spare.gpg', 'spare.gpg', dict(type='blacklist'), os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz')) setup_keyring_txz( 'spare.gpg', 'spare.gpg', dict(type='image-master'), os.path.join(self._serverdir, 'gpg', 'image-master.tar.xz')) # Run the state machine long enough to grab the blacklist. This # should fail with a signature error (internally). There will be no # blacklist.
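# Aside (illustrative): every SignatureError path in these tests
# reduces to a detached-signature check of some file against its .asc
# companion, using a specific set of keyrings.  A minimal sketch with
# the python-gnupg library (this is not the project's Context wrapper
# from systemimage.gpg; the paths are placeholders):
import gnupg

def verify_detached(data_path, keyring_path, gnupg_home):
    # Verify data_path against its detached data_path + '.asc'
    # signature, trusting only the keys in keyring_path.
    gpg = gnupg.GPG(gnupghome=gnupg_home, keyring=keyring_path)
    with open(data_path + '.asc', 'rb') as sig_fp:
        return bool(gpg.verify_file(sig_fp, data_path))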
state = State() state.run_thru('get_blacklist_1') self.assertIsNone(state.blacklist) # Just to prove that the system image master key is going to change, # let's calculate the current one's checksum. with open(config.gpg.image_master, 'rb') as fp: checksum = hashlib.md5(fp.read()).digest() # The next state transition should get us a new image master, but its # signature is not good. self.assertRaises(SignatureError, state.run_until, 'get_blacklist_2') # And the old system image master key hasn't changed. with open(config.gpg.image_master, 'rb') as fp: self.assertEqual(checksum, hashlib.md5(fp.read()).digest()) @configuration def test_image_master_is_missing(self): # The system only comes pre-seeded with the archive master public # keyring. All others are downloaded. setup_keyrings('archive-master') # Put a system image master key on the server. setup_keyring_txz( 'image-master.gpg', 'archive-master.gpg', dict(type='image-master'), os.path.join(self._serverdir, 'gpg', 'image-master.tar.xz')) # Run the state machine long enough to get the blacklist. This should # download the system image master key, which will be signed against # the archive master. Prove that the image master doesn't exist yet. self.assertFalse(os.path.exists(config.gpg.image_master)) State().run_thru('get_blacklist_1') # Now the image master key exists. self.assertTrue(os.path.exists(config.gpg.image_master)) @configuration def test_image_master_is_missing_with_blacklist(self): # The system only comes pre-seeded with the archive master public # keyring. All others are downloaded. This time there is a # blacklist and downloading that will also get the image master key. setup_keyrings('archive-master') # Put a system image master key on the server. setup_keyring_txz( 'image-master.gpg', 'archive-master.gpg', dict(type='image-master'), os.path.join(self._serverdir, 'gpg', 'image-master.tar.xz')) setup_keyring_txz( 'spare.gpg', 'spare.gpg', dict(type='blacklist'), os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz')) # Run the state machine long enough to get the blacklist. This should # download the system image master key, which will be signed against # the archive master. Prove that the image master doesn't exist yet. self.assertFalse(os.path.exists(config.gpg.image_master)) State().run_thru('get_blacklist_1') # Now the image master key exists. self.assertTrue(os.path.exists(config.gpg.image_master)) @configuration def test_image_signing_is_missing(self): # The system only comes pre-seeded with the archive master public # keyring. All others are downloaded. setup_keyrings('archive-master') # Put a system image master key on the server. setup_keyring_txz( 'image-master.gpg', 'archive-master.gpg', dict(type='image-master'), os.path.join(self._serverdir, 'gpg', 'image-master.tar.xz')) # Put an image signing key on the server. setup_keyring_txz( 'image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), os.path.join(self._serverdir, 'gpg', 'image-signing.tar.xz')) sign(self._channels_path, 'image-signing.gpg') # Run the state machine twice. The first time downloads the # blacklist, which triggers a download of the image master key. The # second one grabs the channels.json file which triggers a download of # the image signing key. Prove that the image master and signing keys # don't exist yet. self.assertFalse(os.path.exists(config.gpg.image_master)) self.assertFalse(os.path.exists(config.gpg.image_signing)) state = State() state.run_thru('get_channel') # Now the image master and signing keys exist.
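# Aside (illustrative): many tests in this module fingerprint a keyring
# file before and after a state-machine run to detect whether it was
# re-downloaded.  The repeated open/read/md5 pattern could be captured
# in a small helper like this (a sketch, not a helper the test suite
# actually defines):
import hashlib

def file_fingerprint(path):
    # MD5 is used here purely as a change detector, not for security.
    with open(path, 'rb') as fp:
        return hashlib.md5(fp.read()).digest()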
self.assertTrue(os.path.exists(config.gpg.image_master)) self.assertTrue(os.path.exists(config.gpg.image_signing)) @configuration def test_downloaded_image_signing_is_still_bad(self): # LP: #1191979: Let's say there's a blacklist.tar.xz file but it is # not signed with the system image master key. The state machine will # catch the SignatureError and re-download the system image master. # But let's say that the signature still fails (perhaps because the # blacklist was signed with the wrong key). The client should log the # second signature failure and quit. setup_keyrings() # Put a blacklist file up that is signed by a bogus key. Also, put up # the real image master key. The blacklist verification check will # never succeed. setup_keyring_txz( 'spare.gpg', 'spare.gpg', dict(type='blacklist'), os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz')) setup_keyring_txz( 'image-master.gpg', 'archive-master.gpg', dict(type='image-master'), os.path.join(self._serverdir, 'gpg', 'image-master.tar.xz')) # Run the state machine three times: # blacklist -(sig fail)-> get master -> blacklist (sig fail) state = State() state.run_thru('get_master_key') self.assertRaises(SignatureError, next, state) class TestUpdateApplied(ServerTestBase): """Test various state transitions leading to the applying of the update.""" INDEX_FILE = 'state.index_03.json' CHANNEL_FILE = 'state.channels_02.json' CHANNEL = 'stable' DEVICE = 'nexus7' @configuration def test_keyrings_copied_to_upgrader_paths(self): # The following keyrings get copied to system paths that the upgrader # consults: # * blacklist.tar.xz{,.asc} - data partition (if one exists) # * image-master.tar.xz{,.asc} - cache partition # * image-signing.tar.xz{,.asc} - cache partition # * device-signing.tar.xz{,.asc} - cache partition (if one exists) self._setup_server_keyrings() cache_dir = config.updater.cache_partition data_dir = config.updater.data_partition blacklist_path = os.path.join(data_dir, 'blacklist.tar.xz') master_path = os.path.join(cache_dir, 'image-master.tar.xz') signing_path = os.path.join(cache_dir, 'image-signing.tar.xz') device_path = os.path.join(cache_dir, 'device-signing.tar.xz') # None of the keyrings or .asc files are found yet. self.assertFalse(os.path.exists(blacklist_path)) self.assertFalse(os.path.exists(master_path)) self.assertFalse(os.path.exists(signing_path)) self.assertFalse(os.path.exists(device_path)) self.assertFalse(os.path.exists(blacklist_path + '.asc')) self.assertFalse(os.path.exists(master_path + '.asc')) self.assertFalse(os.path.exists(signing_path + '.asc')) self.assertFalse(os.path.exists(device_path + '.asc')) # None of the data files are found yet. for image in get_index('state.index_03.json').images: for filerec in image.files: path = os.path.join(cache_dir, os.path.basename(filerec.path)) asc = os.path.join( cache_dir, os.path.basename(filerec.signature)) self.assertFalse(os.path.exists(path)) self.assertFalse(os.path.exists(asc)) # Run the state machine enough times to download all the keyrings and # data files, then to move the files into place just before a reboot # is issued. Steps preceded by * are steps that fail. # *get blacklist/get master -> get channels/signing # -> get device signing -> get index -> calculate winner # -> download files -> move files state = State() state.run_thru('move_files') # All of the keyrings and .asc files are found. 
self.assertTrue(os.path.exists(blacklist_path)) self.assertTrue(os.path.exists(master_path)) self.assertTrue(os.path.exists(signing_path)) self.assertTrue(os.path.exists(device_path)) self.assertTrue(os.path.exists(blacklist_path + '.asc')) self.assertTrue(os.path.exists(master_path + '.asc')) self.assertTrue(os.path.exists(signing_path + '.asc')) self.assertTrue(os.path.exists(device_path + '.asc')) # All of the data files are found. for image in get_index('state.index_03.json').images: for filerec in image.files: path = os.path.join(cache_dir, os.path.basename(filerec.path)) asc = os.path.join( cache_dir, os.path.basename(filerec.signature)) self.assertTrue(os.path.exists(path)) self.assertTrue(os.path.exists(asc)) @configuration def test_update_applied(self, config): # The update gets applied self._setup_server_keyrings() ini_path = os.path.join(config.config_d, '10_state.ini') shutil.copy(data_path('state.config_01.ini'), ini_path) config.reload() with patch('systemimage.apply.Noop.apply') as mock: list(State()) self.assertEqual(mock.call_count, 1) @configuration def test_no_update_available_no_apply(self, config): # LP: #1202915. If there's no update available, running the state # machine to completion should not make the call to apply it. self._setup_server_keyrings() ini_path = os.path.join(config.config_d, '10_state.ini') shutil.copy(data_path('state.config_01.ini'), ini_path) config.reload() # Hack the current build number so that no update is available. touch_build(5000) with patch('systemimage.apply.Noop.apply') as mock: list(State()) self.assertEqual(mock.call_count, 0) @unittest.skipIf(os.getuid() == 0, 'This test would actually reboot!') @configuration def test_reboot_fails(self): # The reboot fails, e.g. because we are not root. self._setup_server_keyrings() self.assertRaises(CalledProcessError, list, State()) @configuration def test_run_until(self, config): # It is possible to run the state machine either until some specific # state is completed, or it runs to the end. self._setup_server_keyrings() ini_path = os.path.join(config.config_d, '10_state.ini') shutil.copy(data_path('state.config_01.ini'), ini_path) config.reload() state = State() self.assertIsNone(state.channels) state.run_thru('get_channel') self.assertIsNotNone(state.channels) # But there is no index file yet. self.assertIsNone(state.index) # Run it some more. state.run_thru('get_index') self.assertIsNotNone(state.index) # Run until just before the apply step. with patch('systemimage.apply.Noop.apply') as mock: state.run_until('apply') self.assertEqual(mock.call_count, 0) # Run to the end of the state machine. with patch('systemimage.apply.Noop.apply', mock): list(state) self.assertEqual(mock.call_count, 1) class TestRebootingNoDeviceSigning(ServerTestBase): INDEX_FILE = 'state.index_03.json' CHANNEL_FILE = 'state.channels_03.json' CHANNEL = 'stable' DEVICE = 'nexus7' SIGNING_KEY = 'image-signing.gpg' @configuration def test_keyrings_copied_to_upgrader_paths_no_device_keyring(self): # The following keyrings get copied to system paths that the upgrader # consults: # * blacklist.tar.xz{,.asc} - data partition (if one exists) # * image-master.tar.xz{,.asc} - cache partition # * image-signing.tar.xz{,.asc} - cache partition # # In this test, there is no device signing keyring. 
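# Aside (illustrative): tests such as test_update_applied above stub
# out the real apply step with unittest.mock.patch and then assert on
# call_count.  The pattern in isolation (the patch target is the one
# the tests themselves use):
from unittest.mock import patch

def apply_was_called_once(run_state_machine):
    # run_state_machine: any callable that drives the update to the end.
    with patch('systemimage.apply.Noop.apply') as mock:
        run_state_machine()
    return mock.call_count == 1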
self._setup_server_keyrings(device_signing=False) cache_dir = config.updater.cache_partition data_dir = config.updater.data_partition blacklist_path = os.path.join(data_dir, 'blacklist.tar.xz') master_path = os.path.join(cache_dir, 'image-master.tar.xz') signing_path = os.path.join(cache_dir, 'image-signing.tar.xz') device_path = os.path.join(cache_dir, 'device-signing.tar.xz') # None of the keyrings or .asc files are found yet. self.assertFalse(os.path.exists(blacklist_path)) self.assertFalse(os.path.exists(master_path)) self.assertFalse(os.path.exists(signing_path)) self.assertFalse(os.path.exists(device_path)) self.assertFalse(os.path.exists(blacklist_path + '.asc')) self.assertFalse(os.path.exists(master_path + '.asc')) self.assertFalse(os.path.exists(signing_path + '.asc')) self.assertFalse(os.path.exists(device_path + '.asc')) # None of the data files are found yet. for image in get_index('state.index_03.json').images: for filerec in image.files: path = os.path.join(cache_dir, os.path.basename(filerec.path)) asc = os.path.join( cache_dir, os.path.basename(filerec.signature)) self.assertFalse(os.path.exists(path)) self.assertFalse(os.path.exists(asc)) # Run the state machine enough times to download all the keyrings and # data files, then to move the files into place just before a reboot # is issued. Steps preceded by * are steps that fail. # *get blacklist/get master -> get channels/signing # -> get device signing -> get index -> calculate winner # -> download files -> move files state = State() state.run_thru('move_files') # All of the keyrings and .asc files are found, except for the device # signing keys. self.assertTrue(os.path.exists(blacklist_path)) self.assertTrue(os.path.exists(master_path)) self.assertTrue(os.path.exists(signing_path)) self.assertFalse(os.path.exists(device_path)) self.assertTrue(os.path.exists(blacklist_path + '.asc')) self.assertTrue(os.path.exists(master_path + '.asc')) self.assertTrue(os.path.exists(signing_path + '.asc')) self.assertFalse(os.path.exists(device_path + '.asc')) # All of the data files are found. for image in get_index('state.index_03.json').images: for filerec in image.files: path = os.path.join(cache_dir, os.path.basename(filerec.path)) asc = os.path.join( cache_dir, os.path.basename(filerec.signature)) self.assertTrue(os.path.exists(path)) self.assertTrue(os.path.exists(asc)) class TestCommandFileFull(ServerTestBase): INDEX_FILE = 'state.index_03.json' CHANNEL_FILE = 'state.channels_02.json' CHANNEL = 'stable' DEVICE = 'nexus7' @configuration def test_full_command_file(self): # A full update's command file gets properly filled. self._setup_server_keyrings() State().run_until('apply') path = os.path.join(config.updater.cache_partition, 'ubuntu_command') with open(path, 'r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, """\ load_keyring image-master.tar.xz image-master.tar.xz.asc load_keyring image-signing.tar.xz image-signing.tar.xz.asc load_keyring device-signing.tar.xz device-signing.tar.xz.asc format system mount system update 6.txt 6.txt.asc update 7.txt 7.txt.asc update 5.txt 5.txt.asc unmount system """) @configuration def test_write_command_file_atomically(self): # LP: #1241236 - write the ubuntu_command file atomically. self._setup_server_keyrings() self._state.run_until('prepare_recovery') # This is a little proxy object which interposes printing. When it # sees the string 'unmount system' written to it, it raises an # IOError.
We use this to prove that the ubuntu_command file is # written atomically. old_print = print def broken_print(arg0, *args, **kws): if arg0.startswith('unmount system'): raise IOError('barf') old_print(arg0, *args, **kws) with patch('builtins.print', broken_print): with self.assertRaises(IOError) as cm: next(self._state) self.assertEqual(str(cm.exception), 'barf') path = os.path.join(config.updater.cache_partition, 'ubuntu_command') self.assertFalse(os.path.exists(path)) class TestCommandFileDelta(ServerTestBase): INDEX_FILE = 'state.index_04.json' CHANNEL_FILE = 'state.channels_02.json' CHANNEL = 'stable' DEVICE = 'nexus7' @configuration def test_delta_command_file(self): # A delta update's command file gets properly filled. self._setup_server_keyrings() # Set the current build number so a delta update will work. touch_build(100) State().run_until('apply') path = os.path.join(config.updater.cache_partition, 'ubuntu_command') with open(path, 'r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, """\ load_keyring image-master.tar.xz image-master.tar.xz.asc load_keyring image-signing.tar.xz image-signing.tar.xz.asc load_keyring device-signing.tar.xz device-signing.tar.xz.asc mount system update 6.txt 6.txt.asc update 7.txt 7.txt.asc update 5.txt 5.txt.asc unmount system """) class TestFileOrder(ServerTestBase): INDEX_FILE = 'state.index_05.json' CHANNEL_FILE = 'state.channels_02.json' CHANNEL = 'stable' DEVICE = 'nexus7' @configuration def test_file_order(self): # Updates are applied sorted first by image positional order, then # within the image by the 'order' key. self._setup_server_keyrings() # Set the current build number so a delta update will work. touch_build(100) State().run_until('apply') path = os.path.join(config.updater.cache_partition, 'ubuntu_command') with open(path, 'r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, """\ load_keyring image-master.tar.xz image-master.tar.xz.asc load_keyring image-signing.tar.xz image-signing.tar.xz.asc load_keyring device-signing.tar.xz device-signing.tar.xz.asc format system mount system update a.txt a.txt.asc update b.txt b.txt.asc update c.txt c.txt.asc update d.txt d.txt.asc update e.txt e.txt.asc update f.txt f.txt.asc update g.txt g.txt.asc update h.txt h.txt.asc update i.txt i.txt.asc unmount system """) class TestDailyProposed(ServerTestBase): """Test that the daily-proposed channel works as expected.""" INDEX_FILE = 'state.index_03.json' CHANNEL_FILE = 'state.channels_04.json' CHANNEL = 'daily-proposed' DEVICE = 'grouper' @configuration def test_daily_proposed_channel(self): # Resolve the index.json path for a channel with a dash in it. self._setup_server_keyrings() state = State() with ExitStack() as resources: resources.enter_context( patch('systemimage.state.config._channel', 'daily-proposed')) resources.enter_context( patch('systemimage.state.config.hooks.device', DemoDevice)) state.run_thru('get_index') self.assertEqual(state.index.global_.generated_at, datetime(2013, 8, 1, 8, 1, tzinfo=timezone.utc)) @configuration def test_bogus_channel(self): # Try and fail to resolve the index.json path for a non-existent # channel with a dash in it. 
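# Aside (illustrative): the atomicity that
# test_write_command_file_atomically checks for is conventionally
# achieved by writing to a temporary file in the same directory and
# renaming it into place only on success.  A sketch of that idiom (an
# assumption about the approach, not the project's actual
# implementation):
import os
import tempfile

def write_atomically(path, text):
    dirname = os.path.dirname(path) or '.'
    fd, tmp = tempfile.mkstemp(dir=dirname)
    try:
        with os.fdopen(fd, 'w', encoding='utf-8') as fp:
            fp.write(text)
        # Atomic on POSIX when source and destination share a filesystem.
        os.replace(tmp, path)
    except BaseException:
        os.remove(tmp)
        raise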
self._setup_server_keyrings() state = State() with ExitStack() as resources: resources.enter_context( patch('systemimage.state.config._channel', 'daily-testing')) resources.enter_context( patch('systemimage.state.config.hooks.device', DemoDevice)) state.run_thru('get_index') self.assertIsNone(state.index) class TestVersionedProposed(ServerTestBase): INDEX_FILE = 'state.index_03.json' CHANNEL_FILE = 'state.channels_05.json' CHANNEL = '14.04-proposed' DEVICE = 'grouper' @configuration def test_version_proposed_channel(self): # Resolve the index.json path for a channel with a dash and a dot in # it. self._setup_server_keyrings() state = State() with ExitStack() as resources: resources.enter_context( patch('systemimage.state.config._channel', '14.04-proposed')) resources.enter_context( patch('systemimage.state.config.hooks.device', DemoDevice)) state.run_thru('get_index') self.assertEqual(state.index.global_.generated_at, datetime(2013, 8, 1, 8, 1, tzinfo=timezone.utc)) class TestFilters(ServerTestBase): INDEX_FILE = 'state.index_04.json' CHANNEL_FILE = 'state.channels_02.json' CHANNEL = 'stable' DEVICE = 'nexus7' @configuration def test_filter_none(self): # With no filter, we get the unadulterated candidate paths. self._setup_server_keyrings() touch_build(100) state = State() state.run_thru('calculate_winner') self.assertEqual(len(state.winner), 1) @configuration def test_filter_1(self): # The state machine can use a filter to come up with a different set # of candidate upgrade paths. In this case, no candidates. self._setup_server_keyrings() touch_build(100) def filter_out_everything(candidates): return [] state = State() state.candidate_filter = filter_out_everything state.run_thru('calculate_winner') self.assertEqual(state.winner, []) class TestMaximumImage(ServerTestBase): INDEX_FILE = 'state.index_01.json' CHANNEL_FILE = 'state.channels_02.json' CHANNEL = 'stable' DEVICE = 'nexus7' @configuration def test_maximum_image(self, config): # Given a winning upgrade path, we can cap the maximum image # number from that path to be applied. This is useful for image # testing purposes. self._setup_server_keyrings() touch_build(100) state = State() state.run_thru('calculate_winner') self.assertEqual([image.version for image in state.winner], [200, 201, 304]) # Now we'll try again, but this time, put a cap on the upper # bound of the images. state = State() state.winner_filter = version_filter(200) state.run_thru('calculate_winner') self.assertEqual([image.version for image in state.winner], [200]) class TestStateNewChannelsFormat(ServerTestBase): CHANNEL_FILE = 'state.channels_06.json' CHANNEL = 'saucy' DEVICE = 'manta' INDEX_FILE = 'state.index_06.json' @configuration def test_full_reboot(self, config_d): # Test that state transitions through reboot work for the new channel # format. Also check that the right files get moved into place. shutil.copy(data_path('state.config_01.ini'), os.path.join(config_d, '11_state.ini')) shutil.copy(data_path('state.config_02.ini'), os.path.join(config_d, '12_state.ini')) config.reload() self._setup_server_keyrings() state = State() # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests.
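# Aside (illustrative): the winner_filter used by TestMaximumImage caps
# the winning path at a maximum image version.  A behaviorally similar
# sketch (systemimage.candidates.version_filter is the real
# implementation; this one is only an assumption distilled from the
# assertions above):
def cap_winner(maximum):
    def winner_filter(winner):
        # Keep only the images at or below the requested version.
        return [image for image in winner if image.version <= maximum]
    return winner_filter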
with patch('systemimage.device.check_output', return_value='manta'): state.run_until('apply') path = os.path.join(config.updater.cache_partition, 'ubuntu_command') with open(path, 'r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, """\ load_keyring image-master.tar.xz image-master.tar.xz.asc load_keyring image-signing.tar.xz image-signing.tar.xz.asc load_keyring device-signing.tar.xz device-signing.tar.xz.asc mount system update 6.txt 6.txt.asc update 7.txt 7.txt.asc update 5.txt 5.txt.asc unmount system """) with patch('systemimage.apply.Noop.apply') as mock: list(state) self.assertEqual(mock.call_count, 1) class TestChannelAlias(ServerTestBase): CHANNEL_FILE = 'state.channels_01.json' CHANNEL = 'daily' DEVICE = 'manta' INDEX_FILE = 'state.index_01.json' @configuration def test_channel_alias_switch(self, config_d): # Channels in the channel.json files can have an optional "alias" key, # which if set, describes the other channel this channel is based on # (only in a server-side generated way; the client sees all channels # as fully "stocked"). # # The [service] section can have a `channel_target` key which names the # channel alias this device has been tracking. If the channel_target # does not match the channel alias, then the client considers its # internal version number to be 0 and does a full update. # # This is used to support version downgrades when changing the alias # to point to a different series (LP: #1221844). # # Here's an example. Let's say a device has been tracking the 'daily' # channel, which is aliased to 'saucy'. Suddenly, Tubular Tapir is # released and the 'daily' channel is aliased to 'tubular'. When the # device goes to update, it sees that it was tracking the saucy alias # and now must track the tubular alias, so it needs to do a full # upgrade from build number 0 to get on the right track. # # To test this condition, we calculate the upgrade path first in the # absence of a [service]channel_target key. The device is tracking the # daily channel, so we get the latest build on that channel. self._setup_server_keyrings() touch_build(300) config.channel = 'daily' state = State() # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): state.run_thru('calculate_winner') self.assertEqual([image.version for image in state.winner], [301, 304]) # Here's what the upgrade path would be if we were using a build # number of 0 (ignoring any channel alias switching). del config.build_number touch_build(0) state = State() state.run_thru('calculate_winner') self.assertEqual([image.version for image in state.winner], [200, 201, 304]) # Set the build number back to 300 for the next test. del config.build_number touch_build(300) # Now we drop in a configuration file which sets the # [service]channel_target key. This also tells us the current build # number is 300, but through the channel_target field it tells us that # the previous daily channel alias was saucy. Now (via the # channels.json file) it's tubular, and the upgrade path starting at # build 0 is different. 
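# Aside (illustrative): the alias-switch rule just described can be
# summarized as: if the configured channel_target no longer matches the
# alias the channel currently points at, pretend the device is at build
# 0 so a full upgrade is calculated.  A sketch of that decision (the
# parameter names follow the tests' usage but are assumptions about the
# real code):
def effective_build_number(build_number, channel_target, channel_alias,
                           build_number_overridden=False):
    # An explicit --build override (see the next test) wins outright.
    if build_number_overridden:
        return build_number
    if channel_target is not None and channel_target != channel_alias:
        # Mid alias switch: force a full upgrade from scratch.
        return 0
    return build_number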
override_path = os.path.join(config_d, '02_override.ini') with open(override_path, 'w', encoding='utf-8') as fp: print('[service]\nchannel_target: saucy\n', file=fp) config.reload() # All things being equal to the first test above, except that now # we're in the middle of an alias switch. The upgrade path is exactly # the same as if we were upgrading from build 0. self.assertEqual(config.build_number, 300) state = State() state.run_thru('calculate_winner') self.assertEqual([image.version for image in state.winner], [200, 201, 304]) @configuration def test_channel_alias_switch_with_cli_option(self, config_d): # Like the above test, but in simulating the use of `system-image-cli # --build 300`, we set the build number explicitly. This prevents the # channel alias squashing of the build number to 0. self._setup_server_keyrings() # This sets the build number via the /etc/ubuntu_build file. touch_build(300) config.channel = 'daily' state = State() # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): state.run_thru('calculate_winner') self.assertEqual([image.version for image in state.winner], [301, 304]) # Now we have an override file. This also tells us the current build # number is 300, but through the channel_target field it tells us that # the previous daily channel alias was saucy. Now (via the # channels.json file) it's tubular. override_path = os.path.join(config_d, '02_override.ini') with open(override_path, 'w', encoding='utf-8') as fp: print("""\ [service] channel_target: saucy channel: daily build_number: 300 """, file=fp) config.reload() # All things being equal to the first test above, except that now # we're in the middle of an alias switch. The upgrade path is exactly # the same as if we were upgrading from build 0. del config.build_number self.assertEqual(config.build_number, 300) state = State() state.run_thru('calculate_winner') self.assertEqual([image.version for image in state.winner], [200, 201, 304]) # Finally, this mimics the effect of --build 300, thus giving us back # the original upgrade path. config.build_number = 300 state = State() state.run_thru('calculate_winner') self.assertEqual([image.version for image in state.winner], [301, 304]) class TestPhasedUpdates(ServerTestBase): CHANNEL_FILE = 'state.channels_01.json' CHANNEL = 'daily' DEVICE = 'manta' INDEX_FILE = 'state.index_07.json' @configuration def test_inside_phased_updates_0(self): # With our threshold at 22, the normal upgrade to "Full B" image is ok. self._setup_server_keyrings() config.channel = 'daily' state = State() self._resources.enter_context( patch('systemimage.scores.phased_percentage', return_value=22)) # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): state.run_thru('calculate_winner') self.assertEqual(descriptions(state.winner), ['Full B', 'Delta B.1', 'Delta B.2']) @configuration def test_outside_phased_updates(self): # With our threshold at 66, the normal upgrade to "Full B" image is # discarded, and the previous Full A update is chosen instead.
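# Aside (illustrative): the phased-update tests pin
# systemimage.scores.phased_percentage to a fixed value and check which
# candidate wins.  The gating they exercise amounts to: an image pinned
# at 0% is never considered, and otherwise the device's percentage must
# not exceed the image's.  As a sketch (an assumption distilled from
# the assertions in these tests, not the actual scoring code):
def image_is_eligible(image_percentage, device_percentage):
    if image_percentage == 0:
        # A 0% image has been pulled; no device may take it.
        return False
    return device_percentage <= image_percentage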
self._setup_server_keyrings() config.channel = 'daily' state = State() self._resources.enter_context( patch('systemimage.scores.phased_percentage', return_value=66)) # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): state.run_thru('calculate_winner') self.assertEqual(descriptions(state.winner), ['Full A', 'Delta A.1', 'Delta A.2']) @configuration def test_equal_phased_updates_0(self): # With our threshold at 50, i.e. exactly equal to the image's # percentage, the normal upgrade to "Full B" image is ok. self._setup_server_keyrings() config.channel = 'daily' state = State() self._resources.enter_context( patch('systemimage.scores.phased_percentage', return_value=50)) # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): state.run_thru('calculate_winner') self.assertEqual(descriptions(state.winner), ['Full B', 'Delta B.1', 'Delta B.2']) @configuration def test_phased_updates_0(self): # With our threshold at 0, all images are good, so it's a "normal" # update path. self._setup_server_keyrings() config.channel = 'daily' state = State() self._resources.enter_context( patch('systemimage.scores.phased_percentage', return_value=0)) # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): state.run_thru('calculate_winner') self.assertEqual(descriptions(state.winner), ['Full B', 'Delta B.1', 'Delta B.2']) @configuration def test_phased_updates_100(self): # With our threshold at 100, the "Full B" image is discarded and the # backup "Full A" image is chosen. self._setup_server_keyrings() config.channel = 'daily' state = State() self._resources.enter_context( patch('systemimage.scores.phased_percentage', return_value=100)) # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): state.run_thru('calculate_winner') self.assertEqual(descriptions(state.winner), ['Full A', 'Delta A.1', 'Delta A.2']) class TestPhasedUpdatesPulled(ServerTestBase): CHANNEL_FILE = 'state.channels_01.json' CHANNEL = 'daily' DEVICE = 'manta' INDEX_FILE = 'state.index_02.json' @configuration def test_pulled_update(self): # Regardless of the device's phase percentage, when the image has a # percentage of 0, it will never be considered. In this case Full B # has a phased percentage of 0, so the fallback Full A is chosen. self._setup_server_keyrings() config.channel = 'daily' state = State() self._resources.enter_context( patch('systemimage.scores.phased_percentage', return_value=0)) # Do not use self._resources to manage the check_output mock.
Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): state.run_thru('calculate_winner') self.assertEqual(descriptions(state.winner), ['Full A', 'Delta A.1', 'Delta A.2']) @configuration def test_pulled_update_insanely_negative_randint(self): # Regardless of the device's phase percentage, when the image has a # percentage of 0, it will never be considered. In this case Full B # has a phased percentage of 0, so the fallback Full A is chosen. self._setup_server_keyrings() config.channel = 'daily' state = State() self._resources.enter_context( patch('systemimage.scores.phased_percentage', return_value=-100)) # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): state.run_thru('calculate_winner') self.assertEqual(descriptions(state.winner), ['Full A', 'Delta A.1', 'Delta A.2']) @configuration def test_pulled_update_insanely_positive_randint(self): # Regardless of the device's phase percentage, when the image has a # percentage of 0, it will never be considered. self._setup_server_keyrings() config.channel = 'daily' state = State() self._resources.enter_context( patch('systemimage.scores.phased_percentage', return_value=1000)) # Do not use self._resources to manage the check_output mock. Because # of the nesting order of the @configuration decorator and the base # class's tearDown(), using self._resources causes the mocks to be # unwound in the wrong order, affecting future tests. with patch('systemimage.device.check_output', return_value='manta'): state.run_thru('calculate_winner') self.assertEqual(len(state.winner), 0) class TestCachedFiles(ServerTestBase): CHANNEL_FILE = 'state.channels_03.json' CHANNEL = 'stable' DEVICE = 'nexus7' INDEX_FILE = 'state.index_03.json' SIGNING_KEY = 'image-signing.gpg' @configuration def test_all_files_are_cached(self): # All files in an upgrade are already downloaded, so all that's # necessary is to verify them but not re-download them. self._setup_server_keyrings() touch_build(0) # Run the state machine far enough to calculate the winning path. state = State() state.run_thru('calculate_winner') self.assertIsNotNone(state.winner) # Let's install all the data files into their final location. The # signature files must be included. for path in ('3/4/5.txt', '4/5/6.txt', '5/6/7.txt'): data_file = os.path.join(self._serverdir, path) shutil.copy(data_file, config.updater.cache_partition) shutil.copy(data_file + '.asc', config.updater.cache_partition) def get_files(downloads, *args, **kws): if len(downloads) != 0: raise AssertionError('get_files() was called with downloads') state.downloader.get_files = get_files state.run_thru('download_files') # Yet all the data files should still be available. self.assertEqual(set(os.listdir(config.updater.cache_partition)), set(('5.txt', '6.txt', '7.txt', '5.txt.asc', '6.txt.asc', '7.txt.asc'))) @configuration def test_some_files_are_cached(self): # Some of the files in an upgrade are already downloaded, so only # download the ones that are missing. 
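# Aside (illustrative): the cached-file tests below assert that
# get_files() is only asked for records whose destination is not
# already present (and valid) in the cache partition.  The "skip what
# is already cached" core is roughly as follows (a sketch; validation
# of cached copies is omitted, and the record objects only need a
# .destination attribute, as in the tests):
import os

def still_needed(records):
    return [record for record in records
            if not os.path.exists(record.destination)]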
self._setup_server_keyrings() touch_build(0) # Run the state machine far enough to calculate the winning path. state = State() state.run_thru('calculate_winner') self.assertIsNotNone(state.winner) # Let's install all the data files into their final location. The # signature files must be included. for path in ('3/4/5.txt', '4/5/6.txt'): data_file = os.path.join(self._serverdir, path) shutil.copy(data_file, config.updater.cache_partition) shutil.copy(data_file + '.asc', config.updater.cache_partition) old_get_files = state.downloader.get_files def get_files(downloads, *args, **kws): if len(downloads) != 2: raise AssertionError('Unexpected get_files() call') for record in downloads: dst = os.path.basename(record.destination) if os.path.basename(record.url) != dst: raise AssertionError('Mismatched downloads') if dst not in ('7.txt', '7.txt.asc'): raise AssertionError('Unexpected download') return old_get_files(downloads, *args, **kws) state.downloader.get_files = get_files state.run_thru('download_files') # Yet all the data files should still be available. self.assertEqual(set(os.listdir(config.updater.cache_partition)), set(('5.txt', '6.txt', '7.txt', '5.txt.asc', '6.txt.asc', '7.txt.asc'))) @configuration def test_some_signature_files_are_missing(self): # Some of the signature files are missing, so we have to download both # the data and signature files. self._setup_server_keyrings() touch_build(0) # Run the state machine far enough to calculate the winning path. state = State() state.run_thru('calculate_winner') self.assertIsNotNone(state.winner) # Let's install all the data files into their final location. The # signature files must be included. for path in ('3/4/5.txt', '4/5/6.txt', '5/6/7.txt'): data_file = os.path.join(self._serverdir, path) shutil.copy(data_file, config.updater.cache_partition) if os.path.basename(path) != '6.txt': shutil.copy(data_file + '.asc', config.updater.cache_partition) old_get_files = state.downloader.get_files def get_files(downloads, *args, **kws): if len(downloads) != 2: raise AssertionError('Unexpected get_files() call') for record in downloads: dst = os.path.basename(record.destination) if os.path.basename(record.url) != dst: raise AssertionError('Mismatched downloads') if dst not in ('6.txt', '6.txt.asc'): raise AssertionError('Unexpected download') return old_get_files(downloads, *args, **kws) state.downloader.get_files = get_files state.run_thru('download_files') # Yet all the data files should still be available. self.assertEqual(set(os.listdir(config.updater.cache_partition)), set(('5.txt', '6.txt', '7.txt', '5.txt.asc', '6.txt.asc', '7.txt.asc'))) @configuration def test_some_data_files_are_missing(self): # Some of the data files are missing, so we have to download both the # data and signature files. self._setup_server_keyrings() touch_build(0) # Run the state machine far enough to calculate the winning path. state = State() state.run_thru('calculate_winner') self.assertIsNotNone(state.winner) # Let's install all the data files into their final location. The # signature files must be included. 
for path in ('3/4/5.txt', '4/5/6.txt', '5/6/7.txt'): data_file = os.path.join(self._serverdir, path) if os.path.basename(path) != '5.txt': shutil.copy(data_file, config.updater.cache_partition) shutil.copy(data_file + '.asc', config.updater.cache_partition) old_get_files = state.downloader.get_files def get_files(downloads, *args, **kws): if len(downloads) != 2: raise AssertionError('Unexpected get_files() call') for record in downloads: dst = os.path.basename(record.destination) if os.path.basename(record.url) != dst: raise AssertionError('Mismatched downloads') if dst not in ('5.txt', '5.txt.asc'): raise AssertionError('Unexpected download') return old_get_files(downloads, *args, **kws) state.downloader.get_files = get_files state.run_thru('download_files') # Yet all the data files should still be available. self.assertEqual(set(os.listdir(config.updater.cache_partition)), set(('5.txt', '6.txt', '7.txt', '5.txt.asc', '6.txt.asc', '7.txt.asc'))) @configuration def test_cached_signatures_are_blacklisted(self): # All files in an upgrade are already downloaded, but the key used to # sign the files has been blacklisted, so everything has to be # downloaded again. self._setup_server_keyrings() touch_build(0) # Run the state machine far enough to calculate the winning path. state = State() state.run_thru('calculate_winner') self.assertIsNotNone(state.winner) # Let's install all the data files into their final location. The # signature files must be included. for path in ('3/4/5.txt', '4/5/6.txt', '5/6/7.txt'): data_file = os.path.join(self._serverdir, path) shutil.copy(data_file, config.updater.cache_partition) # Sign the file with what will be the blacklist. dst = os.path.join(config.updater.cache_partition, os.path.basename(data_file)) sign(dst, 'spare.gpg') # Set up the blacklist file. setup_keyring_txz( 'spare.gpg', 'image-master.gpg', dict(type='blacklist'), os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz')) # All the files will be downloaded. requested_downloads = set() old_get_files = state.downloader.get_files def get_files(downloads, *args, **kws): for record in downloads: requested_downloads.add(os.path.basename(record.destination)) return old_get_files(downloads, *args, **kws) state.downloader.get_files = get_files state.run_thru('download_files') # All the files were re-downloaded. self.assertEqual(requested_downloads, set(('5.txt', '5.txt.asc', '6.txt', '6.txt.asc', '7.txt', '7.txt.asc'))) # All the data files should still be available. self.assertEqual(set(os.listdir(config.updater.cache_partition)), set(('5.txt', '6.txt', '7.txt', '5.txt.asc', '6.txt.asc', '7.txt.asc'))) @configuration def test_cached_files_all_have_bad_signatures(self): # All the data files are cached, but the signatures don't match. self._setup_server_keyrings() touch_build(0) # Run the state machine far enough to calculate the winning path. state = State() state.run_thru('calculate_winner') self.assertIsNotNone(state.winner) # Let's install all the data files into their final location. The # signature files must be included. for path in ('3/4/5.txt', '4/5/6.txt', '5/6/7.txt'): data_file = os.path.join(self._serverdir, path) shutil.copy(data_file, config.updater.cache_partition) # Sign the file with a bogus key. dst = os.path.join(config.updater.cache_partition, os.path.basename(data_file)) sign(dst, 'spare.gpg') # All the files will be downloaded. 
requested_downloads = set() old_get_files = state.downloader.get_files def get_files(downloads, *args, **kws): for record in downloads: requested_downloads.add(os.path.basename(record.destination)) return old_get_files(downloads, *args, **kws) state.downloader.get_files = get_files state.run_thru('download_files') # All the files were re-downloaded. self.assertEqual(requested_downloads, set(('5.txt', '5.txt.asc', '6.txt', '6.txt.asc', '7.txt', '7.txt.asc'))) # All the data files should still be available. self.assertEqual(set(os.listdir(config.updater.cache_partition)), set(('5.txt', '6.txt', '7.txt', '5.txt.asc', '6.txt.asc', '7.txt.asc'))) @configuration def test_cached_files_all_have_bad_hashes(self): # All the data files are cached, and the signatures match, but the # data file hashes are bogus, so they all get downloaded again. self._setup_server_keyrings() touch_build(0) # Run the state machine far enough to calculate the winning path. state = State() state.run_thru('calculate_winner') self.assertIsNotNone(state.winner) # Let's install all the data files into their final location. The # signature files must be included. for filename in ('5.txt', '6.txt', '7.txt'): data_file = os.path.join(config.updater.cache_partition, filename) with open(data_file, 'wb') as fp: fp.write(b'xxx') # Sign the file with the right key. dst = os.path.join(config.updater.cache_partition, os.path.basename(data_file)) sign(dst, 'image-signing.gpg') # All the files will be downloaded. requested_downloads = set() old_get_files = state.downloader.get_files def get_files(downloads, *args, **kws): for record in downloads: requested_downloads.add(os.path.basename(record.destination)) return old_get_files(downloads, *args, **kws) state.downloader.get_files = get_files state.run_thru('download_files') # All the files were re-downloaded. self.assertEqual(requested_downloads, set(('5.txt', '5.txt.asc', '6.txt', '6.txt.asc', '7.txt', '7.txt.asc'))) # All the data files should still be available. self.assertEqual(set(os.listdir(config.updater.cache_partition)), set(('5.txt', '6.txt', '7.txt', '5.txt.asc', '6.txt.asc', '7.txt.asc'))) @configuration def test_previously_cached_files(self): # In this test, we model what happens through the D-Bus API and u/i # when a user initiates an upgrade, everything gets downloaded, but # they fail to apply and reboot. Then the D-Bus process times out and # exits. Then the user clicks on Apply and a *new* D-Bus process gets # activated with a new state machine. # # Previously, we'd basically throw everything away and re-download # all the files again, and re-calculate the upgrade, but LP: #1217098 # asks us to do a more bandwidth-efficient job of avoiding a # re-download of the cached files, assuming all the signatures match # and what not. # # This is harder than it sounds: while the state machine can avoid # re-downloading data files (note that metadata files like # channels.json, index.json, and the blacklist are *always* # re-downloaded), a new state machine must try to figure out what the # state of the previous invocation was. # # What the state machine now does first is look for an # `ubuntu_command` file in the cache partition. If that file exists, # it indicates that a previous invocation may have existing state that # can be preserved for better efficiency. We'll make those checks and # if it looks okay, we'll short-circuit through the state machine. # Otherwise we clean those files out and start from scratch.
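# Aside (illustrative): a compressed sketch of the short-circuit
# decision just described.  Presence of ubuntu_command in the cache
# partition is the hint that a previous invocation left reusable state
# behind; here we only re-check that every file the command script
# would install is still present (signature validation is omitted, and
# this is not the project's actual logic):
import os

def previous_state_reusable(cache_partition):
    command_file = os.path.join(cache_partition, 'ubuntu_command')
    if not os.path.exists(command_file):
        return False
    with open(command_file, encoding='utf-8') as fp:
        for line in fp:
            parts = line.split()
            if parts and parts[0] == 'update':
                # 'update <data> <data>.asc' lines name the payloads.
                for name in parts[1:]:
                    if not os.path.exists(
                            os.path.join(cache_partition, name)):
                        return False
    return True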
self._setup_server_keyrings() state = State() state.run_until('apply') self.assertTrue(os.path.exists( os.path.join(config.updater.cache_partition, 'ubuntu_command'))) # Now, to prove that the data files are not re-downloaded with a new # state machine, we do two things: we remove the files from the server # and we collect the current mtimes (in nanoseconds) of the files in # the cache partition. for path in ('3/4/5', '4/5/6', '5/6/7'): os.remove(os.path.join(self._serverdir, path) + '.txt') os.remove(os.path.join(self._serverdir, path) + '.txt.asc') mtimes = {} for filename in os.listdir(config.updater.cache_partition): if filename.endswith('.txt') or filename.endswith('.txt.asc'): path = os.path.join(config.updater.cache_partition, filename) mtimes[filename] = os.stat(path).st_mtime_ns self.assertGreater(len(mtimes), 0) # Now create a new state machine, and run until the update gets applied # again. Even though there are no data files on the server, this still # completes successfully. state = State() state.run_until('apply') # Check all the mtimes. for filename in os.listdir(config.updater.cache_partition): if filename.endswith('.txt') or filename.endswith('.txt.asc'): path = os.path.join(config.updater.cache_partition, filename) self.assertEqual(mtimes[filename], os.stat(path).st_mtime_ns) @configuration def test_cleanup_in_download(self): # Any residual cache partition files which aren't used in the current # update, or which don't validate will be removed before the new files # are downloaded. Except for 'log' and 'last_log'. self._setup_server_keyrings() touch_build(0) # Run the state machine once through downloading the files so we have # a bunch of valid cached files. State().run_thru('download_files') # Now run a new state machine up to just before the step that cleans # up the cache partition. state = State() state.run_until('download_files') # Put some files in the cache partition, including the two log files # which will be preserved, some dummy files which will be deleted, and # a normally preserved cache file which gets invalidated. wopen = partial(open, mode='w', encoding='utf-8') cache_dir = config.updater.cache_partition with wopen(os.path.join(cache_dir, 'log')) as fp: print('logger keeper', file=fp) with wopen(os.path.join(cache_dir, 'last_log')) as fp: print('logger keeper', file=fp) with wopen(os.path.join(cache_dir, 'xxx.txt')) as fp: print('xxx', file=fp) with wopen(os.path.join(cache_dir, 'yyy.txt')) as fp: print('yyy', file=fp) with open(os.path.join(cache_dir, 'xxx.txt.asc'), 'wb') as fp: fp.write(b'xxx') with open(os.path.join(cache_dir, 'yyy.txt.asc'), 'wb') as fp: fp.write(b'yyy') # By filling the asc file with bogus data, we invalidate the data # file. txt_path = os.path.join(cache_dir, '6.txt') asc_path = os.path.join(cache_dir, '6.txt.asc') with open(asc_path, 'wb') as fp: fp.write(b'zzz') # Take the checksum of the 6.txt.asc file so we know it has been # replaced. Get the mtime of the 6.txt file for the same reason (the # checksum will still match because the content is the same). 
with open(asc_path, 'rb') as fp: checksum = hashlib.md5(fp.read()).digest() mtime = os.stat(txt_path).st_mtime_ns state.run_until('apply') with open(asc_path, 'rb') as fp: self.assertNotEqual(checksum, hashlib.md5(fp.read()).digest()) self.assertNotEqual(mtime, os.stat(txt_path).st_mtime_ns) class TestKeyringDoubleChecks(ServerTestBase): CHANNEL_FILE = 'state.channels_03.json' CHANNEL = 'stable' DEVICE = 'nexus7' INDEX_FILE = 'state.index_03.json' SIGNING_KEY = 'image-signing.gpg' @configuration def test_image_master_asc_is_corrupted(self): # The state machine will use an existing image master key, unless it # is found to be corrupted (i.e. its signature is broken). If that's # the case, it will re-download a new image master. setup_keyrings() # Re-sign the image master with the wrong key, so as to corrupt its # signature via bogus .asc file. path = config.gpg.image_master sign(path, 'spare.gpg') # Prove that the signature is bad. with Context(config.gpg.archive_master) as ctx: self.assertFalse(ctx.verify(path + '.asc', path)) # Grab the checksum of the .asc file to prove that it's been # downloaded anew. with open(path + '.asc', 'rb') as fp: checksum = hashlib.md5(fp.read()).digest() # Run the state machine long enough to get the new image master. self._setup_server_keyrings() State().run_thru('get_blacklist_1') # Prove that the signature is good now. with Context(config.gpg.archive_master) as ctx: self.assertTrue(ctx.verify(path + '.asc', path)) # We have a new .asc file. with open(path + '.asc', 'rb') as fp: self.assertNotEqual(checksum, hashlib.md5(fp.read()).digest()) @configuration def test_image_master_tarxz_is_corrupted(self): # As above, except the .tar.xz file is corrupted instead. setup_keyrings() # Replace the image master's tar.xz with a different file, so that # its existing .asc signature no longer matches. path = config.gpg.image_master shutil.copy(config.gpg.archive_master, path) # Prove that the signature is bad. with Context(config.gpg.archive_master) as ctx: self.assertFalse(ctx.verify(path + '.asc', path)) # Grab the checksum of the .tar.xz file to prove that it's been # downloaded anew. with open(path, 'rb') as fp: checksum = hashlib.md5(fp.read()).digest() # Run the state machine long enough to get the new image master. self._setup_server_keyrings() State().run_thru('get_blacklist_1') # Prove that the signature is good now. with Context(config.gpg.archive_master) as ctx: self.assertTrue(ctx.verify(path + '.asc', path)) # We have a new tar.xz file. with open(path, 'rb') as fp: self.assertNotEqual(checksum, hashlib.md5(fp.read()).digest()) @configuration def test_image_signing_asc_is_corrupted(self): # The state machine will use an existing image signing key, unless it # is found to be corrupted (i.e. its signature is broken). If that's # the case, it will re-download a new image signing key. setup_keyrings() # Re-sign the image signing key with the wrong key, so as to corrupt # its signature via bogus .asc file. path = config.gpg.image_signing sign(path, 'spare.gpg') # Prove that the signature is bad. with Context(config.gpg.image_master) as ctx: self.assertFalse(ctx.verify(path + '.asc', path)) # Grab the checksum of the .asc file to prove that it's been # downloaded anew. with open(path + '.asc', 'rb') as fp: checksum = hashlib.md5(fp.read()).digest() # Run the state machine long enough to get the new image signing key. self._setup_server_keyrings() State().run_thru('get_channel') # Prove that the signature is good now.
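# Aside: Context (systemimage/gpg.py) wraps the python-gnupg package
# listed in requires.txt. A standalone detached-signature check with
# python-gnupg might look roughly like this sketch; verify_detached() is
# a hypothetical helper, not part of this module.
import tempfile
import gnupg

def verify_detached(keyring_path, data_path):
    with tempfile.TemporaryDirectory() as home:
        gpg = gnupg.GPG(gnupghome=home, keyring=keyring_path)
        with open(data_path + '.asc', 'rb') as sig_fp:
            # verify_file() reads the detached signature from the open
            # stream and checks it against the named data file.
            return bool(gpg.verify_file(sig_fp, data_path))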
with Context(config.gpg.image_master) as ctx: self.assertTrue(ctx.verify(path + '.asc', path)) # We have a new .asc file. with open(path + '.asc', 'rb') as fp: self.assertNotEqual(checksum, hashlib.md5(fp.read()).digest()) @configuration def test_image_signing_tarxz_is_corrupted(self): # As above, except the .tar.xz file is corrupted instead. setup_keyrings() # Replace the image signing key's tar.xz with a different file, so # that its existing .asc signature no longer matches. path = config.gpg.image_signing shutil.copy(config.gpg.archive_master, path) # Prove that the signature is bad. with Context(config.gpg.image_master) as ctx: self.assertFalse(ctx.verify(path + '.asc', path)) # Grab the checksum of the .tar.xz file to prove that it's been # downloaded anew. with open(path, 'rb') as fp: checksum = hashlib.md5(fp.read()).digest() # Run the state machine long enough to get the new image signing key. self._setup_server_keyrings() State().run_thru('get_channel') # Prove that the signature is good now. with Context(config.gpg.image_master) as ctx: self.assertTrue(ctx.verify(path + '.asc', path)) # We have a new tar.xz file. with open(path, 'rb') as fp: self.assertNotEqual(checksum, hashlib.md5(fp.read()).digest()) @configuration def test_image_master_is_expired(self): # Like above, but the keyring.json has an 'expiry' value that # indicates the key has expired. expiry = datetime.utcnow() - timedelta(days=10) setup_keyrings('image-master', expiry=expiry.timestamp()) setup_keyrings('archive-master', 'image-signing', 'device-signing') # When the state machine re-downloads the image-master, it will change # the timestamps on both it and the .asc files. Grab the mtimes of # both now to verify that they've changed later. txz_path = config.gpg.image_master asc_path = txz_path + '.asc' txz_mtime = os.stat(txz_path).st_mtime_ns asc_mtime = os.stat(asc_path).st_mtime_ns # Additionally, the checksum of the tar.xz file will change because # the new one won't have the expiry key in its .json file. with open(txz_path, 'rb') as fp: checksum = hashlib.md5(fp.read()).digest() # Run the state machine long enough to get the new image master. self._setup_server_keyrings() State().run_thru('get_blacklist_1') # We have a new tar.xz file. with open(txz_path, 'rb') as fp: self.assertNotEqual(checksum, hashlib.md5(fp.read()).digest()) self.assertGreater(os.stat(txz_path).st_mtime_ns, txz_mtime) self.assertGreater(os.stat(asc_path).st_mtime_ns, asc_mtime) @configuration def test_image_signing_is_expired(self): # Like above, but the keyring.json has an 'expiry' value that # indicates the key has expired. expiry = datetime.utcnow() - timedelta(days=10) setup_keyrings('image-signing', expiry=expiry.timestamp()) setup_keyrings('archive-master', 'image-master', 'device-signing') # When the state machine re-downloads the image-signing key, it will # change the timestamps on both it and the .asc files. Grab the # mtimes of both now to verify that they've changed later. txz_path = config.gpg.image_signing asc_path = txz_path + '.asc' txz_mtime = os.stat(txz_path).st_mtime_ns asc_mtime = os.stat(asc_path).st_mtime_ns # Additionally, the checksum of the tar.xz file will change because # the new one won't have the expiry key in its .json file. with open(txz_path, 'rb') as fp: checksum = hashlib.md5(fp.read()).digest() # Run the state machine long enough to get the new image signing key. self._setup_server_keyrings() State().run_thru('get_channel') # We have a new tar.xz file.
with open(txz_path, 'rb') as fp: self.assertNotEqual(checksum, hashlib.md5(fp.read()).digest()) self.assertGreater(os.stat(txz_path).st_mtime_ns, txz_mtime) self.assertGreater(os.stat(asc_path).st_mtime_ns, asc_mtime) class TestStateDuplicateDestinations(ServerTestBase): """An index.json with duplicate destination files is broken.""" INDEX_FILE = 'state.index_08.json' CHANNEL_FILE = 'state.channels_02.json' CHANNEL = 'stable' DEVICE = 'nexus7' @configuration def test_duplicate_destinations(self): # state.index_08.json has the bug we saw in the wild in LP: #1250181. # There, the server erroneously included a data file twice in two # different images. This can't happen and indicates a server # problem. The client must refuse to upgrade in this case, by raising # an exception. self._setup_server_keyrings() state = State() state.run_until('download_files') with self.assertRaises(DuplicateDestinationError) as cm: next(state) self.assertEqual(len(cm.exception.duplicates), 2) dst, dupes = cm.exception.duplicates[0] self.assertEqual(os.path.basename(dst), '5.txt') self.assertEqual([r[0] for r in dupes], ['http://localhost:8980/3/4/5.txt', 'http://localhost:8980/5/6/5.txt', ]) dst, dupes = cm.exception.duplicates[1] self.assertEqual(os.path.basename(dst), '5.txt.asc') self.assertEqual([r[0] for r in dupes], ['http://localhost:8980/3/4/5.txt.asc', 'http://localhost:8980/5/6/5.txt.asc', ]) class TestMiscellaneous(ServerTestBase): """Test a few additional things for full code coverage.""" INDEX_FILE = 'state.index_03.json' CHANNEL_FILE = 'state.channels_02.json' CHANNEL = 'stable' DEVICE = 'nexus7' @configuration def test_checksum_error(self): # _download_files() verifies the checksums of all the downloaded # files. If any of them fail, you get an exception. self._setup_server_keyrings() state = State() state.run_until('download_files') # It's tricky to cause a checksum error. We can't corrupt the local # downloaded copy of the data file because _download_files() doesn't # give us a good hook into the post-download, pre-checksum logic. We # can't corrupt the server file because the lower-level downloading # logic will complain. Instead, we mock the calculate_signature() # function to produce a broken checksum for one of the files. real_signature = None def broken_calc(fp, hash_class=None): nonlocal real_signature signature = calculate_signature(fp, hash_class) if os.path.basename(fp.name) == '6.txt': real_signature = signature return BAD_SIGNATURE return signature with patch('systemimage.state.calculate_signature', broken_calc): with self.assertRaises(ChecksumError) as cm: state.run_thru('download_files') self.assertEqual(os.path.basename(cm.exception.destination), '6.txt') self.assertEqual(cm.exception.got, BAD_SIGNATURE) self.assertIsNotNone(real_signature) self.assertEqual(cm.exception.expected, real_signature) @configuration def test_get_blacklist_2_finds_no_blacklist(self): # Getting the blacklist can fail even the second time. That's fine, # but output gets logged. self._setup_server_keyrings() state = State() # we want get_blacklist_1 to fail with a SignatureError so that it # will try to get the master key and then attempt a refetch of the # blacklist. Let's just corrupt the original blacklist file. blacklist = os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz') with open(blacklist, 'ba+') as fp: fp.write(b'x') state.run_until('get_blacklist_2') # Now we delete the blacklist file from the server, so as to trigger # the expected log message. 
os.remove(blacklist) with patch('systemimage.state.log.info') as capture: state.run_thru('get_blacklist_2') self.assertEqual(capture.call_args, call('No blacklist found on second attempt')) # Even though there's no blacklist file, everything still gets # downloaded correctly. state.run_until('apply') path = os.path.join(config.updater.cache_partition, 'ubuntu_command') with open(path, 'r', encoding='utf-8') as fp: command = fp.read() self.assertMultiLineEqual(command, """\ load_keyring image-master.tar.xz image-master.tar.xz.asc load_keyring image-signing.tar.xz image-signing.tar.xz.asc load_keyring device-signing.tar.xz device-signing.tar.xz.asc format system mount system update 6.txt 6.txt.asc update 7.txt 7.txt.asc update 5.txt 5.txt.asc unmount system """) ./systemimage/tests/test_channel.py0000644000015600001650000003066512701500553017617 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Test the node classes.""" __all__ = [ 'TestChannels', 'TestLoadChannel', 'TestLoadChannelOverHTTPS', 'TestChannelsNewFormat', ] import os import shutil import hashlib import unittest from contextlib import ExitStack from operator import getitem from systemimage.config import Configuration from systemimage.gpg import SignatureError from systemimage.helpers import temporary_directory from systemimage.state import State from systemimage.testing.helpers import ( configuration, copy, get_channels, make_http_server, setup_keyring_txz, setup_keyrings, sign) from systemimage.testing.nose import SystemImagePlugin class TestChannels(unittest.TestCase): def test_channels(self): # Test that parsing a simple top level channels.json file produces the # expected set of channels. The Nexus 7 daily images have a device # specific keyring. channels = get_channels('channel.channels_01.json') self.assertEqual(channels.daily.devices.nexus7.index, '/daily/nexus7/index.json') self.assertEqual(channels.daily.devices.nexus7.keyring.path, '/daily/nexus7/device-keyring.tar.xz') self.assertEqual(channels.daily.devices.nexus7.keyring.signature, '/daily/nexus7/device-keyring.tar.xz.asc') self.assertEqual(channels.daily.devices.nexus4.index, '/daily/nexus4/index.json') self.assertIsNone( getattr(channels.daily.devices.nexus4, 'keyring', None)) self.assertEqual(channels.stable.devices.nexus7.index, '/stable/nexus7/index.json') def test_getattr_failure(self): # Test the getattr syntax on an unknown channel or device combination. channels = get_channels('channel.channels_01.json') self.assertRaises(AttributeError, getattr, channels, 'bleeding') self.assertRaises(AttributeError, getattr, channels.stable, 'nexus3') def test_daily_proposed(self): # The channel name has a dash in it. channels = get_channels('channel.channels_02.json') self.assertEqual(channels['daily-proposed'].devices.grouper.index, '/daily-proposed/grouper/index.json') def test_bad_getitem(self): # Trying to get a channel via getitem which doesn't exist.
channels = get_channels('channel.channels_02.json') self.assertRaises(KeyError, getitem, channels, 'daily-testing') def test_channel_version(self): # The channel name has a dot in it. channels = get_channels('channel.channels_03.json') self.assertEqual(channels['13.10'].devices.grouper.index, '/13.10/grouper/index.json') def test_channel_version_proposed(self): # The channel name has both a dot and a dash in it. channels = get_channels('channel.channels_03.json') self.assertEqual(channels['14.04-proposed'].devices.grouper.index, '/14.04-proposed/grouper/index.json') class TestLoadChannel(unittest.TestCase): """Test downloading and caching the channels.json file.""" @classmethod def setUpClass(cls): SystemImagePlugin.controller.set_mode(cert_pem='cert.pem') def setUp(self): self._stack = ExitStack() self._state = State() try: self._serverdir = self._stack.enter_context(temporary_directory()) self._stack.push(make_http_server( self._serverdir, 8943, 'cert.pem', 'key.pem')) copy('channel.channels_01.json', self._serverdir, 'channels.json') self._channels_path = os.path.join( self._serverdir, 'channels.json') except: self._stack.close() raise def tearDown(self): self._stack.close() @configuration def test_load_channel_good_path(self): # A channels.json file signed by the image signing key, no blacklist. sign(self._channels_path, 'image-signing.gpg') setup_keyrings() self._state.run_thru('get_channel') channels = self._state.channels self.assertEqual(channels.daily.devices.nexus7.keyring.signature, '/daily/nexus7/device-keyring.tar.xz.asc') @configuration def test_load_channel_bad_signature(self): # We get an error if the signature on the channels.json file is bad. sign(self._channels_path, 'spare.gpg') setup_keyrings() self._state.run_thru('get_channel') # At this point, the state machine has determined that the # channels.json file is not signed with the cached image signing key, # so it will try to download a new image signing key. Let's put one # on the server, but it will not match the key that channels.json is # signed with. key_path = os.path.join(self._serverdir, 'gpg', 'image-signing.tar.xz') setup_keyring_txz('image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), key_path) # This will succeed by grabbing a new image-signing key. next(self._state) # With the next state transition, we'll go back to trying to get the # channels.json file. Since the signature will still be bad, we'll get # a SignatureError this time. self.assertRaises(SignatureError, next, self._state) @configuration def test_load_channel_bad_signature_gets_fixed(self, config_d): # Like above, but the second download of the image signing key results # in a properly signed channels.json file. sign(self._channels_path, 'spare.gpg') setup_keyrings() self._state.run_thru('get_channel') # At this point, the state machine has determined that the # channels.json file is not signed with the cached image signing key, # so it will try to download a new image signing key. This time, the # one we put on the server *does* match the key that channels.json is # signed with. self.assertIsNone(self._state.channels) setup_keyring_txz('spare.gpg', 'image-master.gpg', dict(type='image-signing'), os.path.join(self._serverdir, 'gpg', 'image-signing.tar.xz')) # This will succeed by grabbing a new image-signing key.
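# The before/after digest comparison that follows here (and recurs
# throughout these tests) is one idiom; factored out, it would be
# nothing more than this hypothetical helper:
import hashlib

def file_md5(path):
    with open(path, 'rb') as fp:
        return hashlib.md5(fp.read()).digest()
# Typical use: snapshot file_md5(path), run the state machine, then
# assert that file_md5(path) now returns a different digest.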
config = Configuration(config_d) with open(config.gpg.image_signing, 'rb') as fp: checksum = hashlib.md5(fp.read()).digest() next(self._state) with open(config.gpg.image_signing, 'rb') as fp: self.assertNotEqual(checksum, hashlib.md5(fp.read()).digest()) # The next state transition will find that the channels.json file is # properly signed. next(self._state) self.assertIsNotNone(self._state.channels) self.assertEqual( self._state.channels.daily.devices.nexus7.keyring.signature, '/daily/nexus7/device-keyring.tar.xz.asc') @configuration def test_load_channel_blacklisted_signature(self, config_d): # We get an error if the signature on the channels.json file is good # but the key is blacklisted. sign(self._channels_path, 'image-signing.gpg') setup_keyrings() setup_keyring_txz( 'image-signing.gpg', 'image-master.gpg', dict(type='blacklist'), os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz')) self._state.run_thru('get_channel') # We now have an image-signing key which is blacklisted. This will # cause the state machine to try to download a new image signing key, # so let's put the cached one up on the server. This will still be # blacklisted though. config = Configuration(config_d) key_path = os.path.join(self._serverdir, 'gpg', 'image-signing.tar.xz') shutil.copy(config.gpg.image_signing, key_path) shutil.copy(config.gpg.image_signing + '.asc', key_path + '.asc') # Run the state machine through _get_channel() again, only this time # because the key is still blacklisted, we'll get an exception. self.assertRaises(SignatureError, self._state.run_thru, 'get_channel') class TestLoadChannelOverHTTPS(unittest.TestCase): """channels.json MUST be downloaded over HTTPS. Start an HTTP server but no HTTPS server, to show that the download fails. """ @classmethod def setUpClass(cls): SystemImagePlugin.controller.set_mode(cert_pem='cert.pem') def setUp(self): self._stack = ExitStack() try: self._serverdir = self._stack.enter_context(temporary_directory()) copy('channel.channels_01.json', self._serverdir, 'channels.json') sign(os.path.join(self._serverdir, 'channels.json'), 'image-signing.gpg') except: self._stack.close() raise def tearDown(self): self._stack.close() @configuration def test_load_channel_over_https_port_with_http_fails(self): # We maliciously put an HTTP server on the HTTPS port. setup_keyrings() state = State() # Try to get the blacklist. This will fail silently since it's okay # not to find a blacklist. state.run_thru('get_blacklist_1') # This will fail to get the channels.json file. with make_http_server(self._serverdir, 8943): self.assertRaises(FileNotFoundError, next, state) class TestChannelsNewFormat(unittest.TestCase): """LP: #1221841 introduces a new format to channels.json.""" def test_channels(self): # We can parse new-style channels.json files. channels = get_channels('channel.channels_04.json') self.assertEqual(channels.daily.alias, 'saucy') self.assertEqual(channels.daily.devices.grouper.index, '/daily/grouper/index.json') self.assertEqual(channels.daily.devices.mako.index, '/daily/mako/index.json') # 'saucy' channel has no alias. self.assertRaises(AttributeError, getattr, channels.saucy, 'alias') self.assertEqual(channels.saucy.devices.mako.index, '/saucy/mako/index.json') # 'saucy-proposed' has a hidden field. self.assertTrue(channels.saucy_proposed.hidden) self.assertEqual(channels.saucy_proposed.devices.maguro.index, '/saucy-proposed/maguro/index.json') # Device specific keyrings are still supported.
self.assertEqual(channels.saucy.devices.manta.keyring.path, '/saucy/manta/device-signing.tar.xz') def test_hidden_defaults_to_false(self): # If a channel does not have a hidden field, it defaults to false. channels = get_channels('channel.channels_04.json') self.assertFalse(channels.daily.hidden) def test_getattr_failure(self): # Test the getattr syntax on an unknown channel or device combination. channels = get_channels('channel.channels_04.json') self.assertRaises(AttributeError, getattr, channels, 'bleeding') self.assertRaises( AttributeError, getattr, channels.daily.devices, 'nexus3') def test_daily_proposed(self): # The channel name has a dash in it. channels = get_channels('channel.channels_04.json') self.assertEqual(channels['saucy-proposed'].devices.grouper.index, '/saucy-proposed/grouper/index.json') def test_bad_getitem(self): # Trying to get a channel via getitem which doesn't exist. channels = get_channels('channel.channels_04.json') self.assertRaises(KeyError, getitem, channels, 'daily-testing') ./systemimage/tests/test_download.py0000644000015600001650000007206112701500553020012 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Test asynchronous downloads.""" __all__ = [ 'TestCURL', 'TestDownload', 'TestDownloadBigFiles', 'TestDownloadManagerFactory', 'TestDuplicateDownloads', 'TestGSMDownloads', 'TestHTTPSDownloads', 'TestHTTPSDownloadsExpired', 'TestHTTPSDownloadsNasty', 'TestHTTPSDownloadsNoSelfSigned', 'TestRecord', ] import os import random import unittest from contextlib import ExitStack from dbus.exceptions import DBusException from hashlib import sha256 from systemimage.config import Configuration, config from systemimage.curl import CurlDownloadManager from systemimage.download import ( Canceled, DuplicateDestinationError, Record, get_download_manager) from systemimage.helpers import temporary_directory from systemimage.settings import Settings from systemimage.testing.controller import USING_PYCURL from systemimage.testing.helpers import ( configuration, data_path, make_http_server, reset_envar, write_bytes) from systemimage.testing.nose import SystemImagePlugin from systemimage.udm import DOWNLOADER_INTERFACE, UDMDownloadManager from unittest.mock import patch from urllib.parse import urljoin if USING_PYCURL: import pycurl def _http_pathify(downloads): return [ (urljoin(config.http_base, url), os.path.join(config.tempdir, filename) ) for url, filename in downloads] def _https_pathify(downloads): return [ (urljoin(config.https_base, url), os.path.join(config.tempdir, filename) ) for url, filename in downloads] class TestDownload(unittest.TestCase): """Base class for testing the PyCURL and udm downloaders.""" def setUp(self): super().setUp() self._resources = ExitStack() try: # Start the HTTP server running, vending files out of our test # data directory.
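# Aside: make_http_server() (from systemimage/testing/helpers.py) wraps
# quite a bit of TLS and cleanup machinery; a stripped-down sketch of the
# same idea, assuming Python 3.7+ for the directory= keyword, would be:
import threading
from functools import partial
from http.server import HTTPServer, SimpleHTTPRequestHandler

def serve_directory(directory, port):
    # Vend files out of *directory* on localhost:*port* in a background
    # thread, returning the server so the caller can shut it down.
    handler = partial(SimpleHTTPRequestHandler, directory=directory)
    server = HTTPServer(('localhost', port), handler)
    thread = threading.Thread(target=server.serve_forever, daemon=True)
    thread.start()
    return server  # Call .shutdown() when done serving.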
directory = os.path.dirname(data_path('__init__.py')) self._resources.push(make_http_server(directory, 8980)) except: self._resources.close() raise def tearDown(self): self._resources.close() super().tearDown() def _downloader(self, *args): return get_download_manager(*args) @configuration def test_good_path(self): # Download a bunch of files that exist. No callback. self._downloader().get_files(_http_pathify([ ('channel.channels_05.json', 'channels.json'), ('download.index_01.json', 'index.json'), ])) self.assertEqual( set(os.listdir(config.tempdir)), set(['channels.json', 'index.json'])) @configuration def test_empty_download(self): # Empty download set completes successfully. LP: #1245597. self._downloader().get_files([]) # No TimeoutError is raised. @configuration def test_user_agent(self): # The User-Agent request header contains the build number. version = random.randint(0, 99) config.build_number = version config.device = 'geddyboard' config.channel = 'devel-trio' # Download a magic path which the server will interpret to return us # the User-Agent header value. self._downloader().get_files(_http_pathify([ ('user-agent.txt', 'user-agent.txt'), ])) path = os.path.join(config.tempdir, 'user-agent.txt') with open(path, 'r', encoding='utf-8') as fp: user_agent = fp.read() self.assertEqual( user_agent, 'Ubuntu System Image Upgrade Client: ' 'device=geddyboard;channel=devel-trio;build={}'.format( version)) @configuration def test_download_with_callback(self): # Downloading calls the callback with some arguments. received_bytes = 0 total_bytes = 0 def callback(received, total): nonlocal received_bytes, total_bytes received_bytes = received total_bytes = total downloader = self._downloader(callback) downloader.get_files(_http_pathify([ ('channel.channels_05.json', 'channels.json'), ('download.index_01.json', 'index.json'), ])) self.assertEqual( set(os.listdir(config.tempdir)), set(['channels.json', 'index.json'])) self.assertEqual(received_bytes, 669) self.assertEqual(total_bytes, 669) @configuration def test_download_with_broken_callback(self): # If the callback raises an exception, it is logged and ignored. def callback(receive, total): raise RuntimeError exception = None def capture(message): nonlocal exception exception = message downloader = self._downloader(callback) with patch('systemimage.download.log.exception', capture): downloader.get_files(_http_pathify([ ('channel.channels_05.json', 'channels.json'), ])) # The exception got logged. self.assertEqual(exception, 'Exception in progress callback') # The file still got downloaded. self.assertEqual(os.listdir(config.tempdir), ['channels.json']) # This test helps bump the udm-based downloader test coverage to 100%. @unittest.skipIf(USING_PYCURL, 'Test is not relevant for PyCURL') @configuration def test_timeout(self): # If the reactor times out, we get an exception. We fake the timeout # by setting the attribute on the reactor, even though it successfully # completes its download without timing out. 
def finish_with_timeout(self, *args, **kws): self.timed_out = True self.quit() with patch('systemimage.udm.DownloadReactor._do_finished', finish_with_timeout): self.assertRaises( TimeoutError, self._downloader().get_files, _http_pathify([('channel.channels_05.json', 'channels.json')]) ) class TestHTTPSDownloads(unittest.TestCase): @classmethod def setUpClass(self): SystemImagePlugin.controller.set_mode(cert_pem='cert.pem') def setUp(self): self._directory = os.path.dirname(data_path('__init__.py')) @configuration def test_good_path(self): # The HTTPS server has a valid self-signed certificate, so downloading # over https succeeds. with ExitStack() as stack: stack.push(make_http_server( self._directory, 8943, 'cert.pem', 'key.pem')) get_download_manager().get_files(_https_pathify([ ('channel.channels_05.json', 'channels.json'), ])) self.assertEqual( set(os.listdir(config.tempdir)), set(['channels.json'])) class TestHTTPSDownloadsNoSelfSigned(unittest.TestCase): @classmethod def setUpClass(self): SystemImagePlugin.controller.set_mode() def setUp(self): self._directory = os.path.dirname(data_path('__init__.py')) @configuration def test_https_cert_not_in_capath(self): # The self-signed certificate fails because it's not in the system's # CApath (no known-good CA). with make_http_server(self._directory, 8943, 'cert.pem', 'key.pem'): self.assertRaises( FileNotFoundError, get_download_manager().get_files, _https_pathify([ ('channel.channels_05.json', 'channels.json'), ])) @configuration def test_http_masquerades_as_https(self): # There's an HTTP server pretending to be an HTTPS server. This # should fail to download over https URLs. with ExitStack() as stack: # By not providing an SSL context wrapped socket, this isn't # really an https server. stack.push(make_http_server(self._directory, 8943)) self.assertRaises( FileNotFoundError, get_download_manager().get_files, _https_pathify([ ('channel.channels_05.json', 'channels.json'), ])) class TestHTTPSDownloadsExpired(unittest.TestCase): @classmethod def setUpClass(self): SystemImagePlugin.controller.set_mode(cert_pem='expired_cert.pem') def setUp(self): self._directory = os.path.dirname(data_path('__init__.py')) @configuration def test_expired(self): # The HTTPS server has an expired certificate (mocked so that its CA # is in the system's trusted path). with ExitStack() as stack: stack.push(make_http_server( self._directory, 8943, 'expired_cert.pem', 'expired_key.pem')) self.assertRaises( FileNotFoundError, get_download_manager().get_files, _https_pathify([ ('channel.channels_05.json', 'channels.json'), ])) class TestHTTPSDownloadsNasty(unittest.TestCase): @classmethod def setUpClass(self): SystemImagePlugin.controller.set_mode(cert_pem='nasty_cert.pem') def setUp(self): self._directory = os.path.dirname(data_path('__init__.py')) @configuration def test_bad_host(self): # The HTTPS server has a certificate with a non-matching hostname # (mocked so that its CA is in the system's trusted path). with ExitStack() as stack: stack.push(make_http_server( self._directory, 8943, 'nasty_cert.pem', 'nasty_key.pem')) self.assertRaises( FileNotFoundError, get_download_manager().get_files, _https_pathify([ ('channel.channels_05.json', 'channels.json'), ])) # These tests don't strictly improve coverage for the udm-based downloader, # but they are still useful to keep because they test an implicit code path. # These can be removed once GSM-testing is pulled into s-i via LP: #1388886.
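# For reference, the auto_download setting exercised by the GSM tests
# below takes three string values. A hypothetical helper capturing the
# policy these tests assert (not part of systemimage itself) would be:
def gsm_allowed(auto_download):
    # '0' = manual downloads; the user initiated it, so GSM is allowed.
    # '1' = automatic downloads over wifi only; GSM is disallowed.
    # '2' = always download automatically; GSM is allowed.
    return auto_download in ('0', '2')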
@unittest.skipIf(USING_PYCURL, 'Test is not relevant for PyCURL') class TestGSMDownloads(unittest.TestCase): def setUp(self): super().setUp() # Patch this method so that we can verify both the value of the flag # that system-image sets and the value that u-d-m's group downloader # records and uses. This is the only thing we can really # automatically test given that e.g. we won't have GSM in development. self._gsm_set_flag = None self._gsm_get_flag = None self._original = None def set_gsm(iface, *, allow_gsm): self._gsm_set_flag = allow_gsm self._original(iface, allow_gsm=allow_gsm) self._gsm_get_flag = iface.isGSMDownloadAllowed() self._resources = ExitStack() try: # Start the HTTP server running, vending files out of our test # data directory. directory = os.path.dirname(data_path('__init__.py')) self._resources.push(make_http_server(directory, 8980)) # Patch the GSM setting method to capture what actually happens. self._original = getattr(UDMDownloadManager, '_set_gsm') self._resources.enter_context(patch( 'systemimage.udm.UDMDownloadManager._set_gsm', set_gsm)) self._resources.callback(setattr, self, '_original', None) except: self._resources.close() raise def tearDown(self): self._resources.close() super().tearDown() @configuration def test_manual_downloads_gsm_allowed(self, config_d): # When auto_download is 0, manual downloads are enabled so assuming # the user knows what they're doing, GSM downloads are allowed. config = Configuration(config_d) Settings(config).set('auto_download', '0') get_download_manager().get_files(_http_pathify([ ('channel.channels_05.json', 'channels.json') ])) self.assertTrue(self._gsm_set_flag) self.assertTrue(self._gsm_get_flag) @configuration def test_wifi_downloads_gsm_disallowed(self, config_d): # Obviously GSM downloads are not allowed when downloading # automatically on wifi-only. config = Configuration(config_d) Settings(config).set('auto_download', '1') get_download_manager().get_files(_http_pathify([ ('channel.channels_05.json', 'channels.json') ])) self.assertFalse(self._gsm_set_flag) self.assertFalse(self._gsm_get_flag) @configuration def test_always_downloads_gsm_allowed(self, config_d): # GSM downloads are allowed when always downloading. config = Configuration(config_d) Settings(config).set('auto_download', '2') get_download_manager().get_files(_http_pathify([ ('channel.channels_05.json', 'channels.json') ])) self.assertTrue(self._gsm_set_flag) self.assertTrue(self._gsm_get_flag) class TestDownloadBigFiles(unittest.TestCase): # This test helps bump the udm-based downloader test coverage to 100%. @unittest.skipIf(USING_PYCURL, 'Test is not relevant for PyCURL') @configuration def test_cancel(self): # Try to cancel the download of a big file. self.assertEqual(os.listdir(config.tempdir), []) with ExitStack() as stack: serverdir = stack.enter_context(temporary_directory()) stack.push(make_http_server(serverdir, 8980)) # Create a couple of big files to download. write_bytes(os.path.join(serverdir, 'bigfile_1.dat'), 10) write_bytes(os.path.join(serverdir, 'bigfile_2.dat'), 10) # The download service doesn't provide reliable cancel # granularity, so instead, we mock the 'started' signal to # immediately cancel the download. 
downloader = get_download_manager() def cancel_on_start(self, signal, path, started): if started: downloader.cancel() stack.enter_context(patch( 'systemimage.udm.DownloadReactor._do_started', cancel_on_start)) self.assertRaises( Canceled, downloader.get_files, _http_pathify([ ('bigfile_1.dat', 'bigfile_1.dat'), ('bigfile_2.dat', 'bigfile_2.dat'), ])) self.assertEqual(os.listdir(config.tempdir), []) @configuration def test_download_404(self): # Start a group download of some big files. One of the files won't # exist, so the entire group download should fail, and none of the # files should exist in the destination. self.assertEqual(os.listdir(config.tempdir), []) with ExitStack() as stack: serverdir = stack.enter_context(temporary_directory()) stack.push(make_http_server(serverdir, 8980)) # Create a couple of big files to download. write_bytes(os.path.join(serverdir, 'bigfile_1.dat'), 10) write_bytes(os.path.join(serverdir, 'bigfile_2.dat'), 10) write_bytes(os.path.join(serverdir, 'bigfile_3.dat'), 10) downloads = _http_pathify([ ('bigfile_1.dat', 'bigfile_1.dat'), ('bigfile_2.dat', 'bigfile_2.dat'), ('bigfile_3.dat', 'bigfile_3.dat'), ('missing.txt', 'missing.txt'), ]) self.assertRaises(FileNotFoundError, get_download_manager().get_files, downloads) # The temporary directory is empty. self.assertEqual(os.listdir(config.tempdir), []) class TestRecord(unittest.TestCase): def test_record(self): # A record can provide three arguments, the url, destination, and # checksum. record = Record('src', 'dst', 'hash') self.assertEqual(record.url, 'src') self.assertEqual(record.destination, 'dst') self.assertEqual(record.checksum, 'hash') def test_record_default_checksum(self): # The checksum is optional, and defaults to the empty string. record = Record('src', 'dst') self.assertEqual(record.url, 'src') self.assertEqual(record.destination, 'dst') self.assertEqual(record.checksum, '') def test_too_few_arguments(self): # At least two arguments must be given. self.assertRaises(TypeError, Record, 'src') def test_too_many_arguments(self): # No more than three arguments may be given. self.assertRaises(TypeError, Record, 'src', 'dst', 'hash', 'foo') class TestDuplicateDownloads(unittest.TestCase): maxDiff = None def setUp(self): super().setUp() self._resources = ExitStack() try: self._serverdir = self._resources.enter_context( temporary_directory()) self._resources.push(make_http_server(self._serverdir, 8980)) except: self._resources.close() raise def tearDown(self): self._resources.close() super().tearDown() @configuration def test_matched_duplicates(self): # A download that duplicates the destination location, but for which # the sources and checksums are the same is okay. content = b'x' * 100 checksum = sha256(content).hexdigest() with open(os.path.join(self._serverdir, 'source.dat'), 'wb') as fp: fp.write(content) downloader = get_download_manager() downloads = [] for url, dst in _http_pathify([('source.dat', 'local.dat'), ('source.dat', 'local.dat'), ]): downloads.append(Record(url, dst, checksum)) downloader.get_files(downloads) self.assertEqual(os.listdir(config.tempdir), ['local.dat']) @configuration def test_mismatched_urls(self): # A download that duplicates the destination location, but for which # the source urls don't match, is not allowed. 
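# A sketch of the duplicate-destination rule these tests pin down:
# records may share a destination only if they agree on every field.
# find_conflicts() is hypothetical (the real check lives in
# systemimage/download.py) and assumes Record is the hashable
# three-field tuple described by TestRecord above.
from collections import defaultdict

def find_conflicts(records):
    groups = defaultdict(set)
    for record in records:
        groups[record.destination].add(record)
    # Any destination backed by more than one distinct
    # (url, destination, checksum) combination is a conflict.
    return [(dst, sorted(group))
            for dst, group in groups.items() if len(group) > 1]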
content = b'x' * 100 checksum = sha256(content).hexdigest() with open(os.path.join(self._serverdir, 'source1.dat'), 'wb') as fp: fp.write(content) with open(os.path.join(self._serverdir, 'source2.dat'), 'wb') as fp: fp.write(content) downloader = get_download_manager() downloads = [] for url, dst in _http_pathify([('source1.dat', 'local.dat'), ('source2.dat', 'local.dat'), ]): downloads.append(Record(url, dst, checksum)) with self.assertRaises(DuplicateDestinationError) as cm: downloader.get_files(downloads) self.assertEqual(len(cm.exception.duplicates), 1) dst, dupes = cm.exception.duplicates[0] self.assertEqual(os.path.basename(dst), 'local.dat') self.assertEqual([r[0] for r in dupes], ['http://localhost:8980/source1.dat', 'http://localhost:8980/source2.dat']) self.assertEqual(os.listdir(config.tempdir), []) @configuration def test_mismatched_checksums(self): # A download that duplicates the destination location, but for which # the checksums don't match, is not allowed. content = b'x' * 100 checksum = sha256(content).hexdigest() with open(os.path.join(self._serverdir, 'source.dat'), 'wb') as fp: fp.write(content) downloader = get_download_manager() url = urljoin(config.http_base, 'source.dat') downloads = [ Record(url, 'local.dat', checksum), # Mutate the checksum so they won't match. Record(url, 'local.dat', checksum[-1] + checksum[:-1]), ] with self.assertRaises(DuplicateDestinationError) as cm: downloader.get_files(downloads) self.assertEqual(len(cm.exception.duplicates), 1) dst, dupes = cm.exception.duplicates[0] self.assertEqual(os.path.basename(dst), 'local.dat') self.assertEqual([r[0] for r in dupes], ['http://localhost:8980/source.dat', 'http://localhost:8980/source.dat']) # The records in the exception aren't sorted by checksum. self.assertEqual( sorted(r[2] for r in dupes), ['09ecb6ebc8bcefc733f6f2ec44f791abeed6a99edf0cc31519637898aebd52d8' , '809ecb6ebc8bcefc733f6f2ec44f791abeed6a99edf0cc31519637898aebd52d' ]) self.assertEqual(os.listdir(config.tempdir), []) @configuration def test_duplicate_error_message(self): # When a duplicate destination error occurs, an error message gets # logged. Make sure the error message is helpful. content = b'x' * 100 checksum = sha256(content).hexdigest() with open(os.path.join(self._serverdir, 'source.dat'), 'wb') as fp: fp.write(content) downloader = get_download_manager() url = urljoin(config.http_base, 'source.dat') downloads = [ Record(url, 'local.dat', checksum), # Mutate the checksum so they won't match. Record(url, 'local.dat', checksum[-1] + checksum[:-1]), ] with self.assertRaises(DuplicateDestinationError) as cm: downloader.get_files(downloads) self.assertMultiLineEqual(str(cm.exception), """ [ ( 'local.dat', [ ( 'http://localhost:8980/source.dat', 'local.dat', '09ecb6ebc8bcefc733f6f2ec44f791abeed6a99edf0cc31519637898aebd52d8'), ( 'http://localhost:8980/source.dat', 'local.dat', '809ecb6ebc8bcefc733f6f2ec44f791abeed6a99edf0cc31519637898aebd52d')])]""") # This class only bumps coverage to 100% for the cURL-based downloader, so it # can be skipped when the test suite runs under u-d-m. Checking the # environment variable wouldn't be enough for production (see download.py # get_download_manager() for other cases where the downloader is chosen), but # it's sufficient for the test suite. See tox.ini. 
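# The selection rules that the factory tests below rely on, paraphrased
# as a sketch. choose_downloader() is hypothetical; the real logic is
# systemimage/download.py's get_download_manager(), which also covers
# cases beyond the environment variable.
import os

def choose_downloader(udm_available, pycurl_importable):
    forced = os.environ.get('SYSTEMIMAGE_PYCURL', '').lower()
    if forced in ('1', 'yes', 'true'):
        return 'curl'
    if forced:
        return 'udm'
    # Auto-detection: prefer udm on the system bus for backward
    # compatibility; fall back to pycurl; otherwise give up.
    if udm_available:
        return 'udm'
    if pycurl_importable:
        return 'curl'
    raise ImportError('no download manager is available')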
@unittest.skipUnless(USING_PYCURL, 'Test is not relevant for UDM') class TestCURL(unittest.TestCase): def setUp(self): super().setUp() self._resources = ExitStack() try: # Start the HTTP server running, vending files out of our test # data directory. directory = os.path.dirname(data_path('__init__.py')) self._resources.push(make_http_server(directory, 8980)) except: self._resources.close() raise def tearDown(self): self._resources.close() super().tearDown() @configuration def test_multi_perform(self): # PyCURL's multi.perform() can return the E_CALL_MULTI_PERFORM status # which tells us to just try again. This doesn't happen in practice, # but the code path needs coverage. However, .perform() itself can't # be mocked because pycurl.CurlMulti is a built-in. Fun. class FakeMulti: def perform(self): return pycurl.E_CALL_MULTI_PERFORM, 2 done_once = False class Testable(CurlDownloadManager): def _do_once(self, multi, handles): nonlocal done_once if done_once: return super()._do_once(multi, handles) else: done_once = True return super()._do_once(FakeMulti(), handles) Testable().get_files(_http_pathify([ ('channel.channels_05.json', 'channels.json'), ('download.index_01.json', 'index.json'), ])) self.assertTrue(done_once) # The files still get downloaded. self.assertEqual( set(os.listdir(config.tempdir)), set(['channels.json', 'index.json'])) @configuration def test_multi_fail(self): # PyCURL's multi.perform() can return a failure code (i.e. not E_OK) # which triggers a FileNotFoundError. It doesn't really matter which # failure code it returns. class FakeMulti: def perform(self): return pycurl.E_READ_ERROR, 2 class Testable(CurlDownloadManager): def _do_once(self, multi, handles): return super()._do_once(FakeMulti(), handles) with self.assertRaises(FileNotFoundError) as cm: Testable().get_files(_http_pathify([ ('channel.channels_05.json', 'channels.json'), ('download.index_01.json', 'index.json'), ])) # One of the two files will be contained in the error message, but # which one is undefined, although in practice it will be the first # one. self.assertRegex( cm.exception.args[0], 'http://localhost:8980/(channel.channels_05|download.index_01).json') class TestDownloadManagerFactory(unittest.TestCase): """We have a factory for creating the download manager to use.""" def test_get_downloader_forced_curl(self): # Setting SYSTEMIMAGE_PYCURL envar to 1, yes, or true forces the # PyCURL downloader. with reset_envar('SYSTEMIMAGE_PYCURL'): os.environ['SYSTEMIMAGE_PYCURL'] = '1' self.assertIsInstance(get_download_manager(), CurlDownloadManager) with reset_envar('SYSTEMIMAGE_PYCURL'): os.environ['SYSTEMIMAGE_PYCURL'] = 'tRuE' self.assertIsInstance(get_download_manager(), CurlDownloadManager) with reset_envar('SYSTEMIMAGE_PYCURL'): os.environ['SYSTEMIMAGE_PYCURL'] = 'YES' self.assertIsInstance(get_download_manager(), CurlDownloadManager) def test_get_downloader_forced_udm(self): # Setting SYSTEMIMAGE_PYCURL envar to anything else forces the udm # downloader. with reset_envar('SYSTEMIMAGE_PYCURL'): os.environ['SYSTEMIMAGE_PYCURL'] = '0' self.assertIsInstance(get_download_manager(), UDMDownloadManager) with reset_envar('SYSTEMIMAGE_PYCURL'): os.environ['SYSTEMIMAGE_PYCURL'] = 'false' self.assertIsInstance(get_download_manager(), UDMDownloadManager) with reset_envar('SYSTEMIMAGE_PYCURL'): os.environ['SYSTEMIMAGE_PYCURL'] = 'nope' self.assertIsInstance(get_download_manager(), UDMDownloadManager) def test_auto_detect_udm(self): # If the environment variable is not set, we do auto-detection.
For # backward compatibility, if udm is available on the system bus, we # use it. with reset_envar('SYSTEMIMAGE_PYCURL'): if 'SYSTEMIMAGE_PYCURL' in os.environ: del os.environ['SYSTEMIMAGE_PYCURL'] with patch('dbus.SystemBus.get_object') as mock: self.assertIsInstance( get_download_manager(), UDMDownloadManager) mock.assert_called_once_with(DOWNLOADER_INTERFACE, '/') def test_auto_detect_curl(self): # If the environment variable is not set, we do auto-detection. If udm # is not available on the system bus, we use the cURL downloader. import systemimage.download with ExitStack() as resources: resources.enter_context(reset_envar('SYSTEMIMAGE_PYCURL')) if 'SYSTEMIMAGE_PYCURL' in os.environ: del os.environ['SYSTEMIMAGE_PYCURL'] mock = resources.enter_context( patch('dbus.SystemBus.get_object', side_effect=DBusException)) resources.enter_context( patch.object(systemimage.download, 'pycurl', object())) self.assertIsInstance( get_download_manager(), CurlDownloadManager) mock.assert_called_once_with(DOWNLOADER_INTERFACE, '/') def test_auto_detect_none_available(self): # Again, we're auto-detecting, but in this case, we have neither udm # nor pycurl available. import systemimage.download with ExitStack() as resources: resources.enter_context(reset_envar('SYSTEMIMAGE_PYCURL')) if 'SYSTEMIMAGE_PYCURL' in os.environ: del os.environ['SYSTEMIMAGE_PYCURL'] mock = resources.enter_context( patch('dbus.SystemBus.get_object', side_effect=DBusException)) resources.enter_context( patch.object(systemimage.download, 'pycurl', None)) self.assertRaises(ImportError, get_download_manager) mock.assert_called_once_with(DOWNLOADER_INTERFACE, '/') ./systemimage/tests/test_gpg.py0000644000015600001650000007335712701500553016765 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Test that we can verify GPG signatures.""" __all__ = [ 'TestKeyrings', 'TestSignature', 'TestSignatureError', 'TestSignatureWithOverrides', ] import os import sys import hashlib import unittest import traceback from contextlib import ExitStack from io import StringIO from systemimage.config import config from systemimage.gpg import Context, SignatureError from systemimage.helpers import temporary_directory from systemimage.testing.helpers import ( configuration, copy, setup_keyring_txz, setup_keyrings, sign) class TestKeyrings(unittest.TestCase): """Test various attributes of the 5 defined keyrings.""" @configuration def test_archive_master(self): # The archive master keyring contains the master key. This is a # persistent, mandatory, shipped, non-expiring key. setup_keyrings() with Context(config.gpg.archive_master) as ctx: # There is only one key in the master keyring. self.assertEqual( ctx.fingerprints, set(['289518ED3A0C4CFE975A0B32E0979A7EADE8E880'])) self.assertEqual( ctx.key_ids, set(['E0979A7EADE8E880'])) # Here is some useful information about the master key.
self.assertEqual(len(ctx.keys), 1) master = ctx.keys[0] self.assertEqual( master['uids'], ['Ubuntu Archive Master Signing Key (TEST) ' '']) @configuration def test_archive_master_cached(self): # Unpacking the .tar.xz caches the .gpg file contained within, so it # only needs to be unpacked once. Test that the cached .gpg file is # used by not actually having a .tar.xz file. copy('archive-master.gpg', config.tempdir) self.assertFalse(os.path.exists(config.gpg.archive_master)) with Context(config.gpg.archive_master) as ctx: self.assertEqual( ctx.fingerprints, set(['289518ED3A0C4CFE975A0B32E0979A7EADE8E880'])) @configuration def test_archive_and_image_masters(self): # There is also a system image master key which is also persistent, # mandatory, shipped, and non-expiring. It should never need # changing, but it is possible to do so if it gets compromised. setup_keyrings() keyrings = [ config.gpg.archive_master, config.gpg.image_master, ] with Context(*keyrings) as ctx: # The context now knows about two keys. self.assertEqual( ctx.fingerprints, set(['289518ED3A0C4CFE975A0B32E0979A7EADE8E880', '47691DEF271FB2B1FD3364513BC6AF1818E7F5FB'])) self.assertEqual( ctx.key_ids, set(['E0979A7EADE8E880', '3BC6AF1818E7F5FB'])) # Here are all the available uids. uids = [] for key in ctx.keys: uids.extend(key['uids']) self.assertEqual(uids, [ 'Ubuntu Archive Master Signing Key (TEST) ' '', 'Ubuntu System Image Master Signing Key (TEST) ' '' ]) @configuration def test_archive_image_masters_image_signing(self): # In addition to the above, there is also an image signing key which is # generally what downloaded files are signed with. This key is also # persistent, mandatory, and shipped. It is updated regularly and # expires every two years. setup_keyrings() keyrings = [ config.gpg.archive_master, config.gpg.image_master, config.gpg.image_signing, ] with Context(*keyrings) as ctx: # The context now knows about three keys. self.assertEqual( ctx.fingerprints, set(['289518ED3A0C4CFE975A0B32E0979A7EADE8E880', '47691DEF271FB2B1FD3364513BC6AF1818E7F5FB', 'C5E39F07D159687BA3E82BD15A0DE8A4F1F1846F'])) self.assertEqual( ctx.key_ids, set(['E0979A7EADE8E880', '3BC6AF1818E7F5FB', '5A0DE8A4F1F1846F'])) # Here are all the available uids. uids = [] for key in ctx.keys: uids.extend(key['uids']) self.assertEqual(uids, [ 'Ubuntu Archive Master Signing Key (TEST) ' '', 'Ubuntu System Image Master Signing Key (TEST) ' '', 'Ubuntu System Image Signing Key (TEST) ' '', ]) @configuration def test_archive_image_masters_image_device_signing(self): # In addition to the above, there is also a device signing key which # downloaded files can also be signed with. This key is also # persistent and shipped, but it is optional, so it doesn't need to # exist. It is updated regularly and expires after one # month. setup_keyrings() keyrings = [ config.gpg.archive_master, config.gpg.image_master, config.gpg.image_signing, config.gpg.device_signing, ] with Context(*keyrings) as ctx: # The context now knows about four keys. self.assertEqual( ctx.fingerprints, set(['289518ED3A0C4CFE975A0B32E0979A7EADE8E880', '47691DEF271FB2B1FD3364513BC6AF1818E7F5FB', 'C5E39F07D159687BA3E82BD15A0DE8A4F1F1846F', 'C43D6575FDD935D2F9BC2A4669BC664FCB86D917'])) self.assertEqual( ctx.key_ids, set(['E0979A7EADE8E880', '3BC6AF1818E7F5FB', '5A0DE8A4F1F1846F', '69BC664FCB86D917'])) # Here are all the available uids.
uids = [] for key in ctx.keys: uids.extend(key['uids']) self.assertEqual(uids, [ 'Ubuntu Archive Master Signing Key (TEST) ' '', 'Ubuntu System Image Master Signing Key (TEST) ' '', 'Ubuntu System Image Signing Key (TEST) ' '', 'Acme Phones, LLC Image Signing Key (TEST) ' '', ]) @configuration def test_missing_keyring(self): # The keyring file does not exist. self.assertRaises( FileNotFoundError, Context, os.path.join(config.tempdir, 'does-not-exist.tar.xz')) @configuration def test_missing_blacklist(self): # The blacklist file does not exist. blacklist = os.path.join(config.tempdir, 'no-blacklist.tar.xz') self.assertRaises( FileNotFoundError, Context, blacklist=blacklist) class TestSignature(unittest.TestCase): def setUp(self): self._stack = ExitStack() self.addCleanup(self._stack.close) self._tmpdir = self._stack.enter_context(temporary_directory()) @configuration def test_good_signature(self): # We have a channels.json file signed with the image signing key, as # would be the case in production. The signature will match a context # loaded with the public key. channels_json = os.path.join(self._tmpdir, 'channels.json') copy('gpg.channels_01.json', self._tmpdir, dst=channels_json) sign(channels_json, 'image-signing.gpg') with temporary_directory() as tmpdir: keyring = os.path.join(tmpdir, 'image-signing.tar.xz') setup_keyring_txz('image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), keyring) with Context(keyring) as ctx: self.assertTrue( ctx.verify(channels_json + '.asc', channels_json)) @configuration def test_bad_signature(self): # In this case, the file is signed with the device key, so it will not # verify against the image signing key. channels_json = os.path.join(self._tmpdir, 'channels.json') copy('gpg.channels_01.json', self._tmpdir, dst=channels_json) sign(channels_json, 'device-signing.gpg') # Verify the signature with the pubkey. with temporary_directory() as tmpdir: dst = os.path.join(tmpdir, 'image-signing.tar.xz') setup_keyring_txz('image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), dst) with Context(dst) as ctx: self.assertFalse( ctx.verify(channels_json + '.asc', channels_json)) @configuration def test_good_signature_with_multiple_keyrings(self): # Like above, the file is signed with the device key, but this time we # include both the image signing and device signing pubkeys. channels_json = os.path.join(self._tmpdir, 'channels.json') copy('gpg.channels_01.json', self._tmpdir, dst=channels_json) sign(channels_json, 'device-signing.gpg') with temporary_directory() as tmpdir: keyring_1 = os.path.join(tmpdir, 'image-signing.tar.xz') keyring_2 = os.path.join(tmpdir, 'device-signing.tar.xz') setup_keyring_txz('image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), keyring_1) setup_keyring_txz('device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), keyring_2) with Context(keyring_1, keyring_2) as ctx: self.assertTrue( ctx.verify(channels_json + '.asc', channels_json)) @configuration def test_bad_signature_with_multiple_keyrings(self): # The file is signed with the image master key, but it won't verify # against the image signing and device signing pubkeys. channels_json = os.path.join(self._tmpdir, 'channels.json') copy('gpg.channels_01.json', self._tmpdir, dst=channels_json) sign(channels_json, 'image-master.gpg') # Verify the signature with the pubkey.
with temporary_directory() as tmpdir: keyring_1 = os.path.join(tmpdir, 'image-signing.tar.xz') keyring_2 = os.path.join(tmpdir, 'device-signing.tar.xz') setup_keyring_txz('image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), keyring_1) setup_keyring_txz('device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), keyring_2) with Context(keyring_1, keyring_2) as ctx: self.assertFalse( ctx.verify(channels_json + '.asc', channels_json)) @configuration def test_bad_not_even_a_signature(self): # The signature file isn't even a signature file. channels_json = os.path.join(self._tmpdir, 'channels.json') copy('gpg.channels_01.json', self._tmpdir, dst=channels_json) copy('gpg.channels_01.json', self._tmpdir, dst=channels_json + '.asc') with temporary_directory() as tmpdir: dst = os.path.join(tmpdir, 'device-signing.tar.xz') setup_keyring_txz('device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), dst) with Context(dst) as ctx: self.assertFalse(ctx.verify( channels_json + '.asc', channels_json)) @configuration def test_good_signature_not_in_blacklist(self): # We sign the file with the device signing key, and verify it against # the imaging signing and device signing keyrings. In this case # though, we also have a blacklist keyring, but none of the keyids in # the blacklist match the keyid that the file was signed with. channels_json = os.path.join(self._tmpdir, 'channels.json') copy('gpg.channels_01.json', self._tmpdir, dst='channels.json') sign(channels_json, 'device-signing.gpg') # Verify the signature with the pubkey. with temporary_directory() as tmpdir: keyring_1 = os.path.join(tmpdir, 'image-signing.tar.xz') keyring_2 = os.path.join(tmpdir, 'device-signing.tar.xz') blacklist = os.path.join(tmpdir, 'blacklist.tar.xz') setup_keyring_txz('image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), keyring_1) setup_keyring_txz('device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), keyring_2) setup_keyring_txz('spare.gpg', 'image-master.gpg', dict(type='blacklist'), blacklist) with Context(keyring_1, keyring_2, blacklist=blacklist) as ctx: self.assertTrue( ctx.verify(channels_json + '.asc', channels_json)) @configuration def test_bad_signature_in_blacklist(self): # Like above, but we put the device signing key id in the blacklist. channels_json = os.path.join(self._tmpdir, 'channels.json') copy('gpg.channels_01.json', self._tmpdir, dst=channels_json) sign(channels_json, 'device-signing.gpg') # Verify the signature with the pubkey. with temporary_directory() as tmpdir: keyring_1 = os.path.join(tmpdir, 'image-signing.tar.xz') keyring_2 = os.path.join(tmpdir, 'device-signing.tar.xz') blacklist = os.path.join(tmpdir, 'blacklist.tar.xz') setup_keyring_txz('image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), keyring_1) setup_keyring_txz('device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), keyring_2) # We're letting the device signing pubkey stand in for a blacklist. setup_keyring_txz('device-signing.gpg', 'image-master.gpg', dict(type='blacklist'), blacklist) with Context(keyring_1, keyring_2, blacklist=blacklist) as ctx: self.assertFalse( ctx.verify(channels_json + '.asc', channels_json)) @configuration def test_good_validation(self): # The .validate() method does nothing if the signature is good. 
channels_json = os.path.join(self._tmpdir, 'channels.json') copy('gpg.channels_01.json', self._tmpdir, dst=channels_json) sign(channels_json, 'image-signing.gpg') with temporary_directory() as tmpdir: keyring = os.path.join(tmpdir, 'image-signing.tar.xz') setup_keyring_txz('image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), keyring) with Context(keyring) as ctx: self.assertIsNone( ctx.validate(channels_json + '.asc', channels_json)) class TestSignatureError(unittest.TestCase): def setUp(self): self._stack = ExitStack() self.addCleanup(self._stack.close) self._tmpdir = self._stack.enter_context(temporary_directory()) def test_extra_data(self): # A SignatureError includes extra information about the path to the # signature file, and the path to the data file. You also get the md5 # checksums of those two paths. signature_path = os.path.join(self._tmpdir, 'signature') data_path = os.path.join(self._tmpdir, 'data') with open(signature_path, 'wb') as fp: fp.write(b'012345') with open(data_path, 'wb') as fp: fp.write(b'67890a') error = SignatureError(signature_path, data_path) self.assertEqual(error.signature_path, signature_path) self.assertEqual(error.data_path, data_path) self.assertEqual( error.signature_checksum, 'd6a9a933c8aafc51e55ac0662b6e4d4a') self.assertEqual( error.data_checksum, 'e82780258de250078f7ad3f595d71f6d') @configuration def test_signature_invalid(self): # The .validate() method raises a SignatureError exception with extra # information when the signature is invalid. channels_json = os.path.join(self._tmpdir, 'channels.json') copy('gpg.channels_01.json', self._tmpdir, dst=channels_json) sign(channels_json, 'device-signing.gpg') # Verify the signature with the pubkey. with temporary_directory() as tmpdir: dst = os.path.join(tmpdir, 'image-signing.tar.xz') setup_keyring_txz('image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), dst) # Get the dst's checksum now, because the file will get deleted # when the tmpdir context manager exits. with open(dst, 'rb') as fp: dst_checksum = hashlib.md5(fp.read()).hexdigest() with Context(dst) as ctx: with self.assertRaises(SignatureError) as cm: ctx.validate(channels_json + '.asc', channels_json) error = cm.exception basename = os.path.basename self.assertEqual(basename(error.signature_path), 'channels.json.asc') self.assertEqual(basename(error.data_path), 'channels.json') # The contents of the signature file are not predictable. with open(channels_json + '.asc', 'rb') as fp: checksum = hashlib.md5(fp.read()).hexdigest() self.assertEqual(error.signature_checksum, checksum) self.assertEqual( error.data_checksum, '715c63fecbf44b62f9fa04a82dfa7d29') basenames = [basename(path) for path in error.keyrings] self.assertEqual(basenames, ['image-signing.tar.xz']) self.assertIsNone(error.blacklist) self.assertEqual(error.keyring_checksums, [dst_checksum]) self.assertIsNone(error.blacklist_checksum) @configuration def test_signature_invalid_due_to_blacklist(self): # Like above, but we put the device signing key id in the blacklist. channels_json = os.path.join(self._tmpdir, 'channels.json') copy('gpg.channels_01.json', self._tmpdir, dst=channels_json) sign(channels_json, 'device-signing.gpg') # Verify the signature with the pubkey. 
with temporary_directory() as tmpdir: keyring_1 = os.path.join(tmpdir, 'image-signing.tar.xz') keyring_2 = os.path.join(tmpdir, 'device-signing.tar.xz') blacklist = os.path.join(tmpdir, 'blacklist.tar.xz') setup_keyring_txz('image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), keyring_1) setup_keyring_txz('device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), keyring_2) # We're letting the device signing pubkey stand in for a blacklist. setup_keyring_txz('device-signing.gpg', 'image-master.gpg', dict(type='blacklist'), blacklist) # Get the keyring checksums now, because the files will get # deleted when the tmpdir context manager exits. keyring_checksums = [] for path in (keyring_1, keyring_2): with open(path, 'rb') as fp: checksum = hashlib.md5(fp.read()).hexdigest() keyring_checksums.append(checksum) with open(blacklist, 'rb') as fp: blacklist_checksum = hashlib.md5(fp.read()).hexdigest() with Context(keyring_1, keyring_2, blacklist=blacklist) as ctx: with self.assertRaises(SignatureError) as cm: ctx.validate(channels_json + '.asc', channels_json) error = cm.exception basename = os.path.basename self.assertEqual(basename(error.signature_path), 'channels.json.asc') self.assertEqual(basename(error.data_path), 'channels.json') # The contents of the signature file are not predictable. with open(channels_json + '.asc', 'rb') as fp: checksum = hashlib.md5(fp.read()).hexdigest() self.assertEqual(error.signature_checksum, checksum) self.assertEqual( error.data_checksum, '715c63fecbf44b62f9fa04a82dfa7d29') basenames = [basename(path) for path in error.keyrings] self.assertEqual(basenames, ['image-signing.tar.xz', 'device-signing.tar.xz']) self.assertEqual(basename(error.blacklist), 'blacklist.tar.xz') self.assertEqual(error.keyring_checksums, keyring_checksums) self.assertEqual(error.blacklist_checksum, blacklist_checksum) @configuration def test_signature_error_logging(self): # The repr/str of the SignatureError should contain lots of useful # information that will make debugging easier. channels_json = os.path.join(self._tmpdir, 'channels.json') copy('gpg.channels_01.json', self._tmpdir, dst=channels_json) sign(channels_json, 'device-signing.gpg') # Verify the signature with the pubkey. tmpdir = self._stack.enter_context(temporary_directory()) dst = os.path.join(tmpdir, 'image-signing.tar.xz') setup_keyring_txz('image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), dst) output = StringIO() with Context(dst) as ctx: try: ctx.validate(channels_json + '.asc', channels_json) except SignatureError: # For our purposes, log.exception() is essentially a wrapper # around this traceback call. We don't really care about the # full stack trace though. e = sys.exc_info() traceback.print_exception(e[0], e[1], e[2], limit=0, file=output) # 2014-02-12 BAW: Yuck, but I can't get assertRegex() to work properly. 
for i, line in enumerate(output.getvalue().splitlines()): if i == 0: self.assertEqual(line, 'Traceback (most recent call last):') elif i == 1: self.assertEqual(line, 'systemimage.gpg.SignatureError: ') elif i == 2: self.assertTrue(line.startswith(' sig path :')) elif i == 3: self.assertTrue(line.endswith('/channels.json.asc')) elif i == 4: self.assertEqual( line, ' data path: 715c63fecbf44b62f9fa04a82dfa7d29') elif i == 5: self.assertTrue(line.endswith('/channels.json')) elif i == 6: self.assertTrue(line.startswith(' keyrings :')) elif i == 7: self.assertTrue(line.endswith("/image-signing.tar.xz']")) elif i == 8: self.assertEqual(line, ' blacklist: no blacklist ') class TestSignatureWithOverrides(unittest.TestCase): """system-image-cli supports a --skip-gpg-verification flag.""" def setUp(self): self._stack = ExitStack() self.addCleanup(self._stack.close) self._tmpdir = self._stack.enter_context(temporary_directory()) @configuration def test_bad_signature(self, config): # In this case, the file is signed with the device key, so it will not # verify against the image signing key, unless the # --skip-gpg-verification flag is set. channels_json = os.path.join(self._tmpdir, 'channels.json') channels_asc = channels_json + '.asc' copy('gpg.channels_01.json', self._tmpdir, dst=channels_json) sign(channels_json, 'device-signing.gpg') # Verify the signature with the pubkey. with temporary_directory() as tmpdir: dst = os.path.join(tmpdir, 'image-signing.tar.xz') setup_keyring_txz('image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), dst) with Context(dst) as ctx: self.assertFalse(ctx.verify(channels_asc, channels_json)) # But with the --skip-gpg-verification flag set, the verify # call returns success. config.skip_gpg_verification = True self.assertTrue(ctx.verify(channels_asc, channels_json)) @configuration def test_bad_signature_with_multiple_keyrings(self, config): # The file is signed with the image master key, but it won't verify # against the image signing and device signing pubkeys, unless the # --skip-gpg-verification flag is set. channels_json = os.path.join(self._tmpdir, 'channels.json') channels_asc = channels_json + '.asc' copy('gpg.channels_01.json', self._tmpdir, dst=channels_json) sign(channels_json, 'image-master.gpg') # Verify the signature with the pubkey. with temporary_directory() as tmpdir: keyring_1 = os.path.join(tmpdir, 'image-signing.tar.xz') keyring_2 = os.path.join(tmpdir, 'device-signing.tar.xz') setup_keyring_txz('image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), keyring_1) setup_keyring_txz('device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), keyring_2) with Context(keyring_1, keyring_2) as ctx: self.assertFalse(ctx.verify(channels_asc, channels_json)) config.skip_gpg_verification = True self.assertTrue(ctx.verify(channels_asc, channels_json)) @configuration def test_bad_not_even_a_signature(self, config): # The signature file isn't even a signature file. Verification will # fail unless the --skip-gpg-verification flag is set. 
        channels_json = os.path.join(self._tmpdir, 'channels.json')
        channels_asc = channels_json + '.asc'
        copy('gpg.channels_01.json', self._tmpdir, dst=channels_json)
        copy('gpg.channels_01.json', self._tmpdir, dst=channels_json + '.asc')
        with temporary_directory() as tmpdir:
            dst = os.path.join(tmpdir, 'device-signing.tar.xz')
            setup_keyring_txz('device-signing.gpg', 'image-signing.gpg',
                              dict(type='device-signing'), dst)
            with Context(dst) as ctx:
                self.assertFalse(ctx.verify(channels_asc, channels_json))
                config.skip_gpg_verification = True
                self.assertTrue(ctx.verify(channels_asc, channels_json))

    @configuration
    def test_bad_signature_in_blacklist(self, config):
        # Like above, but we put the device signing key id in the blacklist.
        channels_json = os.path.join(self._tmpdir, 'channels.json')
        channels_asc = channels_json + '.asc'
        copy('gpg.channels_01.json', self._tmpdir, dst=channels_json)
        sign(channels_json, 'device-signing.gpg')
        # Verify the signature with the pubkey.
        with temporary_directory() as tmpdir:
            keyring_1 = os.path.join(tmpdir, 'image-signing.tar.xz')
            keyring_2 = os.path.join(tmpdir, 'device-signing.tar.xz')
            blacklist = os.path.join(tmpdir, 'blacklist.tar.xz')
            setup_keyring_txz('image-signing.gpg', 'image-master.gpg',
                              dict(type='image-signing'), keyring_1)
            setup_keyring_txz('device-signing.gpg', 'image-signing.gpg',
                              dict(type='device-signing'), keyring_2)
            # We're letting the device signing pubkey stand in for a blacklist.
            setup_keyring_txz('device-signing.gpg', 'image-master.gpg',
                              dict(type='blacklist'), blacklist)
            with Context(keyring_1, keyring_2, blacklist=blacklist) as ctx:
                self.assertFalse(ctx.verify(channels_asc, channels_json))
                config.skip_gpg_verification = True
                self.assertTrue(ctx.verify(channels_asc, channels_json))

    @configuration
    def test_bad_signature_with_validate(self, config):
        # This is similar to the above, except that the .validate() API is
        # used instead.
        channels_json = os.path.join(self._tmpdir, 'channels.json')
        channels_asc = channels_json + '.asc'
        copy('gpg.channels_01.json', self._tmpdir, dst=channels_json)
        sign(channels_json, 'device-signing.gpg')
        # Verify the signature with the pubkey.
        with temporary_directory() as tmpdir:
            dst = os.path.join(tmpdir, 'image-signing.tar.xz')
            setup_keyring_txz('image-signing.gpg', 'image-master.gpg',
                              dict(type='image-signing'), dst)
            with Context(dst) as ctx:
                self.assertRaises(SignatureError, ctx.validate,
                                  channels_asc, channels_json)
                config.skip_gpg_verification = True
                ctx.validate(channels_asc, channels_json)
./systemimage/tests/test_keyring.py0000644000015600001650000003573312701500553017650 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd.
# Author: Barry Warsaw

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
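The test class below exercises get_keyring() end to end against a local HTTPS
server. For orientation, here is a minimal sketch of the call pattern those
tests drive; every name in it (get_keyring, Context, config.gpg.*) appears
elsewhere in this archive, but the fetch_image_master() wrapper itself is
illustrative and not part of the package:

from systemimage.config import config
from systemimage.gpg import Context
from systemimage.keyring import get_keyring

def fetch_image_master():
    # Download gpg/image-master.tar.xz and its detached .asc from the
    # server at config.https_base, verify the signature against the local
    # archive-master keyring, and cache the unpacked result at
    # config.gpg.image_master (see get_keyring() later in this archive).
    get_keyring('image-master', 'gpg/image-master.tar.xz', 'archive-master')
    # The cached keyring can now be loaded into a verification context.
    with Context(config.gpg.image_master) as ctx:
        return ctx.fingerprints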
"""Test downloading and unpacking a keyring.""" __all__ = [ 'TestKeyring', ] import os import hashlib import unittest from contextlib import ExitStack from datetime import datetime, timedelta, timezone from systemimage.config import config from systemimage.gpg import Context, SignatureError from systemimage.helpers import temporary_directory from systemimage.keyring import KeyringError, get_keyring from systemimage.testing.helpers import ( configuration, make_http_server, setup_keyring_txz, setup_keyrings) from systemimage.testing.nose import SystemImagePlugin class TestKeyring(unittest.TestCase): """Test downloading and unpacking a keyring.""" @classmethod def setUpClass(self): SystemImagePlugin.controller.set_mode(cert_pem='cert.pem') def setUp(self): self._stack = ExitStack() try: self._serverdir = self._stack.enter_context( temporary_directory()) self._stack.push(make_http_server( self._serverdir, 8943, 'cert.pem', 'key.pem')) except: self._stack.close() raise def tearDown(self): self._stack.close() @configuration def test_good_path(self): # Everything checks out, with the simplest possible keyring.json. setup_keyrings('archive-master') setup_keyring_txz( 'spare.gpg', 'archive-master.gpg', dict(type='image-master'), os.path.join(self._serverdir, 'gpg', 'image-master.tar.xz')) get_keyring('image-master', 'gpg/image-master.tar.xz', 'archive-master') with Context(config.gpg.archive_master) as ctx: self.assertEqual(ctx.fingerprints, set(['289518ED3A0C4CFE975A0B32E0979A7EADE8E880'])) @configuration def test_good_path_full_json(self): # Everything checks out, with a fully loaded keyring.json file. next_year = datetime.now(tz=timezone.utc) + timedelta(days=365) setup_keyrings('archive-master') setup_keyring_txz( 'spare.gpg', 'archive-master.gpg', dict(type='image-master', expiry=next_year.timestamp(), model='nexus7'), os.path.join(self._serverdir, 'gpg', 'image-master.tar.xz')) get_keyring('image-master', 'gpg/image-master.tar.xz', 'archive-master') with Context(config.gpg.archive_master) as ctx: self.assertEqual(ctx.fingerprints, set(['289518ED3A0C4CFE975A0B32E0979A7EADE8E880'])) @configuration def test_good_path_model(self): # Everything checks out with the model specified. setup_keyrings() setup_keyring_txz( 'spare.gpg', 'archive-master.gpg', dict(type='image-master', model='nexus7'), os.path.join(self._serverdir, 'gpg', 'image-master.tar.xz')) get_keyring('image-master', 'gpg/image-master.tar.xz', 'archive-master') with Context(config.gpg.archive_master) as ctx: self.assertEqual(ctx.fingerprints, set(['289518ED3A0C4CFE975A0B32E0979A7EADE8E880'])) @configuration def test_good_path_expiry(self): # Everything checks out, with the expiration date specified. next_year = datetime.now(tz=timezone.utc) + timedelta(days=365) setup_keyrings('archive-master') setup_keyring_txz( 'spare.gpg', 'archive-master.gpg', dict(type='image-master', expiry=next_year.timestamp()), os.path.join(self._serverdir, 'gpg', 'image-master.tar.xz')) get_keyring('image-master', 'gpg/image-master.tar.xz', 'archive-master') with Context(config.gpg.archive_master) as ctx: self.assertEqual(ctx.fingerprints, set(['289518ED3A0C4CFE975A0B32E0979A7EADE8E880'])) @configuration def test_path_device_signing_keyring(self): # Get the device signing keyring. 
        setup_keyrings('archive-master', 'image-master', 'image-signing')
        setup_keyring_txz(
            'spare.gpg', 'image-signing.gpg', dict(type='device-signing'),
            os.path.join(self._serverdir, 'gpg', 'stable', 'nexus7',
                         'device-signing.tar.xz'))
        url = 'gpg/{}/{}/device-signing.tar.xz'.format(
            config.channel, config.device)
        get_keyring('device-signing', url, 'image-signing')
        with Context(config.gpg.device_signing) as ctx:
            self.assertEqual(ctx.fingerprints,
                set(['94BE2CECF8A5AF9F3A10E2A6526B7016C3D2FB44']))

    @configuration
    def test_path_blacklist(self):
        # Get the blacklist keyring.
        setup_keyrings('archive-master', 'image-master')
        setup_keyring_txz(
            'spare.gpg', 'image-master.gpg', dict(type='blacklist'),
            os.path.join(self._serverdir, 'gpg/blacklist.tar.xz'))
        url = 'gpg/blacklist.tar.xz'
        get_keyring('blacklist', url, 'image-master')
        blacklist_path = os.path.join(config.tempdir, 'blacklist.tar.xz')
        with Context(blacklist_path) as ctx:
            self.assertEqual(ctx.fingerprints,
                set(['94BE2CECF8A5AF9F3A10E2A6526B7016C3D2FB44']))

    @configuration
    def test_tar_xz_file_missing(self):
        # If the tar.xz file cannot be downloaded, an error is raised.
        tarxz_path = os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz')
        setup_keyrings()
        setup_keyring_txz(
            'spare.gpg', 'archive-master.gpg', dict(type='blacklist'),
            tarxz_path)
        os.remove(tarxz_path)
        self.assertRaises(FileNotFoundError, get_keyring,
                          'blacklist', 'gpg/blacklist.tar.xz', 'image-master')

    @configuration
    def test_asc_file_missing(self):
        # If the tar.xz.asc file cannot be downloaded, an error is raised.
        tarxz_path = os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz')
        setup_keyrings()
        setup_keyring_txz(
            'spare.gpg', 'archive-master.gpg', dict(type='blacklist'),
            tarxz_path)
        os.remove(tarxz_path + '.asc')
        self.assertRaises(FileNotFoundError, get_keyring,
                          'blacklist', 'gpg/blacklist.tar.xz', 'image-master')

    @configuration
    def test_bad_signature(self):
        # Both files are downloaded, but the signature does not match the
        # image-master key.
        setup_keyrings()
        # Use the spare key as the blacklist, signed by itself. Since this
        # won't match the image-master key, the check will fail.
        server_path = os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz')
        setup_keyring_txz(
            'spare.gpg', 'spare.gpg', dict(type='blacklist'), server_path)
        with self.assertRaises(SignatureError) as cm:
            get_keyring('blacklist', 'gpg/blacklist.tar.xz', 'image-master')
        error = cm.exception
        # The local file name will be keyring.tar.xz in the cache directory.
        basename = os.path.basename
        self.assertEqual(basename(error.data_path), 'keyring.tar.xz')
        self.assertEqual(basename(error.signature_path), 'keyring.tar.xz.asc')
        # The crafted blacklist.tar.xz file will have an unpredictable
        # checksum due to tarfile variability.
        with open(server_path, 'rb') as fp:
            checksum = hashlib.md5(fp.read()).hexdigest()
        self.assertEqual(error.data_checksum, checksum)
        # The signature file's checksum is also unpredictable.
        with open(server_path + '.asc', 'rb') as fp:
            checksum = hashlib.md5(fp.read()).hexdigest()
        self.assertEqual(error.signature_checksum, checksum)

    @configuration
    def test_blacklisted_signature(self):
        # Normally, the signature would be good, except that the fingerprint
        # of the device signing key is blacklisted.
        setup_keyrings('archive-master', 'image-master')
        blacklist = os.path.join(config.tempdir, 'gpg', 'blacklist.tar.xz')
        # Blacklist the image-master keyring.
        setup_keyring_txz(
            'image-master.gpg', 'image-master.gpg', dict(type='blacklist'),
            blacklist)
        setup_keyring_txz(
            'image-signing.gpg', 'image-master.gpg',
            dict(type='image-signing'),
            os.path.join(self._serverdir, 'gpg', 'image-signing.tar.xz'))
        # Now put an image-signing key on the server and attempt to download
        # it. Because the image-master is blacklisted, this will fail.
        self.assertRaises(SignatureError, get_keyring,
                          'image-signing', 'gpg/image-signing.tar.xz',
                          'image-master', blacklist)

    @configuration
    def test_bad_json_type(self):
        # This time, while the signatures match, the keyring type in the
        # keyring.json file does not match.
        setup_keyrings()
        setup_keyring_txz(
            'device-signing.gpg', 'image-master.gpg', dict(type='master'),
            os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz'))
        with self.assertRaises(KeyringError) as cm:
            get_keyring('blacklist', 'gpg/blacklist.tar.xz', 'image-master')
        self.assertEqual(
            cm.exception.message,
            'keyring type mismatch; wanted: blacklist, got: master')

    @configuration
    def test_bad_json_model(self):
        # Similar to above, but with a non-matching model name.
        setup_keyrings()
        setup_keyring_txz(
            'device-signing.gpg', 'image-master.gpg',
            dict(type='blacklist', model='nexus0'),
            os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz'))
        with self.assertRaises(KeyringError) as cm:
            get_keyring('blacklist', 'gpg/blacklist.tar.xz', 'image-master')
        self.assertEqual(
            cm.exception.message,
            'keyring model mismatch; wanted: nexus7, got: nexus0')

    @configuration
    def test_expired(self):
        # Similar to above, but the expiry key in the json names a UTC
        # timestamp that has already elapsed.
        last_year = datetime.now(tz=timezone.utc) + timedelta(days=-365)
        setup_keyrings()
        setup_keyring_txz(
            'device-signing.gpg', 'image-master.gpg',
            dict(type='blacklist', model='nexus7',
                 expiry=last_year.timestamp()),
            os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz'))
        with self.assertRaises(KeyringError) as cm:
            get_keyring('blacklist', 'gpg/blacklist.tar.xz', 'image-master')
        self.assertEqual(
            cm.exception.message, 'expired keyring timestamp')

    @configuration
    def test_destination_image_master(self):
        # When a keyring is downloaded, we preserve its .tar.xz and
        # .tar.xz.asc files.
        setup_keyrings('archive-master')
        setup_keyring_txz(
            'image-master.gpg', 'archive-master.gpg',
            dict(type='image-master'),
            os.path.join(self._serverdir, 'gpg', 'image-master.tar.xz'))
        asc_path = config.gpg.image_master + '.asc'
        self.assertFalse(os.path.exists(config.gpg.image_master))
        self.assertFalse(os.path.exists(asc_path))
        get_keyring(
            'image-master', 'gpg/image-master.tar.xz', 'archive-master')
        self.assertTrue(os.path.exists(config.gpg.image_master))
        self.assertTrue(os.path.exists(asc_path))
        with Context(config.gpg.archive_master) as ctx:
            self.assertTrue(ctx.verify(asc_path, config.gpg.image_master))

    @configuration
    def test_destination_image_signing(self):
        # When a keyring is downloaded, we preserve its .tar.xz and
        # .tar.xz.asc files.
setup_keyrings('archive-master', 'image-master') setup_keyring_txz( 'image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), os.path.join(self._serverdir, 'gpg', 'image-signing.tar.xz')) asc_path = config.gpg.image_signing + '.asc' self.assertFalse(os.path.exists(config.gpg.image_signing)) self.assertFalse(os.path.exists(asc_path)) get_keyring( 'image-signing', 'gpg/image-signing.tar.xz', 'image-master') self.assertTrue(os.path.exists(config.gpg.image_signing)) self.assertTrue(os.path.exists(asc_path)) with Context(config.gpg.image_master) as ctx: self.assertTrue(ctx.verify(asc_path, config.gpg.image_signing)) @configuration def test_destination_device_signing(self): # When a keyring is downloaded, we preserve its .tar.xz and # .tar.xz.asc files. setup_keyrings('archive-master', 'image-master', 'image-signing') setup_keyring_txz( 'device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), os.path.join(self._serverdir, 'stable', 'nexus7', 'device-signing.tar.xz')) asc_path = config.gpg.device_signing + '.asc' self.assertFalse(os.path.exists(config.gpg.device_signing)) self.assertFalse(os.path.exists(asc_path)) get_keyring('device-signing', 'stable/nexus7/device-signing.tar.xz', 'image-signing') self.assertTrue(os.path.exists(config.gpg.device_signing)) self.assertTrue(os.path.exists(asc_path)) with Context(config.gpg.image_signing) as ctx: self.assertTrue(ctx.verify(asc_path, config.gpg.device_signing)) @configuration def test_destination_blacklist(self): # Like above, but the blacklist files end up in the temporary # directory, since it's never persistent. setup_keyrings('archive-master', 'image-master') setup_keyring_txz( 'spare.gpg', 'image-master.gpg', dict(type='blacklist'), os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz')) txz_path = os.path.join( config.updater.data_partition, 'blacklist.tar.xz') asc_path = txz_path + '.asc' self.assertFalse(os.path.exists(txz_path)) self.assertFalse(os.path.exists(asc_path)) get_keyring('blacklist', 'gpg/blacklist.tar.xz', 'image-master') self.assertTrue(os.path.exists(txz_path)) self.assertTrue(os.path.exists(asc_path)) with Context(config.gpg.image_master) as ctx: self.assertTrue(ctx.verify(asc_path, txz_path)) ./systemimage/tests/test_helpers.py0000644000015600001650000004402512701500553017644 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
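Further down in this file, TestPhasedPercentage pins down the contract of
systemimage.helpers.phased_percentage(): the result is deterministic in
(machine-id, channel, target) and always falls between 0 and 100. The sketch
below satisfies that contract; its hashing scheme is an assumption made for
illustration and will not reproduce the exact percentages (51, 25, 96, ...)
asserted in those tests:

import hashlib

def sketch_phased_percentage(machine_id, channel, target):
    # Deterministic: the same (machine-id, channel, target) triple always
    # hashes to the same seed, hence the same percentage.
    seed = '{}:{}:{}'.format(machine_id, channel, target).encode('utf-8')
    digest = hashlib.sha256(seed).digest()
    # Reduce the first byte into the inclusive range 0..100.
    return digest[0] % 101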
"""Test helpers.""" __all__ = [ 'TestConverters', 'TestLastUpdateDate', 'TestMiscellaneous', 'TestPhasedPercentage', 'TestSignature', ] import os import shutil import hashlib import logging import tempfile import unittest from contextlib import ExitStack from datetime import datetime, timedelta from pathlib import Path from systemimage.bag import Bag from systemimage.config import Configuration from systemimage.helpers import ( MiB, NO_PORT, as_loglevel, as_object, as_port, as_stripped, as_timedelta, calculate_signature, last_update_date, phased_percentage, temporary_directory, version_detail) from systemimage.testing.helpers import configuration, data_path, touch_build from unittest.mock import patch class TestConverters(unittest.TestCase): def test_as_object_good_path(self): self.assertEqual(as_object('systemimage.bag.Bag'), Bag) def test_as_object_no_dot(self): self.assertRaises(ValueError, as_object, 'foo') def test_as_object_import_error(self): # Because as_object() returns a proxy in order to avoid circular # imports, we actually have to call the return value of as_object() in # order to trigger the module lookup. self.assertRaises( ImportError, as_object('systemimage.doesnotexist.Foo')) def test_as_object_attribute_error(self): # Because as_object() returns a proxy in order to avoid circular # imports, we actually have to call the return value of as_object() in # order to trigger the module lookup. self.assertRaises( AttributeError, as_object('systemimage.tests.test_helpers.NoSuchTest')) def test_as_object_not_equal(self): self.assertNotEqual(as_object('systemimage.bag.Bag'), object()) def test_as_timedelta_seconds(self): self.assertEqual(as_timedelta('2s'), timedelta(seconds=2)) def test_as_timedelta_unadorned(self): self.assertRaises(ValueError, as_timedelta, '5') def test_as_timedelta_minutes(self): self.assertEqual(as_timedelta('10m'), timedelta(seconds=600)) def test_as_timedelta_unknown(self): self.assertRaises(ValueError, as_timedelta, '3x') def test_as_timedelta_no_keywords(self): self.assertRaises(ValueError, as_timedelta, '') def test_as_timedelta_repeated_interval(self): self.assertRaises(ValueError, as_timedelta, '2s2s') def test_as_timedelta_float(self): self.assertEqual(as_timedelta('0.5d'), timedelta(hours=12)) def test_as_loglevel(self): # The default D-Bus log level is ERROR. self.assertEqual(as_loglevel('critical'), (logging.CRITICAL, logging.ERROR)) def test_as_loglevel_uppercase(self): self.assertEqual(as_loglevel('CRITICAL'), (logging.CRITICAL, logging.ERROR)) def test_as_dbus_loglevel(self): self.assertEqual(as_loglevel('error:info'), (logging.ERROR, logging.INFO)) def test_as_loglevel_unknown(self): self.assertRaises(ValueError, as_loglevel, 'BADNESS') def test_as_bad_dbus_loglevel(self): self.assertRaises(ValueError, as_loglevel, 'error:basicConfig') def test_as_port(self): self.assertEqual(as_port('801'), 801) def test_as_non_int_port(self): self.assertRaises(ValueError, as_port, 'not-a-port') def test_as_port_disabled(self): self.assertIs(as_port('disabled'), NO_PORT) self.assertIs(as_port('disable'), NO_PORT) self.assertIs(as_port('DISABLED'), NO_PORT) self.assertIs(as_port('DISABLE'), NO_PORT) def test_stripped(self): self.assertEqual(as_stripped(' field '), 'field') class TestLastUpdateDate(unittest.TestCase): @configuration def test_date_from_userdata(self): # The last upgrade data can come from /userdata/.last_update. 
with ExitStack() as stack: tmpdir = stack.enter_context(temporary_directory()) userdata_path = Path(tmpdir) / '.last_update' stack.enter_context(patch('systemimage.helpers.LAST_UPDATE_FILE', str(userdata_path))) timestamp = int(datetime(2012, 11, 10, 9, 8, 7).timestamp()) userdata_path.touch() os.utime(str(userdata_path), (timestamp, timestamp)) self.assertEqual(last_update_date(), '2012-11-10 09:08:07') @configuration def test_date_from_config_d(self, config): # The latest mtime from all the config.d files is taken as the last # update date. Add a bunch of ini files where the higher numbered # ones have higher numbered year mtimes. for year in range(18, 22): ini_file = Path(config.config_d) / '{:02d}_config.ini'.format(year) ini_file.touch() timestamp = int(datetime(2000 + year, 1, 2, 3, 4, 5).timestamp()) os.utime(str(ini_file), (timestamp, timestamp)) config.reload() self.assertEqual(last_update_date(), '2021-01-02 03:04:05') @configuration def test_date_from_config_d_reversed(self, config): # As above, but the higher numbered ini files have earlier mtimes. for year in range(22, 18, -1): ini_file = Path(config.config_d) / '{:02d}_config.ini'.format(year) ini_file.touch() timestamp = int(datetime(2040-year, 1, 2, 3, 4, 5).timestamp()) os.utime(str(ini_file), (timestamp, timestamp)) config.reload() self.assertEqual(last_update_date(), '2021-01-02 03:04:05') @configuration def test_date_from_userdata_takes_precedence(self, config_d): # The last upgrade data will come from /userdata/.last_update, even if # there are .ini files with later mtimes in them. for year in range(18, 22): ini_file = Path(config_d) / '{:02d}_config.ini'.format(year) ini_file.touch() timestamp = int(datetime(2000 + year, 1, 2, 3, 4, 5).timestamp()) os.utime(str(ini_file), (timestamp, timestamp)) with ExitStack() as stack: tmpdir = stack.enter_context(temporary_directory()) userdata_path = Path(tmpdir) / '.last_update' stack.enter_context(patch('systemimage.helpers.LAST_UPDATE_FILE', str(userdata_path))) timestamp = int(datetime(2012, 11, 10, 9, 8, 7).timestamp()) userdata_path.touch() os.utime(str(userdata_path), (timestamp, timestamp)) self.assertEqual(last_update_date(), '2012-11-10 09:08:07') def test_date_unknown(self): # If there is no /userdata/.last_update file and no ini files, then # the last update date is unknown. with ExitStack() as stack: config_d = stack.enter_context(temporary_directory()) tempdir = stack.enter_context(temporary_directory()) userdata_path = os.path.join(tempdir, '.last_update') stack.enter_context(patch('systemimage.helpers.LAST_UPDATE_FILE', userdata_path)) config = Configuration(config_d) stack.enter_context(patch('systemimage.config._config', config)) self.assertEqual(last_update_date(), 'Unknown') @configuration def test_date_no_microseconds(self, config): # Resolution is seconds. ini_file = Path(config.config_d) / '01_config.ini' ini_file.touch() timestamp = datetime(2022, 12, 11, 10, 9, 8, 7).timestamp() # We need nanoseconds. 
timestamp *= 1000000000 os.utime(str(ini_file), ns=(timestamp, timestamp)) config.reload() self.assertEqual(last_update_date(), '2022-12-11 10:09:08') @configuration def test_version_detail(self, config): shutil.copy(data_path('helpers.config_01.ini'), os.path.join(config.config_d, '00_config.ini')) config.reload() self.assertEqual(version_detail(), dict(ubuntu='123', mako='456', custom='789')) @configuration def test_no_version_detail(self, config): shutil.copy(data_path('helpers.config_02.ini'), os.path.join(config.config_d, '00_config.ini')) config.reload() self.assertEqual(version_detail(), {}) def test_version_detail_from_argument(self): self.assertEqual(version_detail('ubuntu=123,mako=456,custom=789'), dict(ubuntu='123', mako='456', custom='789')) def test_no_version_in_version_detail(self): self.assertEqual(version_detail('ubuntu,mako,custom'), {}) @unittest.skipIf(os.getuid() == 0, 'Test cannot succeed when run as root') @configuration def test_last_date_no_permission(self, config): # LP: #1365761 reports a problem where stat'ing /userdata/.last_update # results in a PermissionError. In that case it should fall back to # using the mtimes of the config.d ini files. timestamp_1 = int(datetime(2022, 1, 2, 3, 4, 5).timestamp()) touch_build(2, timestamp_1) # Now create an unstat'able /userdata/.last_update file. with ExitStack() as stack: tmpdir = stack.enter_context(temporary_directory()) userdata_path = Path(tmpdir) / '.last_update' stack.enter_context(patch('systemimage.helpers.LAST_UPDATE_FILE', str(userdata_path))) timestamp = int(datetime(2012, 11, 10, 9, 8, 7).timestamp()) # Make the file unreadable. userdata_path.touch() os.utime(str(userdata_path), (timestamp, timestamp)) stack.callback(os.chmod, tmpdir, 0o777) os.chmod(tmpdir, 0o000) config.reload() # The last update date will be the date of the 99_build.ini file. self.assertEqual(last_update_date(), '2022-01-02 03:04:05') @configuration def test_dangling_symlink(self, config): # LP: #1495688 reports a problem where /userdata/.last_update doesn't # exist, and the files in the config.d directory are dangling # symlinks. In this case, there's really little that can be done to # find a reliable last update date, but at least we don't crash. # # Start by deleting any existing .ini files in config.d. for path in Path(config.config_d).iterdir(): if path.suffix == '.ini': path.unlink() with ExitStack() as stack: tmpdir = stack.enter_context(temporary_directory()) userdata_path = Path(tmpdir) / '.last_update' stack.enter_context(patch('systemimage.helpers.LAST_UPDATE_FILE', str(userdata_path))) # Do not create the .last_update file. missing_ini = Path(tmpdir) / 'missing.ini' config.ini_files = [missing_ini] # Do not create the missing.ini file, but do create a symlink from # a config.d file to this missing file. default_ini = Path(config.config_d) / '00_default.ini' default_ini.symlink_to(missing_ini) last_update_date() self.assertEqual(last_update_date(), 'Unknown') @configuration def test_post_startup_delete(self, config): # Like test_dangling_symlink() except that an existing ini file gets # deleted after system startup, so some of the files that # last_update_date() looks at will throw an exception. # # Start by deleting any existing .ini files in config.d. This time # however we don't update config.ini_files. 
for path in Path(config.config_d).iterdir(): if path.suffix == '.ini': path.unlink() with ExitStack() as stack: tmpdir = stack.enter_context(temporary_directory()) userdata_path = Path(tmpdir) / '.last_update' stack.enter_context(patch('systemimage.helpers.LAST_UPDATE_FILE', str(userdata_path))) # Do not create the .last_update file. last_update_date() self.assertEqual(last_update_date(), 'Unknown') class TestPhasedPercentage(unittest.TestCase): def setUp(self): self._resources = ExitStack() tmpdir = self._resources.enter_context(temporary_directory()) self._mid_path = os.path.join(tmpdir, 'machine-id') self._resources.enter_context(patch( 'systemimage.helpers.UNIQUE_MACHINE_ID_FILES', [self._mid_path])) def tearDown(self): self._resources.close() def _set_machine_id(self, machine_id): with open(self._mid_path, 'w', encoding='utf-8') as fp: fp.write(machine_id) def test_phased_percentage(self): # The phased percentage is used to determine whether a calculated # winning path is to be applied or not. It returns a number between 0 # and 100 based on the machine's unique machine id (as kept in a # file), the update channel, and the target build number. self._set_machine_id('0123456789abcdef') self.assertEqual(phased_percentage(channel='ubuntu', target=11), 51) # The phased percentage is always the same, given the same # machine-id, channel, and target. self.assertEqual(phased_percentage(channel='ubuntu', target=11), 51) def test_phased_percentage_different_machine_id(self): # All else being equal, a different machine_id gives different %. self._set_machine_id('0123456789abcdef') self.assertEqual(phased_percentage(channel='ubuntu', target=11), 51) self._set_machine_id('fedcba9876543210') self.assertEqual(phased_percentage(channel='ubuntu', target=11), 25) def test_phased_percentage_different_channel(self): # All else being equal, a different channel gives different %. self._set_machine_id('0123456789abcdef') self.assertEqual(phased_percentage(channel='ubuntu', target=11), 51) self._set_machine_id('0123456789abcdef') self.assertEqual(phased_percentage(channel='devel', target=11), 96) def test_phased_percentage_different_target(self): # All else being equal, a different target gives different %. self._set_machine_id('0123456789abcdef') self.assertEqual(phased_percentage(channel='ubuntu', target=11), 51) self._set_machine_id('0123456789abcdef') self.assertEqual(phased_percentage(channel='ubuntu', target=12), 1) @configuration def test_phased_percentage_override(self, config): # The phased percentage can be overridden. self._set_machine_id('0123456789abcdef') self.assertEqual(phased_percentage(channel='ubuntu', target=11), 51) config.phase_override = 33 self.assertEqual(phased_percentage(channel='ubuntu', target=11), 33) # And reset. del config.phase_override self.assertEqual(phased_percentage(channel='ubuntu', target=11), 51) def test_phased_percentage_machine_id_file_fallback(self): # Ensure that the fallbacks for the machine-id file. with ExitStack() as resources: resources.enter_context(patch( 'systemimage.helpers.UNIQUE_MACHINE_ID_FILES', ['/does/not/exist', self._mid_path])) self._set_machine_id('0123456789abcdef') self.assertEqual( phased_percentage(channel='ubuntu', target=11), 51) def test_phased_percentage_machine_id_file_fallbacks_exhausted(self): # Not much we can do if there are no machine-id files. 
with ExitStack() as resources: resources.enter_context(patch( 'systemimage.helpers.UNIQUE_MACHINE_ID_FILES', ['/does/not/exist', '/is/not/present'])) self._set_machine_id('0123456789abcdef') self.assertRaises(RuntimeError, phased_percentage, channel='ubuntu', target=11) class TestSignature(unittest.TestCase): def test_calculate_signature(self): # Check the default hash algorithm. with tempfile.TemporaryFile() as fp: # Ensure the file is bigger than chunk size. fp.write(b'\0' * (MiB + 1)) fp.seek(0) hash1 = calculate_signature(fp) fp.seek(0) hash2 = hashlib.sha256(fp.read()).hexdigest() self.assertEqual(hash1, hash2) def test_calculate_signature_alternative_hash(self): # Check an alternative hash algorithm. with tempfile.TemporaryFile() as fp: # Ensure the file is bigger than chunk size. fp.write(b'\0' * (MiB + 1)) fp.seek(0) hash1 = calculate_signature(fp, hashlib.md5) fp.seek(0) hash2 = hashlib.md5(fp.read()).hexdigest() self.assertEqual(hash1, hash2) def test_calculate_signature_chunk_size(self): # Check that a file of exactly the chunk size works. with tempfile.TemporaryFile() as fp: fp.write(b'\0' * MiB) fp.seek(0) hash1 = calculate_signature(fp) fp.seek(0) hash2 = hashlib.sha256(fp.read()).hexdigest() self.assertEqual(hash1, hash2) class TestMiscellaneous(unittest.TestCase): def test_temporary_directory_finally_test_coverage(self): with temporary_directory() as path: shutil.rmtree(path) self.assertFalse(os.path.exists(path)) self.assertFalse(os.path.exists(path)) ./systemimage/tests/test_image.py0000644000015600001650000001242312701500553017261 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
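Every hash vector asserted in TestImage and TestNewVersionRegime below is
consistent with packing the image version into the bits above position 16
and, for deltas only, the base into the low 16 bits. The following sketch
reproduces those vectors; the real implementation lives in
systemimage/image.py and may differ in detail:

def sketch_image_hash(type, version, base=None):
    # Versions (and delta bases) must fit in 16 unsigned bits, matching
    # the rejection tests below (version=-1 and version=1 << 16 both fail).
    assert 0 <= version < (1 << 16)
    low = 0
    if type == 'delta':
        # Only deltas contribute their base; full images ignore it.
        assert base is not None and 0 <= base < (1 << 16)
        low = base
    return (version << 16) | low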
"""Test Image objects.""" __all__ = [ 'TestImage', 'TestNewVersionRegime', ] import unittest from systemimage.image import Image class TestImage(unittest.TestCase): def test_full_hash(self): image = Image(type='full', version=400) self.assertEqual(hash(image), 0b1100100000000000000000000) def test_full_hash_ignores_base(self): image = Image(type='full', version=400, base=300) self.assertEqual(hash(image), 0b1100100000000000000000000) def test_delta_includes_base(self): image = Image(type='delta', version=400, base=300) self.assertEqual(hash(image), 0b1100100000000000100101100) def test_delta_with_more_info(self): image = Image(type='delta', version=299, base=1212) self.assertEqual(hash(image), 0b1001010110000010010111100) def test_full_equal(self): image_1 = Image(type='full', version=400) image_2 = Image(type='full', version=400) self.assertEqual(image_1, image_2) def test_full_inequal(self): image_1 = Image(type='full', version=400) image_2 = Image(type='full', version=401) self.assertNotEqual(image_1, image_2) def test_full_equal_ignores_base(self): image_1 = Image(type='full', version=400, base=300) image_2 = Image(type='full', version=400, base=299) self.assertEqual(image_1, image_2) def test_full_equal_ignores_missing_base(self): image_1 = Image(type='full', version=400, base=300) image_2 = Image(type='full', version=400) self.assertEqual(image_1, image_2) def test_full_delta_with_base_inequal(self): image_1 = Image(type='full', version=400, base=300) image_2 = Image(type='delta', version=400, base=300) self.assertNotEqual(image_1, image_2) def test_default_phased_percentage(self): image = Image(type='full', version=10) self.assertEqual(image.phased_percentage, 100) def test_explicit_phased_percentage(self): kws = dict(type='full', version=10) kws['phased-percentage'] = '39' image = Image(**kws) self.assertEqual(image.phased_percentage, 39) class TestNewVersionRegime(unittest.TestCase): """LP: #1218612""" def test_full_hash(self): image = Image(type='full', version=3) self.assertEqual(hash(image), 0b00000000000000110000000000000000) def test_full_hash_ignores_base(self): image = Image(type='full', version=3, base=2) self.assertEqual(hash(image), 0b00000000000000110000000000000000) def test_delta_includes_base(self): image = Image(type='delta', version=3, base=2) self.assertEqual(hash(image), 0b00000000000000110000000000000010) def test_delta_with_more_info(self): image = Image(type='delta', version=99, base=83) self.assertEqual(hash(image), 0b00000000011000110000000001010011) def test_full_equal(self): image_1 = Image(type='full', version=17) image_2 = Image(type='full', version=17) self.assertEqual(image_1, image_2) def test_full_inequal(self): image_1 = Image(type='full', version=17) image_2 = Image(type='full', version=18) self.assertNotEqual(image_1, image_2) def test_full_equal_ignores_base(self): image_1 = Image(type='full', version=400, base=300) image_2 = Image(type='full', version=400, base=299) self.assertEqual(image_1, image_2) def test_full_equal_ignores_missing_base(self): image_1 = Image(type='full', version=400, base=300) image_2 = Image(type='full', version=400) self.assertEqual(image_1, image_2) def test_full_delta_with_base_inequal(self): image_1 = Image(type='full', version=400, base=300) image_2 = Image(type='delta', version=400, base=300) self.assertNotEqual(image_1, image_2) def test_signed_version_rejects(self): self.assertRaises(AssertionError, hash, Image(type='full', version=-1)) def test_17bit_version_rejects(self): self.assertRaises(AssertionError, hash, 
                          Image(type='full', version=1 << 16))

    def test_mixed_regime_rejects(self):
        self.assertRaises(AssertionError, hash,
                          Image(type='delta', version=3, base=20130899))

    def test_mixed_regime_full_okay(self):
        self.assertEqual(hash(Image(type='full', version=3, base=20130899)),
                         0b00000000000000110000000000000000)

    def test_mixed_regime_reversed_rejects(self):
        self.assertRaises(AssertionError, hash,
                          Image(type='delta', version=20130899, base=3))
./systemimage/keyring.py0000644000015600001650000001617212701500553015453 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd.
# Author: Barry Warsaw

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .

"""Downloading, verifying, and unpacking a keyring."""

__all__ = [
    'KeyringError',
    'get_keyring',
    ]


import os
import json
import shutil
import tarfile

from contextlib import ExitStack
from datetime import datetime, timezone
from systemimage.config import config
from systemimage.download import get_download_manager
from systemimage.gpg import Context
from systemimage.helpers import makedirs, safe_remove
from urllib.parse import urljoin


class KeyringError(Exception):
    """An error occurred getting the keyring."""

    def __init__(self, message):
        self.message = message


def get_keyring(keyring_type, urls, sigkr, blacklist=None):
    """Download, verify, and unpack a keyring.

    The keyring .tar.xz file and its signature file are downloaded. The
    signature is verified against the keys in the signature keyring gpg
    file. If this fails, a SignatureError is raised and the files are
    deleted.

    If this succeeds, the tar.xz is unpacked, which should produce a
    keyring.gpg file containing the keyring, and a keyring.json file
    describing the keyring. We load up the json file and verify that the
    keyring 'type' matches the type parameter and that the 'expiry' key,
    which names a UTC UNIX epoch timestamp, has not yet expired. Also, the
    'model' key is checked - it is optional in the json file, and when it's
    missing, it means it applies to any model.

    If any of these conditions fails, a KeyringError is raised and the
    files are deleted.

    Assuming everything checks out, the .gpg file is copied to the cache
    location for the unpacked keyring, and the downloaded .tar.xz and
    .tar.xz.asc files are moved into place. All the other ancillary files
    are deleted.

    :param keyring_type: The type of keyring file to download. This can be
        one of 'archive-master', 'image-master', 'image-signing',
        'device-signing', or 'blacklist'.
    :param urls: Either a string naming the url to the source of the keyring
        .tar.xz file (in which case the url to the associated .asc file will
        be calculated), or a 2-tuple naming the .tar.xz and .tar.xz.asc
        files.
    :param sigkr: The local keyring file that should be used to verify the
        downloaded signature.
    :param blacklist: When given, this is the signature blacklist file.
    :raises SignatureError: when the keyring signature does not match.
    :raises KeyringError: when any of the other verifying attributes of the
        downloaded keyring fails.
    """
    # Calculate the urls to the .tar.xz and .asc files.
if isinstance(urls, tuple): srcurl, ascurl = urls else: srcurl = urls ascurl = urls + '.asc' tarxz_src = urljoin(config.https_base, srcurl) ascxz_src = urljoin(config.https_base, ascurl) # Calculate the local paths to the temporary download files. The # blacklist goes to the data partition and all the other files go to the # cache partition. dstdir = (config.updater.data_partition if keyring_type == 'blacklist' else config.updater.cache_partition) tarxz_dst = os.path.join(dstdir, 'keyring.tar.xz') ascxz_dst = tarxz_dst + '.asc' # Delete any files that were previously present. The download manager # will raise an exception if it finds a file already there. safe_remove(tarxz_dst) safe_remove(ascxz_dst) with ExitStack() as stack: # Let FileNotFoundError percolate up. get_download_manager().get_files([ (tarxz_src, tarxz_dst), (ascxz_src, ascxz_dst), ]) stack.callback(os.remove, tarxz_dst) stack.callback(os.remove, ascxz_dst) signing_keyring = getattr(config.gpg, sigkr.replace('-', '_')) with Context(signing_keyring, blacklist=blacklist) as ctx: ctx.validate(ascxz_dst, tarxz_dst) # The signature is good, so now unpack the tarball, load the json file # and verify its contents. keyring_gpg = os.path.join(config.tempdir, 'keyring.gpg') keyring_json = os.path.join(config.tempdir, 'keyring.json') with tarfile.open(tarxz_dst, 'r:xz') as tf: tf.extractall(config.tempdir) stack.callback(os.remove, keyring_gpg) stack.callback(os.remove, keyring_json) with open(keyring_json, 'r', encoding='utf-8') as fp: data = json.load(fp) # Check the mandatory keys first. json_type = data['type'] if keyring_type != json_type: raise KeyringError( 'keyring type mismatch; wanted: {}, got: {}'.format( keyring_type, json_type)) # Check the optional keys next. json_model = data.get('model') if json_model not in (config.device, None): raise KeyringError( 'keyring model mismatch; wanted: {}, got: {}'.format( config.device, json_model)) expiry = data.get('expiry') if expiry is not None: # Get our current timestamp in UTC. timestamp = datetime.now(tz=timezone.utc).timestamp() if expiry < timestamp: # We've passed the expiration date for this keyring. raise KeyringError('expired keyring timestamp') # Everything checks out. We now have the generic keyring.tar.xz and # keyring.tar.xz.asc files inside the cache (or data, in the case of # the blacklist) partition, which is where they need to be for # recovery. # # These files need to be renamed to their actual .tar.xz # and .asc file names. # # We also want copies of these latter files to live in /var/lib so # that we don't have to download them again if we don't need to. if keyring_type == 'blacklist': tarxz_path = os.path.join( config.updater.data_partition, 'blacklist.tar.xz') else: tarxz_path = getattr(config.gpg, keyring_type.replace('-', '_')) ascxz_path = tarxz_path + '.asc' makedirs(os.path.dirname(tarxz_path)) safe_remove(tarxz_path) safe_remove(ascxz_path) shutil.copy(tarxz_dst, tarxz_path) shutil.copy(ascxz_dst, ascxz_path) # For all keyrings, copy the extracted .gpg file to the tempdir. We # will always fallback to this path to avoid unpacking the .tar.xz # file every single time. gpg_path = os.path.join(config.tempdir, keyring_type + '.gpg') shutil.copy(keyring_gpg, gpg_path) ./systemimage/scores.py0000644000015600001650000001601012701500553015270 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. 
# Author: Barry Warsaw

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .

"""Upgrade policy decisions.

Choose which upgrade path to use based on the available candidates.
"""

__all__ = [
    'Scorer',
    'WeightedScorer',
    ]


import logging

from itertools import count
from systemimage.helpers import MiB, phased_percentage

log = logging.getLogger('systemimage')
COLON = ':'


class Scorer:
    """Abstract base class providing an API for candidate selection."""

    def choose(self, candidates, channel):
        """Choose the candidate upgrade paths.

        Lowest score wins.

        :param candidates: A list of lists of image records needed to upgrade
            the device from the current version to the latest version, sorted
            in order from oldest version to newest.
        :type candidates: list of lists
        :param channel: The channel being upgraded to. This is used in the
            phased update calculation.
        :type channel: str
        :return: The chosen path.
        :rtype: list
        """
        if len(candidates) == 0:
            log.debug('No candidates, so no winner')
            return []
        # We want to zip together the score for each candidate path, plus the
        # candidate path, so that when we sort the sequence, we'll always get
        # the lowest scoring upgrade path first. The problem is that when two
        # paths have the same score, sorted()'s comparison will find the first
        # element of the tuple is the same and fall back to the second item.
        # If that item is a list of Image objects, then it will try to compare
        # Image objects, which are not comparable.
        #
        # We solve this by zipping in a second element which is guaranteed to
        # be a monotonically increasing integer. Thus if two paths score the
        # same, we'll just end up picking the first one we saw, and comparison
        # will never fall back to the list of Images.
        #
        # Be sure that after all is said and done we return the list of Images
        # though!
        scores = sorted(zip(self.score(candidates), count(), candidates))
        # Calculate the phased percentage for the device. Use the highest
        # available build number as input into the random seed.
        max_target_number = -1
        for score, i, path in scores:
            # The last image will be the target image.
            assert len(path) > 0, 'Empty upgrade candidate path?'
            max_target_number = max(max_target_number, path[-1].version)
        assert max_target_number != -1, 'No max target version?'
        device_percentage = phased_percentage(channel, max_target_number)
        log.debug('Device phased percentage: {}%'.format(device_percentage))
        log.debug('{} path scores:'.format(self.__class__.__name__))
        # Log the candidate paths, their scores, and their phases.
        for score, i, path in reversed(scores):
            log.debug('\t[{:4d}] -> {} ({}%)'.format(
                score,
                COLON.join(str(image.version) for image in path),
                (path[-1].phased_percentage if len(path) > 0 else '--')
                ))
        for score, i, path in scores:
            image_percentage = path[-1].phased_percentage
            # An image percentage of 0 means that it's been pulled.
            if image_percentage > 0 and device_percentage <= image_percentage:
                return path
        # No upgrade path.
        return []

    def score(self, candidates):                    # pragma: no cover
        """Like `choose()` except returns the candidate path scores.

        Subclasses are expected to override this method.

        :param candidates: A list of lists of image records needed to upgrade
            the device from the current version to the latest version, sorted
            in order from oldest version to newest.
        :type candidates: list of lists
        :return: The list of path scores. This will be the same size as the
            list of paths in `candidates`.
        :rtype: list
        """
        raise NotImplementedError


class WeightedScorer(Scorer):
    """Use the following inputs and weights.

    Lowest score wins.

    reboots - Look at the entire path and add 100 for every extra reboot
        required. The implicit end-of-update reboot is not counted.

    total download size - add 1 for every 1MiB over the smallest image.

    destination build number - add 9001 for every unit of distance from the
        highest available build number, which essentially rules out any path
        that does not end at the highest build.

    Examples:

    - Path A requires three extra reboots, is the smallest total download
      and leaves you at the highest available version. Score: 300

    - Path B requires one extra reboot, but is 100MiB bigger and leaves you
      at the highest available version. Score: 200

    - Path C requires no extra reboots, but is 400MiB bigger and leaves you
      at 20130303 instead of the highest 20130304. Score: 9401

    Path B wins.
    """
    def score(self, candidates):
        # Iterate over every path, calculating the total download size of the
        # path, the number of extra reboots required, and the destination
        # build number. Remember the smallest size seen and highest build
        # number.
        max_build = 0
        min_size = None
        candidate_data = []
        for path in candidates:
            build = path[-1].version
            size = 0
            for image in path:
                image_size = sum(filerec.size for filerec in image.files)
                size += image_size
            reboots = sum(1 for image in path
                          if getattr(image, 'bootme', False))
            candidate_data.append((build, size, reboots, path))
            max_build = max(build, max_build)
            min_size = (size if (min_size is None or size < min_size)
                        else min_size)
        # Score the candidates. Any path that doesn't leave you at the
        # maximum build number gets a ridiculously high score so it won't
        # possibly be chosen.
        scores = []
        for build, size, reboots, path in candidate_data:
            score = (100 * reboots) + ((size - min_size) // MiB)
            # If the path does not leave you at the maximum build number, add
            # a ridiculously high value which essentially prevents that
            # candidate path from winning.
            distance = max_build - build
            score += (9000 * distance) + distance
            scores.append(score)
        return scores
./systemimage/udm.py0000644000015600001650000002302212701500553014560 0ustar jenkinsjenkins# Copyright (C) 2014-2016 Canonical Ltd.
# Author: Barry Warsaw

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .

"""Download files via ubuntu-download-manager."""

__all__ = [
    'UDMDownloadManager',
    ]


import os
import dbus
import logging

from systemimage.config import config
from systemimage.download import Canceled, DownloadManagerBase
from systemimage.reactor import Reactor
from systemimage.settings import Settings

log = logging.getLogger('systemimage')

# Parameterized for testing purposes.
DOWNLOADER_INTERFACE = 'com.canonical.applications.Downloader' MANAGER_INTERFACE = 'com.canonical.applications.DownloadManager' OBJECT_NAME = 'com.canonical.applications.Downloader' OBJECT_INTERFACE = 'com.canonical.applications.GroupDownload' def _headers(): return {'User-Agent': config.user_agent} def _print(*args, **kws): # We must import this here to avoid circular imports. ## from systemimage.testing.helpers import debug ## with debug() as ddlog: ## ddlog(*args, **kws) pass class DownloadReactor(Reactor): def __init__(self, bus, object_path, callback=None, pausable=False, signal_started=False): super().__init__(bus) self._callback = callback self._pausable = pausable self._signal_started = signal_started # For _do_pause() percentage calculation. self._received = 0 self._total = 0 self.error = None self.canceled = False self.local_paths = None self.react_to('canceled', object_path) self.react_to('error', object_path) self.react_to('finished', object_path) self.react_to('paused', object_path) self.react_to('progress', object_path) self.react_to('resumed', object_path) self.react_to('started', object_path) def _do_started(self, signal, path, started): _print('STARTED:', started) if self._signal_started and config.dbus_service is not None: config.dbus_service.DownloadStarted() def _do_finished(self, signal, path, local_paths): _print('FINISHED:', local_paths) self.local_paths = local_paths self.quit() def _do_error(self, signal, path, error_message): _print('ERROR:', error_message) log.error(error_message) self.error = error_message self.quit() def _do_progress(self, signal, path, received, total): _print('PROGRESS:', received, total) # For _do_pause() percentage calculation. self._received = received self._total = total self._callback(received, total) def _do_canceled(self, signal, path, canceled): # Why would we get this signal if it *wasn't* canceled? Anyway, # this'll be a D-Bus data type so converted it to a vanilla Python # boolean. _print('CANCELED:', canceled) self.canceled = bool(canceled) self.quit() def _do_paused(self, signal, path, paused): _print('PAUSE:', paused, self._pausable) send_paused = self._pausable and config.dbus_service is not None if send_paused: # pragma: no branch # We could plumb through the `service` object from service.py (the # main entry point for system-image-dbus, but that's actually a # bit of a pain, so do the expedient thing and grab the interface # here. percentage = (int(self._received / self._total * 100.0) if self._total > 0 else 0) config.dbus_service.UpdatePaused(percentage) def _do_resumed(self, signal, path, resumed): _print('RESUME:', resumed) # There currently is no UpdateResumed() signal. def _default(self, *args, **kws): _print('SIGNAL:', args, kws) # pragma: no cover class UDMDownloadManager(DownloadManagerBase): """Download via ubuntu-download-manager (UDM).""" def __init__(self, callback=None): super().__init__() if callback is not None: self.callbacks.append(callback) self._iface = None def _get_files(self, records, pausable, signal_started): assert self._iface is None bus = dbus.SystemBus() service = bus.get_object(DOWNLOADER_INTERFACE, '/') iface = dbus.Interface(service, MANAGER_INTERFACE) object_path = iface.createDownloadGroup( records, 'sha256', False, # Don't allow GSM yet. # https://bugs.freedesktop.org/show_bug.cgi?id=55594 dbus.Dictionary(signature='sv'), _headers()) download = bus.get_object(OBJECT_NAME, object_path) self._iface = dbus.Interface(download, OBJECT_INTERFACE) # Are GSM downloads allowed? 
Yes, except if auto_download is set to 1 # (i.e. wifi-only). allow_gsm = Settings().get('auto_download') != '1' # See if the CLI was called with --override-gsm. if not allow_gsm and config.override_gsm: log.info('GSM-only overridden') allow_gsm = True log.info('Allow GSM? {}', ('Yes' if allow_gsm else 'No')) UDMDownloadManager._set_gsm(self._iface, allow_gsm=allow_gsm) # Start the download. reactor = DownloadReactor( bus, object_path, self._reactor_callback, pausable, signal_started) reactor.schedule(self._iface.start) log.info('[{}] Running group download reactor', object_path) log.info('self: {}, self._iface: {}', self, self._iface) reactor.run() # This download is complete so the object path is no longer # applicable. Setting this to None will cause subsequent cancels to # be queued. self._iface = None log.info('[{}] Group download reactor done', object_path) if reactor.error is not None: log.error('Reactor error: {}'.format(reactor.error)) if reactor.canceled: log.info('Reactor canceled') # Report any other problems. if reactor.error is not None: raise FileNotFoundError(reactor.error) if reactor.canceled: raise Canceled if reactor.timed_out: raise TimeoutError # Sanity check the downloaded results. # First, every requested destination file must exist, otherwise # udm would not have given us a `finished` signal. missing = [record.destination for record in records if not os.path.exists(record.destination)] if len(missing) > 0: # pragma: no cover local_paths = sorted(reactor.local_paths) raise AssertionError( 'Missing destination files: {}\nlocal_paths: {}'.format( missing, local_paths)) def _reactor_callback(self, received, total): self.received = received self.total = total self._do_callback() @staticmethod def _set_gsm(iface, *, allow_gsm): # This is a separate method for easier testing via mocks. iface.allowGSMDownload(allow_gsm) @staticmethod def allow_gsm(): """See `DownloadManagerBase`.""" # We can't rely on self._iface being the interface of the group # download object. Use getAllDownloads() on UDM to get the group # download object path, assert that there is only one group download # in progress, then call allowGSMDownload() on that. bus = dbus.SystemBus() service = bus.get_object(DOWNLOADER_INTERFACE, '/') iface = dbus.Interface(service, MANAGER_INTERFACE) try: object_paths = iface.getAllDownloads() except TypeError: # If there is no download in progress, udm will cause this # exception to occur. Allow this to no-op. log.info('Ignoring GSM force when no download is in progress.') return assert len(object_paths) == 1, object_paths download = bus.get_object(OBJECT_NAME, object_paths[0]) dbus.Interface(download, OBJECT_INTERFACE).allowGSMDownload(True) def cancel(self): """Cancel any current downloads.""" if self._iface is None: # Since there's no download in progress right now, there's nothing # to cancel. Setting this flag queues the cancel signal once the # reactor starts running again. Yes, this is a bit weird, but if # we don't do it this way, the caller will immediately get a # Canceled exception, which isn't helpful because it's expecting # one when the next download begins. 
super().cancel() else: self._iface.cancel() def pause(self): """Pause the download, but only if one is in progress.""" if self._iface is not None: # pragma: no branch self._iface.pause() def resume(self): """Resume the download, but only if one is in progress.""" if self._iface is not None: # pragma: no branch self._iface.resume() ./systemimage/candidates.py0000644000015600001650000001425312701500553016100 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Determine candidate images.""" __all__ = [ 'delta_filter', 'full_filter', 'get_candidates', 'iter_path', 'version_filter', ] from collections import deque class _Chaser: def __init__(self): # Paths are represented by lists, so we need to listify each element # of the initial set of roots. self._paths = deque() def __iter__(self): while self._paths: yield self._paths.pop() def push(self, new_path): # new_path must be a list. self._paths.appendleft(new_path) def get_candidates(index, build): """Calculate all the candidate upgrade paths. This function returns a list of candidate upgrades paths, from the current build number to the latest build available in the index file. Each element of this list of candidates is itself a list of `Image` objects, in the order that they should be applied to upgrade the device. The upgrade candidate chains are not sorted, ordered, or prioritized in any way. They are simply the list of upgrades that will satisfy the requirements. It is possible that there are no upgrade candidates if the device is already at the latest build, or if the device is at a build too old to update. :param index: The index of available upgrades. :type index: An `Index` :param build: The build version number that the device is currently at. :type build: str :return: list-of-lists of upgrade paths. The empty list is returned if there are no candidate paths. """ # Start by splitting the images into fulls and delta. Throw out any full # updates which have a minimum version greater than our version. fulls = set() deltas = set() for image in index.images: if image.type == 'full': if getattr(image, 'minversion', 0) <= build: fulls.add(image) elif image.type == 'delta': deltas.add(image) else: # pragma: no cover # BAW 2013-04-30: log and ignore. raise AssertionError('unknown image type: {}'.format(image.type)) # Load up the roots of candidate upgrade paths. chaser = _Chaser() # Each full version that is newer than our current version provides the # start of an upgrade path. for image in fulls: if image.version > build: chaser.push([image]) # Each delta with a base that matches our version also provides the start # of an upgrade path. for image in deltas: if image.base == build: chaser.push([image]) # Chase the back pointers from the deltas until we run out of newer # versions. It's possible to push new paths into the chaser if we find a # fork in the road (i.e. two deltas with the same base). 
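    # Illustrative aside (hand-worked example, not code): starting from
    # build 100 with one full image at version 300 and deltas 100->200,
    # 200->300, and 200->250, the roots are [full 300] and [delta 200];
    # chasing [delta 200] hits a fork at base 200 and the loop below
    # yields three candidate paths in total:
    #
    #     [full 300]
    #     [delta 200, delta 300]
    #     [delta 200, delta 250]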
paths = list() for path in chaser: current = path[-1] while True: # Find all the deltas that have this step as their base. next_steps = [delta for delta in deltas if delta.base == current.version] # If there is no next step, then we're done with this path. if len(next_steps) == 0: paths.append(path) break # If there's only one next step, append that to path and keep # going, with this step as the current image. elif len(next_steps) == 1: current = next_steps[0] path.append(current) # Otherwise, we have a fork. Take one fork now and push the other # paths onto the chaser. else: current = next_steps.pop() for fork in next_steps: new_path = path.copy() new_path.append(fork) chaser.push(new_path) path.append(current) return paths def iter_path(winner): """Iterate over all the file records for a given upgrade path. Image traversal will stop after the first `bootme` flag is seen, so the list of files to download may not include all the files in the upgrade candidate. :param winner: The list of images for the winning candidate. :return: A sequence of 2-tuples, where the first item is a "image number", i.e. which image in the path of winning images this file record belongs to, and the second item is the file record. """ for n, image in enumerate(winner): for filerec in image.files: yield (n, filerec) if getattr(image, 'bootme', False): break def full_filter(candidates): filtered = [] for path in candidates: full_image = None for image in path: # Take the last full update we find from the start of the path. if image.type != 'full': break full_image = image if full_image is not None: filtered.append([full_image]) return filtered def delta_filter(candidates): filtered = [] for path in candidates: new_path = [] for image in path: # Add all the deltas from the start of the path to the first full. if image.type != 'delta': break new_path.append(image) if len(new_path) != 0: filtered.append(new_path) return filtered class version_filter: def __init__(self, maximum_version): self.maximum_version = maximum_version def __call__(self, winner): return [image for image in winner if image.version <= self.maximum_version] ./systemimage/reactor.py0000644000015600001650000000771612701500553015446 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """A D-Bus signal reactor class.""" import os import logging from gi.repository import GLib log = logging.getLogger('systemimage') # LP: #1240106 - We get intermittent and unreproducible TimeoutErrors in the # DEP 8 when the default timeout is used. It seems like cranking this up to # 20 minutes makes the tests pass. This must be some weird interaction # between ubuntu-download-manager and the autopkgtest environment because a # TimeoutError means we don't hear from u-d-m for 10 minutes... at all! No # signals of any kind. 
It's possible this is related to LP: #1240157 in # u-d-m, but as that won't likely get fixed for Saucy, this is a hack that # allows the DEP 8 tests to increase the timeout and hopefully succeed. OVERRIDE = os.environ.get('SYSTEMIMAGE_REACTOR_TIMEOUT') TIMEOUT_SECONDS = (600 if OVERRIDE is None else int(OVERRIDE)) class Reactor: """A reactor base class for DBus signals.""" def __init__(self, bus): self._bus = bus self._loop = None # Keep track of the GLib handles to the loop-quitting callback, and # all the signal matching callbacks. Once the reactor run loop quits, # we want to remove all callbacks so they can't accidentally be called # again later. self._quitter = None self._signal_matches = [] self._active_timeout = None self.timeout = TIMEOUT_SECONDS self.timed_out = False def _handle_signal(self, *args, **kws): # We've seen some activity from the D-Bus service, so reset our # timeout loop. self._reset_timeout() # Now dispatch the signal. signal = kws.pop('member') path = kws.pop('path') method = getattr(self, '_do_' + signal, None) if method is None: # See if there's a default catch all. method = getattr(self, '_default', None) if method is None: # pragma: no cover log.info('No handler for signal {}: {} {}', signal, args, kws) else: method(signal, path, *args, **kws) def _reset_timeout(self, *, try_again=True): if self._quitter is not None: GLib.source_remove(self._quitter) self._quitter = None if try_again: self._quitter = GLib.timeout_add_seconds( self._active_timeout, self._quit_with_error) def react_to(self, signal, object_path=None): signal_match = self._bus.add_signal_receiver( self._handle_signal, signal_name=signal, path=object_path, member_keyword='member', path_keyword='path', ) self._signal_matches.append(signal_match) def schedule(self, method, milliseconds=50): GLib.timeout_add(milliseconds, method) def run(self, timeout=None): self._active_timeout = (self.timeout if timeout is None else timeout) self._loop = GLib.MainLoop() self._reset_timeout() self._loop.run() def quit(self): self._loop.quit() for match in self._signal_matches: match.remove() del self._signal_matches[:] self._reset_timeout(try_again=False) self._quitter = None self._active_timeout = None def _quit_with_error(self): self.timed_out = True self.quit() ./systemimage/testing/0000755000015600001650000000000012701500553015077 5ustar jenkinsjenkins./systemimage/testing/__init__.py0000644000015600001650000000000012701500553017176 0ustar jenkinsjenkins./systemimage/testing/demo.py0000644000015600001650000000226712701500553016404 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """A helper for demos.""" __all__ = [ 'DemoDevice', 'DemoReboot', 'TestingDevice', ] from systemimage.apply import BaseApply from systemimage.device import BaseDevice class DemoReboot(BaseApply): def apply(self): print("If I was a phone, I'd be rebooting right about now.") class DemoDevice(BaseDevice): def get_device(self): # Sure, why not be a grouper? 
return 'grouper' class TestingDevice(BaseDevice): # For test_dbus.py def get_device(self): return 'nexus7' ./systemimage/testing/dbus.py0000644000015600001650000003264212701500553016415 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Helpers for the DBus service when run with --testing.""" __all__ = [ 'get_service', 'instrument', ] import os import logging try: import pycurl except ImportError: pycurl = None from dbus.service import method, signal from gi.repository import GLib from pathlib import Path from systemimage.api import Mediator from systemimage.config import config from systemimage.dbus import Service, log_and_exit from systemimage.helpers import MiB, makedirs, safe_remove, version_detail from systemimage.logging import make_handler from unittest.mock import patch SPACE = ' ' SIGNAL_DELAY_SECS = 5 class _ActionLog: def __init__(self, filename): self._path = os.path.join(config.updater.cache_partition, filename) def write(self, *args, **kws): with open(self._path, 'w', encoding='utf-8') as fp: fp.write(SPACE.join(args[0]).strip()) def instrument(config, stack, cert_file): """Instrument the system for testing.""" # Ensure the destination directories exist. makedirs(config.updater.data_partition) makedirs(config.updater.cache_partition) # Patch the subprocess call to write the reboot command to a log # file which the testing parent process can open and read. safe_reboot = _ActionLog('reboot.log') stack.enter_context( patch('systemimage.apply.check_call', safe_reboot.write)) stack.enter_context( patch('systemimage.device.check_output', return_value='nexus7')) # If available, patch the PyCURL downloader to accept self-signed # certificates. if pycurl is not None: def self_sign(c): c.setopt(pycurl.CAINFO, cert_file) stack.enter_context( patch('systemimage.curl.make_testable', self_sign)) class _LiveTestableService(Service): """For testing purposes only.""" def __init__(self, bus, object_path, loop): super().__init__(bus, object_path, loop) self._debug_handler = None @log_and_exit @method('com.canonical.SystemImage') def Reset(self): config.reload() self._api = Mediator() try: self._checking.release() except RuntimeError: # Lock is already released. pass self._update = None self._rebootable = False self._failure_count = 0 del config.build_number safe_remove(config.system.settings_db) @log_and_exit @method('com.canonical.SystemImage') def TearDown(self): # Like CancelUpdate() except it sends a different signal that's only # useful for the test suite. self._api.cancel() self.TornDown() @log_and_exit @signal('com.canonical.SystemImage') def TornDown(self): pass @log_and_exit @method('com.canonical.SystemImage', in_signature='ss', out_signature='ss') def DebugDBusTo(self, filename, level_name): # Get the existing logging level and logging file name. 
dbus_log = logging.getLogger('systemimage.dbus') old_level = logging.getLevelName(dbus_log.getEffectiveLevel()) old_filename = config.system.logfile # Remove any previous D-Bus debugging handler. if self._debug_handler is not None: dbus_log.removeHandler(self._debug_handler) self._debug_handler = None new_level = getattr(logging, level_name.upper()) dbus_log.setLevel(new_level) if filename != '': self._debug_handler = make_handler(Path(filename)) self._debug_handler.setLevel(new_level) dbus_log.addHandler(self._debug_handler) return old_filename, old_level class _UpdateAutoSuccess(Service): """Normal update in auto-download mode.""" def __init__(self, bus, object_path, loop): super().__init__(bus, object_path, loop) self._reset() def _reset(self): self._auto_download = True self._canceled = False self._downloading = False self._eta = 50.0 self._failure_count = 0 self._paused = False self._percentage = 0 self._rebootable = False @log_and_exit @method('com.canonical.SystemImage') def Reset(self): self._reset() @log_and_exit @method('com.canonical.SystemImage') def CheckForUpdate(self): if self._failure_count > 0: self._reset() GLib.timeout_add_seconds(3, self._send_status) @log_and_exit def _send_status(self): if self._auto_download: self._downloading = True self.UpdateAvailableStatus( True, self._downloading, '42', 1337 * MiB, '1983-09-13T12:13:14', '') if self._downloading and not self._rebootable: self.UpdateProgress(0, 50.0) GLib.timeout_add(500, self._send_more_status) if self._paused: self.UpdatePaused(self._percentage) elif self._rebootable: self.UpdateDownloaded() return False @log_and_exit def _send_more_status(self): if self._canceled: self._downloading = False self._failure_count += 1 self.UpdateFailed(self._failure_count, 'canceled') return False if not self._paused: self._percentage += 1 self._eta -= 0.5 if self._percentage == 100: # We're done. self._downloading = False self._rebootable = True self.UpdateDownloaded() return False self.UpdateProgress(self._percentage, self._eta) # Continue sending more status. return True @log_and_exit @method('com.canonical.SystemImage', out_signature='s') def PauseDownload(self): if self._downloading: self._paused = True self.UpdatePaused(self._percentage) # Otherwise it's a no-op. return '' @log_and_exit @method('com.canonical.SystemImage') def DownloadUpdate(self): self._paused = False if not self._downloading: if not self._auto_download: self._downloading = True self.UpdateProgress(0, 50.0) GLib.timeout_add(500, self._send_more_status) @log_and_exit @method('com.canonical.SystemImage', out_signature='s') def CancelUpdate(self): if self._downloading: self._canceled = True # Otherwise it's a no-op. return '' @log_and_exit @method('com.canonical.SystemImage') def ApplyUpdate(self): # Always succeeds. def _applied(): self.Applied(True) GLib.timeout_add(50, _applied) class _UpdateManualSuccess(_UpdateAutoSuccess): def _reset(self): super()._reset() self._auto_download = False class _UpdateFailed(Service): def __init__(self, bus, object_path, loop): super().__init__(bus, object_path, loop) self._reset() def _reset(self): self._failure_count = 1 self._last_error = 'mock service failed' @log_and_exit @method('com.canonical.SystemImage') def Reset(self): self._reset() @log_and_exit @method('com.canonical.SystemImage') def CheckForUpdate(self): msg = ('You need some network for downloading' if self._failure_count > 0 else '') # Fake enough of the update status to trick _download() into checking # the failure state. 
class Update: is_available = True self._update = Update() self.UpdateAvailableStatus( True, False, '42', 1337 * MiB, '1983-09-13T12:13:14', msg) if self._failure_count > 0: self._failure_count += 1 self.UpdateFailed(self._failure_count, msg) @log_and_exit @method('com.canonical.SystemImage', out_signature='s') def CancelUpdate(self): self._failure_count = 0 return '' class _FailApply(Service): @log_and_exit @method('com.canonical.SystemImage') def Reset(self): pass @log_and_exit @method('com.canonical.SystemImage') def CheckForUpdate(self): self.UpdateAvailableStatus( True, False, '42', 1337 * MiB, '1983-09-13T12:13:14', '') self.UpdateDownloaded() @log_and_exit @method('com.canonical.SystemImage') def ApplyUpdate(self): # The update cannot be applied. def _applied(): self.Applied(False) GLib.timeout_add(50, _applied) class _FailResume(Service): @log_and_exit @method('com.canonical.SystemImage') def Reset(self): pass @log_and_exit @method('com.canonical.SystemImage') def CheckForUpdate(self): self.UpdateAvailableStatus( True, False, '42', 1337 * MiB, '1983-09-13T12:13:14', '') self.UpdatePaused(42) @log_and_exit @method('com.canonical.SystemImage') def DownloadUpdate(self): self.UpdateFailed(9, 'You need some network for downloading') class _FailPause(Service): @log_and_exit @method('com.canonical.SystemImage') def Reset(self): pass @log_and_exit @method('com.canonical.SystemImage') def CheckForUpdate(self): self.UpdateAvailableStatus( True, True, '42', 1337 * MiB, '1983-09-13T12:13:14', '') self.UpdateProgress(10, 0) @log_and_exit @method('com.canonical.SystemImage', out_signature='s') def PauseDownload(self): return 'no no, not now' class _NoUpdate(Service): @log_and_exit @method('com.canonical.SystemImage') def Reset(self): pass @log_and_exit @method('com.canonical.SystemImage') def CheckForUpdate(self): GLib.timeout_add_seconds(3, self._send_status) @log_and_exit def _send_status(self): self.UpdateAvailableStatus( False, False, '', 0, '1983-09-13T12:13:14', '') class _MoreInfo(Service): def __init__(self, bus, object_path, loop): super().__init__(bus, object_path, loop) self._buildno = 45 self._device = 'nexus11' self._channel = 'daily-proposed' self._updated = '2099-08-01 04:45:45' self._version = 'ubuntu=123,mako=456,custom=789' self._checked = '2099-08-01 04:45:00' self._target = 53 @log_and_exit @method('com.canonical.SystemImage') def Reset(self): pass @log_and_exit @method('com.canonical.SystemImage', out_signature='isssa{ss}') def Info(self): return (self._buildno, self._device, self._channel, self._updated, version_detail(self._version)) @log_and_exit @method('com.canonical.SystemImage', out_signature='a{ss}') def Information(self): return dict(current_build_number=str(self._buildno), device_name=self._device, channel_name=self._channel, last_update_date=self._updated, version_detail=self._version, last_check_date=self._checked, target_build_number=str(self._target)) class _Crasher(Service): @log_and_exit @method('com.canonical.SystemImage') def Crash(self): 1/0 @log_and_exit @signal('com.canonical.SystemImage') def SignalCrash(self): 1/0 @log_and_exit @signal('com.canonical.SystemImage') def SignalOkay(self): pass @log_and_exit @method('com.canonical.SystemImage') def CrashSignal(self): self.SignalCrash() @log_and_exit @method('com.canonical.SystemImage') def Okay(self): pass @log_and_exit @method('com.canonical.SystemImage') def CrashAfterSignal(self): self.SignalOkay() 1/0 def get_service(testing_mode, system_bus, object_path, loop): """Return the appropriate service class 
for the testing mode.""" if testing_mode == 'live': ServiceClass = _LiveTestableService elif testing_mode == 'update-auto-success': ServiceClass = _UpdateAutoSuccess elif testing_mode == 'update-manual-success': ServiceClass = _UpdateManualSuccess elif testing_mode == 'update-failed': ServiceClass = _UpdateFailed elif testing_mode == 'fail-apply': ServiceClass = _FailApply elif testing_mode == 'fail-resume': ServiceClass = _FailResume elif testing_mode == 'fail-pause': ServiceClass = _FailPause elif testing_mode == 'no-update': ServiceClass = _NoUpdate elif testing_mode == 'more-info': ServiceClass = _MoreInfo elif testing_mode == 'crasher': ServiceClass = _Crasher else: raise RuntimeError('Invalid testing mode: {}'.format(testing_mode)) return ServiceClass(system_bus, object_path, loop) ./systemimage/testing/helpers.py0000644000015600001650000006444112701500553017124 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Test helpers.""" __all__ = [ 'ServerTestBase', 'chmod', 'configuration', 'copy', 'data_path', 'debug', 'debuggable', 'descriptions', 'find_dbus_process', 'get_channels', 'get_index', 'make_http_server', 'reset_envar', 'setup_index', 'setup_keyring_txz', 'setup_keyrings', 'sign', 'terminate_service', 'touch_build', 'wait_for_service', 'write_bytes', ] import os import ssl import dbus import json import time import gnupg import psutil import shutil import inspect import tarfile import unittest from contextlib import ExitStack, contextmanager, suppress from functools import partial, partialmethod, wraps from http.server import HTTPServer, SimpleHTTPRequestHandler from pathlib import Path from pkg_resources import resource_filename, resource_string as resource_bytes from socket import SHUT_RDWR from systemimage.channel import Channels from systemimage.config import Configuration, config from systemimage.helpers import MiB, atomic, makedirs, temporary_directory from systemimage.index import Index from threading import Thread from unittest.mock import patch EMPTYSTRING = '' SPACE = ' ' def get_index(filename): json_bytes = resource_bytes('systemimage.tests.data', filename) return Index.from_json(json_bytes.decode('utf-8')) def get_channels(filename): json_bytes = resource_bytes('systemimage.tests.data', filename) return Channels.from_json(json_bytes.decode('utf-8')) def data_path(filename): return os.path.abspath( resource_filename('systemimage.tests.data', filename)) def make_http_server(directory, port, certpem=None, keypem=None): """Create an HTTP/S server to vend from the file system. :param directory: The file system directory to vend files from. :param port: The port to listen on for the server. :param certpem: For HTTPS servers, the path to the certificate PEM file. If the file name does not start with a slash, it is considered relative to the test data directory. :param keypem: For HTTPS servers, the path to the key PEM file. 
If the file name does not start with a slash, it is considered relative to the test data directory. :return: A context manager that when closed, stops the server. """ # We need an HTTP/S server to vend the file system, or at least parts of # it, that we want to test. Since all the files are static, and we're # only going to GET files, this makes our lives much easier. We'll just # vend all the files in the directory. class RequestHandler(SimpleHTTPRequestHandler): # The base class hardcodes the use of os.getcwd() to vend the # files from, but we want to be able to pass in any directory. I # suppose we could chdir in the server thread, but let's hack the # path instead. def translate_path(self, path): with patch('http.server.os.getcwd', return_value=directory): return super().translate_path(path) def log_message(self, *args, **kws): # Please shut up. pass def handle_one_request(self): try: super().handle_one_request() except ConnectionResetError: super().handle_one_request() def do_HEAD(self): # Just tell the client we have the magic file. if self.path == '/user-agent.txt': self.send_response(200) self.end_headers() else: # Canceling a download can cause our internal server to # see various ignorable errors. No worries. with suppress(BrokenPipeError, ConnectionResetError): super().do_HEAD() def do_GET(self): # If we requested the magic 'user-agent.txt' file, send back the # value of the User-Agent header. Otherwise, vend as normal. if self.path == '/user-agent.txt': self.send_response(200) self.send_header('Content-Type', 'text/plain') user_agent = self.headers.get('user-agent', 'no agent') self.end_headers() self.wfile.write(user_agent.encode('utf-8')) else: # Canceling a download can cause our internal server to # see various ignorable errors. No worries. with suppress(BrokenPipeError, ConnectionResetError): super().do_GET() # Create the server in the main thread, but start it in the sub-thread. # This lets the main thread call .shutdown() to stop everything. Return # just the shutdown method to the caller. RequestHandler.directory = directory # Wrap the socket in the SSL context if given. ssl_context = None if certpem is not None and keypem is not None: data_dir = os.path.dirname(data_path('__init__.py')) if not os.path.isabs(certpem): certpem = os.path.join(data_dir, certpem) if not os.path.isabs(keypem): keypem = os.path.join(data_dir, keypem) ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ssl_context.load_cert_chain(certfile=certpem, keyfile=keypem) # This subclass specializes connection requests so that we can keep track # of connections and force shutting down both sides of the socket. The # download service has a hack to clear its connection hack during the test # suite (specifically, when -stoppable is given), but this just ensures # that we do everything we can on our end to close the connections. If we # don't our HTTP/S servers hang around for 2+ minutes due to issues with # the Qt networking stack, causing huge slowdowns on our test teardown # methods. connections = [] class Server(HTTPServer): def get_request(self): conn, addr = super().get_request() connections.append(conn) return conn, addr # Define a small class with a method that arranges for the self-signed # certificates to be valid in the client. 
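    # Illustrative aside (assumed usage, not part of this function): callers
    # keep the returned ExitStack alive for the life of the server and close
    # it during teardown, as ServerTestBase.setUp() below does by pushing it
    # onto its own ExitStack, or hypothetically:
    #
    #     stack = make_http_server(tmpdir, 8980)
    #     try:
    #         ...  # talk to http://localhost:8980/
    #     finally:
    #         stack.close()  # shuts the server down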
    with ExitStack() as resources:
        server = Server(('localhost', port), RequestHandler)
        server.allow_reuse_address = True
        resources.callback(server.server_close)
        if ssl_context is not None:
            server.socket = ssl_context.wrap_socket(
                server.socket, server_side=True)
        thread = Thread(target=server.serve_forever)
        thread.daemon = True
        def shutdown():
            for conn in connections:
                if conn.fileno() != -1:
                    # Disallow sends and receives.
                    try:
                        conn.shutdown(SHUT_RDWR)
                    except OSError:
                        # I'm ignoring all OSErrors here, although the only
                        # one I've seen semi-consistently is ENOTCONN [107]
                        # "Transport endpoint is not connected".  I don't know
                        # why this happens, but it tells me that the client
                        # has already exited.  We're shutting down, so who
                        # cares?  (Or am I masking a real error?)
                        pass
                    conn.close()
            server.shutdown()
            thread.join()
        resources.callback(shutdown)
        thread.start()
        # Everything succeeded, so transfer the resource management to a new
        # ExitStack().  This way, when the with statement above completes, the
        # server will still be running and urlopen() will still be patched.
        # The caller is responsible for closing the new ExitStack.
        return resources.pop_all()


# This defines the @configuration decorator used in various test suites to
# create a temporary config.d/ directory for a test.  This is all fairly
# complicated, but here's what's going on.
#
# The _wrapper() function is the inner part of the decorator, and it does the
# heart of the operation, which is to create a temporary directory for
# config.d, along with temporary var and tmp directories.  These latter two
# will be interpolated into any configuration file copied into config.d.
#
# The outer decorator function differs depending on whether @configuration was
# given without arguments, or called with arguments at the time of the
# function definition.
#
# In the former case, e.g.
#
#   @configuration
#   def test_something(self):
#
# The default 00.ini file is interpolated and copied into config.d.  Simple.
#
# In the latter case, e.g.
#
#   @configuration('some-config.ini')
#   def test_something(self):
#
# There's actually another level of interior function, because the outer
# decorator itself is getting called.  Here, any named configuration file is
# additionally copied to the config.d directory, renaming it sequentially to
# something like 01_override.ini, with the numeric part incrementing
# monotonically.
#
# The implementation is tricky because we want the call sites to be simple.

def _wrapper(self, function, ini_files, *args, **kws):
    start = 0
    with ExitStack() as resources:
        # Create the config.d directory and copy all the source ini files to
        # this directory in sequential order, interpolating in the temporary
        # tmp and var directories.
        config_d = resources.enter_context(temporary_directory())
        temp_tmpdir = resources.enter_context(temporary_directory())
        temp_vardir = resources.enter_context(temporary_directory())
        for ini_file in ini_files:
            dst = os.path.join(config_d, '{:02d}_override.ini'.format(start))
            start += 1
            template = resource_bytes(
                'systemimage.tests.data', ini_file).decode('utf-8')
            with atomic(dst) as fp:
                print(template.format(tmpdir=temp_tmpdir,
                                      vardir=temp_vardir), file=fp)
        # Patch the global configuration object so that it can be used
        # directly, which is good enough in most cases.  Also patch the bit of
        # code that detects the device name.
config = Configuration(config_d) resources.enter_context( patch('systemimage.config._config', config)) resources.enter_context( patch('systemimage.device.check_output', return_value='nexus7')) # Make sure the cache_partition and data_partition exist. makedirs(config.updater.cache_partition) makedirs(config.updater.data_partition) # The method under test is allowed to specify some additional # keyword arguments, in order to pass some variables in from the # wrapper. signature = inspect.signature(function) if 'config_d' in signature.parameters: kws['config_d'] = config_d if 'config' in signature.parameters: kws['config'] = config # Call the function with the given arguments and return the result. return function(self, *args, **kws) def configuration(*args): """Outer decorator which can be called or not at function definition time. If called, the arguments are positional only, and name the test data .ini files which are to be copied to config.d directory. If none are given, then 00.ini is used. """ if len(args) == 1 and callable(args[0]): # We assume this was the bare @configuration decorator flavor. function = args[0] inner = partialmethod(_wrapper, function, ('00.ini',)) return wraps(function)(inner) else: # We assume this was the called @configuration(...) decorator flavor, # so create the actual decorator that wraps the _wrapper function. def decorator(function): inner = partialmethod(_wrapper, function, args) return wraps(function)(inner) return decorator def sign(filename, pubkey_ring): """GPG sign the given file, producing an armored detached signature. :param filename: The path to the file to sign. :param pubkey_ring: The public keyring containing the key to sign the file with. This keyring must contain only one key, and its key id must exist in the master secret keyring. """ # filename could be a Path object. For now, just str-ify it. filename = str(filename) with ExitStack() as resources: home = resources.enter_context(temporary_directory()) secring = data_path('master-secring.gpg') pubring = data_path(pubkey_ring) ctx = gnupg.GPG(gnupghome=home, keyring=pubring, #verbose=True, secret_keyring=secring) public_keys = ctx.list_keys() assert len(public_keys) != 0, 'No keys found' assert len(public_keys) == 1, 'Too many keys' key_id = public_keys[0]['keyid'] dfp = resources.enter_context(open(filename, 'rb')) signed_data = ctx.sign_file(dfp, keyid=key_id, detach=True) sfp = resources.enter_context(open(filename + '.asc', 'wb')) sfp.write(signed_data.data) def copy(filename, todir, dst=None): src = data_path(filename) dst = os.path.join(str(todir), filename if dst is None else dst) makedirs(os.path.dirname(dst)) shutil.copy(src, dst) def setup_keyring_txz(keyring_src, signing_keyring, json_data, dst): """Set up the .tar.xz and .asc files. The source keyring and json data is used to create a .tar.xz file and an associated .asc signature file. These are then copied to the given destination path name. :param keyring_src: The name of the source keyring (i.e. .gpg file), which should be relative to the test data directory. This will serve as the keyring.gpg file inside the tarball. :param signing_keyring: The name of the keyring to sign the resulting tarball with, again, relative to the test data directory. :param json_data: The JSON data dictionary, i.e. the contents of the keyring.json file inside the tarball. :param dst: The destination path of the .tar.xz file. For the resulting signature file, the .asc suffix will be automatically appended and copied next to the dst file. 
""" with temporary_directory() as tmpdir: copy(keyring_src, tmpdir, 'keyring.gpg') json_path = os.path.join(tmpdir, 'keyring.json') with open(json_path, 'w', encoding='utf-8') as fp: json.dump(json_data, fp) # Tar up the .gpg and .json files into a .tar.xz file. tarxz_path = os.path.join(tmpdir, 'keyring.tar.xz') with tarfile.open(tarxz_path, 'w:xz') as tf: tf.add(os.path.join(tmpdir, 'keyring.gpg'), 'keyring.gpg') tf.add(json_path, 'keyring.json') sign(tarxz_path, signing_keyring) # Copy the .tar.xz and .asc files to the proper directory under # the path the https server is vending them from. makedirs(os.path.dirname(dst)) shutil.copy(tarxz_path, dst) shutil.copy(tarxz_path + '.asc', dst + '.asc') def setup_keyrings(*keyrings, use_config=None, **data): """Copy the named keyrings to the right place. Also, set up the .xz.tar and .xz.tar.asc files which must exist in order to be copied to the updater partitions. :param keyrings: When given, names the keyrings to set up. When not given, all keyrings are set up. Each entry should be the name of the configuration variable inside the `config.gpg` namespace, e.g. 'archive_master'. :param use_config: If given, use this as the config object, otherwise use the global config object. :param data: Additional key/value data to insert into the keyring.json dictionary. """ if len(keyrings) == 0: keyrings = ('archive-master', 'image-master', 'image-signing', 'device-signing') for keyring in keyrings: if keyring in ('archive-master', 'image-master'): # Yes, the archive master is signed by itself. signing_kr = 'archive-master.gpg' elif keyring == 'image-signing': signing_kr = 'image-master.gpg' elif keyring == 'device-signing': signing_kr = 'image-signing.gpg' else: raise AssertionError('unknown key type: {}'.format(keyring)) # The local keyrings live in the .gpg file with the same keyring name # as the .tar.xz file, but cached in the temporary directory. copy(keyring + '.gpg', (config.tempdir if use_config is None else use_config.tempdir)) # Now set up the .tar.xz and .tar.xz.asc files in the destination. json_data = dict(type=keyring) json_data.update(data) dst = getattr((config.gpg if use_config is None else use_config.gpg), keyring.replace('-', '_')) setup_keyring_txz(keyring + '.gpg', signing_kr, json_data, dst) def setup_index(index, todir, keyring, write_callback=None): for image in get_index(index).images: for filerec in image.files: path = (filerec.path[1:] if filerec.path.startswith('/') else filerec.path) dst = os.path.join(todir, path) makedirs(os.path.dirname(dst)) if write_callback is None: contents = EMPTYSTRING.join( os.path.splitext(filerec.path)[0].split('/')) with open(dst, 'w', encoding='utf-8') as fp: fp.write(contents) else: write_callback(dst) # Sign with the specified signing key. sign(dst, keyring) @contextmanager def reset_envar(name): missing = object() old_value = os.environ.get(name, missing) try: yield finally: if old_value is missing: try: del os.environ[name] except KeyError: pass else: os.environ[name] = old_value @contextmanager def chmod(path, new_mode): old_mode = os.stat(path).st_mode try: os.chmod(path, new_mode) yield finally: os.chmod(path, old_mode) def touch_build(version, timestamp=None, use_config=None): # LP: #1220238 - assert that no old-style version numbers are being used. 
assert 0 <= version < (1 << 16), ( 'Old style version number: {}'.format(version)) if use_config is None: use_config = config override = Path(use_config.config_d) / '99_build.ini' with override.open('wt', encoding='utf-8') as fp: print("""\ [service] build_number: {} """.format(version), file=fp) # We have to touch the mtimes for all the files in the config directory. if timestamp is not None: timestamp = int(timestamp) for path in Path(use_config.config_d).iterdir(): os.utime(str(path), (timestamp, timestamp)) use_config.reload() def write_bytes(path, size_in_mib): # Write size_in_mib * 1MiB number of bytes to the file in path. with open(path, 'wb') as fp: for chunk in range(size_in_mib): fp.write(b'x' * MiB) def debuggable(fn): def wrapper(*args, **kws): try: path = Path('/tmp/debug.enabled') path.touch() return fn(*args, **kws) finally: path.unlink() return wrapper @contextmanager def debug(*, check_flag=False, end='\n'): if not check_flag or os.path.exists('/tmp/debug.enabled'): path = Path('/tmp/debug.log') else: path = Path(os.devnull) with path.open('a', encoding='utf-8') as fp: function = partial(print, file=fp, end=end, flush=True) function.fp = fp yield function def find_dbus_process(ini_path): """Return the system-image-dbus process running the given ini file.""" # This method searches all processes for the one matching the # system-image-dbus service. This is harder than it should be because # while dbus-launch gives us the PID of the dbus-launch process itself, # that can't be used to find the appropriate child process, because # D-Bus activated processes are orphaned to init as their parent. # # This then does a brute-force search over all the processes, looking one # that has a particular command line indicating that it's the # system-image-dbus service. We don't run this latter by that name # though, since that's a wrapper created by setup.py's entry points. # # To make doubly certain we're not going to get the wrong process (in case # there are multiple system-image-dbus processes running), we'll also look # for the specific ini_path for the instance we care about. Yeah, this # all kind of sucks, but should be effective in finding the one we need to # track. from systemimage.testing.controller import Controller for process in psutil.process_iter(): cmdline = SPACE.join(process.cmdline()) if Controller.MODULE in cmdline and ini_path in cmdline: return process return None class ServerTestBase(unittest.TestCase): # Must override in base classes. INDEX_FILE = None CHANNEL_FILE = None CHANNEL = None DEVICE = None SIGNING_KEY = 'device-signing.gpg' # For more detailed output. maxDiff = None @classmethod def setUpClass(self): # Avoid circular imports. from systemimage.testing.nose import SystemImagePlugin SystemImagePlugin.controller.set_mode(cert_pem='cert.pem') def setUp(self): # Avoid circular imports. from systemimage.state import State self._resources = ExitStack() self._state = State() try: self._serverdir = self._resources.enter_context( temporary_directory()) # Start up both an HTTPS and HTTP server. The data files are # vended over the latter, everything else, over the former. self._resources.push(make_http_server( self._serverdir, 8943, 'cert.pem', 'key.pem')) self._resources.push(make_http_server(self._serverdir, 8980)) # Set up the server files. 
assert self.CHANNEL_FILE is not None, ( 'Subclasses must set CHANNEL_FILE') copy(self.CHANNEL_FILE, self._serverdir, 'channels.json') sign(os.path.join(self._serverdir, 'channels.json'), 'image-signing.gpg') assert self.CHANNEL is not None, 'Subclasses must set CHANNEL' assert self.DEVICE is not None, 'Subclasses must set DEVICE' index_path = os.path.join( self._serverdir, self.CHANNEL, self.DEVICE, 'index.json') head, tail = os.path.split(index_path) assert self.INDEX_FILE is not None, ( 'Subclasses must set INDEX_FILE') copy(self.INDEX_FILE, head, tail) sign(index_path, self.SIGNING_KEY) setup_index(self.INDEX_FILE, self._serverdir, self.SIGNING_KEY) except: self._resources.close() raise self.addCleanup(self._resources.close) def _setup_server_keyrings(self, *, device_signing=True): # Only the archive-master key is pre-loaded. All the other keys # are downloaded and there will be both a blacklist and device # keyring. The four signed keyring tar.xz files and their # signatures end up in the proper location after the state machine # runs to completion. setup_keyrings('archive-master') setup_keyring_txz( 'spare.gpg', 'image-master.gpg', dict(type='blacklist'), os.path.join(self._serverdir, 'gpg', 'blacklist.tar.xz')) setup_keyring_txz( 'image-master.gpg', 'archive-master.gpg', dict(type='image-master'), os.path.join(self._serverdir, 'gpg', 'image-master.tar.xz')) setup_keyring_txz( 'image-signing.gpg', 'image-master.gpg', dict(type='image-signing'), os.path.join(self._serverdir, 'gpg', 'image-signing.tar.xz')) if device_signing: setup_keyring_txz( 'device-signing.gpg', 'image-signing.gpg', dict(type='device-signing'), os.path.join(self._serverdir, self.CHANNEL, self.DEVICE, 'device-signing.tar.xz')) def descriptions(path): descriptions = [] for image in path: # There's only one description per image so order doesn't # matter. descriptions.extend(image.descriptions.values()) return descriptions def wait_for_service(*, restart=False, reload=True): bus = dbus.SystemBus() if restart: service = bus.get_object('com.canonical.SystemImage', '/Service') iface = dbus.Interface(service, 'com.canonical.SystemImage') iface.Exit() service = dbus.SystemBus().get_object('org.freedesktop.DBus', '/') iface = dbus.Interface(service, 'org.freedesktop.DBus') if reload: iface.ReloadConfig() # Wait until the system-image-dbus process is actually running. # http://people.freedesktop.org/~david/eggdbus-20091014/eggdbus-interface-org.freedesktop.DBus.html#eggdbus-method-org.freedesktop.DBus.StartServiceByName reply = 0 # 2015-03-09 BAW: This could potentially spin forever, but we'll assume # D-Bus eventually is successful in starting the service. while reply != 2: reply = iface.StartServiceByName('com.canonical.SystemImage', 0) time.sleep(0.1) def terminate_service(): # Avoid circular imports. from systemimage.testing.nose import SystemImagePlugin proc = find_dbus_process(SystemImagePlugin.controller.ini_path) if proc is not None: bus = dbus.SystemBus() service = bus.get_object('com.canonical.SystemImage', '/Service') iface = dbus.Interface(service, 'com.canonical.SystemImage') iface.Exit() proc.wait() ./systemimage/testing/controller.py0000644000015600001650000002604212701500553017640 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Helper for testing the dbus service via dbus over a separate test bus.""" __all__ = [ 'Controller', ] import os import pwd import sys import dbus import time import psutil import subprocess try: import pycurl except ImportError: pycurl = None from contextlib import ExitStack from distutils.spawn import find_executable from pkg_resources import resource_string as resource_bytes from systemimage.helpers import temporary_directory from systemimage.testing.helpers import ( data_path, find_dbus_process, makedirs, reset_envar, wait_for_service) from unittest.mock import patch SPACE = ' ' DLSERVICE = os.environ.get( 'SYSTEMIMAGE_DLSERVICE', '/usr/bin/ubuntu-download-manager' # For debugging the in-tree version of u-d-m. #'/bin/sh $HOME/projects/phone/trunk/tools/runme.sh' ) def start_system_image(controller): wait_for_service(reload=False) process = find_dbus_process(controller.ini_path) if process is None: raise RuntimeError('Could not start system-image-dbus') def stop_system_image(controller): if controller.ini_path is None: process = None else: process = find_dbus_process(controller.ini_path) try: bus = dbus.SystemBus() service = bus.get_object('com.canonical.SystemImage', '/Service') iface = dbus.Interface(service, 'com.canonical.SystemImage') iface.Exit() except dbus.DBusException: # The process might not be running at all. return if process is not None: process.wait(60) def _find_udm_process(): for process in psutil.process_iter(): cmdline = SPACE.join(process.cmdline()) if 'ubuntu-download-manager' in cmdline and '-stoppable' in cmdline: return process return None def start_downloader(controller): service = dbus.SystemBus().get_object('org.freedesktop.DBus', '/') iface = dbus.Interface(service, 'org.freedesktop.DBus') reply = 0 while reply != 2: reply = iface.StartServiceByName( 'com.canonical.applications.Downloader', 0) time.sleep(0.1) # Something innocuous. process = _find_udm_process() if process is None: raise RuntimeError('Could not start ubuntu-download-manager') def stop_downloader(controller): # See find_dbus_process() for details. process = _find_udm_process() try: bus = dbus.SystemBus() service = bus.get_object('com.canonical.applications.Downloader', '/') iface = dbus.Interface( service, 'com.canonical.applications.DownloadManager') iface.exit() except dbus.DBusException: # The process might not be running at all. return if process is not None: process.wait(60) SERVICES = [ ('com.canonical.SystemImage', '{python} -m {self.MODULE} -C {self.ini_path} ' '{self.curl_cert} --testing {self.mode}', start_system_image, stop_system_image, ), ] if pycurl is None: USING_PYCURL = False else: USING_PYCURL = int(os.environ.get('SYSTEMIMAGE_PYCURL', '0')) if not USING_PYCURL: SERVICES.append( ('com.canonical.applications.Downloader', DLSERVICE + ' {self.udm_certs} -disable-timeout -stoppable -log-dir {self.tmpdir}', start_downloader, stop_downloader, ) ) class Controller: """Start and stop D-Bus service under test.""" MODULE = 'systemimage.testing.service' def __init__(self, logfile=None, loglevel='info'): self.loglevel = loglevel # Non-public. self._stack = ExitStack() self._stoppers = [] # Public. 
self.tmpdir = self._stack.enter_context(temporary_directory()) self.config_path = os.path.join(self.tmpdir, 'dbus-system.conf') self.serverdir = self._stack.enter_context(temporary_directory()) self.daemon_pid = None self.mode = 'live' self.udm_certs = '' self.curl_cert = '' self.patcher = None # Set up the dbus-daemon system configuration file. path = data_path('dbus-system.conf.in') with open(path, 'r', encoding='utf-8') as fp: template = fp.read() username = pwd.getpwuid(os.getuid()).pw_name config = template.format(tmpdir=self.tmpdir, user=username) with open(self.config_path, 'w', encoding='utf-8') as fp: fp.write(config) # We need a client.ini file for the subprocess. self.ini_tmpdir = self._stack.enter_context(temporary_directory()) self.ini_vardir = self._stack.enter_context(temporary_directory()) self.ini_logfile = (os.path.join(self.ini_tmpdir, 'client.log') if logfile is None else logfile) self.ini_path = os.path.join(self.tmpdir, 'config.d') makedirs(self.ini_path) self._reset_configs() def _reset_configs(self): for filename in os.listdir(self.ini_path): if filename.endswith('.ini'): os.remove(os.path.join(self.ini_path, filename)) template = resource_bytes( 'systemimage.tests.data', '01.ini').decode('utf-8') defaults = os.path.join(self.ini_path, '00_defaults.ini') with open(defaults, 'w', encoding='utf-8') as fp: print(template.format(tmpdir=self.ini_tmpdir, vardir=self.ini_vardir, logfile=self.ini_logfile, loglevel=self.loglevel), file=fp) def _configure_services(self): self.stop_children() # Now we have to set up the .service files. We use the Python # executable used to run the tests, executing the entry point as would # happen in a deployed script or virtualenv. for service, command_template, starter, stopper in SERVICES: command = command_template.format(python=sys.executable, self=self) service_file = service + '.service' path = data_path(service_file + '.in') with open(path, 'r', encoding='utf-8') as fp: template = fp.read() config = template.format(command=command) service_path = os.path.join(self.tmpdir, service_file) with open(service_path, 'w', encoding='utf-8') as fp: fp.write(config) self._stoppers.append(stopper) # If the dbus-daemon is running, reload its configuration files. if self.daemon_pid is not None: wait_for_service() def _set_udm_certs(self, cert_pem, certificate_path): self.udm_certs = ( '' if cert_pem is None else '-self-signed-certs ' + certificate_path) def _set_curl_certs(self, cert_pem, certificate_path): # We have to set up the PyCURL downloader's self-signed certificate for # the test in two ways. First, because we might be spawning the D-Bus # service, we have to pass the path to the cert to that service... self.curl_cert = ( '' if cert_pem is None else '--self-signed-cert ' + certificate_path) # ...but the controller is also used to set the mode for foreground # tests, such as test_download.py. Here we don't spawn any D-Bus # processes, but we still have to mock make_testable() in curl.py so # that the PyCURL object accepts the self-signed cert. 
if self.patcher is not None: self.patcher.stop() self.patcher = None if cert_pem is not None: def self_sign(c): c.setopt(pycurl.CAINFO, certificate_path) self.patcher = patch('systemimage.curl.make_testable', self_sign) self.patcher.start() def set_mode(self, *, cert_pem=None, service_mode=''): self.mode = service_mode certificate_path = data_path(cert_pem) if USING_PYCURL: self._set_curl_certs(cert_pem, certificate_path) else: self._set_udm_certs(cert_pem, certificate_path) self._reset_configs() self._configure_services() def _start(self): """Start the SystemImage service in a subprocess. Use the output from dbus-daemon to gather the address and pid of the service in the subprocess. We'll use those in the foreground process to talk to our test instance of the service (rather than any similar service running normally on the development desktop). """ daemon_exe = find_executable('dbus-daemon') if daemon_exe is None: print('Cannot find the `dbus-daemon` executable', file=sys.stderr) return os.environ['DBUS_VERBOSE'] = '1' dbus_args = [ daemon_exe, #'/usr/lib/x86_64-linux-gnu/dbus-1.0/debug-build/bin/dbus-daemon', '--fork', '--config-file=' + str(self.config_path), # Return the address and pid on stdout. '--print-address=1', '--print-pid=1', ] stdout = subprocess.check_output(dbus_args, bufsize=4096, universal_newlines=True) lines = stdout.splitlines() dbus_address = lines[0].strip() self.daemon_pid = int(lines[1].strip()) #print('DBUS_LAUNCH PID:', self.daemon_pid) self._stack.callback(self._kill, self.daemon_pid) #print("DBUS_SYSTEM_BUS_ADDRESS='{}'".format(dbus_address)) # Set the service's address into the environment for rendezvous. self._stack.enter_context(reset_envar('DBUS_SYSTEM_BUS_ADDRESS')) os.environ['DBUS_SYSTEM_BUS_ADDRESS'] = dbus_address # Try to start the DBus services. for service, command_template, starter, stopper in SERVICES: starter(self) def start(self): if self.daemon_pid is not None: # Already started. return try: self._configure_services() self._start() except: self._stack.close() raise def stop_children(self): # If the dbus-daemon is already running, kill all the children. if self.daemon_pid is not None: for stopper in self._stoppers: stopper(self) del self._stoppers[:] def _kill(self, pid): self.stop_children() process = psutil.Process(pid) process.terminate() process.wait(60) self.daemon_pid = None def stop(self): self._stack.close() ./systemimage/testing/nose.py0000644000015600001650000001463612701500553016427 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Nose plugin for testing.""" __all__ = [ 'SystemImagePlugin', ] import re import atexit from dbus.mainloop.glib import DBusGMainLoop from nose2.events import Plugin from systemimage.config import config from systemimage.logging import initialize from systemimage.testing.controller import Controller from systemimage.testing.helpers import configuration # Why are these tests set up like this? 
# # LP: #1205163 provides the impetus. Here's the problem: we have to start a # dbus-daemon child process which will create an isolated system bus on which # the services we want to talk to will be started via dbus-activation. This # closely mimics how the real system starts up our services. # # We ask dbus-daemon to return us its pid and the dbus address it's listening # on. We need the address because we have to ensure that the dbus client, # i.e. this foreground test process, can communicate with the isolated # service. To do this, the foreground process sets the environment variable # DBUS_SYSTEM_BUS_ADDRESS to the address that dbus-daemon gave us. # # The problem is that the low-level dbus client library only consults that # envar when it initializes, which it only does once per process. There's no # way to get the library to listen on a new DBUS_SYSTEM_BUS_ADDRESS later on. # # This means that our first approach, which involved killing the grandchild # service processes, and the child dbus-daemon process, and then restarting a # new dbus-daemon process on a new address, doesn't work. # # We need new service processes for many of our test cases because we have to # start them up in different testing modes, and there's no way to do that # without exiting them and restarting them. The grandchild processes get # started via different .service files with different commands. # # So, we have to restart the service process, but *not* the dbus-daemon # process because for all of these tests, it must be listening on the same # system bus. Fortunately, dbus-daemon responds to SIGHUP, which tells it to # re-read its configuration files, including its .service files. So how this # works is that at the end of each test class, we tell the dbus service to # .Exit(), wait until it has, then write a new .service file with the new # command, HUP the dbus-daemon, and now the next time it activates the # service, it will do so with the correct (i.e. newly written) command. class SystemImagePlugin(Plugin): # Hook into nose2's unittest.cfg configuration. configSection = 'systemimage' controller = None def __init__(self): super().__init__() self.patterns = [] self.verbosity = 0 self.log_file = None self.log_level = 'info' self.addArgument(self.patterns, 'P', 'pattern', 'Add a test matching pattern') def bump(ignore): self.verbosity += 1 self.addFlag(bump, 'V', 'verbosity', 'Increase system-image verbosity') def set_log_file(path): self.log_file = path[0] self.addOption(set_log_file, 'L', 'logfile', 'Set the log file for the test run', nargs=1) def set_dbus_loglevel(level): self.log_level = level[0] self.addOption(set_dbus_loglevel, 'M', 'loglevel', 'Set the systemimage[:systemimage.dbus] log level', nargs=1) @configuration def startTestRun(self, event): if self.log_file is not None: config.system.logfile = self.log_file DBusGMainLoop(set_as_default=True) initialize(verbosity=self.verbosity) # We need to set up the dbus service controller, since all the tests # which use a custom address must continue to use the same address for # the duration of the test process. We can kill and restart # individual services, and we can write new dbus configuration files # and HUP the dbus-daemon to re-read them, but we cannot change bus # addresses after the initial one is set.
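# Condensed sketch of the restart dance described above (function name and
# arguments are illustrative; Controller._configure_services() implements
# the real version):
import os
import signal

def _swap_activation_command(daemon_pid, service_path, template, command):
    # The old grandchild has already been told to .Exit() over D-Bus.
    # Write a .service file with the new activation command...
    with open(service_path, 'w', encoding='utf-8') as fp:
        fp.write(template.format(command=command))
    # ...then HUP dbus-daemon so it re-reads its .service files.  The next
    # method call on the bus name re-activates the service, now running the
    # new command, while the bus address stays the same.
    os.kill(daemon_pid, signal.SIGHUP)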
SystemImagePlugin.controller = Controller( self.log_file, self.log_level) SystemImagePlugin.controller.start() atexit.register(SystemImagePlugin.controller.stop) def getTestCaseNames(self, event): if len(self.patterns) == 0: # No filter patterns, so everything should be tested. return # Does the pattern match the fully qualified class name? for pattern in self.patterns: full_class_name = '{}.{}'.format( event.testCase.__module__, event.testCase.__name__) if re.search(pattern, full_class_name): # Don't suppress this test class. return names = filter(event.isTestMethod, dir(event.testCase)) for name in names: full_test_name = '{}.{}.{}'.format( event.testCase.__module__, event.testCase.__name__, name) for pattern in self.patterns: if re.search(pattern, full_test_name): break else: event.excludedNames.append(name) def afterTestRun(self, event): SystemImagePlugin.controller.stop() # Let other plugins continue printing. return None ## def startTest(self, event): ## from systemimage.testing.helpers import debug ## with debug() as dlog: ## dlog('vvvvv', event.test) ## def stopTest(self, event): ## from systemimage.testing.helpers import debug ## with debug() as dlog: ## dlog('^^^^^', event.test) def describeTest(self, event): # This is fucked up. if 'partial' in event.description: event.description = event.description[:-73] ./systemimage/testing/service.py0000644000015600001650000000403012701500553017106 0ustar jenkinsjenkins# Copyright (C) 2014-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """DBus service testing pre-load module. This is arranged so that the test suite can enable code coverage data collection as early as possible in the private bus D-Bus activated processes. """ import os # Set this environment variable if the controller won't start. There's no # other good way to get debugging information about the D-Bus activated # process, since their stderr just seems to get lost. if os.environ.get('SYSTEMIMAGE_DEBUG_DBUS_ACTIVATION'): import sys sys.stderr = open('/tmp/debug.log', 'a', encoding='utf-8') # It's okay if this module isn't available. try: from coverage.control import coverage as _Coverage except ImportError: _Coverage = None def main(): # Enable code coverage. ini_file = os.environ.get('COVERAGE_PROCESS_START') if _Coverage is not None and ini_file is not None: coverage =_Coverage(config_file=ini_file, auto_data=True) # Stolen from coverage.process_startup() coverage.erase() coverage.start() coverage._warn_no_data = False coverage._warn_unimported_source = False # All systemimage imports happen here so that we have the best possible # chance of instrumenting all relevant code. from systemimage.service import main as real_main # Now run the actual D-Bus service. return real_main() if __name__ == '__main__': import sys sys.exit(main()) ./systemimage/bag.py0000644000015600001650000000765212701500553014537 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. 
# Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """The Bag class.""" __all__ = [ 'Bag', ] import keyword from collections import defaultdict COMMASPACE = ', ' def default(): def identity(value): return value return identity def make_converter(original): converters = defaultdict(default) if original is not None: converters.update(original) return converters class Bag: # NOTE: This class's methods share a namespace with the possible # configuration variable names in the various sections. Thus no variable # in any section can be named `update`, `keys`, or `get`. They also can't # be named like any of the non-public methods, but that's usually not a # problem. Ideally, we'd name the methods part of the reserved namespace, # but it seems like a low tech debt for now. def __init__(self, *, converters=None, **kws): self._converters = make_converter(converters) self.__original__ = {} self.__untranslated__ = {} self._load_items(kws) def update(self, *, converters=None, **kws): if converters is not None: self._converters.update(converters) self._load_items(kws) def _load_items(self, kws): for key, value in kws.items(): self.__original__[key] = value safe_key, converted_value = self._normalize_key_value(key, value) self.__untranslated__[key] = converted_value # BAW 2013-04-30: attribute values *must* be immutable, but for # now we don't enforce this. If you set or delete attributes, you # will probably break things. self.__dict__[safe_key] = converted_value def _normalize_key_value(self, key, value): value = self._converters[key](value) key = key.replace('-', '_') if keyword.iskeyword(key): key += '_' return key, value def __repr__(self): # pragma: no cover return '<Bag: {}>'.format(COMMASPACE.join(sorted( key for key in self.__dict__ if not key.startswith('_')))) def __setitem__(self, key, value): if key in self.__original__: raise ValueError('Attributes are immutable: {}'.format(key)) safe_key, converted_value = self._normalize_key_value(key, value) self.__dict__[safe_key] = converted_value self.__untranslated__[key] = converted_value def __getitem__(self, key): return self.__untranslated__[key] def keys(self): for key in self.__dict__: if not key.startswith('_'): yield key def get(self, key, default=None): if key in self.__dict__: return self.__dict__[key] return default def __iter__(self): yield from self.keys() # Pickle protocol. def __getstate__(self): # We don't need to pickle the converters, because for all practical # purposes, those are only used when the Bag is instantiated. return (self.__original__, {key: value for key, value in self.__dict__.items() if not key.startswith('_')}) def __setstate__(self, state): original, values = state self.__original__ = original self._converters = None for key, value in values.items(): self.__dict__[key] = value ./systemimage/channel.py0000644000015600001650000000371112701500553015406 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd.
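# Usage sketch for Bag (keys and values are illustrative):
bag = Bag(converters={'count': int}, **{'count': '3', 'http-port': '80'})
assert bag.count == 3             # converted by the 'count' converter
assert bag.http_port == '80'      # '-' becomes '_' for attribute access...
assert bag['http-port'] == '80'   # ...while item access keeps the raw key
bag['class'] = 'full'             # Python keywords grow a trailing '_'
assert bag.class_ == 'full'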
# Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Update channels.""" __all__ = [ 'Channels', ] import json from systemimage.bag import Bag def _parse_device_mappings(device_mapping): devices = {} # e.g. keys: nexus7, nexus4 for device_name, mapping_1 in device_mapping.items(): # Most of the keys at this level (e.g. index) have flat values, # however the keyring key is itself a mapping. keyring = mapping_1.pop('keyring', None) if keyring is not None: mapping_1['keyring'] = Bag(**keyring) # e.g. nexus7 -> {index, keyring} devices[device_name] = Bag(**mapping_1) return Bag(**devices) class Channels(Bag): @classmethod def from_json(cls, data): mapping = json.loads(data) channels = {} for channel_name, mapping_1 in mapping.items(): hidden = mapping_1.pop('hidden', None) if hidden is None: hidden = False else: assert hidden in (True, False), ( "Unexpected value for 'hidden': {}".format(hidden)) mapping_1['hidden'] = hidden device_mapping = mapping_1.pop('devices') mapping_1['devices'] = _parse_device_mappings(device_mapping) channels[channel_name] = Bag(**mapping_1) return cls(**channels) ./systemimage/state.py0000644000015600001650000006706112701500553015126 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
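# Usage sketch for Channels.from_json() (channel and device names are
# illustrative):
_data = """{
    "stable": {
        "devices": {
            "nexus7": {"index": "/stable/nexus7/index.json"}
        }
    }
}"""
channels = Channels.from_json(_data)
assert channels.stable.hidden is False    # 'hidden' defaults to False
assert channels.stable.devices.nexus7.index == '/stable/nexus7/index.json'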
"""Manage state transitions for updates.""" __all__ = [ 'ChecksumError', 'State', ] import os import json import shutil import logging import tarfile from collections import deque from contextlib import ExitStack from datetime import datetime, timezone from functools import partial from itertools import islice from systemimage.candidates import get_candidates, iter_path from systemimage.channel import Channels from systemimage.config import config from systemimage.download import Record, get_download_manager from systemimage.gpg import Context, SignatureError from systemimage.helpers import ( atomic, calculate_signature, makedirs, safe_remove, temporary_directory) from systemimage.index import Index from systemimage.keyring import KeyringError, get_keyring from urllib.parse import urljoin log = logging.getLogger('systemimage') COMMASPACE = ', ' COLON = ':' class ChecksumError(Exception): """Exception raised when a file's checksum does not match.""" def __init__(self, destination, got, checksum): super().__init__() self.destination = destination self.got = got self.expected = checksum def __str__(self): # pragma: no cover return 'got:{0.got} != exp:{0.expected}: {0.destination}'.format(self) def _copy_if_missing(src, dstdir): dst_path = os.path.join(dstdir, os.path.basename(src)) if os.path.exists(src) and not os.path.exists(dst_path): shutil.copy(src, dstdir) def _use_cached(txt, asc, keyrings, checksum=None, blacklist=None): if not os.path.exists(txt) or not os.path.exists(asc): return False with Context(*keyrings, blacklist=blacklist) as ctx: if not ctx.verify(asc, txt): return False if checksum is None: return True with open(txt, 'rb') as fp: got = calculate_signature(fp) return got == checksum def _use_cached_keyring(txz, asc, signing_key): if not _use_cached(txz, asc, (signing_key,)): return False # Do one additional check: unpack the .tar.xz file, grab the keyring.json # and if it has an expiry key, make sure that the keyring has not expired. with temporary_directory(dir=config.tempdir) as tmp: with tarfile.open(txz, 'r:xz') as tf: tf.extractall(tmp) json_path = os.path.join(tmp, 'keyring.json') with open(json_path, 'r', encoding='utf-8') as fp: data = json.load(fp) expiry = data.get('expiry') timestamp = datetime.now(tz=timezone.utc).timestamp() # We can use this keyring if it never expires, or if the expiration date # is some time in the future. return expiry is None or expiry > timestamp class State: def __init__(self): # Variables which manage state transitions. self._next = deque() self._debug_step = 1 self.candidate_filter = None self.winner_filter = None # Variables which represent things we've learned. self.blacklist = None self.channels = None self.index = None self.winner = None self.files = [] self.channel_switch = None # Other public attributes. self.downloader = get_download_manager() self._next.append(self._cleanup) def __iter__(self): return self def _pop(self): step = self._next.popleft() # step could be a partial or a method. name = getattr(step, 'func', step).__name__ log.debug('-> [{:2}] {}'.format(self._debug_step, name)) return step, name def __next__(self): try: step, name = self._pop() step() self._debug_step += 1 except IndexError: # Do not chain the exception. raise StopIteration from None except: log.exception('uncaught exception in state machine') raise def run_thru(self, stop_after): """Total hack to partially run the state machine. :param stop_after: Name of method, sans leading underscore to run the state machine through. 
In other words, the state machine runs until the named method completes. """ while True: try: step, name = self._pop() except (StopIteration, IndexError): # We're done. break step() self._debug_step += 1 if name[1:] == stop_after: break def run_until(self, stop_before): """Total hack to partially run the state machine. :param stop_before: Name of method, sans leading underscore that the state machine is run until the method is reached. Unlike `run_thru()` the named method is not run. """ while True: try: step, name = self._pop() except (StopIteration, IndexError): # We're done. break if name[1:] == stop_before: # Stop executing, but not before we push the last state back # onto the deque. Otherwise, resuming the state machine would # skip this step. self._next.appendleft(step) break step() self._debug_step += 1 def _cleanup(self): """Clean up the destination directories. Removes all residual files from the data partition. We leave the cache partition alone because some of those data files may still be valid and we want to avoid re-downloading them if possible. """ data_dir = config.updater.data_partition # Remove only the blacklist files (and generic keyring files) since # these are the only ones that will be downloaded to this location. safe_remove(os.path.join(data_dir, 'blacklist.tar.xz')) safe_remove(os.path.join(data_dir, 'blacklist.tar.xz.asc')) safe_remove(os.path.join(data_dir, 'keyring.tar.xz')) safe_remove(os.path.join(data_dir, 'keyring.tar.xz.asc')) self._next.append(self._get_blacklist_1) def _get_blacklist_1(self): """First try to get the blacklist.""" # If there is no image master key, or if the signature on the key is # not valid, download one now. Don't worry if we have an out of date # key; that will be handled elsewhere. The archive master key better # be pre-installed (we cannot download it). Let any exceptions in # grabbing the image master key percolate up. image_master = config.gpg.image_master if not _use_cached_keyring(image_master, image_master + '.asc', config.gpg.archive_master): log.info('No valid image master key found, downloading') get_keyring( 'image-master', 'gpg/image-master.tar.xz', 'archive-master') # The only way to know whether there is a blacklist or not is to try # to download it. If it fails, there isn't one. url = 'gpg/blacklist.tar.xz' try: # I think it makes no sense to check the blacklist when we're # downloading a blacklist file. log.info('Looking for blacklist: {}'.format( urljoin(config.https_base, url))) get_keyring('blacklist', url, 'image-master') except SignatureError: log.exception('No signed blacklist found') # The blacklist wasn't signed by the system image master. Maybe # there's a new system image master key? Let's find out. self._next.appendleft(self._get_master_key) return except FileNotFoundError: # There is no blacklist. log.info('No blacklist found') else: # After successful download, the blacklist.tar.xz will be living # in the data partition. self.blacklist = os.path.join( config.updater.data_partition, 'blacklist.tar.xz') log.info('Local blacklist file: {}', self.blacklist) # This is the first time we're trying to get the channel.json file. # If this fails because signature is invalid, we'll try to download a # new image-signing key. Then we'll call _get_channel() again. self._next.append(partial(self._get_channel, 0)) def _get_blacklist_2(self): """Second try to get the blacklist.""" # Unlike the first attempt, if this one fails with a SignatureError, # there's nothing more we can do, so we let those percolate up. 
We # still catch FileNotFoundErrors because of the small window of # opportunity for the blacklist to have been removed between the first # attempt and the second. Since it doesn't cost us much, we might as # well be thorough. # # The first attempt must already have gotten us an image master key if # one was missing originally, so don't try that again. url = 'gpg/blacklist.tar.xz' try: log.info('Looking for blacklist again: {}', urljoin(config.https_base, url)) get_keyring('blacklist', url, 'image-master') except FileNotFoundError: log.info('No blacklist found on second attempt') else: # After successful download, the blacklist.tar.xz will be living # in the data partition. self.blacklist = os.path.join( config.updater.data_partition, 'blacklist.tar.xz') log.info('Local blacklist file: {}', self.blacklist) # See above. self._next.append(partial(self._get_channel, 0)) def _get_channel(self, count): """Get and verify the channels.json file.""" # If there is no image signing key, download one now. Don't worry if # we have an out of date key; that will be handled elsewhere. The # image signing key must be signed by the image master key, which we # had better already have an up-to-date copy of. image_signing = config.gpg.image_signing if not _use_cached_keyring(image_signing, image_signing + '.asc', config.gpg.image_master): log.info('No valid image signing key found, downloading') get_keyring( 'image-signing', 'gpg/image-signing.tar.xz', 'image-master', self.blacklist) channels_url = urljoin(config.https_base, 'channels.json') channels_path = os.path.join(config.tempdir, 'channels.json') asc_url = urljoin(config.https_base, 'channels.json.asc') asc_path = os.path.join(config.tempdir, 'channels.json.asc') log.info('Looking for: {}', channels_url) with ExitStack() as stack: self.downloader.get_files([ (channels_url, channels_path), (asc_url, asc_path), ]) # Once we're done with them, we can remove these files. stack.callback(safe_remove, channels_path) stack.callback(safe_remove, asc_path) # The channels.json file must be signed with the SYSTEM IMAGE # SIGNING key. There may or may not be a blacklist. ctx = stack.enter_context( Context(config.gpg.image_signing, blacklist=self.blacklist)) try: ctx.validate(asc_path, channels_path) except SignatureError: # The signature on the channels.json file did not match. # Maybe there's a new image signing key on the server. If # we've already downloaded a new image signing key, then # there's nothing more to do but raise an exception. # Otherwise, if a new key *is* found, retry the current step. if count > 0: raise self._next.appendleft(self._get_signing_key) log.info('channels.json not properly signed') return # The signature was good. log.info('Local channels file: {}', channels_path) with open(channels_path, encoding='utf-8') as fp: self.channels = Channels.from_json(fp.read()) # Locate the index file for the channel/device. try: channel = self.channels[config.channel] except KeyError: log.info('no matching channel: {}', config.channel) return log.info('got channel: {}', config.channel) try: device = channel.devices[config.device] except KeyError: log.info('no matching device: {}', config.device) return log.info('found channel/device entry: {}/{}', config.channel, config.device) # The next step will depend on whether there is a device keyring # available or not. If there is, download and verify it now.
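# The chain of trust walked by the steps above and below, summarized as
# data (the device-signing URL is per-device, taken from the channel's
# keyring entry, so only a placeholder is shown):
TRUST_CHAIN = [
    # (keyring,          server path,                verified against)
    ('image-master',   'gpg/image-master.tar.xz',  'archive-master'),
    ('blacklist',      'gpg/blacklist.tar.xz',     'image-master'),
    ('image-signing',  'gpg/image-signing.tar.xz', 'image-master'),
    ('device-signing', '<device keyring.path>',    'image-signing'),
]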
keyring = getattr(device, 'keyring', None) if keyring: self._next.append(partial(self._get_device_keyring, keyring)) self._next.append(partial(self._get_index, device.index)) def _get_device_keyring(self, keyring): keyring_url = urljoin(config.https_base, keyring.path) asc_url = urljoin(config.https_base, keyring.signature) log.info('getting device keyring: {}', keyring_url) get_keyring( 'device-signing', (keyring_url, asc_url), 'image-signing', self.blacklist) # We don't need to set the next action because it's already been done. def _get_master_key(self): """Try to get and validate a new image master key. If there isn't one, throw a SignatureError. """ try: log.info('Getting the image master key') # The image master key must be signed by the archive master. get_keyring( 'image-master', 'gpg/image-master.tar.xz', 'archive-master', self.blacklist) except (FileNotFoundError, SignatureError, KeyringError): # No valid image master key could be found. log.error('No valid image master key found') raise # Retry the previous step. log.info('Installing new image master key to: {}', config.gpg.image_master) self._next.appendleft(self._get_blacklist_2) def _get_signing_key(self): """Try to get and validate a new image signing key. If there isn't one, throw a SignatureError. """ try: # The image signing key must be signed by the image master. get_keyring( 'image-signing', 'gpg/image-signing.tar.xz', 'image-master', self.blacklist) except (FileNotFoundError, SignatureError, KeyringError): # No valid image signing key could be found. Don't chain this # exception. log.error('No valid image signing key found') raise # Retry the previous step, but signal to _get_channel() that if the # signature fails this time, it's an error. self._next.appendleft(partial(self._get_channel, 1)) def _get_index(self, index): """Get and verify the index.json file.""" index_url = urljoin(config.https_base, index) asc_url = index_url + '.asc' index_path = os.path.join(config.tempdir, 'index.json') asc_path = index_path + '.asc' with ExitStack() as stack: self.downloader.get_files([ (index_url, index_path), (asc_url, asc_path), ]) stack.callback(os.remove, index_path) stack.callback(os.remove, asc_path) # Check the signature of the index.json file. It may be signed by # either the device keyring (if one exists) or the image signing # key. keyrings = [config.gpg.image_signing] if os.path.exists(config.gpg.device_signing): keyrings.append(config.gpg.device_signing) ctx = stack.enter_context( Context(*keyrings, blacklist=self.blacklist)) ctx.validate(asc_path, index_path) # The signature was good. with open(index_path, encoding='utf-8') as fp: self.index = Index.from_json(fp.read()) self._next.append(self._calculate_winner) def _calculate_winner(self): """Given an index, calculate the paths and score a winner.""" # If we were tracking a channel alias, and that channel alias has # changed, squash the build number to 0 before calculating the # winner. Otherwise, trust the configured build number. channel = self.channels[config.channel] # channel_target is the channel we're on based on the alias mapping in # our config files. channel_alias is the alias mapping in the # channel.json file, i.e. the channel an update will put us on. channel_target = getattr(config.service, 'channel_target', None) channel_alias = getattr(channel, 'alias', None) if ( channel_alias is None or channel_target is None or channel_alias == channel_target): build_number = config.build_number else: # This is a channel switch caused by a new alias.
Unless the # build number has been explicitly overridden on the command line # via --build/-b, use build number 0 to force a full update. build_number = (config.build_number if config.build_number_override else 0) self.channel_switch = (channel_target, channel_alias) candidates = get_candidates(self.index, build_number) log.debug('Candidates from build# {}: {}'.format( build_number, len(candidates))) if self.candidate_filter is not None: candidates = self.candidate_filter(candidates) self.winner = config.hooks.scorer().choose( candidates, (channel_target if channel_alias is None else channel_alias)) if len(self.winner) == 0: log.info('Already up-to-date') return winning_path = [str(image.version) for image in self.winner] log.info('Upgrade path is {}'.format(COLON.join(winning_path))) # Now filter the winning path to cap the maximum version number. if (self.winner_filter is not None and self.winner_filter.maximum_version is not None): log.info('Upgrade path capped at version {}'.format( self.winner_filter.maximum_version)) self.winner = self.winner_filter(self.winner) if len(self.winner) == 0: log.info('Capped upgrade leaves device up-to-date') return self._next.append(self._download_files) def _download_files(self): """Download and verify all the winning upgrade path's files.""" # If there is a device-signing key, the files can be signed by either # that or the image-signing key. keyrings = [config.gpg.image_signing] if os.path.exists(config.gpg.device_signing): keyrings.append(config.gpg.device_signing) # Now, go through all the file records in the winning upgrade path. # If the data file has already been downloaded and it has a valid # signature file, then we can save some bandwidth by not downloading # it again. downloads = [] signatures = [] checksums = [] # For the clean ups below, preserve recovery's log files. cache_dir = config.updater.cache_partition preserve = set(( os.path.join(cache_dir, 'log'), os.path.join(cache_dir, 'last_log'), )) for image_number, filerec in iter_path(self.winner): # Re-pack for arguments to get_files() and to collate the # signature path and checksum for the downloadable file. dst = os.path.join(cache_dir, os.path.basename(filerec.path)) asc = os.path.join(cache_dir, os.path.basename(filerec.signature)) checksum = filerec.checksum self.files.append((dst, (image_number, filerec.order))) self.files.append((asc, (image_number, filerec.order))) # Check the existence and signature of the file. if _use_cached(dst, asc, keyrings, checksum, self.blacklist): preserve.add(dst) preserve.add(asc) else: # Add the data file, which has a checksum. downloads.append(Record( urljoin(config.http_base, filerec.path), dst, checksum)) # Add the signature file, which does not have a checksum. downloads.append(Record( urljoin(config.http_base, filerec.signature), asc)) signatures.append((dst, asc)) checksums.append((dst, checksum)) # For any files we're about to download, we must make sure that none # of the destination file paths exist, otherwise the downloader will # throw exceptions. for record in downloads: safe_remove(record.destination) # Also delete cache partition files that we no longer need. for filename in os.listdir(cache_dir): path = os.path.join(cache_dir, filename) if path not in preserve: safe_remove(os.path.join(cache_dir, filename)) # Now, download all missing or ill-signed files, providing logging # feedback on progress. This download can be paused. The downloader # should also signal when the file downloads have started. 
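# Worked example of the alias-switch rule above (channel names are
# illustrative; the helper is a distillation, not part of this module):
def _effective_build(build_number, overridden, channel_target, channel_alias):
    if (channel_alias is None or channel_target is None
            or channel_alias == channel_target):
        return build_number
    return build_number if overridden else 0

assert _effective_build(42, False, 'daily', 'daily') == 42     # no switch
assert _effective_build(42, False, 'daily', 'proposed') == 0   # force full
assert _effective_build(42, True, 'daily', 'proposed') == 42   # -b wins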
self.downloader.get_files( downloads, pausable=True, signal_started=True) with ExitStack() as stack: # Set things up to remove the files if a SignatureError gets # raised or if the checksums don't match. If everything's okay, # we'll clear the stack before the context manager exits so none # of the files will get removed. for record in downloads: stack.callback(os.remove, record.destination) # Although we should never get there, if the downloading step # fails, clear out the self.files list so there's no possibility # we'll try to move them later. stack.callback(setattr, self, 'files', []) # Verify the signatures on all the downloaded files. with Context(*keyrings, blacklist=self.blacklist) as ctx: for dst, asc in signatures: ctx.validate(asc, dst) # Verify the checksums. for dst, checksum in checksums: with open(dst, 'rb') as fp: got = calculate_signature(fp) if got != checksum: raise ChecksumError(dst, got, checksum) # Everything is fine so nothing needs to be cleared. stack.pop_all() log.info('all files available in {}', cache_dir) # Now, copy the files from the temporary directory into the location # for the upgrader. self._next.append(self._move_files) def _move_files(self): # The upgrader already has the archive-master, so we don't need to # copy it. The image-master, image-signing, and device-signing (if # there is one) keys go to the cache partition. They may already be # there if they had to be downloaded, but if not, they're in /var/lib # and now need to be copied to the cache partition. The blacklist # keyring, if there is one, should already exist in the data partition. cache_dir = config.updater.cache_partition makedirs(cache_dir) # Copy the keyring.tar.xz and .asc files. _copy_if_missing(config.gpg.image_master, cache_dir) _copy_if_missing(config.gpg.image_master + '.asc', cache_dir) _copy_if_missing(config.gpg.image_signing, cache_dir) _copy_if_missing(config.gpg.image_signing + '.asc', cache_dir) _copy_if_missing(config.gpg.device_signing, cache_dir) _copy_if_missing(config.gpg.device_signing + '.asc', cache_dir) # Issue the reboot. self._next.append(self._prepare_recovery) def _prepare_recovery(self): # First we have to create the ubuntu_command file, which will tell the # updater which files to apply and in which order. Right now, # self.files contains a sequence of the following contents: # # [ # (file_1, (image_number, order)), # (file_1.asc, (image_number, order)), # (file_2, (image_number, order)), # (file_2.asc, (image_number, order)), # ... # ] # # The order of the .asc file is redundant. Rearrange this sequence so # that we have the following: # # [ # ((image_number, order), file_1, file_1.asc), # ((image_number, order), file_2, file_2.asc), # ... # ] log.info('preparing recovery') collated = [] zipper = zip( # items # 0, 2, 4, ... islice(self.files, 0, None, 2), # items # 1, 3, 5, ... islice(self.files, 1, None, 2)) for (txz, txz_order), (asc, asc_order) in zipper: assert txz_order == asc_order, 'Mismatched tar.xz/.asc files' collated.append((txz_order, txz, asc)) ordered = sorted(collated) # Open command file and first write the load_keyring commands.
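# For reference, the ubuntu_command file written below comes out roughly
# like this (tarball names illustrative; 'format system' appears only when
# the winning path contains a full image, and the device-signing line only
# when that keyring exists):
#
#   load_keyring image-master.tar.xz image-master.tar.xz.asc
#   load_keyring image-signing.tar.xz image-signing.tar.xz.asc
#   load_keyring device-signing.tar.xz device-signing.tar.xz.asc
#   format system
#   mount system
#   update 6.tar.xz 6.tar.xz.asc
#   update 7.tar.xz 7.tar.xz.asc
#   unmount system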
command_file = os.path.join( config.updater.cache_partition, 'ubuntu_command') with atomic(command_file) as fp: print('load_keyring {0} {0}.asc'.format( os.path.basename(config.gpg.image_master)), file=fp) print('load_keyring {0} {0}.asc'.format( os.path.basename(config.gpg.image_signing)), file=fp) if os.path.exists(config.gpg.device_signing): print('load_keyring {0} {0}.asc'.format( os.path.basename(config.gpg.device_signing)), file=fp) # If there is a full update, the file system must be formatted. for image in self.winner: if image.type == 'full': print('format system', file=fp) break # The filesystem must be mounted. print('mount system', file=fp) # Now write all the update commands for the tar.xz files. for order, txz, asc in ordered: print('update {} {}'.format( os.path.basename(txz), os.path.basename(asc)), file=fp) # The filesystem must be unmounted. print('unmount system', file=fp) self._next.append(self._apply) def _apply(self): log.info('applying') config.hooks.apply().apply() # Nothing more to do. ./systemimage/dbus.py0000644000015600001650000004266312701500553014734 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """DBus service.""" __all__ = [ 'Loop', 'Service', 'log_and_exit', ] import os import sys import logging from datetime import datetime from dbus.service import Object, method, signal from functools import wraps from gi.repository import GLib from systemimage.api import Mediator from systemimage.config import config from systemimage.helpers import last_update_date from systemimage.settings import Settings from threading import Lock EMPTYSTRING = '' log = logging.getLogger('systemimage') dbus_log = logging.getLogger('systemimage.dbus') def log_and_exit(function): """Decorator for D-Bus methods to handle tracebacks. Put this *above* the @method or @signal decorator. It will cause the exception to be logged and the D-Bus service will exit. """ @wraps(function) def wrapper(*args, **kws): try: dbus_log.info('>>> {}', function.__name__) retval = function(*args, **kws) dbus_log.info('<<< {}', function.__name__) return retval except: dbus_log.info('!!!
{}', function.__name__) dbus_log.exception('Error in D-Bus method') self = args[0] assert isinstance(self, Service), args[0] sys.exit(1) return wrapper class Loop: """Keep track of the main loop.""" def __init__(self): self._loop = GLib.MainLoop() self._quitter = None def keepalive(self): if self._quitter is not None: GLib.source_remove(self._quitter) self._quitter = None self._quitter = GLib.timeout_add_seconds( config.dbus.lifetime.total_seconds(), self.quit) def quit(self): if self._quitter is not None: GLib.source_remove(self._quitter) self._quitter = None self._loop.quit() def run(self): self._loop.run() class Service(Object): """Main dbus service.""" def __init__(self, bus, object_path, loop): super().__init__(bus, object_path) self.loop = loop self._api = Mediator(self._progress_callback) log.info('Mediator created {}', self._api) self._checking = Lock() self._downloading = Lock() self._update = None self._paused = False self._applicable = False self._failure_count = 0 self._last_error = '' @log_and_exit def _check_for_update(self): # Asynchronous method call. log.info('Enter _check_for_update()') self._update = self._api.check_for_update() log.info('_check_for_update(): checking lock releasing') try: self._checking.release() except RuntimeError: # pragma: no udm log.info('_check_for_update(): checking lock already released') else: log.info('_check_for_update(): checking lock released') # Do we have an update and can we auto-download it? delayed_download = False if self._update.is_available: settings = Settings() auto = settings.get('auto_download') log.info('Update available; auto-download: {}', auto) if auto in ('1', '2'): # XXX When we have access to the download service, we can # check if we're on the wifi (auto == '1'). delayed_download = True GLib.timeout_add(50, self._download) # We have a timing issue. We can't lock the downloading lock here, # otherwise when _download() starts running in ~50ms it will think a # download is already in progress. But we want to send the UAS signal # here and now, *and* indicate whether the download is about to happen. # So just lie for now since in ~50ms the download will begin. self.UpdateAvailableStatus( self._update.is_available, delayed_download, self._update.version, self._update.size, last_update_date(), self._update.error) # Stop GLib from calling this method again. return False # 2013-07-25 BAW: should we use the rather underdocumented async_callbacks # argument to @method? @log_and_exit @method('com.canonical.SystemImage') def CheckForUpdate(self): """Find out whether an update is available. This method is used to explicitly check whether an update is available, by communicating with the server and calculating an upgrade path from the current build number to a later build available on the server. This method runs asynchronously and thus does not return a result. Instead, an `UpdateAvailableStatus` signal is triggered when the check completes. The argument to that signal is a boolean indicating whether the update is available or not. """ self.loop.keepalive() # Check-and-acquire the lock. log.info('CheckForUpdate(): checking lock test and acquire') if not self._checking.acquire(blocking=False): log.info('CheckForUpdate(): checking lock not acquired') # Check is already in progress, so there's nothing more to do. If # there's status available (i.e. we are in the auto-downloading # phase of the last CFU), then send the status. 
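# The check-and-acquire idiom used by CheckForUpdate(), in isolation (a
# minimal sketch; the real code releases the lock from the asynchronous
# _check_for_update() callback rather than inline):
from threading import Lock

_checking = Lock()

def _cfu_sketch():
    if not _checking.acquire(blocking=False):
        return 'check already in progress; re-emit last status'
    # ... schedule the actual check, which releases _checking when done ...
    return 'check started'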
if self._update is not None: self.UpdateAvailableStatus( self._update.is_available, self._downloading.locked(), self._update.version, self._update.size, last_update_date(), "") return log.info('CheckForUpdate(): checking lock acquired') # We've now acquired the lock. Reset any failure or in-progress # state. Get a new mediator to reset any of its state. self._api = Mediator(self._progress_callback) log.info('Mediator recreated {}', self._api) self._failure_count = 0 self._last_error = '' # Arrange for the actual check to happen in a little while, so that # this method can return immediately. GLib.timeout_add(50, self._check_for_update) #@log_and_exit def _progress_callback(self, received, total): # Plumb the progress through our own D-Bus API. Our API is defined as # signalling a percentage and an eta. We can calculate the percentage # easily, but the eta is harder. For now, we just send 0 as the eta. percentage = received * 100 // total eta = 0 self.UpdateProgress(percentage, eta) @log_and_exit def _download(self): if self._downloading.locked() and self._paused: self._api.resume() self._paused = False log.info('Download previously paused') return if (self._downloading.locked() # Already in progress. or self._update is None # Not yet checked. or not self._update.is_available # No update available. ): log.info('Download already in progress or not available') return if self._failure_count > 0: self._failure_count += 1 self.UpdateFailed(self._failure_count, self._last_error) log.info('Update failures: {}; last error: {}', self._failure_count, self._last_error) return log.info('_download(): downloading lock entering critical section') with self._downloading: log.info('Update is downloading') try: # Always start by sending a UpdateProgress(0, 0). This is # enough to get the u/i's attention. self.UpdateProgress(0, 0) self._api.download() except Exception: log.exception('Download failed') self._failure_count += 1 # Set the last error string to the exception's class name. exception, value = sys.exc_info()[:2] # if there's no meaningful value, omit it. value_str = str(value) name = exception.__name__ self._last_error = ('{}'.format(name) if len(value_str) == 0 else '{}: {}'.format(name, value)) self.UpdateFailed(self._failure_count, self._last_error) else: log.info('Update downloaded') self.UpdateDownloaded() self._failure_count = 0 self._last_error = '' self._applicable = True log.info('_download(): downloading lock finished critical section') # Stop GLib from calling this method again. return False @log_and_exit @method('com.canonical.SystemImage') def DownloadUpdate(self): """Download the available update. The download may be canceled during this time. """ # Arrange for the update to happen in a little while, so that this # method can return immediately. self.loop.keepalive() GLib.timeout_add(50, self._download) @log_and_exit @method('com.canonical.SystemImage', out_signature='s') def PauseDownload(self): """Pause a downloading update.""" self.loop.keepalive() if self._downloading.locked(): self._api.pause() self._paused = True error_message = '' else: error_message = 'not downloading' return error_message @log_and_exit @method('com.canonical.SystemImage', out_signature='s') def CancelUpdate(self): """Cancel a download.""" self.loop.keepalive() # During the download, this will cause an UpdateFailed signal to be # issued, as part of the exception handling in _download(). If we're # not downloading, then no signal need be sent. 
There's no need to # send *another* signal when downloading, because we never will be # downloading by the time we get past this next call. self._api.cancel() # XXX 2013-08-22: If we can't cancel the current download, return the # reason in this string. return '' @log_and_exit def _apply_update(self): self.loop.keepalive() if not self._applicable: command_file = os.path.join( config.updater.cache_partition, 'ubuntu_command') if not os.path.exists(command_file): # Not enough has been downloaded to allow for the update to be # applied. self.Applied(False) return self._api.apply() # This code may or may not run. On devices for which applying the # update requires a system reboot, we're racing against that reboot # procedure. self._applicable = False self.Applied(True) @log_and_exit @method('com.canonical.SystemImage') def ApplyUpdate(self): """Apply the update, rebooting the device.""" GLib.timeout_add(50, self._apply_update) return '' @log_and_exit @method('com.canonical.SystemImage') def ForceAllowGSMDownload(self): # pragma: no curl """Force an existing group download to proceed over GSM.""" log.info('Mediator {}', self._api) self._api.allow_gsm() return '' @log_and_exit @method('com.canonical.SystemImage', out_signature='a{ss}') def Information(self): self.loop.keepalive() settings = Settings() current_build_number = str(config.build_number) version_detail = getattr(config.service, 'version_detail', '') response = dict( current_build_number=current_build_number, device_name=config.device, channel_name=config.channel, last_update_date=last_update_date(), version_detail=version_detail, last_check_date=settings.get('last_check_date'), ) if self._update is None: response['target_build_number'] = '-1' response['target_version_detail'] = '' elif not self._update.is_available: response['target_build_number'] = current_build_number response['target_version_detail'] = version_detail else: response['target_build_number'] = str(self._update.version) response['target_version_detail'] = self._update.version_detail return response @log_and_exit @method('com.canonical.SystemImage', in_signature='ss') def SetSetting(self, key, value): """Set a key/value setting. Some values are special, e.g. min_battery and auto_downloads. Implement these special semantics here. """ self.loop.keepalive() if key == 'min_battery': try: as_int = int(value) except ValueError: return if as_int < 0 or as_int > 100: return if key == 'auto_download': try: as_int = int(value) except ValueError: return if as_int not in (0, 1, 2): return settings = Settings() old_value = settings.get(key) settings.set(key, value) if value != old_value: # Send the signal. self.SettingChanged(key, value) @log_and_exit @method('com.canonical.SystemImage', in_signature='s', out_signature='s') def GetSetting(self, key): """Get a setting.""" self.loop.keepalive() return Settings().get(key) @log_and_exit @method('com.canonical.SystemImage') def FactoryReset(self): self._api.factory_reset() @log_and_exit @method('com.canonical.SystemImage') def ProductionReset(self): self._api.production_reset() @log_and_exit @method('com.canonical.SystemImage') def Exit(self): """Quit the daemon immediately.""" self.loop.quit() @log_and_exit @signal('com.canonical.SystemImage', signature='bbsiss') def UpdateAvailableStatus(self, is_available, downloading, available_version, update_size, last_update_date, error_reason): """Signal sent in response to a CheckForUpdate().""" # For .Information()'s last_check_date value. 
iso8601_now = datetime.now().replace(microsecond=0).isoformat(sep=' ') Settings().set('last_check_date', iso8601_now) log.debug('EMIT UpdateAvailableStatus({}, {}, {}, {}, {}, {})', is_available, downloading, available_version, update_size, last_update_date, repr(error_reason)) self.loop.keepalive() @log_and_exit @signal('com.canonical.SystemImage') def DownloadStarted(self): """The download has started.""" log.debug('EMIT DownloadStarted()') self.loop.keepalive() #@log_and_exit @signal('com.canonical.SystemImage', signature='id') def UpdateProgress(self, percentage, eta): """Download progress.""" log.debug('EMIT UpdateProgress({}, {})', percentage, eta) self.loop.keepalive() @log_and_exit @signal('com.canonical.SystemImage') def UpdateDownloaded(self): """The update has been successfully downloaded.""" log.debug('EMIT UpdateDownloaded()') self.loop.keepalive() @log_and_exit @signal('com.canonical.SystemImage', signature='is') def UpdateFailed(self, consecutive_failure_count, last_reason): """The update failed for some reason.""" log.debug('EMIT UpdateFailed({}, {})', consecutive_failure_count, repr(last_reason)) self.loop.keepalive() @log_and_exit @signal('com.canonical.SystemImage', signature='i') def UpdatePaused(self, percentage): """The download got paused.""" log.debug('EMIT UpdatePaused({})', percentage) self.loop.keepalive() @log_and_exit @signal('com.canonical.SystemImage', signature='ss') def SettingChanged(self, key, new_value): """A setting value has changed.""" log.debug('EMIT SettingChanged({}, {})', repr(key), repr(new_value)) self.loop.keepalive() @log_and_exit @signal('com.canonical.SystemImage', signature='b') def Applied(self, status): """The update has been applied.""" log.debug('EMIT Applied({})', status) self.loop.keepalive() @log_and_exit @signal('com.canonical.SystemImage', signature='b') def Rebooting(self, status): """The system is rebooting.""" # We don't need to keep the loop alive since we're probably just going # to shutdown anyway. log.debug('EMIT Rebooting({})', status) ./systemimage/gpg.py0000644000015600001650000002223012701500553014550 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Handle GPG signature verification.""" __all__ = [ 'Context', 'SignatureError', ] import os import gnupg import hashlib import tarfile from contextlib import ExitStack from systemimage.config import config from systemimage.helpers import calculate_signature, temporary_directory class SignatureError(Exception): """Exception raised when some signature fails to validate. Note that this exception isn't raised by Context.verify(); that method always returns a boolean. This exception is used by other functions to signal that a .asc file did not match.
""" def __init__(self, signature_path, data_path, keyrings=None, blacklist=None): super().__init__() self.signature_path = signature_path self.data_path = data_path self.keyrings = ([] if keyrings is None else keyrings) self.blacklist = blacklist # We have to calculate the checksums now, because it's possible that # the files will be temporary/atomic files, deleted when a context # manager exits. I.e. the files aren't guaranteed to exist after this # constructor runs. # # Also, md5 is fine; this is not a security critical context, we just # want to be able to quickly and easily compare the file on disk # against the file on the server. with open(self.signature_path, 'rb') as fp: self.signature_checksum = calculate_signature(fp, hashlib.md5) with open(self.data_path, 'rb') as fp: self.data_checksum = calculate_signature(fp, hashlib.md5) self.keyring_checksums = [] for path in self.keyrings: with open(path, 'rb') as fp: checksum = calculate_signature(fp, hashlib.md5) self.keyring_checksums.append(checksum) if self.blacklist is None: self.blacklist_checksum = None else: with open(self.blacklist, 'rb') as fp: self.blacklist_checksum = calculate_signature(fp, hashlib.md5) def __str__(self): if self.blacklist is None: checksum_str = 'no blacklist' path_str = '' else: checksum_str = self.blacklist_checksum path_str = self.blacklist return """ sig path : {0.signature_checksum} {0.signature_path} data path: {0.data_checksum} {0.data_path} keyrings : {0.keyring_checksums} {1} blacklist: {2} {3} """.format(self, list(self.keyrings), checksum_str, path_str) class Context: def __init__(self, *keyrings, blacklist=None): """Create a GPG signature verification context. :param keyrings: The list of keyrings to use for validating the signature on data files. :type keyrings: Sequence of .tar.xz keyring files, which will be unpacked to retrieve the actual .gpg keyring file. :param blacklist: The blacklist keyring, from which fingerprints to explicitly disallow are retrieved. :type blacklist: A .tar.xz keyring file, which will be unpacked to retrieve the actual .gpg keyring file. """ self.keyring_paths = keyrings self.blacklist_path = blacklist self._ctx = None self._stack = ExitStack() self._keyrings = [] # The keyrings must be .tar.xz files, which need to be unpacked and # the keyring.gpg files inside them cached, using their actual name # (based on the .tar.xz file name). If we don't already have a cache # of the .gpg file, do the unpackaging and use the contained .gpg file # as the keyring. Note that this class does *not* validate the # .tar.xz files. That must be done elsewhere. for path in keyrings: base, dot, tarxz = os.path.basename(path).partition('.') assert dot == '.' and tarxz == 'tar.xz', ( 'Expected a .tar.xz path, got: {}'.format(path)) keyring_path = os.path.join(config.tempdir, base + '.gpg') if not os.path.exists(keyring_path): with tarfile.open(path, 'r:xz') as tf: tf.extract('keyring.gpg', config.tempdir) os.rename( os.path.join(config.tempdir, 'keyring.gpg'), os.path.join(config.tempdir, keyring_path)) self._keyrings.append(keyring_path) # Since python-gnupg doesn't do this for us, verify that all the # keyrings and blacklist files exist. Yes, this introduces a race # condition, but I don't see any good way to eliminate this given # python-gnupg's behavior. 
for path in self._keyrings: if not os.path.exists(path): # pragma: no cover raise FileNotFoundError(path) if blacklist is not None: if not os.path.exists(blacklist): raise FileNotFoundError(blacklist) # Extract all the blacklisted fingerprints. with Context(blacklist) as ctx: self._blacklisted_fingerprints = ctx.fingerprints else: self._blacklisted_fingerprints = set() def __enter__(self): try: # Use a temporary directory for the $GNUPGHOME, but be sure to # arrange for the tempdir to be deleted no matter what. home = self._stack.enter_context( temporary_directory(prefix='si-gnupghome', dir=config.tempdir)) self._ctx = gnupg.GPG(gnupghome=home, keyring=self._keyrings) self._stack.callback(setattr, self, '_ctx', None) except: # pragma: no cover # Restore all context and re-raise the exception. self._stack.close() raise else: return self def __exit__(self, *exc_details): self._stack.close() # Don't swallow exceptions. return False @property def keys(self): return self._ctx.list_keys() @property def fingerprints(self): return set(info['fingerprint'] for info in self._ctx.list_keys()) @property def key_ids(self): return set(info['keyid'] for info in self._ctx.list_keys()) def verify(self, signature_path, data_path): """Verify a GPG signature. This verifies that the data file signature is valid, given the keyrings and blacklist specified in the constructor. Specifically, we use GPG to extract the fingerprint in the signature path, and compare it against the fingerprints in the keyrings, subtracting any fingerprints in the blacklist. :param signature_path: The file system path to the detached signature file for the data file. :type signature_path: str :param data_path: The file system path to the data file. :type data_path: str :return: bool """ # For testing on some systems that are connecting to test servers, GPG # verification isn't possible. The s-i-cli supports a switch to # disable all GPG checks. if config.skip_gpg_verification: return True with open(signature_path, 'rb') as sig_fp: verified = self._ctx.verify_file(sig_fp, data_path) # If the file is properly signed, we'll be able to get back a set of # fingerprints that signed the file. From here we do a set operation # to see if the fingerprints are in the list of keys from all the # loaded-up keyrings. If so, the signature succeeds. return verified.fingerprint in (self.fingerprints - self._blacklisted_fingerprints) def validate(self, signature_path, data_path): """Like .verify() but raises a SignatureError when invalid. :param signature_path: The file system path to the detached signature file for the data file. :type signature_path: str :param data_path: The file system path to the data file. :type data_path: str :return: None :raises SignatureError: when the signature cannot be verified. Note that the exception will contain extra information, namely the keyrings involved in the verification, as well as the blacklist file if there is one. """ if not self.verify(signature_path, data_path): raise SignatureError(signature_path, data_path, self.keyring_paths, self.blacklist_path) ./systemimage/logging.py0000644000015600001650000001225012701500553015422 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Set up logging, both for main script execution and the test suite.""" __all__ = [ 'debug_logging', 'initialize', 'make_handler', ] import sys import stat import logging from contextlib import contextmanager, suppress from pathlib import Path from systemimage.config import config from systemimage.helpers import DEFAULT_DIRMODE from xdg.BaseDirectory import xdg_cache_home DATE_FMT = '%b %d %H:%M:%S %Y' MSG_FMT = '[{name}] {asctime} ({process:d}) {message}' LOGFILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR # We want to support {}-style logging for all systemimage child loggers. One # way to do this is with a LogRecord factory, but to play nice with third # party loggers which might be using %-style, we have to make sure that we use # the default factory for everything else. # # This actually isn't the best way to do this because it still makes a global # change and we don't know how this will interact with other third party # loggers. A marginally better way to do this is to pass class instances to # the logging calls. Those instances would have a __str__() method that does # the .format() conversion. The problem with that is that it's a bit less # convenient to make the logging calls because you can't pass strings # directly. One such suggestion is to import # the class as __ (i.e. double underscore) so your logging calls would look # like: log.error(__('Message with {} {}'), foo, bar) class FormattingLogRecord(logging.LogRecord): def __init__(self, name, *args, **kws): logger_path = name.split('.') self._use_format = (logger_path[0] == 'systemimage') super().__init__(name, *args, **kws) def getMessage(self): if self._use_format: msg = str(self.msg) if self.args: msg = msg.format(*self.args) return msg else: # pragma: no cover return super().getMessage() def make_handler(path): # issue21539 - mkdir(..., exist_ok=True) with suppress(FileExistsError): path.parent.mkdir(DEFAULT_DIRMODE, parents=True) path.touch(LOGFILE_PERMISSIONS) # Our handler will output in UTF-8 using {} style logging. formatter = logging.Formatter(style='{', fmt=MSG_FMT, datefmt=DATE_FMT) handler = logging.FileHandler(bytes(path), encoding='utf-8') handler.setFormatter(formatter) return handler def initialize(*, verbosity=0): """Initialize the loggers.""" main, dbus = config.system.loglevel for name, loglevel in (('systemimage', main), ('systemimage.dbus', dbus), ('dbus.proxies', dbus)): level = { 0: logging.ERROR, 1: logging.INFO, 2: logging.DEBUG, 3: logging.CRITICAL, }.get(verbosity, logging.ERROR) level = min(level, loglevel) # Make sure our library's logging uses {}-style messages. logging.setLogRecordFactory(FormattingLogRecord) # Now configure the application level logger based on the ini file. log = logging.getLogger(name) try: handler = make_handler(Path(config.system.logfile)) except PermissionError: handler = make_handler( Path(xdg_cache_home) / 'system-image' / 'client.log') handler.setLevel(level) log.addHandler(handler) log.propagate = False # If we want more verbosity, add a stream handler. if verbosity == 0: # pragma: no branch # Set the log level.
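# ----------------------------------------------------------------------
# Aside (sketch, not part of the original source): the {}-style
# LogRecord factory described above, reduced to a runnable demo.  Unlike
# FormattingLogRecord, this toy opts every logger in rather than keying
# off the 'systemimage' namespace.
import logging

class BraceLogRecord(logging.LogRecord):
    def getMessage(self):
        msg = str(self.msg)
        return msg.format(*self.args) if self.args else msg

logging.setLogRecordFactory(BraceLogRecord)
logging.basicConfig()
logging.getLogger('demo').error('retried {} times over {}', 3, '2d')
# ----------------------------------------------------------------------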
log.setLevel(level) else: # pragma: no cover handler = logging.StreamHandler(stream=sys.stderr) handler.setLevel(level) formatter = logging.Formatter( style='{', fmt=MSG_FMT, datefmt=DATE_FMT) handler.setFormatter(formatter) log.addHandler(handler) # Set the overall level on the log object to the minimum level. log.setLevel(level) # Please be quiet gnupg. gnupg_log = logging.getLogger('gnupg') gnupg_log.propagate = False @contextmanager def debug_logging(): # pragma: no cover # getEffectiveLevel() is the best we can do, but it's good enough because # we always set the level of the logger. log = logging.getLogger('systemimage') old_level = log.getEffectiveLevel() try: log.setLevel(logging.DEBUG) yield finally: log.setLevel(old_level) ./systemimage/config.py0000644000015600001650000002602112701500553015242 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Read the configuration file.""" __all__ = [ 'Configuration', 'config', ] import os import atexit from configparser import ConfigParser from contextlib import ExitStack from pathlib import Path from systemimage.bag import Bag from systemimage.helpers import ( NO_PORT, as_loglevel, as_object, as_port, as_stripped, as_timedelta, makedirs, temporary_directory) SECTIONS = ('service', 'system', 'gpg', 'updater', 'hooks', 'dbus') USER_AGENT = ('Ubuntu System Image Upgrade Client: ' 'device={0.device};channel={0.channel};build={0.build_number}') def expand_path(path): return os.path.abspath(os.path.expanduser(path)) class SafeConfigParser(ConfigParser): """Like ConfigParser, but with default empty sections. This makes the style of loading keys/values into the Bag objects a little cleaner since it doesn't have to worry about KeyErrors when a configuration file doesn't contain a section, which is allowed. """ def __init__(self, *args, **kws): super().__init__(*args, **kws) for section in SECTIONS: self[section] = {} class Configuration: def __init__(self, directory=None): self._set_defaults() # Because the configuration object is a global singleton, it makes for # a convenient place to stash information used by widely separate # components. For example, this is a placeholder for rendezvous # between the downloader and the D-Bus service. When running under # D-Bus and we get a `paused` signal from the download manager, we need # this to plumb through an UpdatePaused signal to our clients. It # rather sucks that we need a global for this, but I can't get the # plumbing to work otherwise. This seems like the least horrible place # to stash this global. self.dbus_service = None # These are used to plumb command line arguments from the main() to # other parts of the system. self.skip_gpg_verification = False self.override_gsm = False # Cache. self._device = None self._build_number = None self.build_number_override = False self._channel = None # This is used only to override the phased percentage via command line # and the property setter.
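# ----------------------------------------------------------------------
# Aside (sketch, not part of the original source): the phased
# percentage being overridden here is normally a stable, seeded draw
# per (channel, target build, machine-id) triple; mirroring
# systemimage.helpers.phased_percentage() further below:
import random

def phase_for(channel, target, machine_id):
    r = random.Random()
    r.seed('{}.{}.{}'.format(channel, target, machine_id))
    return r.randint(0, 100)
# ----------------------------------------------------------------------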
self._phase_override = None self._tempdir = None self.config_d = None self.ini_files = [] self.http_base = None self.https_base = None if directory is not None: self.load(directory) self._calculate_http_bases() self._resources = ExitStack() atexit.register(self._resources.close) def _set_defaults(self): self.service = Bag( base='system-image.ubuntu.com', http_port=80, https_port=443, channel='daily', build_number=0, ) self.system = Bag( timeout=as_timedelta('1h'), tempdir='/tmp', logfile='/var/log/system-image/client.log', loglevel=as_loglevel('info'), settings_db='/var/lib/system-image/settings.db', ) self.gpg = Bag( archive_master='/usr/share/system-image/archive-master.tar.xz', image_master='/var/lib/system-image/keyrings/image-master.tar.xz', image_signing= '/var/lib/system-image/keyrings/image-signing.tar.xz', device_signing= '/var/lib/system-image/keyrings/device-signing.tar.xz', ) self.updater = Bag( cache_partition='/android/cache/recovery', data_partition='/var/lib/system-image', ) self.hooks = Bag( device=as_object('systemimage.device.SystemProperty'), scorer=as_object('systemimage.scores.WeightedScorer'), apply=as_object('systemimage.apply.Reboot'), ) self.dbus = Bag( lifetime=as_timedelta('10m'), ) def _load_file(self, path): parser = SafeConfigParser() str_path = str(path) parser.read(str_path) self.ini_files.append(path) self.service.update(converters=dict(http_port=as_port, https_port=as_port, build_number=int, device=as_stripped, ), **parser['service']) self.system.update(converters=dict(timeout=as_timedelta, loglevel=as_loglevel, settings_db=expand_path, tempdir=expand_path), **parser['system']) self.gpg.update(**parser['gpg']) self.updater.update(**parser['updater']) self.hooks.update(converters=dict(device=as_object, scorer=as_object, apply=as_object), **parser['hooks']) self.dbus.update(converters=dict(lifetime=as_timedelta), **parser['dbus']) def load(self, directory): """Load up the configuration from a config.d directory.""" # Look for all the files in the given directory with .ini or .cfg # suffixes. The files must start with a number, and the files are # loaded in numeric order. if self.config_d is not None: raise RuntimeError('Configuration already loaded; use .reload()') self.config_d = directory if not Path(directory).is_dir(): raise TypeError( '.load() requires a directory: {}'.format(directory)) candidates = [] for child in Path(directory).glob('*.ini'): order, _, base = child.stem.partition('_') # XXX 2014-10-03: The logging system isn't initialized when we get # here, so we can't log that these files are being ignored. if len(_) == 0: continue try: serial = int(order) except ValueError: continue candidates.append((serial, child)) for serial, path in sorted(candidates): self._load_file(path) self._calculate_http_bases() def reload(self): """Reload the configuration directory.""" # Reset some cached attributes. directory = self.config_d self.ini_files = [] self.config_d = None self._build_number = None # Now load the defaults, then reload the previous config.d directory. self._set_defaults() self.load(directory) def _calculate_http_bases(self): if (self.service.http_port is NO_PORT and self.service.https_port is NO_PORT): raise ValueError('Cannot disable both http and https ports') # Construct the HTTP and HTTPS base urls, which most applications will # actually use. We do this in two steps, in order to support disabling # one or the other (but not both) protocols. 
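# ----------------------------------------------------------------------
# Aside (sketch, not part of the original source): the two-step base
# URL derivation described above as a standalone pure function.
# NO_PORT stands in for the 'disabled' sentinel from
# systemimage.helpers.
NO_PORT = object()

def http_bases(base, http_port, https_port):
    if http_port is NO_PORT and https_port is NO_PORT:
        raise ValueError('Cannot disable both http and https ports')
    http = (None if http_port is NO_PORT
            else 'http://{}'.format(base) if http_port == 80
            else 'http://{}:{}'.format(base, http_port))
    https = (None if https_port is NO_PORT
             else 'https://{}'.format(base) if https_port == 443
             else 'https://{}:{}'.format(base, https_port))
    # When one protocol is disabled, both bases point at the other.
    return (http or https, https or http)
# ----------------------------------------------------------------------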
if self.service.http_port == 80: http_base = 'http://{}'.format(self.service.base) elif self.service.http_port is NO_PORT: http_base = None else: http_base = 'http://{}:{}'.format( self.service.base, self.service.http_port) # HTTPS. if self.service.https_port == 443: https_base = 'https://{}'.format(self.service.base) elif self.service.https_port is NO_PORT: https_base = None else: https_base = 'https://{}:{}'.format( self.service.base, self.service.https_port) # Sanity check and final settings. if http_base is None: assert https_base is not None http_base = https_base if https_base is None: assert http_base is not None https_base = http_base self.http_base = http_base self.https_base = https_base @property def build_number(self): if self._build_number is None: self._build_number = self.service.build_number return self._build_number @build_number.setter def build_number(self, value): if not isinstance(value, int): raise ValueError( 'integer is required, got: {}'.format(type(value).__name__)) self._build_number = value self.build_number_override = True @build_number.deleter def build_number(self): self._build_number = None @property def device(self): if self._device is None: # Start by looking for a [service]device setting. Use this if it # exists, otherwise fall back to calling the hook. self._device = getattr(self.service, 'device', None) if not self._device: self._device = self.hooks.device().get_device() return self._device @device.setter def device(self, value): self._device = value @property def channel(self): if self._channel is None: self._channel = self.service.channel return self._channel @channel.setter def channel(self, value): self._channel = value @property def phase_override(self): return self._phase_override @phase_override.setter def phase_override(self, value): self._phase_override = max(0, min(100, int(value))) @phase_override.deleter def phase_override(self): self._phase_override = None @property def tempdir(self): if self._tempdir is None: makedirs(self.system.tempdir) self._tempdir = self._resources.enter_context( temporary_directory(prefix='system-image-', dir=self.system.tempdir)) return self._tempdir @property def user_agent(self): return USER_AGENT.format(self) # Define the global configuration object. We use a proxy here so that # post-object creation loading will work. _config = Configuration() class _Proxy: def __getattribute__(self, name): return getattr(_config, name) def __setattr__(self, name, value): setattr(_config, name, value) def __delattr__(self, name): delattr(_config, name) config = _Proxy() ./systemimage/device.py0000644000015600001650000000307412701500553015237 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
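# ----------------------------------------------------------------------
# Aside (sketch, not part of the original source): Configuration.load()
# above only honors NN_name.ini files inside config.d, loading them in
# numeric order; the same selection, in isolation:
from pathlib import Path

def ordered_ini_files(directory):
    candidates = []
    for child in Path(directory).glob('*.ini'):
        order, sep, _ = child.stem.partition('_')
        if not sep:
            continue                # no underscore separator: skipped
        try:
            serial = int(order)
        except ValueError:
            continue                # non-numeric prefix: skipped
        candidates.append((serial, child))
    return [path for serial, path in sorted(candidates)]
# ----------------------------------------------------------------------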
"""Device type calculation.""" __all__ = [ 'SystemProperty', ] import logging from subprocess import CalledProcessError, check_output class BaseDevice: """Common device calculation actions.""" def get_device(self): # pragma: no cover """Subclasses must override this.""" raise NotImplementedError class SystemProperty(BaseDevice): """Get the device type through system properties.""" def get_device(self): log = logging.getLogger('systemimage') try: stdout = check_output( 'getprop ro.product.device'.split(), universal_newlines=True) except CalledProcessError as error: log.exception('getprop exit status: {}'.format(error.returncode)) return '?' except FileNotFoundError as error: log.exception('getprop command not found') return '?' return stdout.strip() ./systemimage/data/0000755000015600001650000000000012701500553014333 5ustar jenkinsjenkins./systemimage/data/__init__.py0000644000015600001650000000000012701500553016432 0ustar jenkinsjenkins./systemimage/data/com.canonical.SystemImage.conf0000644000015600001650000000063112701500553022134 0ustar jenkinsjenkins ./systemimage/data/com.canonical.SystemImage.service0000644000015600001650000000013212701500553022643 0ustar jenkinsjenkins[D-BUS Service] Name=com.canonical.SystemImage Exec=/usr/sbin/system-image-dbus User=root ./systemimage/apply.py0000644000015600001650000000462312701500553015126 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Reboot issuer.""" __all__ = [ 'BaseApply', 'Noop', 'Reboot', 'factory_reset', 'production_reset', ] import os import logging from subprocess import CalledProcessError, check_call from systemimage.config import config from systemimage.helpers import atomic log = logging.getLogger('systemimage') class BaseApply: """Common apply-the-update actions.""" def apply(self): # pragma: no cover """Subclasses must override this.""" raise NotImplementedError class Reboot(BaseApply): """Apply the update by rebooting the device.""" def apply(self): try: check_call('/sbin/reboot -f recovery'.split(), universal_newlines=True) except CalledProcessError as error: log.exception('reboot exit status: {}'.format(error.returncode)) raise # This code may or may not run. We're racing against the system # reboot procedure. 
config.dbus_service.Rebooting(True) class Noop(BaseApply): """No-op apply, mostly for testing.""" def apply(self): pass def factory_reset(): """Perform a factory reset.""" command_file = os.path.join( config.updater.cache_partition, 'ubuntu_command') with atomic(command_file) as fp: print('format data', file=fp) log.info('Performing a factory reset') config.hooks.apply().apply() def production_reset(): """Perform a production reset.""" command_file = os.path.join( config.updater.cache_partition, 'ubuntu_command') with atomic(command_file) as fp: print('format data', file=fp) print('enable factory_wipe', file=fp) log.info('Performing a production factory reset') config.hooks.apply().apply() ./systemimage/main.py0000644000015600001650000004273012701500553014726 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Main script entry point.""" __all__ = [ 'main', ] import sys import json import logging import argparse from dbus.mainloop.glib import DBusGMainLoop from pkg_resources import resource_string as resource_bytes from systemimage.apply import factory_reset, production_reset from systemimage.candidates import delta_filter, full_filter, version_filter from systemimage.config import config from systemimage.helpers import ( last_update_date, makedirs, phased_percentage, version_detail) from systemimage.logging import initialize from systemimage.settings import Settings from systemimage.state import State from textwrap import dedent __version__ = resource_bytes( 'systemimage', 'version.txt').decode('utf-8').strip() DEFAULT_CONFIG_D = '/etc/system-image/config.d' COLON = ':' LINE_LENGTH = 78 class _DotsProgress: def __init__(self): self._dot_count = 0 def callback(self, received, total): received = int(received) total = int(total) sys.stderr.write('.') sys.stderr.flush() self._dot_count += 1 show_dots = self._dot_count % LINE_LENGTH == 0 if show_dots or received >= total: # pragma: no cover sys.stderr.write('\n') sys.stderr.flush() class _LogfileProgress: def __init__(self, log): self._log = log def callback(self, received, total): self._log.debug('received: {} of {} bytes', received, total) def _json_progress(received, total): # For use with --progress=json output. 
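# ----------------------------------------------------------------------
# Aside (sketch, not part of the original source): a consumer of
# --progress=json output reads one JSON object per stdout line; the
# 'error' type mirrors the JSON emitted on failure further below.
import json

def handle_progress_line(line):
    payload = json.loads(line)
    if payload.get('type') == 'progress':
        print('{now}/{total} bytes'.format(**payload))
    elif payload.get('type') == 'error':
        print('update failed: {msg}'.format(**payload))
# ----------------------------------------------------------------------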
LP: #1423622 message = json.dumps(dict( type='progress', now=received, total=total)) sys.stdout.write(message) sys.stdout.write('\n') sys.stdout.flush() def main(): parser = argparse.ArgumentParser( prog='system-image-cli', description='Ubuntu System Image Upgrader') parser.add_argument('--version', action='version', version='system-image-cli {}'.format(__version__)) parser.add_argument('-C', '--config', default=DEFAULT_CONFIG_D, action='store', metavar='DIRECTORY', help="""Use the given configuration directory instead of the default""") parser.add_argument('-b', '--build', default=None, action='store', help="""Override the current build number just this once""") parser.add_argument('-c', '--channel', default=None, action='store', help="""Override the channel just this once. Use in combination with `--build 0` to switch channels.""") parser.add_argument('-d', '--device', default=None, action='store', help='Override the device name just this once') parser.add_argument('-f', '--filter', default=None, action='store', help="""Filter the candidate paths to contain only full updates or only delta updates. The argument to this option must be either `full` or `delta`""") parser.add_argument('-m', '--maximage', default=None, type=int, help="""After the winning upgrade path is selected, remove all images with version numbers greater than the given one. If no images remain in the winning path, the device is considered up-to-date.""") parser.add_argument('-g', '--no-apply', default=False, action='store_true', help="""Download (i.e. "get") all the data files and prepare for updating, but don't actually reboot the device into recovery to apply the update""") parser.add_argument('-i', '--info', default=False, action='store_true', help="""Show some information about the current device, including the current build number, device name and channel, then exit""") parser.add_argument('-n', '--dry-run', default=False, action='store_true', help="""Calculate and print the upgrade path, but do not download or apply it""") parser.add_argument('-v', '--verbose', default=0, action='count', help='Increase verbosity') parser.add_argument('--progress', default=[], action='append', help="""Add a progress meter. Available meters are: dots, logfile, and json. Multiple --progress options are allowed.""") parser.add_argument('-p', '--percentage', default=None, action='store', help="""Override the device's phased percentage value during upgrade candidate calculation.""") parser.add_argument('--list-channels', default=False, action='store_true', help="""List all available channels, then exit""") parser.add_argument('--factory-reset', default=False, action='store_true', help="""Perform a destructive factory reset and reboot. WARNING: this will wipe all user data on the device!""") parser.add_argument('--production-reset', default=False, action='store_true', help="""Perform a destructive production reset (similar to factory reset) and reboot. WARNING: this will wipe all user data on the device!""") parser.add_argument('--switch', default=None, action='store', metavar='CHANNEL', help="""Switch to the given channel. This is equivalent to `-c CHANNEL -b 0`.""") # Settings options. parser.add_argument('--show-settings', default=False, action='store_true', help="""Show all settings as key=value pairs, then exit""") parser.add_argument('--set', default=[], action='append', metavar='KEY=VAL', help="""Set a key and value in the settings, adding the key if it doesn't yet exist, or overriding its value if the key already exists. 
Multiple --set arguments can be given.""") parser.add_argument('--get', default=[], action='append', metavar='KEY', help="""Get the value for a key. If the key does not exist, a default value is returned. Multiple --get arguments can be given.""") parser.add_argument('--del', default=[], action='append', metavar='KEY', dest='delete', help="""Delete the key and its value. It is a no-op if the key does not exist. Multiple --del arguments can be given.""") parser.add_argument('--override-gsm', default=False, action='store_true', help="""When the device is set to only download over WiFi, but is currently on GSM, use this switch to temporarily override the update restriction. This switch has no effect when using the cURL based downloader.""") # Hidden system-image-cli only feature for testing purposes. LP: #1333414 parser.add_argument('--skip-gpg-verification', default=False, action='store_true', help=argparse.SUPPRESS) args = parser.parse_args(sys.argv[1:]) try: config.load(args.config) except (TypeError, FileNotFoundError): parser.error('\nConfiguration directory not found: {}'.format( args.config)) assert 'parser.error() does not return' # pragma: no cover if args.skip_gpg_verification: print("""\ WARNING: All GPG signature verifications have been disabled. Your upgrades are INSECURE.""", file=sys.stderr) config.skip_gpg_verification = True config.override_gsm = args.override_gsm # Perform factory and production resets. if args.factory_reset: factory_reset() # We should never get here, except possibly during the testing # process, so just return as normal. return 0 if args.production_reset: production_reset() # We should never get here, except possibly during the testing # process, so just return as normal. return 0 # Handle all settings arguments. They are mutually exclusive. if sum(bool(arg) for arg in (args.set, args.get, args.delete, args.show_settings)) > 1: parser.error('Cannot mix and match settings arguments') assert 'parser.error() does not return' # pragma: no cover if args.show_settings: rows = sorted(Settings()) for row in rows: print('{}={}'.format(*row)) return 0 if args.get: settings = Settings() for key in args.get: print(settings.get(key)) return 0 if args.set: settings = Settings() for keyval in args.set: key, val = keyval.split('=', 1) settings.set(key, val) return 0 if args.delete: settings = Settings() for key in args.delete: settings.delete(key) return 0 # Sanity check -f/--filter. if args.filter is None: candidate_filter = None elif args.filter == 'full': candidate_filter = full_filter elif args.filter == 'delta': candidate_filter = delta_filter else: parser.error('Bad filter type: {}'.format(args.filter)) assert 'parser.error() does not return' # pragma: no cover # Create the temporary directory if it doesn't exist. makedirs(config.system.tempdir) # Initialize the loggers. initialize(verbosity=args.verbose) log = logging.getLogger('systemimage') # We assume the cache_partition already exists, as does the /etc directory # (i.e. where the archive master key lives). # Command line overrides. Process --switch first since if both it and # -c/-b are given, the latter take precedence. 
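# ----------------------------------------------------------------------
# Aside (sketch, not part of the original source): --show-settings,
# --set, --get, and --del above are thin wrappers around the Settings
# store; the equivalent API calls, assuming a writable settings.db:
from systemimage.settings import Settings

settings = Settings()
settings.set('auto_download', '0')
assert settings.get('auto_download') == '0'
settings.delete('auto_download')    # a no-op if the key is absent
# ----------------------------------------------------------------------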
if args.switch is not None: config.build_number = 0 config.channel = args.switch if args.build is not None: try: config.build_number = int(args.build) except ValueError: parser.error( '-b/--build requires an integer: {}'.format(args.build)) assert 'parser.error() does not return' # pragma: no cover if args.channel is not None: config.channel = args.channel if args.device is not None: config.device = args.device if args.percentage is not None: config.phase_override = args.percentage if args.info: alias = getattr(config.service, 'channel_target', None) kws = dict( build_number=config.build_number, device=config.device, channel=config.channel, last_update=last_update_date(), ) if alias is None: template = """\ current build number: {build_number} device name: {device} channel: {channel} last update: {last_update}""" else: template = """\ current build number: {build_number} device name: {device} channel: {channel} alias: {alias} last update: {last_update}""" kws['alias'] = alias print(dedent(template).format(**kws)) # If there's additional version details, print this out now too. We # sort the keys in reverse order because we want 'ubuntu' to generally # come first. details = version_detail() for key in sorted(details, reverse=True): print('version {}: {}'.format(key, details[key])) return 0 DBusGMainLoop(set_as_default=True) if args.list_channels: state = State() try: state.run_thru('get_channel') except Exception: print('Exception occurred during channel search; ' 'see log file for details', file=sys.stderr) log.exception('system-image-cli exception') return 1 print('Available channels:') for key in sorted(state.channels): alias = state.channels[key].get('alias') if alias is None: print(' {}'.format(key)) else: print(' {} (alias for: {})'.format(key, alias)) return 0 state = State() state.candidate_filter = candidate_filter if args.maximage is not None: state.winner_filter = version_filter(args.maximage) for meter in args.progress: if meter == 'dots': state.downloader.callbacks.append(_DotsProgress().callback) elif meter == 'json': state.downloader.callbacks.append(_json_progress) elif meter == 'logfile': state.downloader.callbacks.append(_LogfileProgress(log).callback) else: parser.error('Unknown progress meter: {}'.format(meter)) assert 'parser.error() does not return' # pragma: no cover if args.dry_run: try: state.run_until('download_files') except Exception: print('Exception occurred during dry-run; ' 'see log file for details', file=sys.stderr) log.exception('system-image-cli exception') return 1 # Say -c was given. This will fail. if state.winner is None or len(state.winner) == 0: print('Already up-to-date') else: winning_path = [str(image.version) for image in state.winner] kws = dict(path=COLON.join(winning_path)) target_build = state.winner[-1].version if state.channel_switch is None: # We're not switching channels due to an alias change. template = 'Upgrade path is {path}' percentage = phased_percentage(config.channel, target_build) else: # This upgrade changes the channel that our alias is mapped # to, so include that information in the output. template = 'Upgrade path is {path} ({from} -> {to})' kws['from'], kws['to'] = state.channel_switch percentage = phased_percentage(kws['to'], target_build) print(template.format(**kws)) print('Target phase: {}%'.format(percentage)) return 0 else: # Run the state machine to conclusion. Suppress all exceptions, but # note that the state machine will log them. If an exception occurs, # exit with a non-zero status. 
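# ----------------------------------------------------------------------
# Aside (sketch, not part of the original source): the State machine
# can be run to conclusion with list(state) or stepped by named state,
# as the branches above and below do; assuming a loaded configuration:
from systemimage.state import State

state = State()
state.run_thru('get_channel')       # run through the named step...
state.run_until('download_files')   # ...or stop just short of one
# ----------------------------------------------------------------------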
log.info('running state machine [{}/{}]', config.channel, config.device) try: if args.no_apply: state.run_until('apply') else: list(state) except KeyboardInterrupt: # pragma: no cover return 0 except Exception as error: print('Exception occurred during update; see log file for details', file=sys.stderr) log.exception('system-image-cli exception') # This is a little bit of a hack because it's not generalized to # all values of --progress. But OTOH, we always want to log the # error, so --progress=logfile is redundant, and --progress=dots # doesn't make much sense either. Just just include some JSON # output if --progress=json was specified. if 'json' in args.progress: print(json.dumps(dict(type='error', msg=str(error)))) return 1 else: return 0 finally: log.info('state machine finished') if __name__ == '__main__': # pragma: no cover sys.exit(main()) ./systemimage/download.py0000644000015600001650000002430712701500553015611 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Download files.""" __all__ = [ 'Canceled', 'DuplicateDestinationError', 'Record', 'get_download_manager', ] import os import dbus import logging from collections import namedtuple from io import StringIO from pprint import pformat try: import pycurl except ImportError: # pragma: no cover pycurl = None log = logging.getLogger('systemimage') class Canceled(Exception): """Raised when the download was canceled.""" class DuplicateDestinationError(Exception): """Raised when two files are downloaded to the same destination.""" def __init__(self, duplicates): super().__init__() self.duplicates = duplicates def __str__(self): return '\n' + pformat(self.duplicates, indent=4, width=79) # A namedtuple is convenient here since we want to access items by their # attribute names. However, we also want to allow for the checksum to default # to the empty string. We do this by creating a prototypical record type and # using _replace() to replace non-default values. See the namedtuple # documentation for details. _Record = namedtuple('Record', 'url destination checksum')('', '', '') _RecordType = type(_Record) def Record(url, destination, checksum=''): return _Record._replace( url=url, destination=destination, checksum=checksum) class DownloadManagerBase: """Base class for all download managers.""" def __init__(self): """ :param callback: If given, a function that is called every so often during downloading. :type callback: A function that takes two arguments, the number of bytes received so far, and the total amount of bytes to be downloaded. """ # This is a list of functions that are called every so often during # downloading. Functions in this list take two arguments, the number # of bytes received so far, and the total amount of bytes to be # downloaded. 
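# ----------------------------------------------------------------------
# Aside (sketch, not part of the original source): a conforming
# progress callback takes (received, total) byte counts; e.g. a
# percentage printer that could be appended to the callbacks list:
def percent_progress(received, total):
    percent = int(received / total * 100) if total else 0
    print('\rdownloaded: {:3d}%'.format(percent), end='', flush=True)
# ----------------------------------------------------------------------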
self.callbacks = [] self.total = 0 self.received = 0 self._queued_cancel = False def __repr__(self): # pragma: no cover return '<{} at 0x{:x}>'.format(self.__class__.__name__, id(self)) def _get_download_records(self, downloads): """Convert the downloads items to download records.""" records = [item if isinstance(item, _RecordType) else Record(*item) for item in downloads] destinations = set(record.destination for record in records) # Check for duplicate destinations, specifically for a local file path # coming from two different sources. It's okay if there are duplicate # destination records in the download request, but each of those must # be specified by the same source url and have the same checksum. # # An easy quick check just asks if the set of destinations is smaller # than the total number of requested downloads. It can't be larger. # If it *is* smaller, then there are some duplicates, however the # duplicates may be legitimate, so look at the details. # # Note though that we cannot pass duplicates destinations to udm, so we # have to filter out legitimate duplicates. That's fine since they # really are pointing to the same file, and will end up in the # destination location. if len(destinations) < len(downloads): by_destination = dict() unique_downloads = set() for record in records: by_destination.setdefault(record.destination, set()).add( record) unique_downloads.add(record) duplicates = [] for dst, seen in by_destination.items(): if len(seen) > 1: # Tuples will look better in the pretty-printed output. duplicates.append( (dst, sorted(tuple(dup) for dup in seen))) if len(duplicates) > 0: raise DuplicateDestinationError(sorted(duplicates)) # Uniquify the downloads. records = list(unique_downloads) return records def _do_callback(self): # Be defensive, so yes, use a bare except. If an exception occurs in # the callback, log it, but continue onward. for callback in self.callbacks: try: callback(self.received, self.total) except: log.exception('Exception in progress callback') def cancel(self): """Cancel any current downloads.""" self._queued_cancel = True def pause(self): """Pause the download, but only if one is in progress.""" pass # pragma: no cover def resume(self): """Resume the download, but only if one is in progress.""" pass # pragma: no cover def _get_files(self, records, pausable, signal_started): raise NotImplementedError # pragma: no cover def get_files(self, downloads, *, pausable=False, signal_started=False): """Download a bunch of files concurrently. Occasionally, the callback is called to report on progress. This function blocks until all files have been downloaded or an exception occurs. In the latter case, the download directory will be cleared of the files that succeeded and the exception will be re-raised. This means that 1) the function blocks until all files are downloaded, but at least we do that concurrently; 2) this is an all-or-nothing function. Either you get all the requested files or none of them. :params downloads: A list of `download records`, each of which may either be a 2-tuple where the first item is the url to download, and the second item is the destination file, or an instance of a `Record` namedtuple with attributes `url`, `destination`, and `checksum`. The checksum may be the empty string. :type downloads: List of 2-tuples or `Record`s. :param pausable: A flag specifying whether this download can be paused or not. In general, data file downloads are pausable, but preliminary downloads are not. 
:type pausable: bool :param signal_started: A flag indicating whether the D-Bus DownloadStarted signal should be sent once the download has started. Normally this is False, but it should be set to True when the update files are being downloaded (i.e. not for the metadata files). :type signal_started: bool :raises: FileNotFoundError if any download error occurred. In this case, all download files are deleted. :raises: DuplicateDestinationError if more than one source url is downloaded to the same destination file. """ if self._queued_cancel: # A cancel is queued, so don't actually download anything. raise Canceled if len(downloads) == 0: # Nothing to download. See LP: #1245597. return records = self._get_download_records(downloads) # Better logging of the requested downloads. However, we want the # entire block of multiline log output to appear under a single # timestamp. fp = StringIO() print('[0x{:x}] Requesting group download:'.format(id(self)), file=fp) for record in records: if record.checksum == '': print('\t{} -> {}'.format(*record[:2]), file=fp) else: print('\t{} [{}] -> {}'.format(*record), file=fp) log.info('{}'.format(fp.getvalue())) self._get_files(records, pausable, signal_started) @staticmethod def allow_gsm(): """Allow downloads on GSM. This is a temporary override for the `auto_download` setting. If a download was attempted on wifi-only and not started because the device is on GSM, calling this issues a temporary override to allow downloads while on GSM, for download managers that support this (currently only UDM). """ pass # pragma: no cover def get_download_manager(*args): # We have to avoid circular imports since both download managers import # various things from this module. from systemimage.curl import CurlDownloadManager from systemimage.udm import DOWNLOADER_INTERFACE, UDMDownloadManager # Detect if we have ubuntu-download-manager. # # Use PyCURL based downloader if no udm is found, or if the environment # variable is set. However, if we're told to use PyCURL and it's # unavailable, throw an exception. cls = None use_pycurl = os.environ.get('SYSTEMIMAGE_PYCURL') if use_pycurl is None: # Auto-detect. For backward compatibility, use udm if it's available, # otherwise use PyCURL. try: bus = dbus.SystemBus() bus.get_object(DOWNLOADER_INTERFACE, '/') udm_available = True except dbus.exceptions.DBusException: udm_available = False if udm_available: cls = UDMDownloadManager elif pycurl is None: raise ImportError('No module named {}'.format('pycurl')) else: cls = CurlDownloadManager else: cls = (CurlDownloadManager if use_pycurl.lower() in ('1', 'yes', 'true') else UDMDownloadManager) return cls(*args) ./systemimage/helpers.py0000644000015600001650000002355512701500553015450 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
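# ----------------------------------------------------------------------
# Aside (sketch, not part of the original source): the downloader
# choice made by get_download_manager() above, as a decision table.
# SYSTEMIMAGE_PYCURL forces the answer; otherwise udm is preferred
# whenever it is reachable on the system bus.
import os

def wants_pycurl():
    value = os.environ.get('SYSTEMIMAGE_PYCURL')
    if value is None:
        return None                 # auto-detect via D-Bus
    return value.lower() in ('1', 'yes', 'true')
# ----------------------------------------------------------------------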
"""Various and sundry helpers.""" __all__ = [ 'DEFAULT_DIRMODE', 'MiB', 'as_loglevel', 'as_object', 'as_port', 'as_stripped', 'as_timedelta', 'atomic', 'calculate_signature', 'last_update_date', 'makedirs', 'phased_percentage', 'safe_remove', 'temporary_directory', 'version_detail', ] import os import re import random import shutil import logging import tempfile from contextlib import ExitStack, contextmanager, suppress from datetime import datetime, timedelta from hashlib import sha256 from importlib import import_module UNIQUE_MACHINE_ID_FILES = ['/var/lib/dbus/machine-id', '/etc/machine-id'] LAST_UPDATE_FILE = '/userdata/.last_update' DEFAULT_DIRMODE = 0o02700 MiB = 1 << 20 EMPTYSTRING = '' NO_PORT = object() def calculate_signature(fp, hash_class=None): """Calculate the hex digest hash signature for a file stream. :param fp: The open file object. This function will read the entire contents of the file, leaving the file pointer at the end. It is the responsibility of the caller to both open and close the file. :type fp: File-like object with `.read(count)` method. :param hash_class: The hash class to use. Defaults to `hashlib.sha256`. :type hash_class: Object having both `.update(bytes)` and `.hexdigest()` methods. :return: The hex digest of the contents of the file. :rtype: str """ checksum = (sha256 if hash_class is None else hash_class)() while True: chunk = fp.read(MiB) if not chunk: break checksum.update(chunk) return checksum.hexdigest() def safe_remove(path): """Like os.remove() but don't complain if the file doesn't exist.""" try: os.remove(path) except (FileNotFoundError, IsADirectoryError, PermissionError): pass @contextmanager def atomic(dst, encoding='utf-8'): """Open a temporary file for writing using the given encoding. The context manager returns an open file object, into which you can write text or bytes depending on the encoding it was opened with. Upon exit, the temporary file is moved atomically to the destination. If an exception occurs, the temporary file is removed. :param dst: The path name of the target file. :param encoding: The encoding to use for the open file. If None, then file is opened in binary mode. """ directory = os.path.dirname(dst) fd, temp = tempfile.mkstemp(dir=directory) with ExitStack() as stack: stack.callback(safe_remove, temp) os.close(fd) mode = 'wb' if encoding is None else 'wt' with open(temp, mode, encoding=encoding) as fp: yield fp os.rename(temp, dst) # This is stolen directly out of lazr.config. We can do that since we own # both code bases. :) def _sortkey(item): """Return a value that sorted(..., key=_sortkey) can use.""" order = dict( w=0, # weeks d=1, # days h=2, # hours m=3, # minutes s=4, # seconds ) return order.get(item[-1]) class _Called: # Defer importing named object until it's actually called. This should # reduce the instances of circular imports. def __init__(self, path): self._path, dot, self._name = path.rpartition('.') if dot != '.': raise ValueError def _dig(self): module = import_module(self._path) return getattr(module, self._name) def __call__(self, *args, **kws): return self._dig()(*args, **kws) def __eq__(self, other): # Let class equality (and in-equality) work. myself = self._dig() return myself == other def __ne__(self, other): return not self.__eq__(other) def as_object(value): """Convert a Python dotted-path specification to an object. :param value: A dotted-path specification, e.g. 
the string `systemimage.scores.WeightedScorer` :return: A proxy object that when called, performs the import and calls the underyling object. :raises ValueError: when `value` is not dotted. """ return _Called(value) def as_timedelta(value): """Convert a value string to the equivalent timedelta.""" # Technically, the regex will match multiple decimal points in the # left-hand side, but that's okay because the float/int conversion below # will properly complain if there's more than one dot. components = sorted(re.findall(r'([\d.]+[smhdw])', value), key=_sortkey) # Complain if the components are out of order. if EMPTYSTRING.join(components) != value: raise ValueError keywords = dict((interval[0].lower(), interval) for interval in ('weeks', 'days', 'hours', 'minutes', 'seconds')) keyword_arguments = {} for interval in components: assert len(interval) > 0, 'Unexpected value: {}'.format(interval) keyword = keywords[interval[-1].lower()] if keyword in keyword_arguments: raise ValueError if '.' in interval[:-1]: converted = float(interval[:-1]) else: converted = int(interval[:-1]) keyword_arguments[keyword] = converted if len(keyword_arguments) == 0: raise ValueError return timedelta(**keyword_arguments) def as_loglevel(value): # The value can now be a single name, like "info" or two names separated # by a colon, such as "info:debug". In the later case, the second name is # used to initialize the systemimage.dbus logger. In the former case, the # dbus logger defaults to 'error'. main, colon, dbus = value.upper().partition(':') if len(dbus) == 0: dbus = 'ERROR' main_level = getattr(logging, main, None) if main_level is None or not isinstance(main_level, int): raise ValueError(value) dbus_level = getattr(logging, dbus, None) if dbus_level is None or not isinstance(dbus_level, int): raise ValueError(value) return main_level, dbus_level def as_port(value): if value.lower() in ('disabled', 'disable'): return NO_PORT result = int(value) if result < 0: raise ValueError(value) return result def as_stripped(value): return value.strip() @contextmanager def temporary_directory(*args, **kws): """A context manager that creates a temporary directory. The directory and all its contents are deleted when the context manager exits. All positional and keyword arguments are passed to mkdtemp(). """ tempdir = tempfile.mkdtemp(*args, **kws) os.chmod(tempdir, kws.get('mode', DEFAULT_DIRMODE)) try: yield tempdir finally: try: shutil.rmtree(tempdir) except FileNotFoundError: pass def makedirs(dir, mode=DEFAULT_DIRMODE): os.makedirs(dir, mode=mode, exist_ok=True) def last_update_date(): """Return the last update date. If /userdata/.last_update exists, we use this file's mtime. If it doesn't exist, then we use the latest mtime of any of the files in /etc/system-image/config.d/*.ini (or whatever directory was given with the -C/--config option). """ # Avoid circular imports. from systemimage.config import config try: timestamp = datetime.fromtimestamp(os.stat(LAST_UPDATE_FILE).st_mtime) except (FileNotFoundError, PermissionError): # We fall back to the latest mtime of the config.d/*.ini files. For # robustness, watch out for two possibilities: the config file could # have been deleted after the system started up (thus making # config.ini_files include nonexistent files), and the ini file could # be a dangling symlink. For the latter, use lstat(). 
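# ----------------------------------------------------------------------
# Aside (illustrative, not part of the original source): usage sketches
# for the converters defined above (names as defined in this module):
import logging
from datetime import timedelta

assert as_timedelta('1h') == timedelta(hours=1)
assert as_timedelta('2d12h') == timedelta(days=2, hours=12)
assert as_loglevel('info:debug') == (logging.INFO, logging.DEBUG)
assert as_port('disabled') is NO_PORT
# ----------------------------------------------------------------------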
timestamps = [] for path in config.ini_files: with suppress(FileNotFoundError): timestamps.append( datetime.fromtimestamp(path.lstat().st_mtime)) if len(timestamps) == 0: return 'Unknown' timestamp = sorted(timestamps)[-1] return str(timestamp.replace(microsecond=0)) def version_detail(details_string=None): """Return a dictionary of the version details.""" # Avoid circular imports. if details_string is None: from systemimage.config import config details_string = getattr(config.service, 'version_detail', None) if details_string is None: return {} details = {} for item in details_string.strip().split(','): name, equals, version = item.partition('=') if equals != '=': continue details[name] = version return details def phased_percentage(channel, target): # Avoid circular imports. from systemimage.config import config if config.phase_override is not None: return config.phase_override for path in UNIQUE_MACHINE_ID_FILES: try: with open(path, 'r', encoding='utf-8') as fp: machine_id = fp.read().strip() break # pragma: no branch except FileNotFoundError: pass else: raise RuntimeError('No machine-id file found') r = random.Random() r.seed('{}.{}.{}'.format(channel, target, machine_id)) return r.randint(0, 100) ./systemimage/image.py0000644000015600001650000000607712701500553015070 0ustar jenkinsjenkins# Copyright (C) 2013-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """Images are like Bags but are hashable and sortable.""" __all__ = [ 'Image', ] from systemimage.bag import Bag COMMASPACE = ', ' class Image(Bag): def __init__(self, **kws): converters = {'phased-percentage': int} super().__init__(converters=converters, **kws) def __hash__(self): # BAW 2013-04-30: We don't currently enforce immutability of attribute # values. See Bag.__init__(). # # Full images must be unique on the version, but delta images are # unique on the version and base. We need to turn these two values # into a hash of no more than 32 bits. This is because Python's # built-in hash() method truncates __hash__()'s return value to # Py_ssize_t which on the phone hardware (and i386 as in the buildds) # is 32 bits. # # You can verifiy this with the following bit of Python: # # $ python3 -c "from ctypes import *; print(sizeof(c_ssize_t))" # # Use a base of 0 for full images. base = self.base if self.type == 'delta' else 0 assert ((0 <= base < (1 << 16)) and (0 <= self.version < (1 << 16))), ( '16 bit unsigned version numbers only') # LP: #1218612 introduces a new version number regime, starting # sequentially at 1. We still have the 32 bit limit on hashes, but # now we don't have to play games with the content, giving us 65k new # versions before we have to worry about running out of bits. We # still have to fit two version numbers (version and base for deltas) # into those 32 bits, thus version numbers bigger than 16 bits are not # supported. Still, even if we release 10 images every day, that # gives us nearly 17 years of running room. I sure hope we'll have 64 # bit phones by then. 
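# ----------------------------------------------------------------------
# Aside (sketch, not part of the original source): the 32-bit packing
# described above, in isolation.  Full images use base=0:
def pack_versions(version, base=0):
    assert 0 <= base < (1 << 16) and 0 <= version < (1 << 16)
    return (version << 16) + base

assert pack_versions(301, 300) >> 16 == 301         # recover version
assert pack_versions(301, 300) & 0xFFFF == 300      # recover base
# ----------------------------------------------------------------------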
return (self.version << 16) + base def __eq__(self, other): return hash(self) == hash(other) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): # pragma: no cover return '<Image: {}>'.format(COMMASPACE.join(sorted( key for key in self.__dict__ if not key.startswith('_')))) @property def phased_percentage(self): return self.__untranslated__.get('phased-percentage', 100) @property def version_detail(self): return self.__untranslated__.get('version_detail', '') ./systemimage/curl.py0000644000015600001650000002665612701500553014750 0ustar jenkinsjenkins# Copyright (C) 2014-2016 Canonical Ltd. # Author: Barry Warsaw # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Download files via PyCURL.""" __all__ = [ 'CurlDownloadManager', ] import pycurl import hashlib import logging from contextlib import ExitStack from gi.repository import GLib from systemimage.config import config from systemimage.download import Canceled, DownloadManagerBase log = logging.getLogger('systemimage') # Some cURL defaults. XXX pull these out of the configuration file. CONNECTION_TIMEOUT = 120 # seconds LOW_SPEED_LIMIT = 10 LOW_SPEED_TIME = 120 # seconds MAX_REDIRECTS = 5 MAX_TOTAL_CONNECTIONS = 4 SELECT_TIMEOUT = 0.05 # 20fps def _curl_debug(debug_type, debug_msg): # pragma: no cover from systemimage.testing.helpers import debug with debug(end='') as ddlog: ddlog('PYCURL:', debug_type, debug_msg) def make_testable(c): # The test suite needs to make the PyCURL object accept the testing # server's self-signed certificate. It will mock this function. pass # pragma: no cover class SingleDownload: def __init__(self, record): self.url, self.destination, self.expected_checksum = record self._checksum = None self._fp = None self._resources = ExitStack() def make_handle(self, *, HEAD): # If we're doing GET, record some more information. if not HEAD: self._checksum = hashlib.sha256() # Create the basic PyCURL object. c = pycurl.Curl() # Set the common options. c.setopt(pycurl.URL, self.url) c.setopt(pycurl.USERAGENT, config.user_agent) # If we're doing a HEAD, then we don't want the body of the # file. Otherwise, set things up to write the body data to the # destination file. if HEAD: c.setopt(pycurl.NOBODY, 1) else: c.setopt(pycurl.WRITEDATA, self) self._fp = self._resources.enter_context( open(self.destination, 'wb')) # Set some limits. XXX Pull these out of the configuration files. c.setopt(pycurl.FOLLOWLOCATION, 1) c.setopt(pycurl.MAXREDIRS, MAX_REDIRECTS) c.setopt(pycurl.CONNECTTIMEOUT, CONNECTION_TIMEOUT) # If the average transfer speed is below 10 bytes per second for 2 # minutes, libcurl will consider the connection too slow and abort. ## c.setopt(pycurl.LOW_SPEED_LIMIT, LOW_SPEED_LIMIT) ## c.setopt(pycurl.LOW_SPEED_TIME, LOW_SPEED_TIME) # Fail on error codes >= 400. c.setopt(pycurl.FAILONERROR, 1) # Switch off the libcurl progress meters. The multi that uses # this handle will set the transfer info function.
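# ----------------------------------------------------------------------
# Aside (sketch, not part of the original source): the HEAD pass sizes
# up a download before any body bytes are fetched; a single-handle
# equivalent of what the multi below does in bulk:
import pycurl

def content_length(url):
    c = pycurl.Curl()
    c.setopt(pycurl.URL, url)
    c.setopt(pycurl.NOBODY, 1)
    try:
        c.perform()
        # -1 when the server did not report a length.
        return int(c.getinfo(pycurl.CONTENT_LENGTH_DOWNLOAD))
    finally:
        c.close()
# ----------------------------------------------------------------------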
c.setopt(pycurl.NOPROGRESS, 1) # ssl: no need to set SSL_VERIFYPEER, SSL_VERIFYHOST, CAINFO # they all use sensible defaults # # Enable debugging. self._make_debuggable(c) # For the test suite. make_testable(c) return c def _make_debuggable(self, c): """Add some additional debugging options.""" ## c.setopt(pycurl.VERBOSE, 1) ## c.setopt(pycurl.DEBUGFUNCTION, _curl_debug) pass def write(self, data): """Update the checksum and write the data out to the file.""" self._checksum.update(data) self._fp.write(data) # Returning None implies that all bytes were written # successfully, so it's better to be explicit. return None def close(self): self._resources.close() @property def checksum(self): # If no checksum was expected, pretend none was gotten. This # makes the verification step below a wee bit simpler. if self.expected_checksum == '': return '' return self._checksum.hexdigest() class CurlDownloadManager(DownloadManagerBase): """The PyCURL based download manager.""" def __init__(self, callback=None): super().__init__() if callback is not None: self.callbacks.append(callback) self._pausables = [] self._paused = False def _get_files(self, records, pausable, signal_started): # Start by doing a HEAD on all the URLs so that we can get the total # target download size in bytes, at least as best as is possible. with ExitStack() as resources: handles = [] multi = pycurl.CurlMulti() multi.setopt( pycurl.M_MAX_TOTAL_CONNECTIONS, MAX_TOTAL_CONNECTIONS) for record in records: download = SingleDownload(record) resources.callback(download.close) handle = download.make_handle(HEAD=True) handles.append(handle) multi.add_handle(handle) # .add_handle() does not bump the reference count, so we # need to keep the PyCURL object alive for the duration # of this download. resources.callback(multi.remove_handle, handle) self._perform(multi, handles) self.total = sum( handle.getinfo(pycurl.CONTENT_LENGTH_DOWNLOAD) for handle in handles) # Now do a GET on all the URLs. This will write the data to the # destination file and collect the checksums. if signal_started and config.dbus_service is not None: config.dbus_service.DownloadStarted() with ExitStack() as resources: resources.callback(setattr, self, '_handles', None) downloads = [] multi = pycurl.CurlMulti() multi.setopt( pycurl.M_MAX_TOTAL_CONNECTIONS, MAX_TOTAL_CONNECTIONS) for record in records: download = SingleDownload(record) downloads.append(download) resources.callback(download.close) handle = download.make_handle(HEAD=False) self._pausables.append(handle) multi.add_handle(handle) # .add_handle() does not bump the reference count, so we # need to keep the PyCURL object alive for the duration # of this download. resources.callback(multi.remove_handle, handle) self._perform(multi, self._pausables) # Verify internally calculated checksums. The API requires # a FileNotFoundError to be raised when they don't match. # Since it doesn't matter which one fails, log them all and # raise the first one. first_mismatch = None for download in downloads: if download.checksum != download.expected_checksum: log.error('Checksum mismatch. got:{} != exp:{}: {}', download.checksum, download.expected_checksum, download.destination) if first_mismatch is None: first_mismatch = download if first_mismatch is not None: # For backward compatibility with ubuntu-download_manager. 
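# ----------------------------------------------------------------------
# Aside (sketch, not part of the original source): the all-or-nothing
# checksum policy above -- log every mismatch, then report only the
# first -- reduced to a helper over objects carrying .checksum,
# .expected_checksum, and .destination:
def first_checksum_mismatch(downloads):
    first = None
    for download in downloads:
        if download.checksum != download.expected_checksum:
            print('checksum mismatch:', download.destination)
            if first is None:
                first = download
    return first
# ----------------------------------------------------------------------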
./systemimage/version.txt0000644000015600001650000000000412701500553015642 0ustar jenkinsjenkins
3.1
./systemimage/settings.py0000644000015600001650000000750212701500553015640 0ustar jenkinsjenkins
# Copyright (C) 2013-2016 Canonical Ltd.
# Author: Barry Warsaw <barry@ubuntu.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""Persistent settings - used by the DBus API."""

__all__ = [
    'Settings',
    ]


import sqlite3

from contextlib import contextmanager
from pathlib import Path
from systemimage.config import config
from xdg.BaseDirectory import xdg_cache_home

SCHEMA_VERSION = '1'
AUTO_DOWNLOAD_DEFAULT = '1'


class Settings:
    def __init__(self, use_config=None):
        self._use_config = use_config
        # If the database file does not yet exist, create it.  This could
        # fail, as LP: #1349478 describes, if the parent directory
        # containing settings.db is not writable by the process.  In that
        # case, fall back to a user path.
        self._dbpath = None
        try:
            with self._cursor():        # pragma: no branch
                pass
        except sqlite3.OperationalError:
            self._check_fallback()
        with self._cursor() as c:
            c.execute('select tbl_name from sqlite_master')
            if len(c.fetchall()) == 0:
                # The database file has no tables.
                c.execute('create table settings (key, value)')
                # Hopefully we won't ever need to migrate this schema, but
                # just in case we do, set a version value.
                c.execute('insert into settings values ("__version__", ?)',
                          (SCHEMA_VERSION,))

    def _check_fallback(self):
        # This is refactored into a separate method for testing purposes.
        self._dbpath = Path(xdg_cache_home) / 'lib' / 'settings.db'
        try:
            self._dbpath.parent.mkdir(parents=True)
        except FileExistsError:
            # http://bugs.python.org/issue21539
            pass
        with self._cursor():
            pass

    @contextmanager
    def _cursor(self):
        if self._dbpath is None:
            self._dbpath = (config.system.settings_db
                            if self._use_config is None
                            else self._use_config.system.settings_db)
        with sqlite3.connect(str(self._dbpath)) as conn:
            yield conn.cursor()

    def set(self, key, value):
        with self._cursor() as c:
            c.execute('select value from settings where key = ?', (key,))
            row = c.fetchone()
            if row is None:
                c.execute('insert into settings values (?, ?)',
                          (key, value))
            else:
                c.execute('update settings set value = ? where key = ?',
                          (value, key))

    def get(self, key):
        with self._cursor() as c:
            c.execute('select value from settings where key = ?', (key,))
            row = c.fetchone()
            if row is None:
                if key == 'auto_download':
                    return AUTO_DOWNLOAD_DEFAULT
                return ''
            return row[0]

    def delete(self, key):
        with self._cursor() as c:
            c.execute('delete from settings where key = ?', (key,))

    def __iter__(self):
        # Iterate over all rows, ignoring implementation details.
        with self._cursor() as c:
            for row in c.execute('select * from settings'):
                if not row[0].startswith('_'):
                    yield row
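
# A brief usage sketch (not part of this module), grounded in the methods
# above.  Values round-trip as strings; unknown keys return '' except for
# 'auto_download', which falls back to AUTO_DOWNLOAD_DEFAULT.  A loaded
# config pointing at a writable settings.db is assumed.

def _example_settings_usage():
    settings = Settings()
    settings.set('failures', '3')
    assert settings.get('failures') == '3'
    # Unknown keys return the empty string...
    assert settings.get('no_such_key') == ''
    # ...except auto_download, which has a built-in default.
    assert settings.get('auto_download') == AUTO_DOWNLOAD_DEFAULT
    # Iteration skips private keys such as __version__.
    for key, value in settings:
        print(key, '=', value)
    settings.delete('failures')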
./systemimage/service.py0000644000015600001650000001123412701500553015435 0ustar jenkinsjenkins
# Copyright (C) 2013-2016 Canonical Ltd.
# Author: Barry Warsaw <barry@ubuntu.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""DBus service main entry point."""

__all__ = [
    'main',
    ]


import sys
import dbus
import logging
import argparse

from contextlib import ExitStack
from dbus.mainloop.glib import DBusGMainLoop
from pkg_resources import resource_string as resource_bytes
from systemimage.config import config
from systemimage.dbus import Loop
from systemimage.helpers import makedirs
from systemimage.logging import initialize
from systemimage.main import DEFAULT_CONFIG_D

# --testing is only enabled when the systemimage.testing package is
# available.  This will be the case for the upstream source package, and
# when the systemimage-dev binary package is installed in Ubuntu.
try:
    from systemimage.testing.dbus import instrument, get_service
except ImportError:     # pragma: no cover
    instrument = None
    get_service = None

__version__ = resource_bytes(
    'systemimage', 'version.txt').decode('utf-8').strip()
"""DBus service main entry point.""" __all__ = [ 'main', ] import sys import dbus import logging import argparse from contextlib import ExitStack from dbus.mainloop.glib import DBusGMainLoop from pkg_resources import resource_string as resource_bytes from systemimage.config import config from systemimage.dbus import Loop from systemimage.helpers import makedirs from systemimage.logging import initialize from systemimage.main import DEFAULT_CONFIG_D # --testing is only enabled when the systemimage.testing package is # available. This will be the case for the upstream source package, and when # the systemimage-dev binary package is installed in Ubuntu. try: from systemimage.testing.dbus import instrument, get_service except ImportError: # pragma: no cover instrument = None get_service = None __version__ = resource_bytes( 'systemimage', 'version.txt').decode('utf-8').strip() def main(): # If enabled, start code coverage collection as early as possible. # Parse arguments. parser = argparse.ArgumentParser( prog='system-image-dbus', description='Ubuntu System Image Upgrader DBus service') parser.add_argument('--version', action='version', version='system-image-dbus {}'.format(__version__)) parser.add_argument('-C', '--config', default=DEFAULT_CONFIG_D, action='store', metavar='DIRECTORY', help="""Use the given configuration directory instead of the default""") parser.add_argument('-v', '--verbose', default=0, action='count', help='Increase verbosity') # Hidden argument for special setup required by test environment. if instrument is not None: # pragma: no branch parser.add_argument('--testing', default=None, action='store', help=argparse.SUPPRESS) parser.add_argument('--self-signed-cert', default=None, action='store', help=argparse.SUPPRESS) args = parser.parse_args(sys.argv[1:]) try: config.load(args.config) except TypeError as error: parser.error('\nConfiguration directory not found: {}'.format(error)) assert 'parser.error() does not return' # pragma: no cover # Create the temporary directory if it doesn't exist. makedirs(config.system.tempdir) # Initialize the loggers. initialize(verbosity=args.verbose) log = logging.getLogger('systemimage') DBusGMainLoop(set_as_default=True) system_bus = dbus.SystemBus() # Ensure we're the only owner of this bus name. code = system_bus.request_name( 'com.canonical.SystemImage', dbus.bus.NAME_FLAG_DO_NOT_QUEUE) if code == dbus.bus.REQUEST_NAME_REPLY_EXISTS: # Another instance already owns this name. Exit. 
./PKG-INFO0000644000015600001650000000034312701500553012170 0ustar jenkinsjenkins
Metadata-Version: 1.0
Name: system-image
Version: 3.1
Summary: Ubuntu System Image Based Upgrades
Home-page: UNKNOWN
Author: Barry Warsaw
Author-email: barry@ubuntu.com
License: GNU GPLv3
Description: UNKNOWN
Platform: UNKNOWN
./tox.ini0000644000015600001650000000160612701500553012411 0ustar jenkinsjenkins
[tox]
envlist = {py34,py35,coverage}-{udm,curl}
recreate = True
skip_missing_interpreters = True

[coverage]
rcfile = {toxinidir}/{envname}.ini
rc = --rcfile={[coverage]rcfile}
dir = --directory={envname}

[testenv]
commands =
    py34: python -m nose2 -v
    py35: python -m nose2 -v
    coverage: python -m coverage run {[coverage]rc} -m nose2 -v
    coverage: python -m coverage combine {[coverage]rc}
    coverage: python -m coverage html {[coverage]rc} {[coverage]dir}
    coverage: python -m coverage report -m {[coverage]rc}
sitepackages = True
deps =
    coverage: coverage
indexserver =
    default = http://missing.example.com
usedevelop = True
setenv =
    SYSTEMIMAGE_REACTOR_TIMEOUT=60
    coverage: COVERAGE_PROCESS_START={[coverage]rcfile}
    coverage: COVERAGE_OPTIONS="-p"
    coverage: COVERAGE_FILE={toxinidir}/.coverage
    udm: SYSTEMIMAGE_PYCURL=0
    curl: SYSTEMIMAGE_PYCURL=1
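
# A hedged sketch of how the udm/curl tox factors above take effect: the two
# environments differ only in SYSTEMIMAGE_PYCURL, which selects the download
# backend the tests exercise.  The real switch lives elsewhere in the
# package; this simplified version only illustrates the idea, and the udm
# module path and UDMDownloadManager class name are assumptions.

import os

def pick_download_manager():
    if os.environ.get('SYSTEMIMAGE_PYCURL') == '1':
        from systemimage.curl import CurlDownloadManager
        return CurlDownloadManager()
    else:
        from systemimage.udm import UDMDownloadManager     # assumed name
        return UDMDownloadManager()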