ironic-lib-6.2.0/.stestr.conf
=============================

::

    [DEFAULT]
    test_path=${TESTS_DIR:-./ironic_lib/tests}
    top_dir=./
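The stanza above is the stestr test-runner configuration: ``test_path`` points
test discovery at the unit tests, and the ``TESTS_DIR`` environment variable it
references can override that location. A minimal sketch of driving the runner
by hand, assuming ``stestr`` is installed in the current environment (the
project's tox targets normally invoke it for you) and using
``ironic_lib.tests.test_utils`` purely as an illustrative regex filter::

    $ pip install stestr
    # Run the whole suite discovered under ./ironic_lib/tests
    $ stestr run
    # Run a subset by regex, pointing discovery at an explicit directory
    $ TESTS_DIR=./ironic_lib/tests stestr run ironic_lib.tests.test_utils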
ironic-lib-6.2.0/AUTHORS
========================

119Vik Adam Gandelman Alberto Planas Alex Meade Alexander Gordeev Alexis Lee
Aline Bousquet Andreas Jaeger Andreas Jaeger Andrew Bogott Andrey Kurilin
Angus Thomas Anita Kuno Anne Gentle Anson Y.W Anusha Ramineni Arata Notsu
Armando Migliaccio Arne Wiebalck Artem Rozumenko Boris Pavlovic Brian Elliott
Brian Waldon Cao Xuan Hoang Chang Bo Guo ChangBo Guo(gcb) Chris Behrens
Chris Jones Chris Krelle Chris Krelle Christian Berendt Chuck Short
Chuck Short Clif Houck Corey Bryant Dan Prince Dan Smith Daryl Walleck
Davanum Srinivas David Hewson David Kang David McNally David Shrewsbury
David Vallee Delisle Davide Guerri Derek Higgins Devananda van der Veen
Dima Shulyak Dirk Mueller Dmitry Nikishov Dmitry Tantsur Dmitry Tantsur
Dmitry Tantsur Dongdong Zhou Doug Hellmann Ellen Hui Erhan Ekici Eric Guo
Eric Windisch Faizan Barmawer Fengqian Gao Flavio Percoco Ghanshyam Mann
Ghe Rivero Ghe Rivero Ghe Rivero Gonéri Le Bouder Gregory Haynes
Hadi Bannazadeh Hang Yang Hans Lindgren Haomeng, Wang Harald Jensås
Harshada Mangesh Kakad He Yongli Hervé Beraud Hironori Shiina Hugo Nicodemos
Ian Pilcher Ihar Hrachyshka Ilya Etingof Ilya Pekelny Imre Farkas
Iury Gregory Melo Ferreira Iury Gregory Melo Ferreira Jacek Tomasiak
James E. Blair James E. Blair James Slagle Jason Kölker Javier Pena
Jay Faulkner Jeremy Stanley Jesse Andrews Jim Rollenhagen Jing Sun Joe Gordon
Joe Gordon Johannes Erdfelt John Garbutt John L. Villalovos
John L. Villalovos John Trowbridge Josh Gachnang Joshua Harlow Joshua Harlow
Julia Kreger Julien Danjou Junya Akahira Kaifeng Wang Ken Igarashi Kun Huang
Kurt Taylor Kyle Stevenson LiZekun <2954674728@qq.com> Lin Tan Lin Tan
Lucas Alvares Gomes Luke Odom Luong Anh Tuan Marco Morais Mario Villaplana
Mark Atwood Mark Goddard Mark McLoughlin Martyn Taylor Mathieu Gagné
Matt Joyce Matt Wagner Matthew Gilliard Matthew Thode Max Lobur Max Lobur
Michael Davies Michael Kerrin Michael Still Michael Turek
Michey Mehta michey.mehta@hp.com Mikhail Durnosvistov Mikyung Kang
Monty Taylor Motohiro OTSUKA Motohiro Otsuka Nam Nguyen Hoai Naohiro Tamura
Nikolay Fedotov Nisha Agarwal Olivier Bourdon OpenStack Release Bot
Pablo Fernando Cargnelutti Pavlo Shchelokovskyy Peeyush Gupta Peng Yong
Phil Day Pádraig Brady Rafi Khardalian Rakesh H S Ramakrishnan G
Ramamani Yeleswarapu Riccardo Pittau Rick Harris Robert Collins
Robert Collins Rohan Kanade Rohan Kanade Roman Bogorodskiy Roman Dashevsky
Roman Podoliaka Roman Prykhodchenko Roman Prykhodchenko Ruby Loo Ruby Loo
Russell Bryant Russell Haering Sam Betts Sandhya Balakrishnan Sandy Walsh
Sascha Peilicke Sascha Peilicke Satoru Moriya Sean Dague Sean McGinnis
Sergey Lukjanov Sergey Lupersolsky Sergey Lupersolsky Sergey Nikitin
Sergey Vilgelm Shane Wang Sharat Sharma Sharpz7 Shivanand Tendulker
Shuangtai Tian Srinivasa Acharya Stanislaw Pitucha Steve Baker Steven Dake
Stig Telfer Takashi Kajinami Takashi Kajinami Tan Lin Thierry Carrez
Thomas Bechtold Thomas Goirand Tom Fifield Tudor Domnescu Tushar Kalra
Uros Orozel Vasyl Saienko Vic Howard Victor Lowther Victor Sergeyev
Vikas Jain Vinay B S Vishvananda Ishaya Vladyslav Drok Vu Cong Tuan Wei Du
Will Szumski Xian Dong, Meng Yolanda Robla Yolanda Robla Mota Yuiko Takada
Yun Mao Yuriy Taraday Yuriy Zveryanskyy Yushiro FURUKAWA Zhang Yang
Zhenguo Niu Zhenzan Zhou ZhiQiang Fan ZhiQiang Fan Zhongyue Luo Zhongyue Luo
cheng chenglch cid dekehn divakar-padiyar-nandavar fpxie gengchc2 ghanshyam
howardlee jiangfei jiangwt100 kavithahr leiyashuai likui linggao lingyongxu
lvdongbing max_lobur melissaml mvpnitesh niuke ricolin ryo.kurahashi sjing
stephane tanlin vmud213 whaom whitekid wu.chunyang yangxurong yangyawei
yolanda.robla yunhong jiang yuyafei zhangbailin zhulingjie

ironic-lib-6.2.0/CONTRIBUTING.rst
=================================

If you would like to contribute to the development of OpenStack, you must
follow the steps documented at:

    https://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.
Bugs should be filed in StoryBoard, not GitHub:

    https://storyboard.openstack.org/#!/project/946

ironic-lib-6.2.0/ChangeLog
==========================

CHANGES
=======

6.2.0
-----

* Fix invalid UTF-8 characters in execute output
* Update to match latest development cycle
* Fix codespell reported errors

6.1.0
-----

* destroy\_disk\_metadata: support 4096 sector size
* Remove old excludes

6.0.0
-----

* Swap partprobe and udev settle
* Force constraints when installing a package during tox test
* Bump hacking to 6.1.0
* Split common qemu-img functions from disk\_utils
* [codespell] - Adding CI for codespell tox target
* Remove compatibility with Python < 3.8
* Raise on non-existing file in qemu\_img\_info
* [codespell] Add tox target and configuration for codespell
* [codespell] Fixing spelling issues in Ironic-Lib
* Drop lower-constraints.txt (again)

5.6.0
-----

* Improve error message for wrong metrics backend
* Compatibility with zeroconf 0.129.0
* Trivial: remove an incorrect comment
* JSON RPC: add support for unix sockets and extract common code
* Increase the ESP partition size to 550 MB

5.5.0
-----

* Upgrade to latest hacking - v6
* Add jsonrpc client port capability
* Exclude all files starting with . from flake8 tests
* Use direct writes when erasing the GPT

5.4.0
-----

* Provide an interface to store metrics
* Fix tox4 error
* No longer override install\_command in tox.ini
* Add Python3 antelope unit tests

5.3.0
-----

* Drop lower-constraints.txt and its testing
* remove unicode prefix from code
* json\_rpc.client: log the URL and exceptions
* Replace the netboot job with a local-boot one
* Remove python version 2 pkgs from bindep
* CI: Removing job queue
* Use TOX\_CONSTRAINTS\_FILE
* Update jobs names
* Add Python3 zed unit tests

5.2.0
-----

* Follow-up Allow customizing roles to use JSON RPC
* Add platform:base-py[2|3] to bindep
* Allow customizing roles to use JSON RPC
* Re-add python 3.6/3.7 in classifier
* Updating yoga tested python versions in classifier

5.1.0
-----

* Restore blkid compatibility with Centos 7
* Allow qemu-img to write out zeros to disk
* Add Python3 yoga unit tests
* Avoid using blkid in favour of lsblk and parted
* Drop an explicit requirement of oslo.log

5.0.0
-----

* json\_rpc: setup.cfg is missing entrypoint for json\_rpc's list\_opts
* json\_rpc: list\_opts() should return tuple
* Add lower-constraints job to current development branch
* Increase version of hacking and pycodestyle
* Use the json format output of qemu-img info
* Update min version of tox to use allowlist
* utils.execute: log stdout and stderr even on failure

4.7.1
-----

* Limit the number of malloc arenas for qemu-img convert

4.7.0
-----

* convert\_image: add flags required for IPA
* Raise qemu-img memory limit to 2 GiB
* setup.cfg: Replace dashes with underscores
* Remove the logic specific to the way deployment works
* Stop testing the iscsi deploy interface
* Remove runtime dependency on pbr
* Add Python3 xena unit tests

4.6.1
-----

* Fixes parsing blkid output with iSCSI devices

4.6.0
-----

* Explicit execute arguments
* Remove default parameter from execute
* Import json\_rpc from ironic
* Add find\_efi\_partition, deprecate get\_uefi\_disk\_identifier
* Trivial: fix a type in tests
* Split out a common helper for blkid
* utils.mounted: retry umount and allow retrying mount
* Add a helper for getting partition path
* keystone: use os\_service\_type for service\_type defaults
* Import common keystone configuration from ironic
* Handle EBRs and tiny partitions when removing metadata
* Get rid of oslo.serialization
* Allow binary data for configdrive
* Correct base64 error handling
* Update minversion of tox

4.5.0
-----

* Import mounted context manager from IPA
* Remove lower-constraints job
* Fix lower-constraints with the new pip resolver
* Fix other out of memory error for qemu-img convert
* Set safe version of hacking
* Add Python3 wallaby unit tests

4.4.0
-----

* Drop the dependency on oslo.service loopingcall
* Remove the unused coding style modules
* Move some generic functions from IPA
* Additional testing for Metal3 string cases
* Use tenacity to retry convert\_image
* Add logging to the HTTP basic auth code
* Update version of hacking

4.3.1
-----

* Return correct response on HTTP basic failure
* Set min version of tox to 3.2.1
* Support more bcrypt prefixes
* Use unittest.mock instead of mock
* Fix for latest zeroconf version

4.3.0
-----

* Bump hacking min version to 3.0.1
* Implement Basic HTTP authentication middleware
* Switch to newer openstackdocstheme version
* Fix DIB jobs
* Update lower-constraints.txt
* Fix pep8 test
* image\_convert: retry resource unavailable and make RLIMIT configurable
* Add py38 package metadata
* Dependencies and tox.ini clean-up
* Upgrade flake8-import-order version to 0.17.1
* Use unittest.mock instead of third party mock
* Convert str to bytes for py3 socket compatibility
* Add Python3 victoria unit tests
* Convert jobs to dib
* Cleanup py27 support
* Add netboot to all partition jobs

4.2.0
-----

* [trivial] add description of ignored flake8 checks
* Bump hacking to 3.0.0
* Explicitly set ramdisk type

4.1.0
-----

* Improve device hints logging
* Add a function to find all devices by hints

4.0.0
-----

* Include rootwrap.d/ironic-lib.filters in package
* Remove deprecated [disk\_utils]iscsi\_verify\_attempts
* Enforce running tox with correct python version based on env

3.0.0
-----

* Stop using six library
* mdns: correctly handle loopback addresses
* mdns: default to listing on both V4 and V6 interfaces
* Updating testing doc to py3
* mdns: support IPv6 addresses
* Drop python 2.7 support and testing
* Fix Jobs
* Add equivalent py3 jobs
* Switch to Ussuri job
* Move ironic-lib.filters to etc/ironic/rootwrap.d
* Build pdf doc

2.21.0
------

* Deprecate [disk\_utils]iscsi\_verify\_attempts

2.20.0
------

* Rescan after making partition changes
* Erase expected GPT locations in metadata wipe

2.19.0
------

* Use last digit to determine paritition naming scheme
* Returns disk\_identifier for UEFI boot mode nodes

2.18.0
------

* Update Python 3 test runtimes for Train
* mdns: allow specifying which interfaces to listen on

2.17.1
------

* mdns: allow quoting in the [mdns]params configuration option
* Expose configuration option relates to exception
* mdns: try to convert bytes to strings in received properties

2.17.0
------

* Tests: replace mocking loopingcall with zero interval
* Handle ironic exception in ironic-lib
* Add support code for multicast DNS service discovery
* Update Sphinx requirements to match global-requirements
* Replace git.openstack.org URLs with opendev.org URLs
* OpenDev Migration Patch
* Dropping the py35 testing
* Include partiton name and flags from parted output

2.16.2
------

* add python 3.7 unit test job
* Refactor zuulv3 based on ironic-base
* Build ironic-python-agent images on merging ironic-lib patches

2.16.1
------

* Run sync and partprobe after adding a configdrive partition
* Correct author email address
* zuul-ify ironic-lib jobs

2.16.0
------

* Allow to return unpopulated root partition from work\_on\_disk
* Correct headings in README.rst
* Prevent configuration changes in unit tests from affecting each other

2.15.1
------

* Use templates for cover and lower-constraints
* Accepts option for conv flags in dd command
* Revert "Use dd conv=sparse when writing images to nodes"
* Make search for config drive partition case insensitive
* Check GPT table with sgdisk insread of partprobe
* add python 3.6 unit test job
* switch documentation job to new PTI
* import zuul job settings from project-config

2.14.0
------

* Add logic to create PReP partition for ppc64\* arch
* Fall back to PARTUUID if UUID returns nothing
* Remove testrepository
* Expose GPT partitioning fixing method
* Switch to using stestr
* Do not run API (functional) tests in the CI
* Remove unneccessary lambda from \_test\_make\_partitions
* fix tox python3 overrides

2.13.0
------

* Gate fix: Cap hacking to avoid gate failure
* fix lower constraints and uncap eventlet
* Add retry attempts for the partprobe command
* Change launchpad references to storyboard
* Updated from global requirements
* add lower-constraints job
* Updated from global requirements
* Updated from global requirements
* Switch the CI to hardware types and clean up playbooks
* Updated from global requirements
* Updated from global requirements
* Use six.wraps() for Metrics so decorated methods can be inspected
* Zuul: Remove project name
* Use the 'ironic' queue for the gate
* Replace curly quotes with straight quotes
* Add missing 'self' parameter to class methods
* Updated from global requirements

2.12.0
------

* Simplify logic in wait\_for\_disk\_to\_become\_available
* Updated from global requirements
* Add wait\_for\_disk to destroy\_disk\_metadata function
* Use dd conv=sparse when writing images to nodes
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Use the tempest plugin from openstack/ironic-tempest-plugin
* tox: Use the default version of Python 3 for tox tests
* Fix issue with double mocking of utils.execute functions
* Update version of flake8-import-order package
* Avoid tox\_install.sh for constraints support
* Updated from global requirements
* zuul: Remove un-needed projects in required\_projects
* Updated from global requirements
* Add raises note to disk\_utils.get\_disk\_identifier
* Updated from global requirements
* zuul: Centralize irrelevant-files in legacy-ironic-lib-dsvm-base
* Zuul: add file extension to playbook path
* Move openstack-tox-cover job to project config
* Move legacy ironic-lib jobs in-tree

2.11.0
------

* Migrate to stestr as unit tests runner
* Updated from global requirements
* Updated from global requirements
* flake8: Enable some off-by-default checks
* Added \`by\_path\` root device hint
* Updated from global requirements
* Updated from global requirements
* Update and replace http with https for doc links

2.9.0
-----

* Correct url in setup.cfg for documentation path
* Rework base landing page to contributor docs
* Updated from global requirements
* Switch from oslosphinx to openstackdocstheme
* Updated from global requirements
* Adjust test with option 'backend'
* Updated from global requirements

2.8.0
-----

* Updated from global requirements
* Remove pbr warnerrors in favor of sphinx check
* Support for NVMe drives for config drives
* Remove unnecessary setUp function in testcase
* Updated from global requirements
* Add 'sgdisk -Z' to destroy\_disk\_metadata
* Updated from global requirements
* Prevent tests from using utils.execute()
* Updated from global requirements
* Updated from global requirements
* Remove unit tests that test oslo\_concurrency.processutils.execute
* Use enforce\_type=False in test\_nonexisting\_backend
* Updated from global requirements
* Make disk size check output handling consistent
* Updated from global requirements

2.7.0
-----

* Revert "Update UPPER\_CONSTRAINTS\_FILE for stable/ocata"
* Remove translation of log messages
* Test that configdrive disk exist with test -e
* Add missing 'autospec' statements to unit test mocks
* flake8: Specify 'ironic\_lib' as name of app
* Updated from global requirements
* Add .eggs to the .gitignore file
* Improve logs for the root device hints mechanism
* Python 3.5 is added
* Updated from global requirements
* Updated from global requirements
* Update test requirement
* Updated from global requirements
* Use flake8-import-order

2.6.0
-----

* Updated from global requirements
* Limit memory usage when running qemu-img
* Update UPPER\_CONSTRAINTS\_FILE for stable/ocata

2.5.1
-----

* Fix exception for multiple devices with same label

2.5.0
-----

* Fix retrieval of config-2 existing partition
* Create configdrive partitions as vfat

2.4.0
-----

* Use oslo\_serialization.base64 to follow OpenStack Python3
* Correct reraising of exception
* Check for HCTL as part of root device hints
* Updated from global requirements
* Show team and repo badges on README
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Add missing autospec=True to mocks

2.3.0
-----

* Add debug logging while waiting for udevadm
* Wait for a config drive device to be available
* Bump hacking to 0.11.0 in test-requirements
* Add more tests for work\_on\_disk
* Fix typo in error message
* followup to disk\_utils.count\_mbr\_partitions()
* Use 'code-block' for pieces of code
* Correctly count the number of primary partitions
* Changed the home-page of ironic-lib
* Remove tests and common.i18n from autodoc
* Updated from global requirements
* Add prefix "$" for command examples

2.2.0
-----

* Remove unneeded disk\_utils.mkfs() function
* Updated from global requirements
* Add py35 to tox environments
* Fix check for GPT partioned device
* Fix creating config drive for whole disk images
* TrivialFix: Remove cfg import unused
* Add match\_root\_device\_hints() to the utils.py module
* Sync tools/tox\_install.sh
* Extend parse\_root\_device\_hints to support operators
* Using assertIsNone() is preferred over assertEqual()

2.1.0
-----

* Correct reraising of exception
* Add developer documentation on metrics
* Use constraints for all the things
* Enforce doc8, make it pass, + fix inaccuracies
* Add framework for doc building in ironic-lib
* Updated from global requirements
* Updated from global requirements
* Support configdrive in iscsi deploy for whole disk images
* Add parse\_root\_device\_hints to utils.py
* Updated from global requirements

2.0.0
-----

* Include wipefs --force option
* Updated from global requirements
* Add keyword arg 'log\_stdout' to utils.execute()
* Remove releasenotes/\*
* Use autospec in mocked objects
* Add support for metrics
* Ignore .idea folder
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Remove deprecated disk util configs
* Updated from global requirements
* Add support for BIOS local boot for GPT label
* Clarify which projects are meant to use the ironic-lib
* Fix coverage option and execution

1.3.0
-----

* Updated from global requirements
* Explore config options to oslo-config-generator
* Clean up test-requirements
* use wipefs to erase FS meta information
* Updated from global requirements

1.1.0
-----

* Move eventlet to test-requirements. Remove greenlet
* Fixes naming for the partitions in baremetal
* Tests to not depend on psmisc to be installed
* Add support for choosing the disk label
* Updated from global requirements
* Updated from global requirements

1.0.0
-----

* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Remove unused packages from requirements
* Updated from global requirements
* Updated from global requirements
* Sync test\_utils from ironic
* Add tests for qemu\_img\_info() & convert\_image()
* Use imageutils from oslo.utils
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements

0.5.0
-----

* Default for root\_helper config
* Replace deprecated LOG.warn with LOG.warning
* Updated from global requirements

0.4.0
-----

* Replace rootwrap\_config and rootwrap\_helper\_cmd with root\_helper
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Remove tempdir config, add tempdir parameter
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Fix help string for 'rootwrap\_helper\_cmd'
* Updated from global requirements
* Updated from global requirements
* Use standard locale when executing 'parted' command

0.3.0
-----

* Replace .iteritems() with .items()
* Updated from global requirements
* Add more info level logs to disk\_utils.work\_on\_disk() method
* Sync the make\_partitions function with ironic
* Add 'node\_uuid' in debug logs to assist debugging

0.2.0
-----

* Move to oslo.i18n
* Put py34 first in the env order of tox
* Remove README.md and update README.rst
* Update link for where to report bugs
* Remove execute permission from exception.py
* Add a cover target to tox
* Better optimize test runs
* Move tests/ under ironic\_lib/, use ostestr
* Remove TODO
* Update setup.cfg
* Use standard locale when executing 'dd' command
* Updated from global requirements

0.1.0
-----

* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Fix E128 issues and enable E128
* Update tox.ini and fix py34 issues
* Change PyPI name to ironic-lib and clean up setup.cfg
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Update mock assertion for mock 1.1
* Updated from global requirements
* Switch to oslo.service
* Updated from global requirements
* Drop use of 'oslo' namespace package
* Updated from global requirements
* Updated from global requirements
* Drop use of 'oslo' namespace package
* Copy .gitignore from Ironic
* Updated from global requirements
* Add .gitreview
* Initial commit for ironic-lib
* INSPECTFAIL value is more readable
* Disable n-novnc, heat, cinder and horizon on devstack
* Return required properties for agent deploy driver
* Remove unused modules from ironic/openstack/common
* Use functions from oslo.utils
* Update Ilo drivers to use REST API interface to iLO
* Add dhcp-all-interfaces to get IP to NIC other than eth0
* Log exception on tear\_down failure
* Fix PEP8 E124 & E125 errors
* Mock sleep function for OtherFunctionTestCase
* Log node UUID rather than node object
* Updated from global requirements
* Add InspectInterface for node-introspection
* Correctly rebuild the PXE file during takeover of ACTIVE nodes
* Fix PEP8 E121 & E122 errors
* Add documentation for the IPMI retry timeout option
* Use oslo\_utils replace oslo.utils
* Avoid deregistering conductor following SIGUSR1
* Add states required for node-inspection
* For flake8 check, make the 'E12' ignore be more granular
* add retry logic to is\_block\_device function
* Imported Translations from Transifex
* Move oslo.config references to oslo\_config
* Add AMT-PXE-Driver Common Library
* Fix typos in documentation: Capabilities
* Removed unused image file
* Address final comments of a4cf7149fb
* Add concept of stable states to the state machine
* Fix ml2\_conf.ini settings
* Vendorpassthru doesn't get correct 'self'
* Remove docs in proprietary formats
* Fix file permissions in project
* Imported Translations from Transifex
* Updated from global requirements
* Remove deploy\_is\_done() from AgentClient
* AgentVendorInterface: Move to a common place
* Stop console at first if console is enabled when destroy node
* fixed typos from eligable to eligible and delition to deletion
* Add logical name support to Ironic
* Add support for local boot
* Fix chown invalid option -- 'p'
* ipmitool drivers fail with integer passwords
* Add the subnet creation step to the install guide
* improve iSCSI connection check
* Remove min and max from base.Version
* Add list of python driver packages
* Add policy show\_password to mask passwords in driver\_info
* Conductor errors if enabled\_drivers are not found
* Add MANAGEABLE state and associated transitions
* Raise minimum API version to 1.1
* Correct typo in agent\_client
* Fix argument value for work\_on\_disk() in unit test
* Documentation: Describe the 'spacing' argument
* update docstring for driver\_periodic\_task's parallel param
* Use prolianutils module for ilo driver tests
* Add documentation on parallel argument for driver periodic tasks
* Rename provision\_state to power\_state in test\_manager.py
* Refactor ilo.deploy.\_get\_single\_nic\_with\_vif\_port\_id()
* Update agent driver with new field driver\_internal\_info
* Updated from global requirements
* Add support for driver-specific periodic tasks
* Partial revert of 4606716 until we debug further
* Clean driver\_internal\_info when changes nodes' driver
* Add Node.driver\_internal\_info
* Move oslo.config references to oslo\_config
* Move oslo.db references to oslo\_db
* Revert "Do not pass PXE net config from bootloader to ramdisk"
* Bump oslo.rootwrap to 1.5.0
* Drop deprecated namespace for oslo.rootwrap
* Add VirtualBox drivers and its modules
* region missing in endpoint selection
* Add :raises: for Version constructor docstring
* Improve testing of the Node's REST API
* Rename NOSTATE to AVAILABLE
* Add support for API microversions
* Address final comments of edf532db91
* Add missing exceptions into function docstring
* Fix typos in commit I68c9f9f86f5f113bb111c0f4fd83216ae0659d36
* Add logic to store the config drive passed by Nova
* Do not POST conductor\_affinity in tests
* Add 'irmc\_' prefix to optional properties
* Actively check iSCSI connection after login
* Updated from global requirements
* Add iRMC Driver and its iRMC Power module
* Fix drivers.rst doc format error
* Improve test assertion for get\_glance\_image\_properties
* Do not pass PXE net config from bootloader to ramdisk
* Adds get\_glance\_image\_properties
* Fix filter\_query in drac/power interface
* Updated from global requirements
* Simplify policy.json
* Replace DIB installation step from git clone to pip
* Add a TODO file
* Updated from global requirements
* Fix function docstring of \_get\_boot\_iso\_object\_name()
* Improve ironic-dbsync help strings
* Clear locks on conductor startup
* Remove argparse from requirements
* Use oslo\_serialization replace oslo.serialization
* Agent driver fails with Swift Multiple Containers
* Add ipmitool to quickstart guide for Ubuntu
* Allow operations on DEPLOYFAIL'd nodes
* Allow associate an instance independent of the node power state
* Improve docstrings about TaskManager's spawning feature
* DracClient to handle ReturnValue validation
* Fix instance\_info parameters clearing
* DRAC: Fix wsman host verification
* Updated from global requirements
* Clean up ilo's parse\_driver\_info()
* Fix ssh \_get\_power\_status as it returned status for wrong node
* Fix RPCService and Ironic Conductor so they shut down gracefully
* Remove jsonutils from openstack.common
* Remove lockfile from dependencies
* Remove IloPXEDeploy.validate()
* Force glance recheck for kernel/ramdisk on rebuild
* iboot power driver: unbound variable error
* Remove unused state transitions
* PXE: Add configdrive support
* Rename localrc for local.conf
* DracClient to handle ClientOptions creation
* Ensure we don't have stale power state in database after power action
* Remove links autogenerated from module names
* Make DD block size adjustable
* Improve testing of state transitions
* Convert drivers to use process\_event()
* Update service.py to support graceful Service shutdown
* Ensure that image link points to the correct image
* Raise SSH failure messages to the error level
* Make 'method' explicit for VendorInterface.validate()
* Updated from global requirements
* Provided backward compat for enforcing admin policy
* Allow configuration of neutronclient retries
* Convert check\_deploy\_timeout to use process\_event
* Add requests to requirements.txt
* Enable async callbacks from task.process\_event()
* Document dependency on \`fuser\` for pxe driver
* Distinguish between prepare + deploy errors
* Avoid querying the power state twice
* Add state machine to documentation
* Updated from global requirements
* Adjust the help strings to better reflect usage
* Updated from global requirements
* Updated from global requirements
* Update etc/ironic/ironic.conf.sample
* Fix policy enforcement to properly detect admin
* Minor changes to state model
* Add documentation to create in RegionOne
* Delete unnecessary document files
* Updated from global requirements
* display error logging should be improved
* Refactor async helper methods in conductor/manager.py
* Hide oslo.messaging DEBUG logs by default
* add comments for NodeStates fields
* Stop conductor if no drivers were loaded
* Fix typo in install-guide.rst
* Reuse methods from netutils
* Use get\_my\_ipv4 from oslo.utils
* improve the neutron configuration in install-guide
* Refactoring for Ironic policy
* PXE: Pass root device hints via kernel cmdline
* Extend API multivalue fields
* Add a fsm state -> dot diagram generator
* Updated from global requirements
* Update command options in the Installation Guide
* Improve Agent deploy driver validation
* Add new enrollment and troubleshooting doc sections
* Begin using the state machine for node deploy/teardown
* Add base state machine
* Updated from global requirements
* Get rid of set\_failed\_state duplication
* Remove Python 2.6 from setup.cfg
* Updated from global requirements
* Update dev quick-start for devstack
* Updated from global requirements
* Correct vmware ssh power manager
* rename oslo.concurrency to oslo\_concurrency
* Remove duplicate dependencies from dev-quickstart docs
* Do not strip 'glance://' prefix from image hrefs
* Updated from global requirements
* Fix image\_info passed to IPA for image download
* Use Literal Blocks to write code sample in docstring
* Workflow documentation is now in infra-manual
* Add tests to iscsi\_deploy.build\_deploy\_ramdisk\_options
* Fix for broken deploy of iscsi\_ilo driver
* Updated from global requirements
* Add info on creating a tftp map file
* Add documentation for SeaMicro driver
* Fixed typo in Drac management driver test
* boot\_devices.PXE value should match with pyghmi define
* Add decorator that requires a lock for Drac management driver
* Remove useless deprecation warning for node-update maintenance
* Ilo tests refactoring
* Change some exceptions from invalid to missing
* Add decorator that requires a lock for Drac power driver
* Change methods from classmethod to staticmethod
* iLO Management Interface
* Improve docs for running IPA in Devstack
* Update 'Introduction to Ironic' document
* Avoid calling \_parse\_driver\_info in every test
* Updated from global requirements
* Correct link in user guide
* Minor fix to install guide for associating k&r to nodes
* Add serial console feature to seamicro driver
* Support configdrive in agent driver
* Add driver\_validate()
* Update drivers VendorInterface validate() method
* Adds help for installing prerequisites on RHEL
* Add documentation about Vendor Methods
* Make vendor methods discoverable via the Ironic API
* Fix PXEDeploy class docstring
* Updated from global requirements
* Vendor endpoints to support different HTTP methods
* Add ipmitool as dependency on RHEL/Fedora systems
* dev-quickstart.rst update to add required packages
* Add gendocs tox job for generating the documentation
* Add gettext to packages needed in dev quickstart
* Convert qcow2 image to raw format when deploy
* Update iLO driver documentation
* Disable IPMI timeout before setting boot device
* Updated from global requirements
* ConductorManager catches Exceptions
* Remove unused variable in agent.\_get\_interfaces()
* Enable hacking rule E265
* Add sync and async support for passthru methods
* Fix documentation on Standard driver interfaces
* Add a mechanism to route vendor methods
* Remove redundant FunctionalTest usage in API tests
* Use wsme.Unset as default value for API objects
* Fix traceback on rare agent error case
* Make \_send\_sensor\_data more cooperative
* Updated from global requirements
* Add logging to driver vendor\_passthru functions
* Support ipxe with Dnsmasq
* Correct "returns" line in PXE deploy method
* Remove all redundant setUp() methods
* Update install guide to install tftp
* Remove duplicated \_fetch\_images function
* Change the force\_raw\_image config usage
* Clear maintenance\_reason when setting maintenance=False
* Removed hardcoded IDs from "port" test resources
* Switch to oslo.concurrency
* Updated from global requirements
* Use docstrings for attributes in api/controllers
* Put nodes-related API in same section
* Fix get\_test\_node attributes set incorrectly
* Get new auth token for ramdisk if old will expire soon
* Delete unused 'use\_ipv6' config option
* Updated from global requirements
* Add maintenance to RESTful web API documentation
* Updated from global requirements
* Iterate over glance API servers
* Add API endpoint to set/unset the node maintenance mode
* Removed hardcoded IDs from "node" test resources
* Add maintenance\_reason when setting maintenance mode
* Add Node.maintenance\_reason
* Fix F811 error in pep8
* Improve hash ring value conversion
* Add SNMP driver for Aten PDU's
* Update node-validate error messages
* Store image disk\_format and container\_format
* Continue heartbeating after DB connection failure
* TestAgentVendor to use the fake\_agent driver
* Put a cap on our cyclomatic complexity
* More helpful failure for tests on noexec /tmp
* Update doc headers at end of Juno
* Fix E131 PEP8 errors
* Add documentation for iLO driver(s)
* Enable E111 PEP8 check
* Updated from global requirements
* Fix F812 PEP8 error
* Enable H305 PEP8 check
* Enable H307 PEP8 check
* Updated from global requirements
* Enable H405 PEP8 check
* Enable H702 PEP8 check
* Enable H904 PEP8 check
* Migration to oslo.serialization
* Add the PXE VendorPassthru interface to PXEDracDriver
* Adds instructions for deploying instances on real hardware
* Fix pep8 test
* Add missing attributes to sample API objects
* Fix markup-related issues in documentation
* Add documentation for PXE UEFI setup
* Clear hash ring cache in get\_topic\_for\*
* Fix exceptions names and messages for Keystone errors
* Remove unused change\_node\_maintenance\_mode from rpcapi
* Imported Translations from Transifex
* Clear hash ring cache in get\_topic\_for\*
* Move database fixture to a separate test case
* KeyError from AgentVendorInterface.\_heartbeat()
* Validate the power interface before deployment
* Cleans up some Sphinx rST warnings in Ironic
* Remove kombu as a dependency for Ironic
* Make hash ring mapping be more consistent
* Add periodic task to rebuild conductor local state
* Open Kilo development
* Add "affinity" tracking to nodes and conductors
* ilo\* drivers to use only ilo credentials
* Update hacking version in test requirements
* Add a call to management.validate(task)
* Replace custom lazy loading by stevedore
* Updated from global requirements
* Remove useless variable in migration
* Use DbTestCase as test base when context needed
* For convention rename the first classmethod parameter to cls
* Always reset target\_power\_state in node\_power\_action
* Imported Translations from Transifex
* Stop running check\_uptodate in the pep8 testenv
* Add HashRingManager to wrap hash ring singleton
* Fix typo in agent validation code
* Conductor changes target\_power\_state before starting work
* Adds openSUSE support for developer documentation
* Updated from global requirements
* Remove untranslated PO files
* Update ironic.conf.sample
* Remove unneeded context initialization in tests
* Force the SSH commands to use their default language
* Add parameter to override locale to utils.execute
* Refactor PXE clean up tests
* Updated from global requirements
* Don't reraise Exceptions from agent driver
* Add documentation for ironic-dbsync command
* Do not return 'id' in REST API error messages
* Separate the agent driver config from the base localrc config
* pxe\_ilo driver to call iLO set\_boot\_device
* Remove redundant context parameter
* Update docs with new dbsync command
* Update devstack docs, require Ubuntu 14.04
* Do not use the context parameter on refresh()
* Pass ipa-driver-name to agent ramdisk
* Do not set the context twice when forming RPC objects
* Make context mandatory when instantiating a RPC object
* Neutron DHCP implementation to raise exception if no ports have VIF
* Do not cache auth token in Neutron DHCP provider
* Imported Translations from Transifex
* add\_node\_capability and rm\_node\_capability unable to save changes to db
* Updated from global requirements
* Handle SNMP exception error.PySnmpError
* Use standard locale in list\_partitions
* node\_uuid should not be used to create test port
* Revert "Revert "Search line with awk itself and avoid grep""
* Fix code error in pxe\_ilo driver
* Add unit tests for SNMPClient
* Check whether specified FS is supported
* Sync the doc with latest code
* Add a doc note about the vendor\_passthru endpoint
* Remove 'incubated' documentation theme
* Import modules for fake IPMINative/iBoot drivers
* Allow clean\_up with missing image ref
* mock.called\_once\_with() is not a valid method
* Fix Devstack docs for zsh users
* Fix timestamp column migration
* Update ironic states and documentation
* Stop using intersphinx
* Updated from global requirements
* Remove the objectify decorator
* Add reserve() and release() to Node object
* Add uefi boot mode support in IloVirtualMediaIscsiDeploy
* Don't write python bytecode while testing
* Support for setting boot mode in pxe\_ilo driver
* Remove bypassing of H302 for gettextutils markers
* Revert "Search line with awk itself and avoid grep"
* Search line with awk itself and avoid grep
* Add list\_by\_node\_id() to Port object
* Remove unused modules from openstack-common.conf
* Sync the document with the current implementation
* Unify the sensor data format
* Updated from global requirements
* Deprecate Ironic compute driver and sched manager
* Log ERROR power state in node\_power\_action()
* Fix compute\_driver and scheduler\_host\_manager in install-guide
* Use oslo.utils instead of ironic.openstack.common
* Use expected, actual order for PXE template test
* Fix agent PXE template
* Translator functions cleanup part 3
* Translator functions cleanup part 2
* Imported Translations from Transifex
* Updated from global requirements
* Remove XML from api doc samples
* Update ironic.conf.sample
* Fix race conditions running pxe\_utils tests in parallel
* Switch to "incubating" doc theme
* Minor fixes for ipminative console support
* Translator functions cleanup part 4
* Translator functions cleanup part 1
* Remove unnecessary mapping from Agent drivers
* mock.assert\_called\_once() is not valid method
* Use models.TimestampMixin from oslo.db
* Updated from global requirements
* Driver merge review comments from 111425
* Nova review updates for \_node\_resource
* Ignore backup files
* IloVirtualMediaAgent deploy driver
* IloVirtualMediaIscsi deploy driver
* Unbreak debugging via testr
* Interactive console support for ipminative driver
* Add UEFI based deployment support in Ironic
* Adds SNMP power driver
* Control extra space for images conversion in image\_cache
* Use metadata.create\_all() to initialise DB schema
* Fix minor issues in the DRAC driver
* Add send-data-to-ceilometer support for pxe\_ipminative driver
* Reduce redundancy in conductor manager docstrings
* Fix typo in PXE driver docstrings
* Update installation guide for syslinux 6
* Updated from global requirements
* Imported Translations from Transifex
* Avoid deadlock when logging network\_info
* Implements the DRAC ManagementInterface for get/set boot device
* Rewrite images tests with mock
* Add boot\_device support for vbox
* Remove gettextutils \_ injection
* Make DHCP provider pluggable
* DRAC wsman\_{enumerate, invoke}() to return an ElementTree object
* Remove futures from requirements
* Script to migrate Nova BM data to Ironic
* Imported Translations from Transifex
* Updated from global requirements
* Fix unit tests with keystoneclient master
* Add support for interacting with swift
* properly format user guide in RST
* Updated from global requirements
* Fix typo in user-guide.rst
* Add console interface to agent\_ipmitool driver
* Add support for creating vfat and iso images
* Check ERROR state from driver in \_do\_sync\_power\_state
* Set PYTHONHASHSEED for venv tox environment
* Add iPXE Installation Guide documentation
* Add management interface for agent drivers
* Add driver name on driver load exception
* Take iSCSI deploy out of pxe driver
* Set ssh\_virt\_type to vmware
* Update nova driver's power\_off() parameters
* return power state ERROR instead of an exception
* handle invalid seamicro\_api\_version
* Imported Translations from Transifex
* Nova ironic driver review update requests to p4
* Allow rebuild of node in ERROR and DEPLOYFAIL state
* Use cache in node\_is\_available()
* Query full node details and cache
* Add in text for text mode on trusty
* Add Parallels virtualisation type
* IPMI double bridging functionality
* Add DracDriver and its DracPower module
* use MissingParameterValue exception in iboot
* Update compute driver macs\_for\_instance per docs
* Update DevStack guide when querying the image UUID
* Updated from global requirements
* Fix py3k-unsafe code in test\_get\_properties()
* Fix tear\_down a node with missing info
* Remove d\_info param from \_destroy\_images
* Add docs for agent driver with devstack
* Removes get\_port\_by\_vif
* Update API document with BootDevice
* Replace incomplete "ilo" driver with pxe\_ilo and fake\_ilo
* Handle all exceptions from \_exec\_ipmitool
* Remove objectify decorator from dbapi's {get, register}\_conductor()
* Improve exception handling in console code
* Use valid exception in start\_shellinabox\_console
* Remove objectify decorator from dbapi.update\_\* methods
* Add list() to Chassis, Node, Port objects
* Raise MissingParameterValue when validating glance info
* Mechanism to cleanup all ImageCaches
* Driver merge review comments from 111425-2-3
* Raise MissingParameterValue instead of Invalid
* Import fixes from the Nova driver reviews
* Imported Translations from Transifex
* Use auth\_token from keystonemiddleware
* Make swift tempurl key secret
* Add method for deallocating networks on reschedule
* Reduce running time of test\_different\_sizes
* Remove direct calls to dbapi's get\_node\_by\_instance
* Add create() and destroy() to Port object
* Correct \`op.drop\_constraint\` parameters
* Use timeutils from one place
* Add create() and destroy() to Chassis object
* Add iPXE support for Ironic
* Imported Translations from Transifex
* Add posix\_ipc to requirements
* backport reviewer comments on nova.virt.ironic.patcher
* Move the 'instance\_info' fields to GenericDriverFields
* Migration to oslo.utils library
* Fix self.fields on API Port object
* Fix self.fields on API Chassis object
* Sync oslo.incubator modules
* Updated from global requirements
* Expose {set,get}\_boot\_device in the API
* Check if boot device is persistent on ipminative
* Sync oslo imageutils, strutils to Ironic
* Add charset and engine settings to every table
* Imported Translations from Transifex
* Remove dbapi calls from agent driver
* Fix not attribute '\_periodic\_last\_run'
* Implements send-data-to-ceilometer
* Port iBoot PDU driver from Nova
* Log exception with translation
* Add ironic-python-agent deploy driver
* Updated from global requirements
* Imported Translations from Transifex
* Clean up calls to get\_port()
* Clean up calls to get\_chassis()
* Do not rely on hash ordering in tests
* Update\_port should expect MACAlreadyExists
* Imported Translations from Transifex
* Adding swift temp url support
* Push the image cache ttl way up
* Imported Translations from Transifex
* SSH virsh to use the new ManagementInterface
* Split test case in ironic.tests.conductor.test\_manager
* Tune down node\_locked\_retry\_{attempts,interval} config for tests
* Add RPC version to test\_get\_driver\_properties
* Import fixes from the Nova driver reviews
* Generalize exception handling in Nova driver
* Fix nodes left in an incosistent state if no workers
* IPMINative to use the new ManagementInterface
* Backporting nova host manager changes into ironic
* Catch oslo.db error instead of sqlalchemy error
* Add a test case for DB schema comparison
* remove ironic-manage-ipmi.filters
* Implement API to get driver properties
* Add drivers.base.BaseDriver.get\_properties()
* Implement retry on NodeLocked exceptions
* SeaMicro to use the new ManagementInterface
* Import fixes from Nova scheduler reviews
* Rename/update common/tftp.py to common/pxe\_utils.py
* Imported Translations from Transifex
* Factor out deploy info from PXE driver
* IPMITool to use the new ManagementInterface
* Use mock.assert\_called\_once\_with()
* Add missing docstrings
* Raise appropriate errors on duplicate Node, Port and Chassis creation
* Add IloDriver and its IloPower module
* Add methods to ipmitool driver
* Use opportunistic approach for migration testing
* Use oslo.db library
* oslo.i18n migration
* Import a few more fixes from the Nova driver
* Set a more generous default image cache size
* Fix wrong test fixture for Node.properties
* Make ComputeCapabilitiesFilter work with Ironic
* Add more INFO logging to ironic/common/service.py
* Clean up nova virt driver test code
* Fix node to chassis and port to node association
* Allow Ironic URL from config file
* Imported Translations from Transifex
* Update webapi doc with link and console
* REST API 'limit' parameter to only accept positive values
* Update docstring for api...node.validate
* Document 'POST /v1/.../vendor\_passthru'
* ManagementInterface {set, get}\_boot\_device() to support 'persistent'
* Use my\_ip for neutron URL
* Updated from global requirements
* Add more INFO logging to ironic/conductor
* Specify rootfstype=ramfs deploy kernel parameter
* Add set\_spawn\_error\_hook to TaskManager
* Imported Translations from Transifex
* Updates the Ironic on Devstack dev documentation
* Simplify error handling
* Add gettextutils.\_L\* to import\_exceptions
* Fix workaround for the "device is busy" problem
* Allow noauth for Neutron
* Minor cleanups to nova virt driver and tests
* Update nova rebuild to account for new image
* Updated from global requirements
* pep8 cleanup of Nova code
* PEP fixes for the Nova driver
* Fix glance endpoint tests
* Update Nova's available resources at termination
* Fix the section name in CONTRIBUTING.rst
* Add/Update docstrings in the Nova Ironic Driver
* Update Nova Ironic Driver destroy() method
* Nova Ironic driver get\_info() to return memory stats in KBytes
* Updates Ironic Guide with deployment information
* Add the remaining unittests to the ClientWrapper class
* Wait for Neutron port updates when using SSHPower
* Fix 'fake' driver unable to finish a deploy
* Update "Exercising the Services Locally" doc
* Fixing hardcoded glance protocol
* Remove from\_chassis/from\_nodes from the API doc
* Prevent updating UUID of Node, Port and Chassis on DB API level
* Imported Translations from Transifex
* Do not delete pxe\_deploy\_{kernel, ramdisk} on tear down
* Implement security groups and firewall filtering methods
* Add genconfig tox job for sample config file generation
* Mock pyghmi lib in unit tests if not present
* PXE to pass hints to ImageCache on how much space to reclaim
* Add some real-world testing on DiskPartitioner
* Eliminate races in Conductor \_check\_deploy\_timeouts
* Use temporary dir for image conversion
* Updated from global requirements
* Move PXE instance level parameters to instance\_info
* Clarify doc: API is admin only
* Mock time.sleep for the IPMI tests
* Destroy instance to clear node state on failure
* Add 'context' parameter to get\_console\_output()
* Cleanup virt driver tests and verify final spawn
* Test fake console driver
* Allow overriding the log level for ironicclient
* Virt driver logging improvements
* ipmitool driver raises DriverLoadError
* VendorPassthru.validate()s call \_parse\_driver\_info
* Enforce a minimum time between all IPMI commands
* Remove 'node' parameter from the validate() methods
* Test for membership should be 'not in'
* Replace mknod() with chmod()
* Factoring out PXE and TFTP functions
* Let ipmitool natively retry commands
* Sync processutils from oslo code
* Driver interface's validate should return nothing
* Use .png instead of .gif images
* Fix utils.execute() for consistency with Oslo code
* remove default=None for config options
* Stop ipmitool.validate from touching the BMC
* Set instance default\_ephemeral\_device
* Add unique constraint to instance\_uuid
* Add node id to DEBUG messages in impitool
* Remove 'node' parameter from the Console and Rescue interfaces
* TaskManager: Only support single node locking
* Allow more time for API requests to be completed
* Add retry logic to iscsiadm commands
* Wipe any metadata from a nodes disk
* Rework make\_partitions logic when preserve\_ephemeral is set
* Fix host manager node detection logic
* Add missing stats to IronicNodeState
* Update IronicHostManager tests to better match how code works
* Update Nova driver's list\_instance\_uuids()
* Remove 'fake' and 'ssh' drivers from default enabled list
* Work around iscsiadm delete failures
* Mock seamicroclient lib in unit tests if not present
* Cleanup mock patch without \`with\` part 2
* Add \_\_init\_\_.py for nova scheduler filters
* Skip migrations test\_walk\_versions instead of pass
* Improving unit tests for \_do\_sync\_power\_state
* Fix AttributeError when calling create\_engine()
* Reuse validate\_instance\_and\_node() Nova ironic Driver
* Fix the logging message to identify node by uuid
* Fix concurrent deletes in virt driver
* Log exceptions from deploy and tear\_down
* PXE driver to validate the requested image in Glance
* Return the HTTP Location for accepted requestes
* Return the HTTP Location for newly created resources
* Fix tests with new keystoneclient
* list\_instances() to return a list of instances names
* Pass kwargs to ClientWrapper's call() method
* Remove 'node' parameter from the Power interface
* Set the correct target versions for the RPC methods
* Consider free disk space before downloading images into cache
* Change NodeLocked status code to a client-side error
* Remove "node" parameter from methods handling power state in docs
* Add parallel\_image\_downloads option
* Synced jsonutils from oslo-incubator
* Fix chassis bookmark link url
* Remove 'node' parameter from the Deploy interface
* Imported Translations from Transifex
* Remove all mostly untranslated PO files
* Cleanup images after deployment
* Fix wrong usage of mock methods
* Using system call for downloading files
* Run keepalive in a dedicated thread
* Don't translate debug level logs
* Update dev quickstart guide for ephemeral testing
* Speed up Nova Ironic driver tests
* Renaming ironicclient exceptions in nova driver
* Fix bad Mock calls to assert\_called\_once()
* Cleanup mock patch without \`with\` part 1
* Corrects a typo in RESTful Web API (v1) document
* Updated from global requirements
* Clean up openstack-common.conf
* Remove non-existent 'pxe\_default\_format' parameter from patcher
* Remove explicit dependency on amqplib
* Pin RPC client version min == max
* Check requested image size
* Fix 'pxe\_preserve\_ephemeral' parameter leakage
* RPC\_API\_VERSION out of sync
* Simplify calls to ImageCache in PXE module
* Implement the reboot command on the Ironic Driver
* Place root partition last so that it can always be expanded
* Stop creating a swap partition when none was specified
* Virt driver change to use API retry config value
* Implement more robust caching for master images
* Decouple state inspection and availability check
* Updated from global requirements
* Fix ironic node state comparison
* Add create() and destroy() to Node
* Fix typo in rpcapi.driver\_vendor\_passthru
* Support serial console access
* Remove 'node' parameter from the VendorPassthru interface
* Updated from global requirements
* Synced jsonutils from oslo-incubator
* Fix chassis-node relationship
* Implement instance rebuild in nova.virt.driver
* Sync oslo logging
* Add ManagementInterface
* Clean oslo dependencies files
* Return error immediately if set\_console\_mode is not supported
* Fix bypassed reference to node state values
* Updated from global requirements
* Port to oslo.messaging
* Drivers may expose a top-level passthru API
* Overwrite instance\_exists in Nova Ironic Driver
* Update Ironic User Guide post landing for 41af7d6b
* Spawn support for TaskManager and 2 locking fixes
* Document ClusteredComputeManager
* Clean up calls to get\_node()
* nova.virt.ironic passes ephemeral\_gb to ironic
* Implement list\_instance\_uuids() in Nova driver
* Modify the get console API
* Complete wrapping ironic client calls
* Add worker threads limit to \_check\_deploy\_timeouts task
* Use DiskPartitioner
* Better handling of missing drivers
* Remove hardcoded node id value
* cleanup docstring for drivers.utils.get\_node\_mac\_addresses
* Update ironic.conf.sample
* Make sync\_power\_states yield
* Refactor sync\_power\_states tests to not use DB
* Add DiskPartitioner
* Some minor clean up of various doc pages
* Fix message preventing overwrite the instance\_uuid
* Install guide for Ironic
* Refactor the driver fields mapping
* Imported Translations from Transifex
* Fix conductor.manager test assertion order
* Overwriting node\_is\_available in IronicDriver
* Sync oslo/common/excutils
* Sync oslo/config/generator
* Cherry pick oslo rpc HA fixes
* Add Ironic User Guide
* Remove a DB query for get\_ports\_by\_node()
* Fix missed stopping of conductor service
* Encapsulate Ironic client retry logic
* Do not sync power state for new invalidated nodes
* Make tests use Node object instead of dict
* Sync object list stuff from Nova
* Fix Node object version
* Cleanup running conductor services in tests
* Factor hash ring management out of the conductor
* Replace sfdisk with parted
* Handling validation in conductor consistently
* JsonPatch add operation on existing property
* Updated from global requirements
* Remove usage of Glance from PXE clean\_up()
* Fix hosts mapping for conductor's periodic tasks
* Supports filtering port by address
* Fix seamicro power.validate() method definition
* Update tox.ini to also run nova tests
* Updated from global requirements
* Fix messages formatting for \_sync\_power\_states
* Refactor nova.virt.ironic.driver get\_host\_stats
* Use xargs -0 instead of --null
* Change admin\_url help in ironic driver
* Sync base object code with Nova's
* Add Node.instance\_info field
* Fix self.fields on API Node object
* Show maintenance field in GET /nodes
* Move duplicated \_get\_node(s)\_mac\_addresses()
* Fix grammar in error string in pxe driver
* Reduce logging output from non-Ironic libraries
* Open Juno development
* Fix spelling error in conductor/manager
* Improved coverage for ironic API
* Manually update all translated strings
* Check that all po/pot files are valid
* If no swap is specified default to 1MB
* Fix Nova rescheduling tear down problem
* Remove obsolete po entries - they break translation jobs
* Add note to ssh about impact on ci testing
* Adds exact match filters to nova scheduler
* Clean up IronicNodeStates.update\_from\_compute\_node
* ironic\_host\_manager was missing two stats
* Imported Translations from Transifex
* Fix seamicro validate() method definition
* Remove some obsolete settings from DevStack doc
* Raise unexpected exceptions during destroy()
* Start using oslosphinx theme for docs
* Provide a new ComputeManager for Ironic
* Nova Ironic driver to set pxe\_swap\_mb in Ironic
* Fix strings post landing for c63e1d9f6
* Run periodic\_task in a with a dynamic timer
* Update SeaMicro to use MixinVendorInterface
* Run ipmi power status less aggressively
* Avoid API root controller dependency on v1 dir
* Update Neutron if mac address of the port changed
* Replace fixtures with mock in test\_keystone.py
* Decrease running time of SeaMicro driver tests
* Remove logging of exceptions from controller's methods
* Imported Translations from Transifex
* Fix missed exception raise in \_add\_driver\_fields
* Speed up ironic tests
* Pass no arguments to \_wait\_for\_provision\_state()
* Adds max retry limit to sync\_power\_state task
* Updated from global requirements
* Imported Translations from Transifex
* Stop incorrectly returning rescue: supported
* Correct version.py and update current version string
* Documentation for deploying DevStack /w Ironic
* Hide rescue interface from validate() output
* Change set\_console\_mode() to use greenthreads
* Fix help string for a glance option
* Expose API for fetching a single driver
* Change JsonEncodedType.impl to TEXT
* Fix traceback hook for avoid duplicate traces
* Fix 'spacing' parameters for periodic tasks
* Permit passing SSH keys into the Ironic API
* Better instance-not-found handling within IronicDriver
* Make sure auth\_url exists and is not versionless
* Conductor de-registers on shutdown
* Change deploy validation exception handling
* Suppress conductor logging of expected exceptions
* Remove unused method from timeutils
* Add admin\_auth\_token option for nova driver
* Remove redundant nova virt driver test
* Process public API list as regular expressions
* Enable pep8 tests for the Nova Ironic Driver
* Fix typo tenet -> tenant
* Stop logging paramiko's DEBUG and INFO messages
* Set boot device to PXE when deploying
* Driver utils should raise unsupported method
* Delete node while waiting for deploy
* Check BMC availability in ipmitool 'validate' method
* SeaMicro use device parameter for set\_boot\_device
* Make the Nova Ironic driver to wait for ACTIVE
* Fix misspelled impi to ipmi
* Do not use \_\_builtin\_\_ in python3
* Use range instead xrange to keep python 3.X compatibility
* Set the database.connection option default value
* PXE validate() to fail if no Ironic API URL
* Improve Ironic Conductor threading & locks
* Generic MixinVendorInterface using static mapping
* Conductor logs better error if seamicroclient missing
* Add TaskManager lock on change port data
* Nova ironic driver to retry on HTTP 503
* Mark hash\_replicas as experimental
* do\_node\_deploy() to use greenthreads
* Move v1 API tests to separate v1 directory
* Pin iso8601 logging to WARN
* Only fetch node once for vif actions
* Fix how nova ironic driver gets flavor information
* Imported Translations from Transifex
* API: Add sample() method to remaining models
* Import Nova "ironic" driver
* Remove errors from API documentation
* Add libffi-dev(el) dependency to quickstart
* Updated from global requirements
* Remove redundant default value None for dict.get
* Refactor vendor\_passthru to use conductor async workers
* Fix wrong exception raised by conductor for node
* Fix params order in assertEqual
* Sync the log\_handler from oslo
* Fix SeaMicro driver post landing for ba207b4aa0
* Implements SeaMicro VendorPassThru functionality
* Implement the SeaMicro Power driver
* Fix provision\_updated\_at deserialization
* Remove jsonutils from test\_rpcapi
* Do not delete a Node which is not powered off
* Add provision\_updated\_at to node's resource
* Prevent a node in maintenance from being deployed
* Allow clients to mark a node as in maintenance
* Support preserve\_ephemeral
* Updated from global requirements
* API: Expose a way to start/stop the console
* Add option to sync node power state from DB
* Make the PXE driver understand ephemeral disks
* Log deploy\_utils.deploy() erros in the PXE driver
* Removing get\_node\_power\_state, bumping RPC version
* Add timeout for waiting callback from deploy ramdisk
* Prevent GET /v1/nodes returning maintenance field
* Suggested improvements to \_set\_boot\_device
* Move ipminative \_set\_boot\_device to VendorPassthru
* Sync common db code from Oslo
* PXE clean\_up() to remove the pxe\_deploy\_key parameter
* Add support for custom libvirt uri
* Python 3: replace "im\_self" by "\_\_self\_\_"
* Fix race condition when deleting a node
* Remove extraneous vim configuration comments for ironic
* Do not allow POST ports and chassis internal attributes
* Do not allow POST node's internal attributes
* Unused 'pxe\_key\_data' & 'pxe\_instance\_name' info
* Add provision\_updated\_at field to nodes table
* Exclude nodes in DEPLOYWAIT state from \_sync\_power\_states
* Sync common config module from Oslo
* Get rid object model \`dict\` methods part 4
* Sync Oslo rpc module to Ironic
* Clarify and fix the dev-quickstart doc some more
* Do not use CONF as a default parameter value
* Simplify locking around acquiring Node resources
* Improve help strings
* Remove shebang lines from code
* Use six.moves.urllib.parse instead of urlparse
* Add string representation method to MultiType
* Fix test migrations for alembic
* Sync Oslo gettextutils module to Ironic
* NodeLocked returns 503 error status
* Supports OPERATOR priv level for ipmitool driver
* Correct assertEqual order from patch e69e41c99fb
* PXE
and SSH validate() method to check for a port * Task object as paramater to validate() methods * Fix dev-quick-start.rst post landing for 9d81333fd0 * API validates driver name for both POST and PATCH * Sync Oslo service module to Ironic * Move ipmitool \_set\_boot\_device to VendorPassthru * Use six.StringIO/BytesIO instead of StringIO.StringIO * Add JSONEncodedType with enforced type checking * Correct PXEPrivateMethodsTestCase.setUp * Don't raise MySQL 2013 'Lost connection' errors * Use the custom wsme BooleanType on the nodes api * Add wsme custom BooleanType type * Fix task\_manager acquire post landing for c4f2f26ed * Add common.service config options to sample * Removes use of timeutils.set\_time\_override * Replace assertEqual(None, \*) with assertIsNone in tests * Replace nonexistent mock assert methods with real ones * Log IPMI power on/off timeouts * Remove None as default value for dict get() * Fix autodoc formatting in pxe.py * Fix race condition when changing node states * Use StringType from WSME * Add testing and doc sections to docs/dev-quickstart * Implement \_update\_neutron in PXE driver * Remove \_load\_one\_plugin fallback * SSHPower driver support VMware ESXi * Make ironic-api not single threaded * Remove POST calls in tests for resource creation * Add topic to the change\_node\_maintenance\_mode() RPC method * Fix API inconsistence when changing node's states * Add samples to serve API through Apache mod\_wsgi * Add git dependency to quickstart docs * Add get\_console() method * Remove unnecessary json dumps/loads from tests * Add parameter for filtering nodes by maintenance mode * Rename and update ironic-deploy-helper rootwrap * Remove tox locale overrides * Updated from global requirements * Move eventlent monkeypatch out of cmd/ * Fix misspellings in ironic * Ensure parameter order of assertEqual correct * Return correct HTTP response codes for create ops * Fix broken doc links on the index page * Allow to tear-down a node waiting to be deployed * Improve NodeLocked exception message * Expose 'reservation' field of a node via API * Implement a multiplexed VendorPassthru example * Fix log and test for NeutronAPI.update\_port\_dhcp\_opts * Fix 'run\_as\_root' parameter check in utils * Handle multiple exceptions raised by jsonpatch * API tests to check for the return codes * Imported Translations from Transifex * Move test\_\_get\_nodes\_mac\_addresses * Removed duplicated function to create a swap fs * Updated from global requirements * Add futures to requirements * Fix missing keystone option in ironic.conf.sample * Adds Neutron support to Ironic * Replace CONF.set\_default with self.config * Fix ssh\_port type in \_parse\_driver\_info() from ssh.py * Improve handling of invalid input in HashRing class * Sync db.sqlalchemy code from Oslo * Add lockfile>=0.8 to requirements.txt * Remove net\_config\_template options * Remove deploy kernel and ramdisk global config * Update docstrings in ssh.py * SSHPower driver raises IronicExceptions * mock's return value for processutils.ssh\_execute * API: Add sample() method on Node * Update method doc strings in pxe.py * Minor documentation update * Removed unused exceptions * Bump version of sphinxcontrib-pecanwsme * Add missing parameter in call to \_load\_one\_plugin * Docstrings for ipmitool * alembic with initial migration and tests * Update RPC version post-landing for 9bc5f92fb * ipmitool's \_power\_status raises IPMIFailure * Add [keystone\_authtoken] to ironic.conf.sample * Updated from global requirements * Add 
comment about node.instance\_uuid * Run mkfs as root * Remove the absolute paths from ironic-deploy-helper.filters * PXE instance\_name is no longer mandatory * Remove unused config option - pxe\_deploy\_timeout * Delete the iscsi target * Imported Translations from Transifex * Fix non-unique tftp dir instance\_uuid * Fix non-unique pxe driver 'instance\_name' * Add missing "Filters" section to the ironic-images.filters * Use oslo.rootwrap library instead of local copy * Replace assertTrue with explicit assertIsInstance * Disallow new provision for nodes in maintenance * Add RPC method for node maintenance mode * Fix keystone get\_service\_url filtering * Use same MANAGER\_TOPIC variable * Implement consistent hashing of nodes to conductors * PXEAndSSH driver lacked vendor\_passthru * Use correct auth context inside pxe driver * sync\_power\_states handles missing driver info * Enable $pybasedir value in pxe.py * Correct SSHPowerDriver validate() exceptions * API to check the requested power state * Improve the node driver interfaces validation output * Remove copyright from empty files * Make param descriptions more consistent in API * Imported Translations from Transifex * Fix wrong message of pxe validator * Remove unused dict BYTE\_MULTIPLIERS * Implement API for provisioning * API to validate UUID parameters * Make chassis\_uuid field of nodes optional * Add unit tests for get\_nodeinfo\_list * Improve error handling in PXE \_continue\_deploy * Make param names more consistent in API * Sync config module from oslo * Fix wrong message of MACAlreadyExists * Avoid a race when associating instance\_uuid * Move and rename ValidTypes * Convert trycmd() to oslo's processutils * Improve error handling in validate\_vendor\_action * Passing nodes more consistently * Add 'next' link when GET maximum number of items * Check connectivity in SSH driver 'validate' method * GET /drivers to show a list of active conductors * Improve method to get list of active conductors * Refactor /node//state * Reworks Chassis validations * Reworks Node validations * Developer doc index page points to correct API docs * Fix auto-generated REST API formatting * Method to generate PXE options for Neutron ports * Strip '/' from api\_url string for PXE driver * Add driver interfaces validation * Command call should log the stdout and stderr * Add prepare, clean\_up, take\_over methods to deploy * PEP8-ify imports in test\_ipmitool * API: Add sample() method on Port and PortCollection * API: Validate and normalize address * Handle DBDuplicateEntry on Ports with same address * Imported Translations from Transifex * removed wrap\_exception method from ironic/common/exception.py * Rework patch validation on Ports * Add JsonPatchType class * Change default API auth to keystone-based * Clean up duplicated change-building code in objects * Add -U to pip install command in tox.ini * Updated from global requirements * Add config option for # of conductor replicas * Port StringType class from WSME trunk * Add tools/conf/check\_uptodate to tox.ini * Correct error with unicode mac address * Expose created\_at/updated\_at properties in the REST API * Import heartbeat\_interval opt in API * Add power control to PXE driver * Implement sync\_power\_state periodic task * Set the provision\_state to DEPLOYFAIL * Save PKI token in a file for PXE deploy ramdisk * API ports update for WSME 0.5b6 compliance * Add heartbeat\_interval to new 'conductor' cfg group * Add missing hash\_partition\_exponent config option * If no block devices 
abort deployment * Add missing link for drivers resource * Apply comments to 58558/4 post-landing * Replace removed xrange in Python3 * Imported Translations from Transifex * Use addCleanup() in test\_deploy\_utils * Allow Pecan to use 'debuginfo' response field * Do not allow API to expose error stacktrace * Add port address unique constraint for sqlite * Implement consistent hashing common methods * Sync some db changes from Oslo * Bump required version of sqlalchemy-migrate * Update ironic.conf.sample * Import uuidutils unit tests from oslo * Allow FakePower to return node objects power\_state * Adds doc strings to API FunctionalTest class * Use oslo's execute() and ssh\_execute() methods * Remove openstack.common.uuidutils * Sync common.context changes from olso * Remove oslo uuidutils.is\_uuid\_like call * Remove oslo uuidutils.generate\_uuid() call * Add troubleshoot option to PXE template * Imported Translations from Transifex * Add tftp\_server pattern in ironic.conf * Import HasLength object * ipmitool SHOULD accept empty username/password * Imported Translations from Transifex * Add missing ConfigNotFound exception * Imported Translations from Transifex * Add hooks to auto-generate REST API docs * Imported Translations from Transifex * Redefined default value of allowed\_rpc\_exception\_modules * Add last\_error usage to deploy and teardown methods * Support building wheels (PEP-427) * Import missing gettext \_ to fix Sphinx error * sync common.service from oslo * sync common.periodic\_task from oslo * sync common.notifier.\* from oslo * sync common.log from oslo * sync common.local from oslo * Sync common utils from Oslo * Rename parameters * Accessing a subresource that parent does not exist * Imported Translations from Transifex * Changes power\_state and adds last\_error field * Update openstack/common/lockutils * sync common.context from oslo * sync common.config.generator from oslo * Remove sqlalchemy-migrate 0.7.3 patching * Fix integer division compatibility in middleware * Fix node lock in PXE driver * Imported Translations from Transifex * Register API options under the 'api' group * Supporting both Python 2 and Python 3 with six * Supports get node by instance uuid in API * Imported Translations from Transifex * Check invalid uuid for get-by-instance db api * Fix error handling in ssh driver * Replace \_\_metaclass\_\_ * Supporting both Python 2 and Python 3 with six * Pass Ironic API url to deploy ramdisk in PXE driver * Remove 'basestring' from objects utils * Allows unicode description for chassis * Fix a typo in the name of logger method exception * Don't use deprecated module commands * Comply with new hacking requirements * Improve the API doc spec for chassis * Improve the API doc spec for node * Updated from global requirements * Fix i18N compliance * Add wrapper for keystone service catalog * Fix test node manager * Expose /drivers on the API * Update mailmap for Joe Gordon * Add mailmap file * Implement /nodes/UUID/vendor\_passthru in the API * Add context to TaskManager * Regenerate the sample config file * Conductors maintan driver list in the DB * Group and unify ipmi configurations * Fix a few missing i18n * Fix status codes in node controller * Fix exceptions handling in controllers * Updated from global requirements * Support uniform MAC address with colons * Remove redundant test stubs from conductor/manager * Remove several old TODO messages * Supports paginate query for two get nodes DB APIs * Remove \_driver\_factory class attribute * Fixes 
RootController to allow URL without version tag * Don't allow deletion of associated node * Remove duplicated db\_api.get\_instance() from tests * Updated from global requirements * Do not use string concatenation for localized strings * Remove the NULL state * Add DriverFactory * Adjust native ipmi default wait time * Be more patient with IPMI and BMC * Implement db get\_[un]associated\_nodes * Remove unused nova specific files * Removes unwanted mox and fixture files * Removes stubs from unit tests * Remove unused class/file * Remove driver validation on node update * Consolidates TestCase and BaseTestCase * Fix policies * Improve error message for ssh * Fix datetime format in FakeCache * Fix power\_state set to python object repr * Updated from global requirements * Replaces mox with mock for test\_deploy\_utils * Replaces mox with mock in api's unit tests * Replaces mox with mock in objects' unit tests * Replaces mox with mock for conductor unit tests * fix ssh driver exec command issues * Fix exceptions error codes * Remove obsolete redhat-eventlet.patch * Replaces mox with mock for test\_utils * Replaces mox with mock for ssh driver unit tests * Remove nested 'ipmi' dict from driver\_info * Replace tearDown with addCleanup in unit tests * Remove nested 'ssh' dict from driver\_info * Remove nested 'pxe' dict from driver\_info * Save and validate deployment key in PXE driver * Implement deploy and tear\_down conductor methods * Use mock to do unit tests for pxe driver * Code clean in node controller * Use mock to do unit tests for ipminative driver * Replaces mox with mock for ipmitool driver unit tests * Fix parameter name in wsexpose * Rename start\_power\_state\_change to change\_node\_power\_state * Mount iSCSI target and 'dd' in PXE driver * Add tests for api/utils.py * Check for required fields on ports * Replace Cheetah with Jinja2 * Update from global requirements * Upgrade tox to 1.6 * Add API uuid <-> id mapping * Doc string and minor clean up for 41976 * Update error return code to match new Pecan release * Add vendor\_passthru method to RPC API * Integer types support in api * Add native ipmi driver * API GET to return only minimal data * Fix broken links * Collection named based on resource type * Remove nova specific tests * Changes documentation hyperlinks to be relative * Replace OpenStack LLC with OpenStack Foundation * Force textmode consoles * Implemented start\_power\_state\_change In Conductor * Updates documentation for tox use * Drop setuptools\_git dependency * Fix tests return codes * Fix misused assertTrue in unit tests * Prevent updates while state change is in progress * Use localisation where user visible strings are used * Update only the changed fields * Improve parameters validate in PXE driver * Rename ipmi driver to ipmitool * Remove jsonutils from PXE driver * Expose the vendor\_passthru resource * Driver's validation during node update process implemented * Public API * Remove references for the 'task\_state' property * Use 'provision\_state' in PXE driver * Updating resources with PATCH * Add missing unique constraint * Fix docstring typo * Removed templates directory in api config * Added upper version boundry for six * Sync models with migrations * Optimization reserve and release nodes db api methods * Add missing foreign key * Porting nova pxe driver to ironic * API Nodes states * Fix driver loading * Move glance image service client from nova and cinder into ironic * Implement the root and v1 entry points of the API * Expose subresources for 
Chassis and Node * Add checks locked nodes to db api * Update the dev docs with driver interface description * Add missing tests for chassis API * Delete controller to make code easy to read and understood * Disable deleting a chassis that contains nodes * Update API documentation * Add Pagination of collections across the API * Fix typo in conductor manager * Remove wsme validate decorator from API * Add missing tests for ports API * Modify is\_valid\_mac() for support unicode strings * Add DB and RPC method doc strings to hook.py * Delete unused templates * Use fixture from Oslo * Move "opportunistic" db migrations tests from Nova * Build unittests for nodes api * make api test code more readable * Add links to API Objects * Delete Ironic context * Add tests for existing db migrations * Add common code from Oslo for db migrations test * Remove extra pep8/flake8/pyflakes requirements * Sync requirements with OpenStack/requirements * Fix up API tests before updating hacking checks * Add RPC methods for updating nodes * Run extract\_messages * Keystone authentiation * Add serializer param to RPC service * Import serialization and nesting from Nova Objects * Implement chassis api actions * update requires to prevent version cap * Change validate() to raise instead of returning T/F * Add helpers for single-node tasks * Implement port api action * Modify gitignore to ignore sqlite * Update resource manager for fixed stevedore issue * Add dbapi functions * Remove suds requirement * Sync install\_venv\_common from oslo * Move mysql\_engine option to [database] group * Re-define 'extra' as dict\_or\_none * Added Python-2.6 to the classifier * Rename "manager" to "conductor" * Port from nova: Fix local variable 'root\_uuid' ref * Created a package for API controllers V1 * Sync requirements with OpenStack/requirements * Remove unused APICoverage class * Sync fileutils from oslo-incubator * Sync strutils from oslo-incubator * Add license header * Update get\_by\_uuid function doc in chassis * Fix various Python 2.x->3.x compat issues * Improve unit tests for API * Add Chassis object * Add Chassis DB model and DB-API * Delete associated ports after deleting a node * Virtual power driver is superceded by ssh driver * Add conf file generator * Refactored query filters * Add troubleshoot to baremetal PXE template * Add err\_msg param to baremetal\_deploy\_helper * Retry the sfdisk command up to 3 times * Updated API Spec for new Drivers * Improve IPMI's \_make\_password\_file method * Remove spurious print statement from update\_node * Port middleware error handler from ceilometer API * Add support for GET /v1/nodes to return a list * Add object support to API service * Remove the unused plugin framework * Improve tests for Node and Port DB objects * SSH driver doesn't need to query database * Create Port object * Add uuid to Port DB model * Delete Flask Dependence * Writing Error: nodess to nodes * Create the Node object * Restructuring driver API and inheritance * Remove explicit distribute depend * Bump version of PBR * Remove deleted[\_at] from base object * Make object actions pass positional arguments * Fix relative links in architecture doc * Reword architecture driver description * Remove duplication from README, add link to docs * Port base object from Nova * Fix ironic-rootwrap capability * Add ssh power manager * Prevent IPMI actions from colliding * Add TaskManager tests and fix decorator * Mocked NodeManager can load and mock real drivers * Add docs for task\_manager and tests/manager/utils 
* Fix one typo in index.rst * Add missing 'extra' field to models.nodes * More doc updates * Remove the old README * More doc updates * Minor fixes to sphinx docs * Added API v1 Specification * Add initial sphinx docs, based on README * Initial skeleton for an RPC layer * Log configuration values on API startup * Don't use pecan to configure logging * Move database.backend option import * Remove unused authentication CLI options * Rename TestCase.flags() to TestCase.config() * Copy the RHEL6 eventlet workaround from Oslo * Sync new database config group from oslo-incubator * Minor doc change for manager and resorce\_manager * Add support for Sphinx Docs * Update IPMI driver to work with resource manager * Add validate\_driver\_info to driver classes * Implement Task and Resource managers * Update [reserve|release]\_nodes to accept a tag * More updates to the README * Reimplement reserve\_nodes and release\_nodes * Rename the 'ifaces' table to 'ports' * Change 'nodes' to use more driver-specific JSON * Update driver names and base class * Stop creating a new db IMPL for every request * Fix double "host" option * Sync safe changes from oslo-incubator * Sync rpc changes from oslo-incubator * Sync log changes from oslo-incubator * Sync a rootwrap KillFilter fix from oslo-incubator * Sync oslo-incubator python3 changes * Add steps to README.rst * Fix fake bmc driver * move ironic docs to top level for ease of discovery * Update the README file development section * Add some API definitions to the README * Update the distribute dependency version * Add information to the project README * Fixes test\_update\_node by testing updated node * Fix pep8 errors and make it pass Jenkins tests * Update IPMI driver for new base class * Add new base and fake driver classes * Delete old base and fake classes * Add a few fixes for the API * Move strong nova depenencies into temporary dir * Update IPMI for new DB schema * Add unit tests for DB API * Remove tests for old DB * Add tests for ironic-dbsync * Remove ironic\_manage * Implement GET /node/ifaces/ in API * Update exception.py * Update db models and API * Implement skeleton for a new DB backend * Remove the old db implementation * Implement initial skeleton of a manager service * Implement initial draft of a Pecan-based API * Fix IPMI tests * Move common things to ironic.common * Fix failing db and deploy\_helper tests * un-split the db backend * Rename files and fix things * Import add'l files from Nova * update openstack-common.conf and import from oslo * Added .testr.conf * Renamed nova to ironic * Fixed hacking, pep8 and pyflakes errors * Added project infrastructure needs * Fix baremetal get\_available\_nodes * Improve Python 3.x compatibility * Import and convert to oslo loopingcall * baremetal: VirtualPowerDriver uses mac addresses in bm\_interfaces * baremetal: Change input for sfdisk * baremetal: Change node api related to prov\_mac\_address * Remove "undefined name" pyflake errors * Remove unnecessary LOG initialisation * Define LOG globally in baremetal\_deploy\_helper * Only call getLogger after configuring logging * baremetal: Integrate provisioning and non-provisioning interfaces * Move console scripts to entrypoints * baremetal: Drop unused columns in bm\_nodes * Remove print statements * Delete tests.baremetal.util.new\_bm\_deployment() * Adds Tilera back-end for baremetal * Change type of ssh\_port option from Str to Int * Virtual Power Driver list running vms quoting error * xenapi: Fix reboot with hung volumes * Make bm model's 
deleted column match database * Correct substring matching of baremetal VPD node names * Read baremetal images from extra\_specs namespace * Compute manager should remove dead resources * Add ssh port and key based auth to VPD * Add instance\_type\_get() to virt api * Don't blindly skip first migration * BM Migration 004: Actually drop column * Update OpenStack LLC to Foundation * Sync nova with oslo DB exception cleanup * Fix exception handling in baremetal API * BM Migrations 2 & 3: Fix drop\_column statements * Remove function redefinitions * Move some context checking code from sqlalchemy * Baremetal driver returns accurate list of instance * Identify baremetal nodes by UUID * Improve performance of baremetal list\_instances * Better error handling in baremetal spawn & destroy * Wait for baremetal deploy inside driver.spawn * Add better status to baremetal deployments * Use oslo-config-2013.1b4 * Delete baremetal interfaces when their parent node is deleted * VirtualPowerDriver catches ProcessExecutionError * Don't modify injected\_files inside PXE driver * Remove nova.db call from baremetal PXE driver * Add a virtual PowerDriver for Baremetal testing * Recache or rebuild missing images on hard\_reboot * Use oslo database code * Fixes 'not in' operator usage * Make sure there are no unused import * Enable N302: Import modules only * Correct a format string in virt/baremetal/ipmi.py * Add REST api to manage bare-metal nodes * Baremetal/utils should not log certain exceptions * PXE driver should rmtree directories it created * Add support for Option Groups in LazyPluggable * Remove obsolete baremetal override of MAC addresses * PXE driver should not accept empty kernel UUID * Correcting improper use of the word 'an' * Export the MAC addresses of nodes for bare-metal * Break out a helper function for working with bare metal nodes * Keep self and context out of error notification payload * Tests for PXE bare-metal provisioning helper server * Change ComputerDriver.legacy\_nwinfo to raise by default * fix new N402 errors * Remove unused baremetal PXE options * Move global service networking opts to new module * Fix N402 for nova/virt * Cope better with out of sync bm data * Fix baremetal VIFDriver * CLI for bare-metal database sync * attach/detach\_volume() take instance as a parameter * Convert short doc strings to be on one line * Check admin context in bm\_interface\_get\_all() * Provide a PXE NodeDriver for the Baremetal driver * Refactor periodic tasks * Add helper methods to nova.paths * Move global path opts in nova.paths * Removes unused imports * Improve baremetal driver error handling * baremetal power driver takes \*\*kwargs * Implement IPMI sub-driver for baremetal compute * Fix tests/baremetal/test\_driver.py * Move baremetal options to [BAREMETAL] OptGroup * Remove session.flush() and session.query() monkey patching * Remove unused imports * Removed unused imports * Parameterize database connection in test.py * Baremetal VIF and Volume sub-drivers * New Baremetal provisioning framework * Move baremetal database tests to fixtures * Add exceptions to baremetal/db/api * Add blank nova/virt/baremetal/\_\_init\_\_.py * Move sql options to nova.db.sqlalchemy.session * Use CONF.import\_opt() for nova.config opts * Remove nova.config.CONF * remove old baremetal driver * Remove nova.flags * Fix a couple uses of FLAGS * Added separate bare-metal MySQL DB * Switch from FLAGS to CONF in tests * Updated scheduler and compute for multiple capabilities * Switch from FLAGS to CONF in 
nova.virt * Make ComputeDrivers send hypervisor\_hostname * Introduce VirtAPI to nova/virt * Migrate to fileutils and lockutils * Remove ComputeDriver.update\_host\_status() * Rename imagebackend arguments * Move ensure\_tree to utils * Keep the ComputeNode model updated with usage * Don't stuff non-db data into instance dict * Making security group refresh more specific * Use dict style access for image\_ref * Remove unused InstanceInfo class * Remove list\_instances\_detail from compute drivers * maint: remove an unused import in libvirt.driver * Fixes bare-metal spawn error * Refactoring required for blueprint xenapi-live-migration * refactor baremetal/proxy => baremetal/driver * Switch to common logging * Make libvirt LoopingCalls actually wait() * Imports cleanup * Unused imports cleanup (folsom-2) * convert virt drivers to fully dynamic loading * cleanup power state (partially implements bp task-management) * clean-up of the bare-metal framework * Added a instance state update notification * Update pep8 dependency to v1.1 * Alphabetize imports in nova/tests/ * Make use of openstack.common.jsonutils * Alphabetize imports in nova/virt/ * Replaces exceptions.Error with NovaException * Log instance information for baremetal * Improved localization testing * remove unused flag: baremetal\_injected\_network\_template baremetal\_uri baremetal\_allow\_project\_net\_traffic * Add periodic\_fuzzy\_delay option * HACKING fixes, TODO authors * Add pybasedir and bindir options * Only raw string literals should be used with \_() * Remove unnecessary setting up and down of mox and stubout * Remove unnecessary variables from tests * Move get\_info to taking an instance * Exception cleanup * Backslash continuations (nova.tests) * Replace ApiError with new exceptions * Standardize logging delaration and use * remove unused and buggy function from baremetal proxy * Backslash continuations (nova.virt.baremetal) * Remove the last of the gflags shim layer * Implements blueprint heterogeneous-tilera-architecture-support * Deleting test dir from a pull from trunk * Updated to remove built docs * initial commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/LICENSE0000664000175000017500000002607500000000000014610 0ustar00zuulzuul00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/MANIFEST.in0000664000175000017500000000013600000000000015327 0ustar00zuulzuul00000000000000include AUTHORS include ChangeLog exclude .gitignore exclude .gitreview global-exclude *.pyc ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724334195.7776763 ironic-lib-6.2.0/PKG-INFO0000664000175000017500000000345100000000000014671 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: ironic-lib Version: 6.2.0 Summary: Ironic common library Home-page: https://docs.openstack.org/ironic-lib/ Author: OpenStack Ironic Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ========== ironic-lib ========== Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/ironic-lib.svg :target: https://governance.openstack.org/tc/reference/tags/index.html Overview -------- A common library to be used **exclusively** by projects under the `Ironic governance `_. 
Running Tests ------------- To run tests in virtualenvs (preferred):: $ sudo pip install tox $ tox To run tests in the current environment:: $ sudo pip install -r requirements.txt -r test-requirements.txt $ stestr run Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Requires-Python: >=3.8 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/README.rst0000664000175000017500000000117600000000000015265 0ustar00zuulzuul00000000000000========== ironic-lib ========== Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/ironic-lib.svg :target: https://governance.openstack.org/tc/reference/tags/index.html Overview -------- A common library to be used **exclusively** by projects under the `Ironic governance `_. Running Tests ------------- To run tests in virtualenvs (preferred):: $ sudo pip install tox $ tox To run tests in the current environment:: $ sudo pip install -r requirements.txt -r test-requirements.txt $ stestr run ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/TESTING.rst0000664000175000017500000000505100000000000015441 0ustar00zuulzuul00000000000000=========================== Testing Your OpenStack Code =========================== ------------ A Quickstart ------------ This is designed to be enough information for you to run your first tests. Detailed information on testing can be found here: https://wiki.openstack.org/wiki/Testing *Install pip*:: $ [apt-get | yum] install python-pip More information on pip here: http://www.pip-installer.org/en/latest/ *Use pip to install tox*:: $ pip install tox Run The Tests ------------- *Navigate to the project's root directory and execute*:: $ tox Note: completing this command may take a long time (depends on system resources); also, you might not see any output until tox is complete.
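If you are not sure what environments a given project defines, tox can list them for you before you pick one; the ``-l`` option used here is standard tox behavior, not anything specific to this repository::

    $ tox -l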
Information about tox can be found here: http://testrun.org/tox/latest/ Run The Tests in One Environment -------------------------------- Tox will run your entire test suite in the environments specified in the project tox.ini:: [tox] envlist = <list of env names> To run the test suite in just one of the environments in envlist execute:: $ tox -e <env> so for example, *run the test suite with the default OS version of Python 3*:: $ tox -e py3 or select a specific version, *run the test suite using Python 3.6*:: $ tox -e py36 Other useful tox options that can be specified when running the test suite are:: -v to increase verbosity of the output, can be repeated up to three times based on the desired verbosity level -r to recreate the virtual environment from scratch Run One Test ------------ To run individual tests with tox: if testr is in tox.ini, for example:: [testenv] includes "python setup.py testr --slowest --testr-args='{posargs}'" run individual tests with the following syntax:: $ tox -e <env> -- path.to.module:Class.test so for example, *run the cpu_unlimited test in Nova*:: $ tox -e py36 -- nova.tests.test_claims:ClaimTestCase.test_cpu_unlimited if nose is in tox.ini, for example:: [testenv] includes "nosetests {posargs}" run individual tests with the following syntax:: $ tox -e <env> -- --tests path.to.module:Class.test so for example, *run the list test in Glance*:: $ tox -e py36 -- --tests glance.tests.unit.test_auth.py:TestImageRepoProxy.test_list Need More Info? --------------- More information about testr: https://wiki.openstack.org/wiki/Testr More information about tox: https://tox.readthedocs.io/en/latest/ More information about nose: https://nose.readthedocs.org/en/latest/ More information about testing OpenStack code can be found here: https://wiki.openstack.org/wiki/Testing ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/bindep.txt0000664000175000017500000000035700000000000015600 0ustar00zuulzuul00000000000000# these are needed to compile Python dependencies from sources python3-all-dev [platform:dpkg test] python3-devel [(platform:rpm test)] build-essential [platform:dpkg test] libffi-dev [platform:dpkg test] libffi-devel [platform:rpm test] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724334195.761677 ironic-lib-6.2.0/doc/0000775000175000017500000000000000000000000014336 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/doc/requirements.txt0000664000175000017500000000013600000000000017622 0ustar00zuulzuul00000000000000sphinx>=2.0.0 # BSD openstackdocstheme>=2.2.1 # Apache-2.0 sphinxcontrib-apidoc>=0.2.0 # BSD ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724334195.761677 ironic-lib-6.2.0/doc/source/0000775000175000017500000000000000000000000015636 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/doc/source/conf.py0000664000175000017500000000447200000000000017144 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
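# Descriptive note on the extensions configured below (no settings are
# changed here): 'sphinx.ext.viewcode' links the rendered documentation to
# highlighted source code, 'openstackdocstheme' provides the 'openstackdocs'
# HTML theme selected later in this file, and 'sphinxcontrib.apidoc' drives
# the autogenerated API reference through the apidoc_* options below.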
extensions = [ 'sphinx.ext.viewcode', 'openstackdocstheme', 'sphinxcontrib.apidoc' ] wsme_protocols = ['restjson'] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. copyright = 'OpenStack Foundation' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['ironic_lib'] # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # -- sphinxcontrib.apidoc configuration -------------------------------------- apidoc_module_dir = '../../ironic_lib' apidoc_output_dir = 'reference/api' apidoc_excluded_paths = [ 'tests', ] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. #html_theme_path = ["."] #html_static_path = ['_static'] html_theme = 'openstackdocs' # openstackdocstheme options openstackdocs_repo_name = 'openstack/ironic-lib' openstackdocs_pdf_link = True openstackdocs_use_storyboard = True # Output file base name for HTML help builder. htmlhelp_basename = 'ironic-libdoc' latex_use_xindy = False # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ( 'index', 'doc-ironic-lib.tex', 'Ironic Lib Documentation', 'OpenStack Foundation', 'manual' ), ] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724334195.765677 ironic-lib-6.2.0/doc/source/contributor/0000775000175000017500000000000000000000000020210 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/doc/source/contributor/index.rst0000664000175000017500000000545400000000000022061 0ustar00zuulzuul00000000000000====================== Welcome to Ironic-lib! ====================== Overview ======== Ironic-lib is a library for use by projects under Bare Metal governance only. This documentation is intended for developer use only. If you are looking for documentation for deployers, please see the `ironic documentation `_. Metrics ======= Ironic-lib provides a pluggable metrics library as of the 2.0.0 release. Current provided backends are the default, 'noop', which discards all data, and 'statsd', which emits metrics to a statsd daemon over the network. The metrics backend to be used is configured via ``CONF.metrics.backend``. How this configuration is set in practice may vary by project. The typical usage of metrics is to initialize and cache a metrics logger, using the `get_metrics_logger()` method in `ironic_lib.metrics_utils`, then use that object to decorate functions or create context managers to gather metrics. The general convention is to provide the name of the module as the first argument to set it as the prefix, then set the actual metric name to the method name. For example: .. 
code-block:: python from ironic_lib import metrics_utils METRICS = metrics_utils.get_metrics_logger(__name__) @METRICS.timer('my_simple_method') def my_simple_method(arg, matey): pass def my_complex_method(arg, matey): with METRICS.timer('complex_method_pt_1'): do_some_work() with METRICS.timer('complex_method_pt_2'): do_more_work() There are three different kinds of metrics: - **Timers** measure how long the code in the decorated method or context manager takes to execute, and emits the value as a timer metric. These are useful for measuring performance of a given block of code. - **Counters** increment a counter each time a decorated method or context manager is executed. These are useful for counting the number of times a method is called, or the number of times an event occurs. - **Gauges** return the value of a decorated method as a metric. This is useful when you want to monitor the value returned by a method over time. Additionally, metrics can be sent directly, rather than using a context manager or decorator, when appropriate. When used in this way, ironic-lib will simply emit the value provided as the requested metric type. For example: .. code-block:: python from ironic_lib import metrics_utils METRICS = metrics_utils.get_metrics_logger(__name__) def my_node_failure_method(node): if node.failed: METRICS.send_counter(node.uuid, 1) The provided statsd backend natively supports all three metric types. For more information about how statsd changes behavior based on the metric type, see `statsd metric types `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/doc/source/index.rst0000664000175000017500000000076000000000000017502 0ustar00zuulzuul00000000000000======================== Ironic-lib Documentation ======================== Ironic-lib is a library for use by projects under Bare Metal governance only. This documentation is intended for developer use only. If you are looking for documentation for deployers, please see the `ironic documentation `_. .. toctree:: :maxdepth: 1 Installation and Usage documentation reference/index * :ref:`genindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724334195.765677 ironic-lib-6.2.0/doc/source/reference/0000775000175000017500000000000000000000000017574 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/doc/source/reference/index.rst0000664000175000017500000000020600000000000021433 0ustar00zuulzuul00000000000000=========================== Autogenerated API Reference =========================== .. 
toctree:: :maxdepth: 1 api/modules.rst ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724334195.7576773 ironic-lib-6.2.0/etc/0000775000175000017500000000000000000000000014344 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724334195.7576773 ironic-lib-6.2.0/etc/ironic/0000775000175000017500000000000000000000000015627 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724334195.765677 ironic-lib-6.2.0/etc/ironic/rootwrap.d/0000775000175000017500000000000000000000000017726 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/etc/ironic/rootwrap.d/ironic-lib.filters0000664000175000017500000000165100000000000023352 0ustar00zuulzuul00000000000000# An ironic-lib.filters to be used with rootwrap command. # The following commands should be used in filters for disk manipulation. # This file should be owned by (and only-writeable by) the root user. # NOTE: # if you update this file, you will also need to adjust the # ironic-lib.filters from the ironic module. [Filters] # ironic_lib/disk_utils.py blkid: CommandFilter, blkid, root blockdev: CommandFilter, blockdev, root hexdump: CommandFilter, hexdump, root lsblk: CommandFilter, lsblk, root wipefs: CommandFilter, wipefs, root sgdisk: CommandFilter, sgdisk, root partprobe: CommandFilter, partprobe, root # ironic_lib/utils.py mkswap: CommandFilter, mkswap, root mkfs: CommandFilter, mkfs, root dd: CommandFilter, dd, root mount: CommandFilter, mount, root # ironic_lib/disk_partitioner.py fuser: CommandFilter, fuser, root parted: CommandFilter, parted, root # ironic_lib/qemu_img.py qemu-img: CommandFilter, qemu-img, root ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/extra-requirements.txt0000664000175000017500000000037700000000000020205 0ustar00zuulzuul00000000000000# This file mirrors all extra requirements from setup.cfg and must be kept # in sync. It is used both in unit tests and when building docs. keystoneauth1>=4.2.0 # Apache-2.0 os-service-types>=1.2.0 # Apache-2.0 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1724334195.765677 ironic-lib-6.2.0/ironic_lib/0000775000175000017500000000000000000000000015702 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/__init__.py0000664000175000017500000000153200000000000020014 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
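# NOTE: the try/except below makes 'ironic_lib' an old-style namespace
# package, allowing the namespace to be spread across more than one
# distribution. pkg_resources.declare_namespace() is the setuptools
# mechanism for this; pkgutil.extend_path() is the standard-library
# fallback used when setuptools is not importable.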
# This ensures the ironic_lib namespace is defined try: import pkg_resources pkg_resources.declare_namespace(__name__) except ImportError: import pkgutil __path__ = pkgutil.extend_path(__path__, __name__) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/auth_basic.py0000664000175000017500000001566200000000000020370 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import binascii import logging import bcrypt import webob from ironic_lib.common.i18n import _ from ironic_lib import exception LOG = logging.getLogger(__name__) class BasicAuthMiddleware(object): """Middleware which performs HTTP basic authentication on requests """ def __init__(self, app, auth_file): self.app = app self.auth_file = auth_file validate_auth_file(auth_file) def format_exception(self, e): result = {'error': {'message': str(e), 'code': e.code}} headers = list(e.headers.items()) + [ ('Content-Type', 'application/json') ] return webob.Response(content_type='application/json', status_code=e.code, json_body=result, headerlist=headers) def __call__(self, env, start_response): try: token = parse_header(env) username, password = parse_token(token) env.update(authenticate(self.auth_file, username, password)) return self.app(env, start_response) except exception.IronicException as e: response = self.format_exception(e) return response(env, start_response) def authenticate(auth_file, username, password): """Finds username and password match in Apache style user auth file The user auth file format is expected to comply with Apache documentation[1] however the bcrypt password digest is the *only* digest format supported. 
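An entry in the file therefore looks like the following (the digest shown is a placeholder, not a real bcrypt hash)::

    myuser:$2b$12$<bcrypt digest>
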
[1] https://httpd.apache.org/docs/current/misc/password_encryptions.html :param: auth_file: Path to user auth file :param: username: Username to authenticate :param: password: Password encoded as bytes :returns: A dictionary of WSGI environment values to append to the request :raises: Unauthorized, if no file entries match supplied username/password """ line_prefix = username + ':' try: with open(auth_file, 'r') as f: for line in f: entry = line.strip() if entry and entry.startswith(line_prefix): return auth_entry(entry, password) except OSError as exc: LOG.error('Problem reading auth user file: %s', exc) raise exception.ConfigInvalid( error_msg=_('Problem reading auth user file')) # reached end of file with no matches LOG.info('User %s not found', username) unauthorized() def auth_entry(entry, password): """Compare a password with a single user auth file entry :param: entry: Line from auth user file to use for authentication :param: password: Password encoded as bytes :returns: A dictionary of WSGI environment values to append to the request :raises: Unauthorized, if the entry doesn't match supplied password or if the entry is crypted with a method other than bcrypt """ username, crypted = parse_entry(entry) if not bcrypt.checkpw(password, crypted): LOG.info('Password for %s does not match', username) unauthorized() return { 'HTTP_X_USER': username, 'HTTP_X_USER_NAME': username } def validate_auth_file(auth_file): """Read the auth user file and validate its correctness :param: auth_file: Path to user auth file :raises: ConfigInvalid on validation error """ try: with open(auth_file, 'r') as f: for line in f: entry = line.strip() if entry and ':' in entry: parse_entry(entry) except OSError: raise exception.ConfigInvalid( error_msg=_('Problem reading auth user file: %s') % auth_file) def parse_entry(entry): """Extract the username and crypted password from a user auth file entry :param: entry: Line from auth user file to use for authentication :returns: a tuple of username and crypted password :raises: ConfigInvalid if the password is not in the supported bcrypt format """ username, crypted_str = entry.split(':', maxsplit=1) crypted = crypted_str.encode('utf-8') if crypted[:4] not in (b'$2y$', b'$2a$', b'$2b$'): error_msg = _('Only bcrypt digested passwords are supported for ' '%(username)s') % {'username': username} raise exception.ConfigInvalid(error_msg=error_msg) return username, crypted def parse_token(token): """Parse the token portion of the Authorization header value :param: token: Token value from basic authorization header :returns: tuple of username, password :raises: Unauthorized, if username and password could not be parsed for any reason """ try: if isinstance(token, str): token = token.encode('utf-8') auth_pair = base64.b64decode(token, validate=True) (username, password) = auth_pair.split(b':', maxsplit=1) return (username.decode('utf-8'), password) except (TypeError, binascii.Error, ValueError) as exc: LOG.info('Could not decode authorization token: %s', exc) raise exception.BadRequest(_('Could not decode authorization token')) def parse_header(env): """Parse WSGI environment for Authorization header of type Basic :param: env: WSGI environment to get header from :returns: Token portion of the header value :raises: Unauthorized, if header is missing or if the type is not Basic """ try: auth_header = env.pop('HTTP_AUTHORIZATION') except KeyError: LOG.info('No authorization token received') unauthorized(_('Authorization required')) try: auth_type, token = 
auth_header.strip().split(maxsplit=1) except (ValueError, AttributeError) as exc: LOG.info('Could not parse Authorization header: %s', exc) raise exception.BadRequest(_('Could not parse Authorization header')) if auth_type.lower() != 'basic': msg = _('Unsupported authorization type "%s"') % auth_type LOG.info(msg) raise exception.BadRequest(msg) return token def unauthorized(message=None): """Raise an Unauthorized exception to prompt for basic authentication :param: message: Optional message for exception :raises: Unauthorized with WWW-Authenticate header set """ if not message: message = _('Incorrect username or password') raise exception.Unauthorized(message) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/capabilities.py0000664000175000017500000001154000000000000020706 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Code for working with capabilities.""" import json import logging from ironic_lib.common.i18n import _ LOG = logging.getLogger(__name__) def _parse_old_format(cap_str, skip_malformed=True): """Extract capabilities from string. :param cap_str: A string in the key1:value1,key2:value2 format. :param skip_malformed: Whether to skip malformed items or raise ValueError. :return: a dictionary """ capabilities = {} for node_capability in cap_str.split(','): parts = node_capability.split(':', 1) if len(parts) == 2 and parts[0] and parts[1]: capabilities[parts[0]] = parts[1] else: if skip_malformed: LOG.warning("Ignoring malformed capability '%s'. " "Format should be 'key:val'.", node_capability) else: raise ValueError( _("Malformed capability %s. Format should be 'key:val'") % node_capability) return capabilities def parse(capabilities, compat=True, skip_malformed=False): """Extract capabilities from provided object. The capabilities value can either be a dict, or a json str, or a key1:value1,key2:value2 formatted string (if compat is True). If None, an empty dictionary is returned. :param capabilities: The capabilities value. Can either be a dict, or a json str, or a key1:value1,key2:value2 formatted string (if compat is True). :param compat: Whether to parse the old format key1:value1,key2:value2. :param skip_malformed: Whether to skip malformed items or raise ValueError. :returns: A dictionary with the capabilities if found and well formatted, otherwise an empty dictionary. :raises: TypeError if the capabilities are of invalid type. :raises: ValueError on a malformed capability if skip_malformed is False or on invalid JSON with compat is False. 
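Illustrative inputs and outputs::

    parse('{"boot_mode": "uefi"}')  # -> {'boot_mode': 'uefi'}
    parse('boot_mode:uefi,raid:1')  # old format -> {'boot_mode': 'uefi', 'raid': '1'}
    parse(None)                     # -> {}
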
""" if capabilities is None: return {} elif isinstance(capabilities, str): try: return json.loads(capabilities) except (ValueError, TypeError) as exc: if compat: return _parse_old_format(capabilities, skip_malformed=skip_malformed) else: raise ValueError( _('Invalid JSON capabilities %(value)s: %(error)s') % {'value': capabilities, 'error': exc}) elif not isinstance(capabilities, dict): raise TypeError( _('Invalid capabilities, expected a string or a dict, got %s') % capabilities) else: return capabilities def combine(capabilities_dict, skip_none=False): """Combine capabilities into the old format. :param capabilities_dict: Capabilities as a mapping. :param skip_none: If True, skips all items with value of None. :returns: Capabilities as a string key1:value1,key2:value2. """ return ','.join(["%s:%s" % (key, value) for key, value in capabilities_dict.items() if not skip_none or value is not None]) def update_and_combine(capabilities, new_values, skip_malformed=False, skip_none=False): """Parses capabilities, updated them with new values and re-combines. :param capabilities: The capabilities value. Can either be a dict, or a json str, or a key1:value1,key2:value2 formatted string (if compat is True). :param new_values: New values as a dictionary. :param skip_malformed: Whether to skip malformed items or raise ValueError. :param skip_none: If True, skips all items with value of None. :returns: Capabilities in the old format (key1:value1,key2:value2). :raises: TypeError if the capabilities are of invalid type. :raises: ValueError on a malformed capability if skip_malformed is False. """ if not isinstance(new_values, dict): raise TypeError( _("Cannot update capabilities. The new capabilities should be in " "a dictionary. Provided value is %s") % new_values) capabilities = parse(capabilities, skip_malformed=skip_malformed) capabilities.update(new_values) return combine(capabilities, skip_none=skip_none) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724334195.7696767 ironic-lib-6.2.0/ironic_lib/common/0000775000175000017500000000000000000000000017172 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/common/__init__.py0000664000175000017500000000000000000000000021271 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/common/config.py0000664000175000017500000000135600000000000021016 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import types class Octal(types.Integer): def __call__(self, value): if isinstance(value, int): return value else: return int(str(value), 8) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/common/i18n.py0000664000175000017500000000145200000000000020325 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n as i18n _translators = i18n.TranslatorFactory(domain='ironic-lib') # The primary translation function using the well-known name "_" _ = _translators.primary ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/disk_partitioner.py0000664000175000017500000001311100000000000021623 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from oslo_config import cfg from ironic_lib.common.i18n import _ from ironic_lib import exception from ironic_lib import utils opts = [ cfg.IntOpt('check_device_interval', default=1, help='After Ironic has completed creating the partition table, ' 'it continues to check for activity on the attached iSCSI ' 'device status at this interval prior to copying the image' ' to the node, in seconds'), cfg.IntOpt('check_device_max_retries', default=20, help='The maximum number of times to check that the device is ' 'not accessed by another process. If the device is still ' 'busy after that, the disk partitioning will be treated as' ' having failed.') ] CONF = cfg.CONF opt_group = cfg.OptGroup(name='disk_partitioner', title='Options for the disk partitioner') CONF.register_group(opt_group) CONF.register_opts(opts, opt_group) LOG = logging.getLogger(__name__) class DiskPartitioner(object): def __init__(self, device, disk_label='msdos', alignment='optimal'): """A convenient wrapper around the parted tool. :param device: The device path. :param disk_label: The type of the partition table. Valid types are: "bsd", "dvh", "gpt", "loop", "mac", "msdos", "pc98", or "sun". :param alignment: Set alignment for newly created partitions. Valid types are: none, cylinder, minimal and optimal. 
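A minimal usage sketch; the device path and sizes are illustrative::

    dp = DiskPartitioner('/dev/sda', disk_label='gpt')
    dp.add_partition(512, fs_type='fat32', boot_flag='boot')  # EFI
    dp.add_partition(10240)                                   # root
    dp.commit()  # writes the layout and waits for the device
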
""" self._device = device self._disk_label = disk_label self._alignment = alignment self._partitions = [] def _exec(self, *args): # NOTE(lucasagomes): utils.execute() is already a wrapper on top # of processutils.execute() which raises specific # exceptions. It also logs any failure so we don't # need to log it again here. utils.execute('parted', '-a', self._alignment, '-s', self._device, '--', 'unit', 'MiB', *args, use_standard_locale=True, run_as_root=True) def add_partition(self, size, part_type='primary', fs_type='', boot_flag=None, extra_flags=None): """Add a partition. :param size: The size of the partition in MiB. :param part_type: The type of the partition. Valid values are: primary, logical, or extended. :param fs_type: The filesystem type. Valid types are: ext2, fat32, fat16, HFS, linux-swap, NTFS, reiserfs, ufs. If blank (''), it will create a Linux native partition (83). :param boot_flag: Boot flag that needs to be configured on the partition. Ignored if None. It can take values 'bios_grub', 'boot'. :param extra_flags: List of flags to set on the partition. Ignored if None. :returns: The partition number. """ self._partitions.append({'size': size, 'type': part_type, 'fs_type': fs_type, 'boot_flag': boot_flag, 'extra_flags': extra_flags}) return len(self._partitions) def get_partitions(self): """Get the partitioning layout. :returns: An iterator with the partition number and the partition layout. """ return enumerate(self._partitions, 1) def commit(self): """Write to the disk.""" LOG.debug("Committing partitions to disk.") cmd_args = ['mklabel', self._disk_label] # NOTE(lucasagomes): Lead in with 1MiB to allow room for the # partition table itself. start = 1 for num, part in self.get_partitions(): end = start + part['size'] cmd_args.extend(['mkpart', part['type'], part['fs_type'], str(start), str(end)]) if part['boot_flag']: cmd_args.extend(['set', str(num), part['boot_flag'], 'on']) if part['extra_flags']: for flag in part['extra_flags']: cmd_args.extend(['set', str(num), flag, 'on']) start = end self._exec(*cmd_args) try: utils.wait_for_disk_to_become_available(self._device) except exception.IronicException as e: raise exception.InstanceDeployFailure( _('Disk partitioning failed on device %(device)s. ' 'Error: %(error)s') % {'device': self._device, 'error': e}) def list_opts(): """Entry point for oslo-config-generator.""" return [('disk_partitioner', opts)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/disk_utils.py0000664000175000017500000006744500000000000020446 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import logging import os import re import stat import time import warnings from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import excutils from ironic_lib.common.i18n import _ from ironic_lib import disk_partitioner from ironic_lib import exception from ironic_lib import qemu_img from ironic_lib import utils opts = [ cfg.IntOpt('efi_system_partition_size', default=550, help='Size of EFI system partition in MiB when configuring ' 'UEFI systems for local boot. A common minimum is ~200 ' 'megabytes, however OS driven firmware updates and ' 'unikernel usage generally requires more space on the ' 'efi partition.'), cfg.IntOpt('bios_boot_partition_size', default=1, help='Size of BIOS Boot partition in MiB when configuring ' 'GPT partitioned systems for local boot in BIOS.'), cfg.StrOpt('dd_block_size', default='1M', help='Block size to use when writing to the nodes disk.'), cfg.IntOpt('partition_detection_attempts', default=3, min=1, help='Maximum attempts to detect a newly created partition.'), cfg.IntOpt('partprobe_attempts', default=10, help='Maximum number of attempts to try to read the ' 'partition.'), ] CONF = cfg.CONF CONF.register_opts(opts, group='disk_utils') LOG = logging.getLogger(__name__) _PARTED_PRINT_RE = re.compile(r"^(\d+):([\d\.]+)MiB:" r"([\d\.]+)MiB:([\d\.]+)MiB:(\w*):(.*):(.*);") _PARTED_TABLE_TYPE_RE = re.compile(r'^.*partition\s+table\s*:\s*(gpt|msdos)', re.IGNORECASE | re.MULTILINE) CONFIGDRIVE_LABEL = "config-2" MAX_CONFIG_DRIVE_SIZE_MB = 64 # Maximum disk size supported by MBR is 2TB (2 * 1024 * 1024 MB) MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR = 2097152 # Backward compatibility, do not use qemu_img_info = qemu_img.image_info convert_image = qemu_img.convert_image def list_partitions(device): """Get partitions information from given device. :param device: The device path. :returns: list of dictionaries (one per partition) with keys: number, start, end, size (in MiB), filesystem, partition_name, flags, path. """ output = utils.execute( 'parted', '-s', '-m', device, 'unit', 'MiB', 'print', use_standard_locale=True, run_as_root=True)[0] if isinstance(output, bytes): output = output.decode("utf-8") lines = [line for line in output.split('\n') if line.strip()][2:] # Example of line: 1:1.00MiB:501MiB:500MiB:ext4::boot fields = ('number', 'start', 'end', 'size', 'filesystem', 'partition_name', 'flags') result = [] for line in lines: match = _PARTED_PRINT_RE.match(line) if match is None: LOG.warning("Partition information from parted for device " "%(device)s does not match " "expected format: %(line)s", dict(device=device, line=line)) continue # Cast int fields to ints (some are floats and we round them down) groups = [int(float(x)) if i < 4 else x for i, x in enumerate(match.groups())] item = dict(zip(fields, groups)) item['path'] = partition_index_to_path(device, item['number']) result.append(item) return result def count_mbr_partitions(device): """Count the number of primary and logical partitions on a MBR :param device: The device path. :returns: A tuple with the number of primary partitions and logical partitions. :raise: ValueError if the device does not have a valid MBR partition table. 
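Example for a hypothetical device holding three primary and two logical partitions::

    primary, logical = count_mbr_partitions('/dev/vdb')
    # primary == 3, logical == 2
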
""" # -d do not update the kernel table # -s print a summary of the partition table output, err = utils.execute('partprobe', '-d', '-s', device, run_as_root=True, use_standard_locale=True) if 'msdos' not in output: raise ValueError('The device %s does not have a valid MBR ' 'partition table' % device) # Sample output: /dev/vdb: msdos partitions 1 2 3 <5 6 7> # The partitions with number > 4 (and inside <>) are logical partitions output = output.replace('<', '').replace('>', '') partitions = [int(s) for s in output.split() if s.isdigit()] return (sum(i < 5 for i in partitions), sum(i > 4 for i in partitions)) def get_disk_identifier(dev): """Get the disk identifier from the disk being exposed by the ramdisk. This disk identifier is appended to the pxe config which will then be used by chain.c32 to detect the correct disk to chainload. This is helpful in deployments to nodes with multiple disks. http://www.syslinux.org/wiki/index.php/Comboot/chain.c32#mbr: :param dev: Path for the already populated disk device. :raises OSError: When the hexdump binary is unavailable. :returns: The Disk Identifier. """ disk_identifier = utils.execute('hexdump', '-s', '440', '-n', '4', '-e', '''\"0x%08x\"''', dev, run_as_root=True, attempts=5, delay_on_retry=True) return disk_identifier[0] def get_partition_table_type(device): """Get partition table type, msdos or gpt. :param device: the name of the device :return: dos, gpt or None """ out = utils.execute('parted', '--script', device, '--', 'print', run_as_root=True, use_standard_locale=True)[0] m = _PARTED_TABLE_TYPE_RE.search(out) if m: return m.group(1) LOG.warning("Unable to get partition table type for device %s", device) return 'unknown' def _blkid(device, probe=False, fields=None): args = [] if probe: args.append('-p') if fields: args += sum((['-s', field] for field in fields), []) output, err = utils.execute('blkid', device, *args, use_standard_locale=True, run_as_root=True) if output.strip(): return output.split(': ', 1)[1] else: return "" def _lsblk(device, deps=True, fields=None): args = ['--pairs', '--bytes', '--ascii'] if not deps: args.append('--nodeps') if fields: args.extend(['--output', ','.join(fields)]) else: args.append('--output-all') output, err = utils.execute('lsblk', device, *args, use_standard_locale=True, run_as_root=True) return output.strip() def get_device_information(device, probe=False, fields=None): """Get information about a device using blkid. Can be applied to all block devices: disks, RAID, partitions. :param device: Device name. :param probe: DEPRECATED, do not use. :param fields: A list of fields to request (all by default). :return: A dictionary with requested fields as keys. :raises: ProcessExecutionError """ if probe: output = _blkid(device, probe=True, fields=fields) else: output = _lsblk(device, fields=fields, deps=False) if output: return next(utils.parse_device_tags(output)) else: return {} def find_efi_partition(device): """Looks for the EFI partition on a given device. A boot partition on a GPT disk is assumed to be an EFI partition as well. 
:param device: the name of the device :return: the EFI partition record from `list_partitions` or None """ is_gpt = get_partition_table_type(device) == 'gpt' for part in list_partitions(device): flags = {x.strip() for x in part['flags'].split(',')} if 'esp' in flags or ('boot' in flags and is_gpt): LOG.debug("Found EFI partition %s on device %s", part, device) return part else: LOG.debug("No efi partition found on device %s", device) def get_uefi_disk_identifier(dev): """Get the uuid from the disk being exposed by the ramdisk. DEPRECATED: use find_efi_partition with get_device_information instead. :param dev: Path for the already populated disk device. :raises InstanceDeployFailure: Image is not UEFI bootable. :returns: The UUID of the partition. """ warnings.warn("get_uefi_disk_identifier is deprecated, use " "find_efi_partition and get_partition_information instead", DeprecationWarning) partition_id = None try: report, _ = utils.execute('fdisk', '-l', dev, run_as_root=True) except processutils.ProcessExecutionError as e: msg = _('Failed to find the partition on the disk %s ') % e LOG.error(msg) raise exception.InstanceDeployFailure(msg) for line in report.splitlines(): if line.startswith(dev) and 'EFI System' in line: vals = line.split() partition_id = vals[0] try: lsblk_output = _lsblk(partition_id, fields=['UUID']) disk_identifier = lsblk_output.split("=")[1].strip() disk_identifier = disk_identifier.strip('"') except processutils.ProcessExecutionError as e: raise exception.InstanceDeployFailure("Image is not UEFI bootable. " "Error: %s " % e) return disk_identifier _ISCSI_PREFIX = "iqn.2008-10.org.openstack:" # TODO(dtantsur): deprecate node_uuid here, it's not overly useful (any iSCSI # device should get the same treatment). def is_iscsi_device(dev, node_uuid=None): """Check whether the device path belongs to an iSCSI device. If node UUID is provided, checks that the device belongs to this UUID. """ if node_uuid: return (_ISCSI_PREFIX + node_uuid) in dev else: return _ISCSI_PREFIX in dev def is_last_char_digit(dev): """check whether device name ends with a digit""" if len(dev) >= 1: return dev[-1].isdigit() return False def partition_index_to_path(device, index): """Guess a partition path based on its device and index. :param device: Device path. :param index: Partition index. """ # the actual device names in the baremetal are like /dev/sda, /dev/sdb etc. # While for the iSCSI device, the naming convention has a format which has # iqn also embedded in it. # When this function is called by ironic-conductor, the iSCSI device name # should be appended by "part%d". While on the baremetal, it should name # the device partitions as /dev/sda1 and not /dev/sda-part1. if is_iscsi_device(device): part_template = '%s-part%d' elif is_last_char_digit(device): part_template = '%sp%d' else: part_template = '%s%d' return part_template % (device, index) def make_partitions(dev, root_mb, swap_mb, ephemeral_mb, configdrive_mb, node_uuid, commit=True, boot_option="netboot", boot_mode="bios", disk_label=None, cpu_arch=""): """Partition the disk device. Create partitions for root, swap, ephemeral and configdrive on a disk device. :param dev: Path for the device to work on. :param root_mb: Size of the root partition in mebibytes (MiB). :param swap_mb: Size of the swap partition in mebibytes (MiB). If 0, no partition will be created. :param ephemeral_mb: Size of the ephemeral partition in mebibytes (MiB). If 0, no partition will be created. 
:param configdrive_mb: Size of the configdrive partition in mebibytes (MiB). If 0, no partition will be created. :param commit: True/False. Default for this setting is True. If False, partitions will not be written to disk. :param boot_option: Can be "local" or "netboot". "netboot" by default. :param boot_mode: Can be "bios" or "uefi". "bios" by default. :param node_uuid: Node's uuid. Used for logging. :param disk_label: The disk label to be used when creating the partition table. Valid values are: "msdos", "gpt" or None; If None Ironic will figure it out according to the boot_mode parameter. :param cpu_arch: Architecture of the node the disk device belongs to. When using the default value (an empty string), no architecture specific steps will be taken. This default should be used for x86_64. When set to ppc64*, architecture specific steps are taken for booting a partition image locally. :returns: A dictionary containing the partition type as Key and partition path as Value for the partitions created by this method. """ LOG.debug("Starting to partition the disk device: %(dev)s " "for node %(node)s", {'dev': dev, 'node': node_uuid}) part_dict = {} if disk_label is None: disk_label = 'gpt' if boot_mode == 'uefi' else 'msdos' dp = disk_partitioner.DiskPartitioner(dev, disk_label=disk_label) # For uefi localboot, switch partition table to gpt and create the efi # system partition as the first partition. if boot_mode == "uefi" and boot_option == "local": part_num = dp.add_partition(CONF.disk_utils.efi_system_partition_size, fs_type='fat32', boot_flag='boot') part_dict['efi system partition'] = partition_index_to_path( dev, part_num) if (boot_mode == "bios" and boot_option == "local" and disk_label == "gpt" and not cpu_arch.startswith('ppc64')): part_num = dp.add_partition(CONF.disk_utils.bios_boot_partition_size, boot_flag='bios_grub') part_dict['BIOS Boot partition'] = partition_index_to_path( dev, part_num) # NOTE(mjturek): With ppc64* nodes, partition images are expected to have # a PReP partition at the start of the disk. This is an 8 MiB partition # with the boot and prep flags set. The bootloader should be installed # here. if (cpu_arch.startswith("ppc64") and boot_mode == "bios" and boot_option == "local"): LOG.debug("Add PReP boot partition (8 MB) to device: " "%(dev)s for node %(node)s", {'dev': dev, 'node': node_uuid}) boot_flag = 'boot' if disk_label == 'msdos' else None part_num = dp.add_partition(8, part_type='primary', boot_flag=boot_flag, extra_flags=['prep']) part_dict['PReP Boot partition'] = partition_index_to_path( dev, part_num) if ephemeral_mb: LOG.debug("Add ephemeral partition (%(size)d MB) to device: %(dev)s " "for node %(node)s", {'dev': dev, 'size': ephemeral_mb, 'node': node_uuid}) part_num = dp.add_partition(ephemeral_mb) part_dict['ephemeral'] = partition_index_to_path(dev, part_num) if swap_mb: LOG.debug("Add Swap partition (%(size)d MB) to device: %(dev)s " "for node %(node)s", {'dev': dev, 'size': swap_mb, 'node': node_uuid}) part_num = dp.add_partition(swap_mb, fs_type='linux-swap') part_dict['swap'] = partition_index_to_path(dev, part_num) if configdrive_mb: LOG.debug("Add config drive partition (%(size)d MB) to device: " "%(dev)s for node %(node)s", {'dev': dev, 'size': configdrive_mb, 'node': node_uuid}) part_num = dp.add_partition(configdrive_mb) part_dict['configdrive'] = partition_index_to_path(dev, part_num) # NOTE(lucasagomes): Make the root partition the last partition. 
This # enables tools like cloud-init's growroot utility to expand the root # partition until the end of the disk. LOG.debug("Add root partition (%(size)d MB) to device: %(dev)s " "for node %(node)s", {'dev': dev, 'size': root_mb, 'node': node_uuid}) boot_val = 'boot' if (not cpu_arch.startswith("ppc64") and boot_mode == "bios" and boot_option == "local" and disk_label == "msdos") else None part_num = dp.add_partition(root_mb, boot_flag=boot_val) part_dict['root'] = partition_index_to_path(dev, part_num) if commit: # write to the disk dp.commit() trigger_device_rescan(dev) return part_dict def is_block_device(dev): """Check whether a device is block or not.""" attempts = CONF.disk_utils.partition_detection_attempts for attempt in range(attempts): try: s = os.stat(dev) except OSError as e: LOG.debug("Unable to stat device %(dev)s. Attempt %(attempt)d " "out of %(total)d. Error: %(err)s", {"dev": dev, "attempt": attempt + 1, "total": attempts, "err": e}) time.sleep(1) else: return stat.S_ISBLK(s.st_mode) msg = _("Unable to stat device %(dev)s after attempting to verify " "%(attempts)d times.") % {'dev': dev, 'attempts': attempts} LOG.error(msg) raise exception.InstanceDeployFailure(msg) def dd(src, dst, conv_flags=None): """Execute dd from src to dst.""" if conv_flags: extra_args = ['conv=%s' % conv_flags] else: extra_args = [] utils.dd(src, dst, 'bs=%s' % CONF.disk_utils.dd_block_size, 'oflag=direct', *extra_args) def populate_image(src, dst, conv_flags=None): data = qemu_img.image_info(src) if data.file_format == 'raw': dd(src, dst, conv_flags=conv_flags) else: qemu_img.convert_image(src, dst, 'raw', True, sparse_size='0') def block_uuid(dev): """Get UUID of a block device. Try to fetch the UUID, if that fails, try to fetch the PARTUUID. """ info = get_device_information(dev, fields=['UUID', 'PARTUUID']) if info.get('UUID'): return info['UUID'] else: LOG.debug('Falling back to partition UUID as the block device UUID ' 'was not found while examining %(device)s', {'device': dev}) return info.get('PARTUUID', '') def get_image_mb(image_path, virtual_size=True): """Get size of an image in Megabyte.""" mb = 1024 * 1024 if not virtual_size: image_byte = os.path.getsize(image_path) else: data = qemu_img.image_info(image_path) image_byte = data.virtual_size # round up size to MB image_mb = int((image_byte + mb - 1) / mb) return image_mb def get_dev_block_size(dev): """Get the device size in 512 byte sectors.""" block_sz, cmderr = utils.execute('blockdev', '--getsz', dev, run_as_root=True) return int(block_sz) def get_dev_byte_size(dev): """Get the device size in bytes.""" byte_sz, cmderr = utils.execute('blockdev', '--getsize64', dev, run_as_root=True) return int(byte_sz) def get_dev_sector_size(dev): """Get the device logical sector size in bytes.""" sect_sz, cmderr = utils.execute('blockdev', '--getss', dev, run_as_root=True) return int(sect_sz) def destroy_disk_metadata(dev, node_uuid): """Destroy metadata structures on node's disk. Ensure that node's disk magic strings are wiped without zeroing the entire drive. To do this we use the wipefs tool from util-linux. :param dev: Path for the device to work on. :param node_uuid: Node's uuid. Used for logging. 
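Illustrative call (the device path is a placeholder)::

    destroy_disk_metadata('/dev/sda', node.uuid)

In addition to ``wipefs``, both the primary GPT area at the start of the disk and the backup GPT area at its end are explicitly zeroed.
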
""" # NOTE(NobodyCam): This is needed to work around bug: # https://bugs.launchpad.net/ironic/+bug/1317647 LOG.debug("Start destroy disk metadata for node %(node)s.", {'node': node_uuid}) try: utils.execute('wipefs', '--force', '--all', dev, run_as_root=True, use_standard_locale=True) except processutils.ProcessExecutionError as e: with excutils.save_and_reraise_exception() as ctxt: # NOTE(zhenguo): Check if --force option is supported for wipefs, # if not, we should try without it. if '--force' in str(e): ctxt.reraise = False utils.execute('wipefs', '--all', dev, run_as_root=True, use_standard_locale=True) # NOTE(TheJulia): sgdisk attempts to load and make sense of the # partition tables in advance of wiping the partition data. # This means when a CRC error is found, sgdisk fails before # erasing partition data. # This is the same bug as # https://bugs.launchpad.net/ironic-python-agent/+bug/1737556 sector_size = get_dev_sector_size(dev) # https://uefi.org/specs/UEFI/2.10/05_GUID_Partition_Table_Format.html If # the block size is 512, the First Usable LBA must be greater than or equal # to 34 [...] if the logical block size is 4096, the First Usable LBA must # be greater than or equal to 6 if sector_size == 512: gpt_sectors = 33 elif sector_size == 4096: gpt_sectors = 5 # Overwrite the Primary GPT, catch very small partitions (like EBRs) dd_bs = 'bs=%s' % sector_size dd_device = 'of=%s' % dev dd_count = 'count=%s' % gpt_sectors dev_size = get_dev_byte_size(dev) if dev_size < gpt_sectors * sector_size: dd_count = 'count=%s' % int(dev_size / sector_size) utils.execute('dd', dd_bs, 'if=/dev/zero', dd_device, dd_count, 'oflag=direct', run_as_root=True, use_standard_locale=True) # Overwrite the Secondary GPT, do this only if there could be one if dev_size > gpt_sectors * sector_size: gpt_backup = int(dev_size / sector_size - gpt_sectors) dd_seek = 'seek=%i' % gpt_backup dd_count = 'count=%s' % gpt_sectors utils.execute('dd', dd_bs, 'if=/dev/zero', dd_device, dd_count, 'oflag=direct', dd_seek, run_as_root=True, use_standard_locale=True) # Go ahead and let sgdisk run as well. utils.execute('sgdisk', '-Z', dev, run_as_root=True, use_standard_locale=True) try: utils.wait_for_disk_to_become_available(dev) except exception.IronicException as e: raise exception.InstanceDeployFailure( _('Destroying metadata failed on device %(device)s. ' 'Error: %(error)s') % {'device': dev, 'error': e}) LOG.info("Disk metadata on %(dev)s successfully destroyed for node " "%(node)s", {'dev': dev, 'node': node_uuid}) def list_opts(): """Entry point for oslo-config-generator.""" return [('disk_utils', opts)] def _fix_gpt_structs(device, node_uuid): """Checks backup GPT data structures and moves them to end of the device :param device: The device path. :param node_uuid: UUID of the Node. Used for logging. :raises: InstanceDeployFailure, if any disk partitioning related commands fail. """ try: output, _err = utils.execute('sgdisk', '-v', device, run_as_root=True) search_str = "it doesn't reside\nat the end of the disk" if search_str in output: utils.execute('sgdisk', '-e', device, run_as_root=True) except (processutils.UnknownArgumentError, processutils.ProcessExecutionError, OSError) as e: msg = (_('Failed to fix GPT data structures on disk %(disk)s ' 'for node %(node)s. 
Error: %(error)s') % {'disk': device, 'node': node_uuid, 'error': e}) LOG.error(msg) raise exception.InstanceDeployFailure(msg) def fix_gpt_partition(device, node_uuid): """Fix GPT partition Fix GPT table information when image is written to a disk which has a bigger extend (e.g. 30GB image written on a 60Gb physical disk). :param device: The device path. :param node_uuid: UUID of the Node. :raises: InstanceDeployFailure if exception is caught. """ try: disk_is_gpt_partitioned = (get_partition_table_type(device) == 'gpt') if disk_is_gpt_partitioned: _fix_gpt_structs(device, node_uuid) except Exception as e: msg = (_('Failed to fix GPT partition on disk %(disk)s ' 'for node %(node)s. Error: %(error)s') % {'disk': device, 'node': node_uuid, 'error': e}) LOG.error(msg) raise exception.InstanceDeployFailure(msg) def udev_settle(): """Wait for the udev event queue to settle. Wait for the udev event queue to settle to make sure all devices are detected once the machine boots up. :return: True on success, False otherwise. """ LOG.debug('Waiting until udev event queue is empty') try: utils.execute('udevadm', 'settle') except processutils.ProcessExecutionError as e: LOG.warning('Something went wrong when waiting for udev ' 'to settle. Error: %s', e) return False else: return True def partprobe(device, attempts=None): """Probe partitions on the given device. :param device: The block device containing partitions that is attempting to be updated. :param attempts: Number of attempts to run partprobe, the default is read from the configuration. :return: True on success, False otherwise. """ if attempts is None: attempts = CONF.disk_utils.partprobe_attempts try: utils.execute('partprobe', device, run_as_root=True, attempts=attempts) except (processutils.UnknownArgumentError, processutils.ProcessExecutionError, OSError) as e: LOG.warning("Unable to probe for partitions on device %(device)s, " "the partitioning table may be broken. Error: %(error)s", {'device': device, 'error': e}) return False else: return True def trigger_device_rescan(device, attempts=None): """Sync and trigger device rescan. Disk partition performed via parted, when performed on a ramdisk do not have to honor the fsync mechanism. In essence, fsync is used on the file representing the block device, which falls to the kernel filesystem layer to trigger a sync event. On a ramdisk using ramfs, this is an explicit non-operation. As a result of this, we need to trigger a system wide sync operation which will trigger cache to flush to disk, after which partition changes should be visible upon re-scan. When ramdisks are not in use, this also helps ensure that data has been safely flushed across the wire, such as on iscsi connections. :param device: The block device containing partitions that is attempting to be updated. :param attempts: Number of attempts to run partprobe, the default is read from the configuration. :return: True on success, False otherwise. """ LOG.debug('Explicitly calling sync to force buffer/cache flush') utils.execute('sync') # Make sure any additions to the partitioning are reflected in the # kernel. udev_settle() partprobe(device, attempts=attempts) udev_settle() try: # Also verify that the partitioning is correct now. 
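# A non-zero exit status from sgdisk raises ProcessExecutionError, which
# the handler below downgrades to a warning instead of raising to the
# caller; the function simply returns False.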
utils.execute('sgdisk', '-v', device, run_as_root=True) except processutils.ProcessExecutionError as exc: LOG.warning('Failed to verify partition tables on device %(dev)s: ' '%(err)s', {'dev': device, 'err': exc}) return False else: return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/exception.py0000664000175000017500000001552300000000000020260 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Ironic base exception handling. Includes decorator for re-raising Ironic-type exceptions. SHOULD include dedicated exception logging. """ import collections from http import client as http_client import json import logging from oslo_config import cfg from oslo_utils import excutils from ironic_lib.common.i18n import _ LOG = logging.getLogger(__name__) exc_log_opts = [ cfg.BoolOpt('fatal_exception_format_errors', default=False, help=_('Used if there is a formatting error when generating ' 'an exception message (a programming error). If True, ' 'raise an exception; if False, use the unformatted ' 'message.'), deprecated_group='DEFAULT'), ] CONF = cfg.CONF CONF.register_opts(exc_log_opts, group='ironic_lib') def list_opts(): """Entry point for oslo-config-generator.""" return [('ironic_lib', exc_log_opts)] def _ensure_exception_kwargs_serializable(exc_class_name, kwargs): """Ensure that kwargs are serializable Ensure that all kwargs passed to exception constructor can be passed over RPC, by trying to convert them to JSON, or, as a last resort, to string. If it is not possible, unserializable kwargs will be removed, letting the receiver to handle the exception string as it is configured to. :param exc_class_name: a IronicException class name. :param kwargs: a dictionary of keyword arguments passed to the exception constructor. :returns: a dictionary of serializable keyword arguments. """ serializers = [(json.dumps, _('when converting to JSON')), (str, _('when converting to string'))] exceptions = collections.defaultdict(list) serializable_kwargs = {} for k, v in kwargs.items(): for serializer, msg in serializers: try: serializable_kwargs[k] = serializer(v) exceptions.pop(k, None) break except Exception as e: exceptions[k].append( '(%(serializer_type)s) %(e_type)s: %(e_contents)s' % {'serializer_type': msg, 'e_contents': e, 'e_type': e.__class__.__name__}) if exceptions: LOG.error("One or more arguments passed to the %(exc_class)s " "constructor as kwargs can not be serialized. The " "serialized arguments: %(serialized)s. 
These " "unserialized kwargs were dropped because of the " "exceptions encountered during their " "serialization:\n%(errors)s", dict(errors=';\n'.join("%s: %s" % (k, '; '.join(v)) for k, v in exceptions.items()), exc_class=exc_class_name, serialized=serializable_kwargs)) # We might be able to actually put the following keys' values into # format string, but there is no guarantee, drop it just in case. for k in exceptions: del kwargs[k] return serializable_kwargs class IronicException(Exception): """Base Ironic Exception To correctly use this class, inherit from it and define a '_msg_fmt' property. That _msg_fmt will get printf'd with the keyword arguments provided to the constructor. If you need to access the message from an exception you should use str(exc) """ _msg_fmt = _("An unknown exception occurred.") code = 500 headers = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = _ensure_exception_kwargs_serializable( self.__class__.__name__, kwargs) if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass else: self.code = int(kwargs['code']) if not message: try: message = self._msg_fmt % kwargs except Exception: with excutils.save_and_reraise_exception() as ctxt: # kwargs doesn't match a variable in the message # log the issue and the kwargs prs = ', '.join('%s=%s' % pair for pair in kwargs.items()) LOG.exception('Exception in string format operation ' '(arguments %s)', prs) if not CONF.ironic_lib.fatal_exception_format_errors: # at least get the core message out if something # happened message = self._msg_fmt ctxt.reraise = False super(IronicException, self).__init__(message) class InstanceDeployFailure(IronicException): _msg_fmt = _("Failed to deploy instance: %(reason)s") class FileSystemNotSupported(IronicException): _msg_fmt = _("Failed to create a file system. " "File system %(fs)s is not supported.") class InvalidMetricConfig(IronicException): _msg_fmt = _("Invalid value for metrics config option: %(reason)s") class ServiceLookupFailure(IronicException): _msg_fmt = _("Cannot find %(service)s service through multicast") class ServiceRegistrationFailure(IronicException): _msg_fmt = _("Cannot register %(service)s service: %(error)s") class BadRequest(IronicException): code = http_client.BAD_REQUEST class Unauthorized(IronicException): code = http_client.UNAUTHORIZED headers = {'WWW-Authenticate': 'Basic realm="Baremetal API"'} class ConfigInvalid(IronicException): _msg_fmt = _("Invalid configuration file. %(error_msg)s") class CatalogNotFound(IronicException): _msg_fmt = _("Service type %(service_type)s with endpoint type " "%(endpoint_type)s not found in keystone service catalog.") class KeystoneUnauthorized(IronicException): _msg_fmt = _("Not authorized in Keystone.") class KeystoneFailure(IronicException): pass class MetricsNotSupported(IronicException): _msg_fmt = _("Metrics action is not supported. 
You may need to " "adjust the [metrics] section in ironic.conf.") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724334195.7696767 ironic-lib-6.2.0/ironic_lib/json_rpc/0000775000175000017500000000000000000000000017517 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/json_rpc/__init__.py0000664000175000017500000000630500000000000021634 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from ironic_lib.common import config from ironic_lib.common.i18n import _ from ironic_lib import keystone CONF = cfg.CONF opts = [ cfg.StrOpt('auth_strategy', choices=[('noauth', _('no authentication')), ('keystone', _('use the Identity service for ' 'authentication')), ('http_basic', _('HTTP basic authentication'))], help=_('Authentication strategy used by JSON RPC. Defaults to ' 'the global auth_strategy setting.')), cfg.StrOpt('http_basic_auth_user_file', default='/etc/ironic/htpasswd-json-rpc', help=_('Path to Apache format user authentication file used ' 'when auth_strategy=http_basic')), cfg.HostAddressOpt('host_ip', default='::', help=_('The IP address or hostname on which JSON RPC ' 'will listen.')), cfg.PortOpt('port', default=8089, help=_('The port to use for JSON RPC')), cfg.BoolOpt('use_ssl', default=False, help=_('Whether to use TLS for JSON RPC')), cfg.StrOpt('http_basic_username', deprecated_for_removal=True, deprecated_reason=_("Use username instead"), help=_("Name of the user to use for HTTP Basic authentication " "client requests.")), cfg.StrOpt('http_basic_password', deprecated_for_removal=True, deprecated_reason=_("Use password instead"), secret=True, help=_("Password to use for HTTP Basic authentication " "client requests.")), cfg.ListOpt('allowed_roles', default=['admin'], help=_("List of roles allowed to use JSON RPC")), cfg.StrOpt('unix_socket', help=_('Unix socket to listen on. Disables host_ip and port.')), cfg.Opt('unix_socket_mode', type=config.Octal(), help=_('File mode (an octal number) of the unix socket to ' 'listen on. Ignored if unix_socket is not set.')), ] def register_opts(conf): conf.register_opts(opts, group='json_rpc') keystone.register_auth_opts(conf, 'json_rpc') conf.set_default('timeout', 120, group='json_rpc') register_opts(CONF) def list_opts(): return [('json_rpc', opts + keystone.add_auth_opts([]))] def auth_strategy(): # NOTE(dtantsur): this expects [DEFAULT]auth_strategy to be provided by the # service configuration. return CONF.json_rpc.auth_strategy or CONF.auth_strategy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/json_rpc/client.py0000664000175000017500000002062500000000000021354 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A simple JSON RPC client. This client is compatible with any JSON RPC 2.0 implementation, including ours. """ import logging from oslo_config import cfg from oslo_utils import importutils from oslo_utils import netutils from oslo_utils import strutils from oslo_utils import uuidutils from ironic_lib.common.i18n import _ from ironic_lib import exception from ironic_lib import json_rpc from ironic_lib import keystone CONF = cfg.CONF LOG = logging.getLogger(__name__) _SESSION = None def _get_session(): global _SESSION if _SESSION is None: kwargs = {} auth_strategy = json_rpc.auth_strategy() if auth_strategy != 'keystone': auth_type = 'none' if auth_strategy == 'noauth' else auth_strategy CONF.set_default('auth_type', auth_type, group='json_rpc') # Deprecated, remove in W if auth_strategy == 'http_basic': if CONF.json_rpc.http_basic_username: kwargs['username'] = CONF.json_rpc.http_basic_username if CONF.json_rpc.http_basic_password: kwargs['password'] = CONF.json_rpc.http_basic_password auth = keystone.get_auth('json_rpc', **kwargs) session = keystone.get_session('json_rpc', auth=auth) headers = { 'Content-Type': 'application/json' } # Adds options like connect_retries _SESSION = keystone.get_adapter('json_rpc', session=session, additional_headers=headers) return _SESSION class Client(object): """JSON RPC client with ironic exception handling.""" allowed_exception_namespaces = [ "ironic_lib.exception.", "ironic.common.exception.", "ironic_inspector.utils.", ] def __init__(self, serializer, version_cap=None): self.serializer = serializer self.version_cap = version_cap def can_send_version(self, version): return _can_send_version(version, self.version_cap) def prepare(self, topic, version=None): """Prepare the client to transmit a request. :param topic: Topic which is being addressed. Typically this is the hostname of the remote json-rpc service. :param version: The RPC API version to utilize. 
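An illustrative invocation (topic, host and method names are made up)::

    cctx = client.prepare('manager.conductor-host:8089', version='1.42')
    cctx.call(context, 'do_node_deploy', node_id=node_id)
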
""" host = topic.split('.', 1)[1] host, port = netutils.parse_host_port(host) return _CallContext( host, self.serializer, version=version, version_cap=self.version_cap, allowed_exception_namespaces=self.allowed_exception_namespaces, port=port) class _CallContext(object): """Wrapper object for compatibility with oslo.messaging API.""" def __init__(self, host, serializer, version=None, version_cap=None, allowed_exception_namespaces=(), port=None): if not port: self.port = CONF.json_rpc.port else: self.port = int(port) self.host = host self.serializer = serializer self.version = version self.version_cap = version_cap self.allowed_exception_namespaces = allowed_exception_namespaces def _is_known_exception(self, class_name): for ns in self.allowed_exception_namespaces: if class_name.startswith(ns): return True return False def _handle_error(self, error): if not error: return message = error['message'] try: cls = error['data']['class'] except KeyError: LOG.error("Unexpected error from RPC: %s", error) raise exception.IronicException( _("Unexpected error raised by RPC")) else: if not self._is_known_exception(cls): # NOTE(dtantsur): protect against arbitrary code execution LOG.error("Unexpected error from RPC: %s", error) raise exception.IronicException( _("Unexpected error raised by RPC")) raise importutils.import_object(cls, message, code=error.get('code', 500)) def call(self, context, method, version=None, **kwargs): """Call conductor RPC. Versioned objects are automatically serialized and deserialized. :param context: Security context. :param method: Method name. :param version: RPC API version to use. :param kwargs: Keyword arguments to pass. :return: RPC result (if any). """ return self._request(context, method, cast=False, version=version, **kwargs) def cast(self, context, method, version=None, **kwargs): """Call conductor RPC asynchronously. Versioned objects are automatically serialized and deserialized. :param context: Security context. :param method: Method name. :param version: RPC API version to use. :param kwargs: Keyword arguments to pass. :return: None """ return self._request(context, method, cast=True, version=version, **kwargs) def _request(self, context, method, cast=False, version=None, **kwargs): """Call conductor RPC. Versioned objects are automatically serialized and deserialized. :param context: Security context. :param method: Method name. :param cast: If true, use a JSON RPC notification. :param version: RPC API version to use. :param kwargs: Keyword arguments to pass. :return: RPC result (if any). 
""" params = {key: self.serializer.serialize_entity(context, value) for key, value in kwargs.items()} params['context'] = context.to_dict() if version is None: version = self.version if version is not None: _check_version(version, self.version_cap) params['rpc.version'] = version body = { "jsonrpc": "2.0", "method": method, "params": params, } if not cast: body['id'] = (getattr(context, 'request_id', None) or uuidutils.generate_uuid()) scheme = 'http' if CONF.json_rpc.use_ssl: scheme = 'https' url = '%s://%s:%d' % (scheme, netutils.escape_ipv6(self.host), self.port) LOG.debug("RPC %s to %s with %s", method, url, strutils.mask_dict_password(body)) try: result = _get_session().post(url, json=body) except Exception as exc: LOG.debug('RPC %s to %s failed with %s', method, url, exc) raise LOG.debug('RPC %s to %s returned %s', method, url, strutils.mask_password(result.text or '')) if not cast: result = result.json() self._handle_error(result.get('error')) result = self.serializer.deserialize_entity(context, result['result']) return result def _can_send_version(requested, version_cap): if requested is None or version_cap is None: return True requested_parts = [int(item) for item in requested.split('.', 1)] version_cap_parts = [int(item) for item in version_cap.split('.', 1)] if requested_parts[0] != version_cap_parts[0]: return False # major version mismatch else: return requested_parts[1] <= version_cap_parts[1] def _check_version(requested, version_cap): if not _can_send_version(requested, version_cap): raise RuntimeError(_("Cannot send RPC request: requested version " "%(requested)s, maximum allowed version is " "%(version_cap)s") % {'requested': requested, 'version_cap': version_cap}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/json_rpc/server.py0000664000175000017500000002406700000000000021410 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of JSON RPC for communication between API and conductors. This module implementa a subset of JSON RPC 2.0 as defined in https://www.jsonrpc.org/specification. Main differences: * No support for batched requests. * No support for positional arguments passing. * No JSON RPC 1.0 fallback. """ import json import logging try: from keystonemiddleware import auth_token except ImportError: auth_token = None from oslo_config import cfg try: import oslo_messaging except ImportError: oslo_messaging = None from oslo_utils import strutils import webob from ironic_lib import auth_basic from ironic_lib.common.i18n import _ from ironic_lib import exception from ironic_lib import json_rpc from ironic_lib import wsgi CONF = cfg.CONF LOG = logging.getLogger(__name__) _DENY_LIST = {'init_host', 'del_host', 'target', 'iter_nodes'} def _build_method_map(manager): """Build mapping from method names to their bodies. :param manager: A conductor manager. 
:return: dict with mapping """ result = {} for method in dir(manager): if method.startswith('_') or method in _DENY_LIST: continue func = getattr(manager, method) if not callable(func): continue LOG.debug('Adding RPC method %s', method) result[method] = func return result class JsonRpcError(exception.IronicException): pass class ParseError(JsonRpcError): code = -32700 _msg_fmt = _("Invalid JSON received by RPC server") class InvalidRequest(JsonRpcError): code = -32600 _msg_fmt = _("Invalid request object received by RPC server") class MethodNotFound(JsonRpcError): code = -32601 _msg_fmt = _("Method %(name)s was not found") class InvalidParams(JsonRpcError): code = -32602 _msg_fmt = _("Params %(params)s are invalid for %(method)s: %(error)s") class EmptyContext: request_id = None def __init__(self, src): self.__dict__.update(src) def to_dict(self): return self.__dict__.copy() class WSGIService(wsgi.WSGIService): """Provides ability to launch JSON RPC as a WSGI application.""" def __init__(self, manager, serializer, context_class=EmptyContext): """Create a JSON RPC service. :param manager: Object from which to expose methods. :param serializer: A serializer that supports calls serialize_entity and deserialize_entity. :param context_class: A context class - a callable accepting a dict received from network. """ self.manager = manager self.serializer = serializer self.context_class = context_class self._method_map = _build_method_map(manager) auth_strategy = json_rpc.auth_strategy() if auth_strategy == 'keystone': conf = dict(CONF.keystone_authtoken) if auth_token is None: raise exception.ConfigInvalid( _("keystonemiddleware is required for keystone " "authentication")) app = auth_token.AuthProtocol(self._application, conf) elif auth_strategy == 'http_basic': app = auth_basic.BasicAuthMiddleware( self._application, cfg.CONF.json_rpc.http_basic_auth_user_file) else: app = self._application super().__init__('ironic-json-rpc', app, CONF.json_rpc) def _application(self, environment, start_response): """WSGI application for conductor JSON RPC.""" request = webob.Request(environment) if request.method != 'POST': body = {'error': {'code': 405, 'message': _('Only POST method can be used')}} return webob.Response(status_code=405, json_body=body)( environment, start_response) if json_rpc.auth_strategy() == 'keystone': roles = (request.headers.get('X-Roles') or '').split(',') allowed_roles = CONF.json_rpc.allowed_roles if set(roles).isdisjoint(allowed_roles): LOG.debug('Roles %s do not contain any of %s, rejecting ' 'request', roles, allowed_roles) body = {'error': {'code': 403, 'message': _('Forbidden')}} return webob.Response(status_code=403, json_body=body)( environment, start_response) result = self._call(request) if result is not None: response = webob.Response(content_type='application/json', charset='UTF-8', json_body=result) else: response = webob.Response(status_code=204) return response(environment, start_response) def _handle_error(self, exc, request_id=None): """Generate a JSON RPC 2.0 error body. :param exc: Exception object. :param request_id: ID of the request (if any). 
:return: dict with response body """ if (oslo_messaging is not None and isinstance(exc, oslo_messaging.ExpectedException)): exc = exc.exc_info[1] expected = isinstance(exc, exception.IronicException) cls = exc.__class__ if expected: LOG.debug('RPC error %s: %s', cls.__name__, exc) else: LOG.exception('Unexpected RPC exception %s', cls.__name__) response = { "jsonrpc": "2.0", "id": request_id, "error": { "code": getattr(exc, 'code', 500), "message": str(exc), } } if expected and not isinstance(exc, JsonRpcError): # Allow de-serializing the correct class for expected errors. response['error']['data'] = { 'class': '%s.%s' % (cls.__module__, cls.__name__) } return response def _call(self, request): """Process a JSON RPC request. :param request: ``webob.Request`` object. :return: dict with response body. """ request_id = None try: try: body = json.loads(request.text) except ValueError: LOG.error('Cannot parse JSON RPC request as JSON') raise ParseError() if not isinstance(body, dict): LOG.error('JSON RPC request %s is not an object (batched ' 'requests are not supported)', body) raise InvalidRequest() request_id = body.get('id') params = body.get('params', {}) if (body.get('jsonrpc') != '2.0' or not body.get('method') or not isinstance(params, dict)): LOG.error('JSON RPC request %s is invalid', body) raise InvalidRequest() except Exception as exc: # We do not treat malformed requests as notifications and return # a response even when request_id is None. This seems in agreement # with the examples in the specification. return self._handle_error(exc, request_id) try: method = body['method'] try: func = self._method_map[method] except KeyError: raise MethodNotFound(name=method) result = self._handle_requests(func, method, params) if request_id is not None: return { "jsonrpc": "2.0", "result": result, "id": request_id } except Exception as exc: result = self._handle_error(exc, request_id) # We treat correctly formed requests without "id" as notifications # and do not return any errors. if request_id is not None: return result def _handle_requests(self, func, name, params): """Convert arguments and call a method. :param func: Callable object. :param name: RPC call name for logging. :param params: Keyword arguments. :return: call result as JSON. """ # TODO(dtantsur): server-side version check? params.pop('rpc.version', None) logged_params = strutils.mask_dict_password(params) try: context = params.pop('context') except KeyError: context = None else: # A valid context is required for deserialization if not isinstance(context, dict): raise InvalidParams( _("Context must be a dictionary, if provided")) context = self.context_class(context) params = {key: self.serializer.deserialize_entity(context, value) for key, value in params.items()} params['context'] = context LOG.debug('RPC %s with %s', name, logged_params) try: result = func(**params) # FIXME(dtantsur): we could use the inspect module, but # oslo_messaging.expected_exceptions messes up signatures. except TypeError as exc: raise InvalidParams(params=', '.join(params), method=name, error=exc) if context is not None: # Currently it seems that we can serialize even with invalid # context, but I'm not sure it's guaranteed to be the case. 
result = self.serializer.serialize_entity(context, result) LOG.debug('RPC %s returned %s', name, strutils.mask_dict_password(result) if isinstance(result, dict) else result) return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/keystone.py0000664000175000017500000001710700000000000020123 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Central place for handling Keystone authorization and service lookup.""" import copy import functools import logging from keystoneauth1 import exceptions as ks_exception from keystoneauth1 import loading as ks_loading from keystoneauth1 import service_token from keystoneauth1 import token_endpoint import os_service_types from oslo_config import cfg from ironic_lib import exception CONF = cfg.CONF LOG = logging.getLogger(__name__) DEFAULT_VALID_INTERFACES = ['internal', 'public'] def ks_exceptions(f): """Wraps keystoneclient functions and centralizes exception handling.""" @functools.wraps(f) def wrapper(group, *args, **kwargs): try: return f(group, *args, **kwargs) except ks_exception.EndpointNotFound: service_type = kwargs.get( 'service_type', getattr(getattr(CONF, group), 'service_type', group)) endpoint_type = kwargs.get('endpoint_type', 'internal') raise exception.CatalogNotFound( service_type=service_type, endpoint_type=endpoint_type) except (ks_exception.Unauthorized, ks_exception.AuthorizationFailure): raise exception.KeystoneUnauthorized() except (ks_exception.NoMatchingPlugin, ks_exception.MissingRequiredOptions) as e: raise exception.ConfigInvalid(str(e)) except Exception as e: LOG.exception('Keystone request failed with unexpected exception') raise exception.KeystoneFailure(str(e)) return wrapper @ks_exceptions def get_session(group, **session_kwargs): """Loads session object from options in a configuration file section. The session_kwargs will be passed directly to keystoneauth1 Session and will override the values loaded from config. Consult keystoneauth1 docs for available options. :param group: name of the config section to load session options from """ return ks_loading.load_session_from_conf_options( CONF, group, **session_kwargs) @ks_exceptions def get_auth(group, **auth_kwargs): """Loads auth plugin from options in a configuration file section. The auth_kwargs will be passed directly to keystoneauth1 auth plugin and will override the values loaded from config. Note that the accepted kwargs will depend on auth plugin type as defined by [group]auth_type option. Consult keystoneauth1 docs for available auth plugins and their options. :param group: name of the config section to load auth plugin options from """ try: auth = ks_loading.load_auth_from_conf_options(CONF, group, **auth_kwargs) except ks_exception.MissingRequiredOptions: LOG.error('Failed to load auth plugin from group %s', group) raise return auth @ks_exceptions def get_adapter(group, **adapter_kwargs): """Loads adapter from options in a configuration file section. 
The adapter_kwargs will be passed directly to keystoneauth1 Adapter and will override the values loaded from config. Consult keystoneauth1 docs for available adapter options. :param group: name of the config section to load adapter options from """ return ks_loading.load_adapter_from_conf_options(CONF, group, **adapter_kwargs) def get_endpoint(group, **adapter_kwargs): """Get an endpoint from an adapter. The adapter_kwargs will be passed directly to keystoneauth1 Adapter and will override the values loaded from config. Consult keystoneauth1 docs for available adapter options. :param group: name of the config section to load adapter options from :raises: CatalogNotFound if the endpoint is not found """ result = get_adapter(group, **adapter_kwargs).get_endpoint() if not result: service_type = adapter_kwargs.get( 'service_type', getattr(getattr(CONF, group), 'service_type', group)) endpoint_type = adapter_kwargs.get('endpoint_type', 'internal') raise exception.CatalogNotFound( service_type=service_type, endpoint_type=endpoint_type) return result def get_service_auth(context, endpoint, service_auth): """Create auth plugin wrapping both user and service auth. When properly configured and using auth_token middleware, requests with valid service auth will not fail if the user token is expired. Ideally we would use the plugin provided by auth_token middleware however this plugin isn't serialized yet. """ # TODO(pas-ha) use auth plugin from context when it is available user_auth = token_endpoint.Token(endpoint, context.auth_token) return service_token.ServiceTokenAuthWrapper(user_auth=user_auth, service_auth=service_auth) def register_auth_opts(conf, group, service_type=None): """Register session- and auth-related options Registers only basic auth options shared by all auth plugins. The rest are registered at runtime depending on auth plugin used. """ ks_loading.register_session_conf_options(conf, group) ks_loading.register_auth_conf_options(conf, group) CONF.set_default('auth_type', default='password', group=group) ks_loading.register_adapter_conf_options(conf, group) conf.set_default('valid_interfaces', DEFAULT_VALID_INTERFACES, group=group) if service_type: conf.set_default('service_type', service_type, group=group) else: types = os_service_types.get_service_types() key = 'ironic-inspector' if group == 'inspector' else group service_types = types.service_types_by_project.get(key) if service_types: conf.set_default('service_type', service_types[0], group=group) def add_auth_opts(options, service_type=None): """Add auth options to sample config As these are dynamically registered at runtime, this adds options for most used auth_plugins when generating sample config. 
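
    A minimal usage sketch (the option list and service type below are
    hypothetical, chosen only for illustration)::

        opts = add_auth_opts([cfg.StrOpt('endpoint_override')],
                             service_type='baremetal')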
""" def add_options(opts, opts_to_add): for new_opt in opts_to_add: for opt in opts: if opt.name == new_opt.name: break else: opts.append(new_opt) opts = copy.deepcopy(options) opts.insert(0, ks_loading.get_auth_common_conf_options()[0]) # NOTE(dims): There are a lot of auth plugins, we just generate # the config options for a few common ones plugins = ['password', 'v2password', 'v3password'] for name in plugins: plugin = ks_loading.get_plugin_loader(name) add_options(opts, ks_loading.get_auth_plugin_conf_options(plugin)) add_options(opts, ks_loading.get_session_conf_options()) if service_type: adapter_opts = ks_loading.get_adapter_conf_options( include_deprecated=False) # adding defaults for valid interfaces cfg.set_defaults(adapter_opts, service_type=service_type, valid_interfaces=DEFAULT_VALID_INTERFACES) add_options(opts, adapter_opts) opts.sort(key=lambda x: x.name) return opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/mdns.py0000664000175000017500000003012600000000000017217 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Multicast DNS implementation for API discovery. This implementation follows RFC 6763 as clarified by the API SIG guideline https://review.opendev.org/651222. """ import collections import ipaddress import logging import socket import time from urllib import parse as urlparse from oslo_config import cfg from oslo_config import types as cfg_types import zeroconf from ironic_lib.common.i18n import _ from ironic_lib import exception from ironic_lib import utils opts = [ cfg.IntOpt('registration_attempts', min=1, default=5, help='Number of attempts to register a service. Currently ' 'has to be larger than 1 because of race conditions ' 'in the zeroconf library.'), cfg.IntOpt('lookup_attempts', min=1, default=3, help='Number of attempts to lookup a service.'), cfg.Opt('params', # This is required for values that contain commas. type=cfg_types.Dict(cfg_types.String(quotes=True)), default={}, help='Additional parameters to pass for the registered ' 'service.'), cfg.ListOpt('interfaces', help='List of IP addresses of interfaces to use for mDNS. ' 'Defaults to all interfaces on the system.'), ] CONF = cfg.CONF opt_group = cfg.OptGroup(name='mdns', title='Options for multicast DNS') CONF.register_group(opt_group) CONF.register_opts(opts, opt_group) LOG = logging.getLogger(__name__) _MDNS_DOMAIN = '_openstack._tcp.local.' _endpoint = collections.namedtuple('Endpoint', ['addresses', 'hostname', 'port', 'params']) class Zeroconf(object): """Multicast DNS implementation client and server. Uses threading internally, so there is no start method. It starts automatically on creation. .. warning:: The underlying library does not yet support IPv6. 
""" def __init__(self): """Initialize and start the mDNS server.""" interfaces = (CONF.mdns.interfaces if CONF.mdns.interfaces else zeroconf.InterfaceChoice.All) # If interfaces are set, let zeroconf auto-detect the version ip_version = None if CONF.mdns.interfaces else zeroconf.IPVersion.All self._zc = zeroconf.Zeroconf(interfaces=interfaces, ip_version=ip_version) self._registered = [] def register_service(self, service_type, endpoint, params=None): """Register a service. This call announces the new services via multicast and instructs the built-in server to respond to queries about it. :param service_type: OpenStack service type, e.g. "baremetal". :param endpoint: full endpoint to reach the service. :param params: optional properties as a dictionary. :raises: :exc:`.ServiceRegistrationFailure` if the service cannot be registered, e.g. because of conflicts. """ parsed = _parse_endpoint(endpoint, service_type) all_params = CONF.mdns.params.copy() if params: all_params.update(params) all_params.update(parsed.params) properties = { (key.encode('utf-8') if isinstance(key, str) else key): (value.encode('utf-8') if isinstance(value, str) else value) for key, value in all_params.items() } # TODO(dtantsur): allow overriding TTL values via configuration info = zeroconf.ServiceInfo(_MDNS_DOMAIN, '%s.%s' % (service_type, _MDNS_DOMAIN), addresses=parsed.addresses, port=parsed.port, properties=properties, server=parsed.hostname) LOG.debug('Registering %s via mDNS', info) # Work around a potential race condition in the registration code: # https://github.com/jstasiak/python-zeroconf/issues/163 delay = 0.1 try: for attempt in range(CONF.mdns.registration_attempts): try: self._zc.register_service(info) except zeroconf.NonUniqueNameException: LOG.debug('Could not register %s - conflict', info) if attempt == CONF.mdns.registration_attempts - 1: raise # reset the cache to purge learned records and retry self._zc.cache = zeroconf.DNSCache() time.sleep(delay) delay *= 2 else: break except zeroconf.Error as exc: raise exception.ServiceRegistrationFailure( service=service_type, error=exc) self._registered.append(info) def get_endpoint(self, service_type, skip_loopback=True, skip_link_local=False): """Get an endpoint and its properties from mDNS. If the requested endpoint is already in the built-in server cache, and its TTL is not exceeded, the cached value is returned. :param service_type: OpenStack service type. :param skip_loopback: Whether to ignore loopback addresses. :param skip_link_local: Whether to ignore link local V6 addresses. :returns: tuple (endpoint URL, properties as a dict). :raises: :exc:`.ServiceLookupFailure` if the service cannot be found. 
""" delay = 0.1 for attempt in range(CONF.mdns.lookup_attempts): name = '%s.%s' % (service_type, _MDNS_DOMAIN) info = self._zc.get_service_info(name, name) if info is not None: break elif attempt == CONF.mdns.lookup_attempts - 1: raise exception.ServiceLookupFailure(service=service_type) else: time.sleep(delay) delay *= 2 all_addr = info.parsed_addresses() # Try to find the first routable address fallback = None for addr in all_addr: try: loopback = ipaddress.ip_address(addr).is_loopback except ValueError: LOG.debug('Skipping invalid IP address %s', addr) continue else: if loopback and skip_loopback: LOG.debug('Skipping loopback IP address %s', addr) continue if utils.get_route_source(addr, skip_link_local): address = addr break elif fallback is None: fallback = addr else: if fallback is None: raise exception.ServiceLookupFailure( _('None of addresses %(addr)s for service %(service)s ' 'are valid') % {'addr': all_addr, 'service': service_type}) else: LOG.warning('None of addresses %s seem routable, using %s', all_addr, fallback) address = fallback properties = {} for key, value in info.properties.items(): try: if isinstance(key, bytes): key = key.decode('utf-8') except UnicodeError as exc: raise exception.ServiceLookupFailure( _('Invalid properties for service %(svc)s. Cannot decode ' 'key %(key)r: %(exc)r') % {'svc': service_type, 'key': key, 'exc': exc}) try: if isinstance(value, bytes): value = value.decode('utf-8') except UnicodeError as exc: LOG.debug('Cannot convert value %(value)r for key %(key)s ' 'to string, assuming binary: %(exc)s', {'key': key, 'value': value, 'exc': exc}) properties[key] = value path = properties.pop('path', '') protocol = properties.pop('protocol', None) if not protocol: if info.port == 80: protocol = 'http' else: protocol = 'https' if info.server.endswith('.local.'): # Local hostname means that the catalog lists an IP address, # so use it host = address if int(ipaddress.ip_address(host).version) == 6: host = '[%s]' % host else: # Otherwise use the provided hostname. host = info.server.rstrip('.') return ('{proto}://{host}:{port}{path}'.format(proto=protocol, host=host, port=info.port, path=path), properties) def close(self): """Shut down mDNS and unregister services. .. note:: If another server is running for the same services, it will re-register them immediately. """ for info in self._registered: try: self._zc.unregister_service(info) except Exception: LOG.exception('Cound not unregister mDNS service %s', info) self._zc.close() def __enter__(self): return self def __exit__(self, *args): self.close() def get_endpoint(service_type): """Get an endpoint and its properties from mDNS. If the requested endpoint is already in the built-in server cache, and its TTL is not exceeded, the cached value is returned. :param service_type: OpenStack service type. :returns: tuple (endpoint URL, properties as a dict). :raises: :exc:`.ServiceLookupFailure` if the service cannot be found. 
""" with Zeroconf() as zc: return zc.get_endpoint(service_type) def _parse_endpoint(endpoint, service_type=None): params = {} url = urlparse.urlparse(endpoint) port = url.port if port is None: if url.scheme == 'https': port = 443 else: port = 80 addresses = [] hostname = url.hostname try: infos = socket.getaddrinfo(hostname, port, 0, socket.IPPROTO_TCP) except socket.error as exc: raise exception.ServiceRegistrationFailure( service=service_type, error=_('Could not resolve hostname %(host)s: %(exc)s') % {'host': hostname, 'exc': exc}) for info in infos: ip = info[4][0] if ip == hostname: # we need a host name for the service record. if what we have in # the catalog is an IP address, use the local hostname instead hostname = None # zeroconf requires addresses in network format ip = socket.inet_pton(info[0], ip) if ip not in addresses: addresses.append(ip) if not addresses: raise exception.ServiceRegistrationFailure( service=service_type, error=_('No suitable addresses found for %s') % url.hostname) # avoid storing information that can be derived from existing data if url.path not in ('', '/'): params['path'] = url.path if (not (port == 80 and url.scheme == 'http') and not (port == 443 and url.scheme == 'https')): params['protocol'] = url.scheme # zeroconf is pretty picky about having the trailing dot if hostname is not None and not hostname.endswith('.'): hostname += '.' return _endpoint(addresses, hostname, port, params) def list_opts(): """Entry point for oslo-config-generator.""" return [('mdns', opts)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/metrics.py0000664000175000017500000002266100000000000017731 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import functools import random import time from ironic_lib.common.i18n import _ from ironic_lib import exception class Timer(object): """A timer decorator and context manager. This metric type times the decorated method or code running inside the context manager, and emits the time as the metric value. It is bound to this MetricLogger. For example:: from ironic_lib import metrics_utils METRICS = metrics_utils.get_metrics_logger() @METRICS.timer('foo') def foo(bar, baz): print bar, baz with METRICS.timer('foo'): do_something() """ def __init__(self, metrics, name): """Init the decorator / context manager. :param metrics: The metric logger :param name: The metric name """ if not isinstance(name, str): raise TypeError(_("The metric name is expected to be a string. 
" "Value is %s") % name) self.metrics = metrics self.name = name self._start = None def __call__(self, f): @functools.wraps(f) def wrapped(*args, **kwargs): start = _time() result = f(*args, **kwargs) duration = _time() - start # Log the timing data (in ms) self.metrics.send_timer(self.metrics.get_metric_name(self.name), duration * 1000) return result return wrapped def __enter__(self): self._start = _time() def __exit__(self, exc_type, exc_val, exc_tb): duration = _time() - self._start # Log the timing data (in ms) self.metrics.send_timer(self.metrics.get_metric_name(self.name), duration * 1000) class Counter(object): """A counter decorator and context manager. This metric type increments a counter every time the decorated method or context manager is executed. It is bound to this MetricLogger. For example:: from ironic_lib import metrics_utils METRICS = metrics_utils.get_metrics_logger() @METRICS.counter('foo') def foo(bar, baz): print bar, baz with METRICS.counter('foo'): do_something() """ def __init__(self, metrics, name, sample_rate): """Init the decorator / context manager. :param metrics: The metric logger :param name: The metric name :param sample_rate: Probabilistic rate at which the values will be sent """ if not isinstance(name, str): raise TypeError(_("The metric name is expected to be a string. " "Value is %s") % name) if (sample_rate is not None and (sample_rate < 0.0 or sample_rate > 1.0)): msg = _("sample_rate is set to %s. Value must be None " "or in the interval [0.0, 1.0]") % sample_rate raise ValueError(msg) self.metrics = metrics self.name = name self.sample_rate = sample_rate def __call__(self, f): @functools.wraps(f) def wrapped(*args, **kwargs): self.metrics.send_counter( self.metrics.get_metric_name(self.name), 1, sample_rate=self.sample_rate) result = f(*args, **kwargs) return result return wrapped def __enter__(self): self.metrics.send_counter(self.metrics.get_metric_name(self.name), 1, sample_rate=self.sample_rate) def __exit__(self, exc_type, exc_val, exc_tb): pass class Gauge(object): """A gauge decorator. This metric type returns the value of the decorated method as a metric every time the method is executed. It is bound to this MetricLogger. For example:: from ironic_lib import metrics_utils METRICS = metrics_utils.get_metrics_logger() @METRICS.gauge('foo') def add_foo(bar, baz): return (bar + baz) """ def __init__(self, metrics, name): """Init the decorator / context manager. :param metrics: The metric logger :param name: The metric name """ if not isinstance(name, str): raise TypeError(_("The metric name is expected to be a string. " "Value is %s") % name) self.metrics = metrics self.name = name def __call__(self, f): @functools.wraps(f) def wrapped(*args, **kwargs): result = f(*args, **kwargs) self.metrics.send_gauge(self.metrics.get_metric_name(self.name), result) return result return wrapped def _time(): """Wraps time.time() for simpler testing.""" return time.time() class MetricLogger(object, metaclass=abc.ABCMeta): """Abstract class representing a metrics logger. A MetricLogger sends data to a backend (noop or statsd). The data can be a gauge, a counter, or a timer. The data sent to the backend is composed of: - a full metric name - a numeric value The format of the full metric name is: _prefixname where: - _prefix: [global_prefix][uuid][host_name]prefix - name: the name of this metric - : the delimiter. Default is '.' """ def __init__(self, prefix='', delimiter='.'): """Init a MetricLogger. :param prefix: Prefix for this metric logger. 
    """

    def __init__(self, prefix='', delimiter='.'):
        """Init a MetricLogger.

        :param prefix: Prefix for this metric logger. This string will
            prefix all metric names.
        :param delimiter: Delimiter used to generate the full metric name.
        """
        self._prefix = prefix
        self._delimiter = delimiter

    def get_metric_name(self, name):
        """Get the full metric name.

        The format of the full metric name is:
        _prefix<delim>name
        where:
          - _prefix: [global_prefix<delim>][uuid<delim>][host_name<delim>]prefix
          - name: the name of this metric
          - <delim>: the delimiter. Default is '.'

        :param name: The metric name.
        :return: The full metric name, with logger prefix, as a string.
        """
        if not self._prefix:
            return name
        return self._delimiter.join([self._prefix, name])

    def send_gauge(self, name, value):
        """Send gauge metric data.

        Gauges are simple values. The backend will set the value of gauge
        'name' to 'value'.

        :param name: Metric name
        :param value: Metric numeric value that will be sent to the backend
        """
        self._gauge(name, value)

    def send_counter(self, name, value, sample_rate=None):
        """Send counter metric data.

        Counters are used to count how many times an event occurred.
        The backend will increment the counter 'name' by the value 'value'.

        Optionally, specify sample_rate in the interval [0.0, 1.0] to
        sample data probabilistically where::

            P(send metric data) = sample_rate

        If sample_rate is None, then always send metric data, but do not
        have the backend send sample rate information (if supported).

        :param name: Metric name
        :param value: Metric numeric value that will be sent to the backend
        :param sample_rate: Probabilistic rate at which the values will be
            sent. Value must be None or in the interval [0.0, 1.0].
        """
        if (sample_rate is None or random.random() < sample_rate):
            return self._counter(name, value,
                                 sample_rate=sample_rate)

    def send_timer(self, name, value):
        """Send timer data.

        Timers are used to measure how long it took to do something.

        :param name: Metric name
        :param value: Metric numeric value that will be sent to the backend
        """
        self._timer(name, value)

    def timer(self, name):
        return Timer(self, name)

    def counter(self, name, sample_rate=None):
        return Counter(self, name, sample_rate)

    def gauge(self, name):
        return Gauge(self, name)

    @abc.abstractmethod
    def _gauge(self, name, value):
        """Abstract method for backends to implement gauge behavior."""

    @abc.abstractmethod
    def _counter(self, name, value, sample_rate=None):
        """Abstract method for backends to implement counter behavior."""

    @abc.abstractmethod
    def _timer(self, name, value):
        """Abstract method for backends to implement timer behavior."""

    def get_metrics_data(self):
        """Return the metrics collection, if available."""
        raise exception.MetricsNotSupported()


class NoopMetricLogger(MetricLogger):
    """Noop metric logger that throws away all metric data."""

    def _gauge(self, name, value):
        pass

    def _counter(self, name, value, sample_rate=None):
        pass

    def _timer(self, name, value):
        pass
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/metrics_collector.py0000664000175000017500000001064200000000000021773 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_concurrency import lockutils
from oslo_config import cfg

from ironic_lib import metrics

CONF = cfg.CONF

STATISTIC_DATA = {}


class DictCollectionMetricLogger(metrics.MetricLogger):
    """Metric logger that collects internal counters."""

    # These are internal typing labels in Ironic-lib.
    GAUGE_TYPE = 'g'
    COUNTER_TYPE = 'c'
    TIMER_TYPE = 'ms'

    def __init__(self, prefix, delimiter='.'):
        """Initialize the Collection Metrics Logger.

        The logger stores metrics data in a dictionary which can then be
        retrieved by the program utilizing it whenever needed, using a
        get_metrics_data call to return the metrics data structure.

        :param prefix: Prefix for this metric logger.
        :param delimiter: Delimiter used to generate the full metric name.
        """
        super(DictCollectionMetricLogger, self).__init__(
            prefix, delimiter=delimiter)

    @lockutils.synchronized('statistics-update')
    def _send(self, name, value, metric_type, sample_rate=None):
        """Send the metrics to be stored in memory.

        This method updates the internal dictionary to facilitate the
        collection of statistics, and their retrieval by consumers or
        plugins in Ironic via the `get_metrics_data` method.

        :param name: Metric name
        :param value: Metric value
        :param metric_type: Metric type (GAUGE_TYPE, COUNTER_TYPE),
            TIMER_TYPE is not supported.
        :param sample_rate: Not Applicable.
        """
        global STATISTIC_DATA
        if metric_type == self.TIMER_TYPE:
            if name in STATISTIC_DATA:
                STATISTIC_DATA[name] = {
                    'count': STATISTIC_DATA[name]['count'] + 1,
                    'sum': STATISTIC_DATA[name]['sum'] + value,
                    'type': 'timer'
                }
            else:
                # Set initial data value.
                STATISTIC_DATA[name] = {
                    'count': 1,
                    'sum': value,
                    'type': 'timer'
                }
        elif metric_type == self.GAUGE_TYPE:
            STATISTIC_DATA[name] = {
                'value': value,
                'type': 'gauge'
            }
        elif metric_type == self.COUNTER_TYPE:
            if name in STATISTIC_DATA:
                # NOTE(TheJulia): Value is hard coded for counter
                # data types as a value of 1.
                STATISTIC_DATA[name] = {
                    'count': STATISTIC_DATA[name]['count'] + 1,
                    'type': 'counter'
                }
            else:
                STATISTIC_DATA[name] = {
                    'count': 1,
                    'type': 'counter'
                }

    def _gauge(self, name, value):
        return self._send(name, value, self.GAUGE_TYPE)

    def _counter(self, name, value, sample_rate=None):
        return self._send(name, value, self.COUNTER_TYPE,
                          sample_rate=sample_rate)

    def _timer(self, name, value):
        return self._send(name, value, self.TIMER_TYPE)

    def get_metrics_data(self):
        """Return the metrics collection dictionary.

        :returns: Dictionary containing the keys and values of data stored
            via the metrics collection hooks. The values themselves are
            dictionaries which contain a type field, indicating if the
            statistic is a counter, gauge, or timer. A counter has a
            `count` field, a gauge has a `value` field, and a 'timer' has
            'count' and 'sum' fields. The multiple fields for a timer type
            allow additional statistics to be implied from the data once
            collected and compared over time.
        """
        return STATISTIC_DATA
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/metrics_statsd.py0000664000175000017500000000720500000000000021310 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import logging import socket from oslo_config import cfg from ironic_lib import metrics statsd_opts = [ cfg.StrOpt('statsd_host', default='localhost', help='Host for use with the statsd backend.'), cfg.PortOpt('statsd_port', default=8125, help='Port to use with the statsd backend.') ] CONF = cfg.CONF CONF.register_opts(statsd_opts, group='metrics_statsd') LOG = logging.getLogger(__name__) class StatsdMetricLogger(metrics.MetricLogger): """Metric logger that reports data via the statsd protocol.""" GAUGE_TYPE = 'g' COUNTER_TYPE = 'c' TIMER_TYPE = 'ms' def __init__(self, prefix, delimiter='.', host=None, port=None): """Initialize a StatsdMetricLogger The logger uses the given prefix list, delimiter, host, and port. :param prefix: Prefix for this metric logger. :param delimiter: Delimiter used to generate the full metric name. :param host: The statsd host :param port: The statsd port """ super(StatsdMetricLogger, self).__init__(prefix, delimiter=delimiter) self._host = host or CONF.metrics_statsd.statsd_host self._port = port or CONF.metrics_statsd.statsd_port self._target = (self._host, self._port) def _send(self, name, value, metric_type, sample_rate=None): """Send metrics to the statsd backend :param name: Metric name :param value: Metric value :param metric_type: Metric type (GAUGE_TYPE, COUNTER_TYPE, or TIMER_TYPE) :param sample_rate: Probabilistic rate at which the values will be sent """ if sample_rate is None: metric = '%s:%s|%s' % (name, value, metric_type) else: metric = '%s:%s|%s@%s' % (name, value, metric_type, sample_rate) # Ideally, we'd cache a sending socket in self, but that # results in a socket getting shared by multiple green threads. with contextlib.closing(self._open_socket()) as sock: try: sock.settimeout(0.0) sock.sendto(metric.encode(), self._target) except socket.error as e: LOG.warning("Failed to send the metric value to host " "%(host)s, port %(port)s. Error: %(error)s", {'host': self._host, 'port': self._port, 'error': e}) def _open_socket(self): return socket.socket(socket.AF_INET, socket.SOCK_DGRAM) def _gauge(self, name, value): return self._send(name, value, self.GAUGE_TYPE) def _counter(self, name, value, sample_rate=None): return self._send(name, value, self.COUNTER_TYPE, sample_rate=sample_rate) def _timer(self, name, value): return self._send(name, value, self.TIMER_TYPE) def list_opts(): """Entry point for oslo-config-generator.""" return [('metrics_statsd', statsd_opts)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/metrics_utils.py0000664000175000017500000001013200000000000021137 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.

from oslo_config import cfg

from ironic_lib.common.i18n import _
from ironic_lib import exception
from ironic_lib import metrics
from ironic_lib import metrics_collector
from ironic_lib import metrics_statsd

metrics_opts = [
    cfg.StrOpt('backend',
               default='noop',
               choices=[
                   ('noop', 'Do nothing in relation to metrics.'),
                   ('statsd', 'Transmits metrics data to a statsd backend.'),
                   ('collector', 'Collects metrics data and saves it in '
                                 'memory for use by the running '
                                 'application.'),
               ],
               help='Backend to use for the metrics system.'),
    cfg.BoolOpt('prepend_host',
                default=False,
                help='Prepend the hostname to all metric names. '
                     'The format of metric names is '
                     '[global_prefix.][host_name.]prefix.metric_name.'),
    cfg.BoolOpt('prepend_host_reverse',
                default=True,
                help='Split the prepended host value by "." and reverse it '
                     '(to better match the reverse hierarchical form of '
                     'domain names).'),
    cfg.StrOpt('global_prefix',
               help='Prefix all metric names with this value. '
                    'By default, there is no global prefix. '
                    'The format of metric names is '
                    '[global_prefix.][host_name.]prefix.metric_name.')
]

CONF = cfg.CONF
CONF.register_opts(metrics_opts, group='metrics')


def get_metrics_logger(prefix='', backend=None, host=None, delimiter='.'):
    """Return a metric logger with the specified prefix.

    The format of the prefix is:
    [global_prefix<delim>][host_name<delim>]prefix
    where <delim> is the delimiter (default is '.')

    :param prefix: Prefix for this metric logger. Value should be a string
        or None.
    :param backend: Backend to use for the metrics system. Possible values
        are 'noop', 'statsd' and 'collector'.
    :param host: Name of this node.
    :param delimiter: Delimiter to use for the metrics name.
    :return: The new MetricLogger.
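
    A minimal usage sketch (the prefix, backend and metric name below are
    illustrative)::

        METRICS = get_metrics_logger('api', backend='collector')

        @METRICS.timer('list_nodes')
        def list_nodes():
            ...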
    """
    if not isinstance(prefix, str):
        msg = (_("This metric prefix (%s) is of unsupported type. "
                 "Value should be a string or None") % str(prefix))
        raise exception.InvalidMetricConfig(msg)

    if CONF.metrics.prepend_host and host:
        if CONF.metrics.prepend_host_reverse:
            host = '.'.join(reversed(host.split('.')))
        if prefix:
            prefix = delimiter.join([host, prefix])
        else:
            prefix = host

    if CONF.metrics.global_prefix:
        if prefix:
            prefix = delimiter.join([CONF.metrics.global_prefix, prefix])
        else:
            prefix = CONF.metrics.global_prefix

    backend = backend or CONF.metrics.backend
    if backend == 'statsd':
        return metrics_statsd.StatsdMetricLogger(prefix,
                                                 delimiter=delimiter)
    elif backend == 'noop':
        return metrics.NoopMetricLogger(prefix, delimiter=delimiter)
    elif backend == 'collector':
        return metrics_collector.DictCollectionMetricLogger(
            prefix, delimiter=delimiter)
    else:
        msg = (_("The backend is set to an unsupported type: %s. "
                 "Value should be 'noop', 'statsd' or 'collector'.")
               % backend)
        raise exception.InvalidMetricConfig(msg)


def list_opts():
    """Entry point for oslo-config-generator."""
    return [('metrics', metrics_opts)]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/qemu_img.py0000664000175000017500000001051400000000000020060 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import os

from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import imageutils
from oslo_utils import units
import tenacity

from ironic_lib.common.i18n import _
from ironic_lib import utils

opts = [
    cfg.IntOpt('image_convert_memory_limit',
               default=2048,
               help='Memory limit for "qemu-img convert" in MiB. Implemented '
                    'via the address space resource limit.'),
    cfg.IntOpt('image_convert_attempts',
               default=3,
               help='Number of attempts to convert an image.'),
]

LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.register_opts(opts, group='disk_utils')

# Limit the memory address space when running qemu-img (the exact limit
# comes from the image_convert_memory_limit option above)
QEMU_IMG_LIMITS = None


def _qemu_img_limits():
    global QEMU_IMG_LIMITS
    if QEMU_IMG_LIMITS is None:
        QEMU_IMG_LIMITS = processutils.ProcessLimits(
            address_space=CONF.disk_utils.image_convert_memory_limit
            * units.Mi)
    return QEMU_IMG_LIMITS


def _retry_on_res_temp_unavailable(exc):
    if (isinstance(exc, processutils.ProcessExecutionError)
            and ('Resource temporarily unavailable' in exc.stderr
                 or 'Cannot allocate memory' in exc.stderr)):
        return True
    return False


def image_info(path):
    """Return an object containing the parsed output from qemu-img info."""
    if not os.path.exists(path):
        raise FileNotFoundError(_("File %s does not exist") % path)

    out, err = utils.execute(
        'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
        '--output=json', prlimit=_qemu_img_limits())
    return imageutils.QemuImgInfo(out, format='json')


@tenacity.retry(
    retry=tenacity.retry_if_exception(_retry_on_res_temp_unavailable),
    stop=tenacity.stop_after_attempt(CONF.disk_utils.image_convert_attempts),
    reraise=True)
def convert_image(source, dest, out_format, run_as_root=False, cache=None,
                  out_of_order=False, sparse_size=None):
    """Convert image to other format.
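
    A minimal usage sketch (the paths and formats below are illustrative)::

        info = image_info('/tmp/image.qcow2')
        if info.file_format != 'raw':
            convert_image('/tmp/image.qcow2', '/tmp/image.raw', 'raw')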
    """
    cmd = ['qemu-img', 'convert', '-O', out_format]
    if cache is not None:
        cmd += ['-t', cache]
    if sparse_size is not None:
        cmd += ['-S', sparse_size]
    if out_of_order:
        cmd.append('-W')
    cmd += [source, dest]

    # NOTE(TheJulia): Statically set the MALLOC_ARENA_MAX to prevent leaking
    # and the creation of new malloc arenas which will consume the system
    # memory. If limited to 1, qemu-img consumes ~250 MB of RAM, but when
    # another thread tries to access a locked section of memory in use with
    # another thread, then by default a new malloc arena is created,
    # which essentially balloons the memory requirement of the machine.
    # Default for qemu-img is 8 * nCPU * ~250MB (based on defaults +
    # thread/code/process/library overhead. In other words, 64 GB. Limiting
    # this to 3 keeps the memory utilization in happy cases below the overall
    # threshold which is in place in case a malicious image is attempted to
    # be passed through qemu-img.
    env_vars = {'MALLOC_ARENA_MAX': '3'}
    try:
        utils.execute(*cmd, run_as_root=run_as_root,
                      prlimit=_qemu_img_limits(),
                      use_standard_locale=True,
                      env_variables=env_vars)
    except processutils.ProcessExecutionError as e:
        if ('Resource temporarily unavailable' in e.stderr
                or 'Cannot allocate memory' in e.stderr):
            LOG.debug('Failed to convert image, retrying. Error: %s', e)
            # Sync disk caches before the next attempt
            utils.execute('sync')
        raise


def list_opts():
    """Entry point for oslo-config-generator."""
    return [('disk_utils', opts)]
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724334195.7736764 ironic-lib-6.2.0/ironic_lib/tests/0000775000175000017500000000000000000000000017044 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/__init__.py0000664000175000017500000000000000000000000021143 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/base.py0000664000175000017500000000602500000000000020333 0ustar00zuulzuul00000000000000# Copyright 2017 Cisco Systems, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Common utilities and classes across all unit tests."""

import subprocess

from oslo_concurrency import processutils
from oslo_config import fixture as config_fixture
from oslotest import base as test_base

from ironic_lib import utils


class IronicLibTestCase(test_base.BaseTestCase):
    """Test case base class for all unit tests except callers of utils.execute.

    This test class prevents calls to the utils.execute() /
    processutils.execute() and similar functions.
    """

    # By default block execution of utils.execute() and related functions.
    block_execute = True

    def setUp(self):
        super(IronicLibTestCase, self).setUp()

        # Make sure config overrides do not leak from test to test.
        self.cfg_fixture = self.useFixture(config_fixture.Config())

        # Ban running external processes via 'execute' like functions. If the
        # patched function is called, an exception is raised to warn the
        # tester.
        if self.block_execute:
            # NOTE(jlvillal): Intentionally not using mock, because mocking
            # a mock causes things to not work correctly, as does using
            # autospec=True. By using a simple function we can then mock it
            # without issue.
            self.patch(processutils, 'execute', do_not_call)
            self.patch(subprocess, 'call', do_not_call)
            self.patch(subprocess, 'check_call', do_not_call)
            self.patch(subprocess, 'check_output', do_not_call)
            self.patch(utils, 'execute', do_not_call)

            # subprocess.Popen is a class
            self.patch(subprocess, 'Popen', DoNotCallPopen)

    def config(self, **kw):
        """Override config options for a test."""
        self.cfg_fixture.config(**kw)


def do_not_call(*args, **kwargs):
    """Helper function to raise an exception if it is called"""
    raise Exception(
        "Don't call ironic_lib.utils.execute() / "
        "processutils.execute() or similar functions in tests!")


class DoNotCallPopen(object):
    """Helper class to mimic subprocess.Popen().

    Its job is to raise an exception if it is called. We create stub
    functions so mocks that use autospec=True will work.
    """

    def __init__(self, *args, **kwargs):
        do_not_call(*args, **kwargs)

    def communicate(self, input=None):
        pass

    def kill(self):
        pass

    def poll(self):
        pass

    def terminate(self):
        pass

    def wait(self):
        pass
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_base.py0000664000175000017500000000670300000000000021375 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess
from unittest import mock

from oslo_concurrency import processutils

from ironic_lib.tests import base
from ironic_lib import utils


class BlockExecuteTestCase(base.IronicLibTestCase):
    """Test to ensure we block access to the 'execute' type functions"""

    def test_exception_raised_for_execute(self):
        execute_functions = (processutils.execute, subprocess.Popen,
                             subprocess.call, subprocess.check_call,
                             subprocess.check_output, utils.execute)

        for function_name in execute_functions:
            exc = self.assertRaises(Exception, function_name, ["echo", "%s" % function_name])  # noqa
            # Have to use 'noqa' as we are raising plain Exception and we will
            # get H202 error in 'pep8' check.
            self.assertEqual(
                "Don't call ironic_lib.utils.execute() / "
                "processutils.execute() or similar functions in tests!",
                "%s" % exc)

    @mock.patch.object(utils, "execute", autospec=True)
    def test_can_mock_execute(self, mock_exec):
        # NOTE(jlvillal): We had discovered an issue where mocking wasn't
        # working because we had used a mock to block access to the execute
        # functions. This caused us to "mock a mock" and didn't work
        # correctly. We want to make sure that we can mock our execute
        # functions even with our "block execute" code.
        utils.execute("ls")
        utils.execute("echo")
        self.assertEqual(2, mock_exec.call_count)

    @mock.patch.object(processutils, "execute", autospec=True)
    def test_exception_raised_for_execute_parent_mocked(self, mock_exec):
        # Make sure that even if we mock the parent execute function, that we
        # still get an exception for a child. So in this case
        # ironic_lib.utils.execute() calls processutils.execute().
Make sure an # exception is raised even though we mocked processutils.execute() exc = self.assertRaises(Exception, utils.execute, "ls") # noqa # Have to use 'noqa' as we are raising plain Exception and we will get # H202 error in 'pep8' check. self.assertEqual( "Don't call ironic_lib.utils.execute() / " "processutils.execute() or similar functions in tests!", "%s" % exc) class DontBlockExecuteTestCase(base.IronicLibTestCase): """Ensure we can turn off blocking access to 'execute' type functions""" # Don't block the execute function block_execute = False @mock.patch.object(processutils, "execute", autospec=True) def test_no_exception_raised_for_execute(self, mock_exec): # Make sure we can call ironic_lib.utils.execute() even though we # didn't mock it. We do mock processutils.execute() so we don't # actually execute anything. utils.execute("ls") utils.execute("echo") self.assertEqual(2, mock_exec.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_basic_auth.py0000664000175000017500000002054100000000000022561 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import json import os import tempfile from unittest import mock from ironic_lib import auth_basic from ironic_lib import exception from ironic_lib.tests import base class TestAuthBasic(base.IronicLibTestCase): def write_auth_file(self, data=None): if not data: data = '\n' with tempfile.NamedTemporaryFile(mode='w', delete=False) as f: f.write(data) self.addCleanup(os.remove, f.name) return f.name def test_middleware_authenticate(self): auth_file = self.write_auth_file( 'myName:$2y$05$lE3eGtyj41jZwrzS87KTqe6.' 'JETVCWBkc32C63UP2aYrGoYOEpbJm\n\n\n') app = mock.Mock() start_response = mock.Mock() middleware = auth_basic.BasicAuthMiddleware(app, auth_file) env = { 'HTTP_AUTHORIZATION': 'Basic bXlOYW1lOm15UGFzc3dvcmQ=' } result = middleware(env, start_response) self.assertEqual(app.return_value, result) start_response.assert_not_called() def test_middleware_unauthenticated(self): auth_file = self.write_auth_file( 'myName:$2y$05$lE3eGtyj41jZwrzS87KTqe6.' 'JETVCWBkc32C63UP2aYrGoYOEpbJm\n\n\n') app = mock.Mock() start_response = mock.Mock() middleware = auth_basic.BasicAuthMiddleware(app, auth_file) env = {'REQUEST_METHOD': 'GET'} body = middleware(env, start_response) decoded = json.loads(body[0].decode()) self.assertEqual({'error': {'message': 'Authorization required', 'code': 401}}, decoded) start_response.assert_called_once_with( '401 Unauthorized', [('WWW-Authenticate', 'Basic realm="Baremetal API"'), ('Content-Type', 'application/json'), ('Content-Length', str(len(body[0])))] ) app.assert_not_called() def test_authenticate(self): auth_file = self.write_auth_file( 'foo:bar\nmyName:$2y$05$lE3eGtyj41jZwrzS87KTqe6.' 
'JETVCWBkc32C63UP2aYrGoYOEpbJm\n\n\n') # test basic auth self.assertEqual( {'HTTP_X_USER': 'myName', 'HTTP_X_USER_NAME': 'myName'}, auth_basic.authenticate( auth_file, 'myName', b'myPassword') ) # test failed auth e = self.assertRaises(exception.ConfigInvalid, auth_basic.authenticate, auth_file, 'foo', b'bar') self.assertEqual('Invalid configuration file. Only bcrypt digested ' 'passwords are supported for foo', str(e)) # test problem reading user data file auth_file = auth_file + '.missing' e = self.assertRaises(exception.ConfigInvalid, auth_basic.authenticate, auth_file, 'myName', b'myPassword') self.assertEqual('Invalid configuration file. Problem reading ' 'auth user file', str(e)) def test_auth_entry(self): entry_pass = ('myName:$2y$05$lE3eGtyj41jZwrzS87KTqe6.' 'JETVCWBkc32C63UP2aYrGoYOEpbJm') entry_pass_2a = ('myName:$2a$10$I9Fi3DM1sbxQP0560MK9' 'tec1dUdytBtIqXfDCyTNfDUabtGvQjW1S') entry_pass_2b = ('myName:$2b$12$dWLBxT6aMxpVTfUNAyOu' 'IusHXewu8m6Hrsxw4/e95WGBelFn0oOMW') entry_fail = 'foo:bar' # success self.assertEqual( {'HTTP_X_USER': 'myName', 'HTTP_X_USER_NAME': 'myName'}, auth_basic.auth_entry( entry_pass, b'myPassword') ) # success with a bcrypt implementations other than htpasswd self.assertEqual( {'HTTP_X_USER': 'myName', 'HTTP_X_USER_NAME': 'myName'}, auth_basic.auth_entry( entry_pass_2a, b'myPassword') ) self.assertEqual( {'HTTP_X_USER': 'myName', 'HTTP_X_USER_NAME': 'myName'}, auth_basic.auth_entry( entry_pass_2b, b'myPassword') ) # failed, unknown digest format e = self.assertRaises(exception.ConfigInvalid, auth_basic.auth_entry, entry_fail, b'bar') self.assertEqual('Invalid configuration file. Only bcrypt digested ' 'passwords are supported for foo', str(e)) # failed, incorrect password e = self.assertRaises(exception.Unauthorized, auth_basic.auth_entry, entry_pass, b'bar') self.assertEqual('Incorrect username or password', str(e)) def test_validate_auth_file(self): auth_file = self.write_auth_file( 'myName:$2y$05$lE3eGtyj41jZwrzS87KTqe6.' 'JETVCWBkc32C63UP2aYrGoYOEpbJm\n\n\n') # success, valid config auth_basic.validate_auth_file(auth_file) # failed, missing auth file auth_file = auth_file + '.missing' self.assertRaises(exception.ConfigInvalid, auth_basic.validate_auth_file, auth_file) # failed, invalid entry auth_file = self.write_auth_file( 'foo:bar\nmyName:$2y$05$lE3eGtyj41jZwrzS87KTqe6.' 
'JETVCWBkc32C63UP2aYrGoYOEpbJm\n\n\n') self.assertRaises(exception.ConfigInvalid, auth_basic.validate_auth_file, auth_file) def test_parse_token(self): # success with bytes token = base64.b64encode(b'myName:myPassword') self.assertEqual( ('myName', b'myPassword'), auth_basic.parse_token(token) ) # success with string token = str(token, encoding='utf-8') self.assertEqual( ('myName', b'myPassword'), auth_basic.parse_token(token) ) # failed, invalid base64 e = self.assertRaises(exception.BadRequest, auth_basic.parse_token, token[:-1]) self.assertEqual('Could not decode authorization token', str(e)) # failed, no colon in token token = str(base64.b64encode(b'myNamemyPassword'), encoding='utf-8') e = self.assertRaises(exception.BadRequest, auth_basic.parse_token, token[:-1]) self.assertEqual('Could not decode authorization token', str(e)) def test_parse_header(self): auth_value = 'Basic bXlOYW1lOm15UGFzc3dvcmQ=' # success self.assertEqual( 'bXlOYW1lOm15UGFzc3dvcmQ=', auth_basic.parse_header({ 'HTTP_AUTHORIZATION': auth_value }) ) # failed, missing Authorization header e = self.assertRaises(exception.Unauthorized, auth_basic.parse_header, {}) self.assertEqual('Authorization required', str(e)) # failed missing token e = self.assertRaises(exception.BadRequest, auth_basic.parse_header, {'HTTP_AUTHORIZATION': 'Basic'}) self.assertEqual('Could not parse Authorization header', str(e)) # failed, type other than Basic digest_value = 'Digest username="myName" nonce="foobar"' e = self.assertRaises(exception.BadRequest, auth_basic.parse_header, {'HTTP_AUTHORIZATION': digest_value}) self.assertEqual('Unsupported authorization type "Digest"', str(e)) def test_unauthorized(self): e = self.assertRaises(exception.Unauthorized, auth_basic.unauthorized, 'ouch') self.assertEqual('ouch', str(e)) self.assertEqual({ 'WWW-Authenticate': 'Basic realm="Baremetal API"' }, e.headers) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_capabilities.py0000664000175000017500000000641300000000000023112 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
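# An illustrative sketch of the helpers under test (not executed by the
# test runner): capabilities.parse() accepts a dict, a JSON string, or the
# legacy "key1:value1,key2:value2" format, and capabilities.combine()
# produces the legacy form again, e.g.:
#
#     capabilities.parse('hello:test1,cat:meow')
#     # -> {'hello': 'test1', 'cat': 'meow'}
#     capabilities.combine({'cat': 'meow'})
#     # -> 'cat:meow'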
import collections from ironic_lib import capabilities from ironic_lib.tests import base class ParseTestCase(base.IronicLibTestCase): def test_none(self): self.assertEqual({}, capabilities.parse(None)) def test_from_dict(self): expected_dict = {"hello": "world"} self.assertDictEqual(expected_dict, capabilities.parse(expected_dict)) def test_from_json_string(self): caps = '{"test": "world"}' self.assertDictEqual({"test": "world"}, capabilities.parse(caps)) def test_from_old_format(self): caps = 'hello:test1,cat:meow' self.assertDictEqual({'hello': 'test1', 'cat': 'meow'}, capabilities.parse(caps)) def test_from_old_format_with_malformed(self): caps = 'hello:test1,badformat' self.assertRaisesRegex(ValueError, 'Malformed capability', capabilities.parse, caps) def test_from_old_format_skip_malformed(self): caps = 'hello:test1,badformat' self.assertDictEqual({'hello': 'test1'}, capabilities.parse(caps, skip_malformed=True)) def test_no_old_format(self): caps = 'hello:test1,cat:meow' self.assertRaisesRegex(ValueError, 'Invalid JSON capabilities', capabilities.parse, caps, compat=False) def test_unexpected_type(self): self.assertRaisesRegex(TypeError, 'Invalid capabilities', capabilities.parse, 42) class CombineTestCase(base.IronicLibTestCase): def test_combine(self): caps = capabilities.combine( collections.OrderedDict([('hello', None), ('cat', 'meow')])) self.assertEqual('hello:None,cat:meow', caps) def test_skip_none(self): caps = capabilities.combine( collections.OrderedDict([('hello', None), ('cat', 'meow')]), skip_none=True) self.assertEqual('cat:meow', caps) class UpdateAndCombineTestCase(base.IronicLibTestCase): def test_from_dict(self): result = capabilities.update_and_combine( {'key1': 'old value', 'key2': 'value2'}, {'key1': 'value1'}) self.assertIn(result, ['key1:value1,key2:value2', 'key2:value2,key1:value1']) def test_from_old_format(self): result = capabilities.update_and_combine( 'key1:old value,key2:value2', {'key1': 'value1'}) self.assertIn(result, ['key1:value1,key2:value2', 'key2:value2,key1:value1']) def test_skip_none(self): result = capabilities.update_and_combine( 'key1:old value,key2:value2', {'key1': None}, skip_none=True) self.assertEqual('key2:value2', result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_disk_partitioner.py0000664000175000017500000002243200000000000024032 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
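# An illustrative sketch of the interface under test (not executed here;
# size units follow the implementation): a DiskPartitioner only records
# partitions in memory and touches the device on commit(), which shells
# out to parted and then fuser-checks the device, e.g.:
#
#     dp = disk_partitioner.DiskPartitioner('/dev/fake')
#     dp.add_partition(1024)
#     dp.add_partition(512, fs_type='linux-swap')
#     dp.commit()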
from unittest import mock from ironic_lib import disk_partitioner from ironic_lib import exception from ironic_lib.tests import base from ironic_lib import utils CONF = disk_partitioner.CONF class DiskPartitionerTestCase(base.IronicLibTestCase): def test_add_partition(self): dp = disk_partitioner.DiskPartitioner('/dev/fake') dp.add_partition(1024) dp.add_partition(512, fs_type='linux-swap') dp.add_partition(2048, boot_flag='boot') dp.add_partition(2048, boot_flag='bios_grub') expected = [(1, {'boot_flag': None, 'extra_flags': None, 'fs_type': '', 'type': 'primary', 'size': 1024}), (2, {'boot_flag': None, 'extra_flags': None, 'fs_type': 'linux-swap', 'type': 'primary', 'size': 512}), (3, {'boot_flag': 'boot', 'extra_flags': None, 'fs_type': '', 'type': 'primary', 'size': 2048}), (4, {'boot_flag': 'bios_grub', 'extra_flags': None, 'fs_type': '', 'type': 'primary', 'size': 2048})] partitions = [(n, p) for n, p in dp.get_partitions()] self.assertEqual(4, len(partitions)) self.assertEqual(expected, partitions) @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec', autospec=True) @mock.patch.object(utils, 'execute', autospec=True) def test_commit(self, mock_utils_exc, mock_disk_partitioner_exec): dp = disk_partitioner.DiskPartitioner('/dev/fake') fake_parts = [(1, {'boot_flag': None, 'extra_flags': None, 'fs_type': 'fake-fs-type', 'type': 'fake-type', 'size': 1}), (2, {'boot_flag': 'boot', 'extra_flags': None, 'fs_type': 'fake-fs-type', 'type': 'fake-type', 'size': 1}), (3, {'boot_flag': 'bios_grub', 'extra_flags': None, 'fs_type': 'fake-fs-type', 'type': 'fake-type', 'size': 1}), (4, {'boot_flag': 'boot', 'extra_flags': ['prep', 'fake-flag'], 'fs_type': 'fake-fs-type', 'type': 'fake-type', 'size': 1})] with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp: mock_gp.return_value = fake_parts mock_utils_exc.return_value = ('', '') dp.commit() mock_disk_partitioner_exec.assert_called_once_with( mock.ANY, 'mklabel', 'msdos', 'mkpart', 'fake-type', 'fake-fs-type', '1', '2', 'mkpart', 'fake-type', 'fake-fs-type', '2', '3', 'set', '2', 'boot', 'on', 'mkpart', 'fake-type', 'fake-fs-type', '3', '4', 'set', '3', 'bios_grub', 'on', 'mkpart', 'fake-type', 'fake-fs-type', '4', '5', 'set', '4', 'boot', 'on', 'set', '4', 'prep', 'on', 'set', '4', 'fake-flag', 'on') mock_utils_exc.assert_called_once_with( 'fuser', '/dev/fake', run_as_root=True, check_exit_code=[0, 1]) @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec', autospec=True) @mock.patch.object(utils, 'execute', autospec=True) def test_commit_with_device_is_busy_once(self, mock_utils_exc, mock_disk_partitioner_exec): CONF.set_override('check_device_interval', 0, group='disk_partitioner') dp = disk_partitioner.DiskPartitioner('/dev/fake') fake_parts = [(1, {'boot_flag': None, 'extra_flags': None, 'fs_type': 'fake-fs-type', 'type': 'fake-type', 'size': 1}), (2, {'boot_flag': 'boot', 'extra_flags': None, 'fs_type': 'fake-fs-type', 'type': 'fake-type', 'size': 1})] # Test as if the 'psmisc' version of 'fuser' which has stderr output fuser_outputs = iter([(" 10000 10001", '/dev/fake:\n'), ('', '')]) with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp: mock_gp.return_value = fake_parts mock_utils_exc.side_effect = fuser_outputs dp.commit() mock_disk_partitioner_exec.assert_called_once_with( mock.ANY, 'mklabel', 'msdos', 'mkpart', 'fake-type', 'fake-fs-type', '1', '2', 'mkpart', 'fake-type', 'fake-fs-type', '2', '3', 'set', '2', 'boot', 'on') mock_utils_exc.assert_called_with( 'fuser', '/dev/fake', 
run_as_root=True, check_exit_code=[0, 1]) self.assertEqual(2, mock_utils_exc.call_count) @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec', autospec=True) @mock.patch.object(utils, 'execute', autospec=True) def test_commit_with_device_is_always_busy(self, mock_utils_exc, mock_disk_partitioner_exec): CONF.set_override('check_device_interval', 0, group='disk_partitioner') dp = disk_partitioner.DiskPartitioner('/dev/fake') fake_parts = [(1, {'boot_flag': None, 'extra_flags': None, 'fs_type': 'fake-fs-type', 'type': 'fake-type', 'size': 1}), (2, {'boot_flag': 'boot', 'extra_flags': None, 'fs_type': 'fake-fs-type', 'type': 'fake-type', 'size': 1})] with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp: mock_gp.return_value = fake_parts # Test as if the 'busybox' version of 'fuser' which does not have # stderr output mock_utils_exc.return_value = ("10000 10001", '') self.assertRaises(exception.InstanceDeployFailure, dp.commit) mock_disk_partitioner_exec.assert_called_once_with( mock.ANY, 'mklabel', 'msdos', 'mkpart', 'fake-type', 'fake-fs-type', '1', '2', 'mkpart', 'fake-type', 'fake-fs-type', '2', '3', 'set', '2', 'boot', 'on') mock_utils_exc.assert_called_with( 'fuser', '/dev/fake', run_as_root=True, check_exit_code=[0, 1]) self.assertEqual(20, mock_utils_exc.call_count) @mock.patch.object(disk_partitioner.DiskPartitioner, '_exec', autospec=True) @mock.patch.object(utils, 'execute', autospec=True) def test_commit_with_device_disconnected(self, mock_utils_exc, mock_disk_partitioner_exec): CONF.set_override('check_device_interval', 0, group='disk_partitioner') dp = disk_partitioner.DiskPartitioner('/dev/fake') fake_parts = [(1, {'boot_flag': None, 'extra_flags': None, 'fs_type': 'fake-fs-type', 'type': 'fake-type', 'size': 1}), (2, {'boot_flag': 'boot', 'extra_flags': None, 'fs_type': 'fake-fs-type', 'type': 'fake-type', 'size': 1})] with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp: mock_gp.return_value = fake_parts mock_utils_exc.return_value = ('', "Specified filename /dev/fake" " does not exist.") self.assertRaises(exception.InstanceDeployFailure, dp.commit) mock_disk_partitioner_exec.assert_called_once_with( mock.ANY, 'mklabel', 'msdos', 'mkpart', 'fake-type', 'fake-fs-type', '1', '2', 'mkpart', 'fake-type', 'fake-fs-type', '2', '3', 'set', '2', 'boot', 'on') mock_utils_exc.assert_called_with( 'fuser', '/dev/fake', run_as_root=True, check_exit_code=[0, 1]) self.assertEqual(20, mock_utils_exc.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_disk_utils.py0000664000175000017500000012575300000000000022644 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
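# An illustrative sketch based on the fixtures below (not executed):
# disk_utils.list_partitions() turns machine-readable parted output into
# one dict per partition, so "1:1.00MiB:501MiB:500MiB:ext4::boot;" on
# /dev/fake becomes roughly:
#
#     {'number': 1, 'start': 1, 'end': 501, 'size': 500,
#      'filesystem': 'ext4', 'partition_name': '', 'flags': 'boot',
#      'path': '/dev/fake1'}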
import os import stat from unittest import mock from oslo_concurrency import processutils from oslo_config import cfg from ironic_lib import disk_utils from ironic_lib import exception from ironic_lib import qemu_img from ironic_lib.tests import base from ironic_lib import utils CONF = cfg.CONF @mock.patch.object(utils, 'execute', autospec=True) class ListPartitionsTestCase(base.IronicLibTestCase): def test_correct(self, execute_mock): output = """ BYT; /dev/sda:500107862016B:scsi:512:4096:msdos:ATA HGST HTS725050A7:; 1:1.00MiB:501MiB:500MiB:ext4::boot; 2:501MiB:476940MiB:476439MiB:::; """ expected = [ {'number': 1, 'start': 1, 'end': 501, 'size': 500, 'filesystem': 'ext4', 'partition_name': '', 'flags': 'boot', 'path': '/dev/fake1'}, {'number': 2, 'start': 501, 'end': 476940, 'size': 476439, 'filesystem': '', 'partition_name': '', 'flags': '', 'path': '/dev/fake2'}, ] execute_mock.return_value = (output, '') result = disk_utils.list_partitions('/dev/fake') self.assertEqual(expected, result) execute_mock.assert_called_once_with( 'parted', '-s', '-m', '/dev/fake', 'unit', 'MiB', 'print', use_standard_locale=True, run_as_root=True) @mock.patch.object(disk_utils.LOG, 'warning', autospec=True) def test_incorrect(self, log_mock, execute_mock): output = """ BYT; /dev/sda:500107862016B:scsi:512:4096:msdos:ATA HGST HTS725050A7:; 1:XX1076MiB:---:524MiB:ext4::boot; """ execute_mock.return_value = (output, '') self.assertEqual([], disk_utils.list_partitions('/dev/fake')) self.assertEqual(1, log_mock.call_count) def test_correct_gpt_nvme(self, execute_mock): output = """ BYT; /dev/vda:40960MiB:virtblk:512:512:gpt:Virtio Block Device:; 2:1.00MiB:2.00MiB:1.00MiB::Bios partition:bios_grub; 1:4.00MiB:5407MiB:5403MiB:ext4:Root partition:; 3:5407MiB:5507MiB:100MiB:fat16:Boot partition:boot, esp; """ expected = [ {'end': 2, 'number': 2, 'start': 1, 'flags': 'bios_grub', 'filesystem': '', 'partition_name': 'Bios partition', 'size': 1, 'path': '/dev/fake0p2'}, {'end': 5407, 'number': 1, 'start': 4, 'flags': '', 'filesystem': 'ext4', 'partition_name': 'Root partition', 'size': 5403, 'path': '/dev/fake0p1'}, {'end': 5507, 'number': 3, 'start': 5407, 'flags': 'boot, esp', 'filesystem': 'fat16', 'partition_name': 'Boot partition', 'size': 100, 'path': '/dev/fake0p3'}, ] execute_mock.return_value = (output, '') result = disk_utils.list_partitions('/dev/fake0') self.assertEqual(expected, result) execute_mock.assert_called_once_with( 'parted', '-s', '-m', '/dev/fake0', 'unit', 'MiB', 'print', use_standard_locale=True, run_as_root=True) @mock.patch.object(disk_utils.LOG, 'warning', autospec=True) def test_incorrect_gpt(self, log_mock, execute_mock): output = """ BYT; /dev/vda:40960MiB:virtblk:512:512:gpt:Virtio Block Device:; 2:XX1.00MiB:---:1.00MiB::primary:bios_grub; """ execute_mock.return_value = (output, '') self.assertEqual([], disk_utils.list_partitions('/dev/fake')) self.assertEqual(1, log_mock.call_count) class GetUEFIDiskIdentifierTestCase(base.IronicLibTestCase): def setUp(self): super(GetUEFIDiskIdentifierTestCase, self).setUp() self.dev = '/dev/fake' @mock.patch.object(utils, 'execute', autospec=True) def test_get_uefi_disk_identifier_uefi_bootable_image(self, mock_execute): mock_execute.return_value = ('', '') fdisk_output = """ Disk /dev/sda: 931.5 GiB, 1000171331584 bytes, 1953459632 sectors Units: sectors of 1 * 512 = 512 bytes Sector size (logical/physical): 512 bytes / 512 bytes I/O size (minimum/optimal): 262144 bytes / 262144 bytes Disklabel type: gpt Disk identifier: 
73457A6C-3595-4965-8D83-2EA1BD85F327 Device Start End Sectors Size Type /dev/fake-part1 2048 1050623 1048576 512M EFI System /dev/fake-part2 1050624 1920172031 1919121408 915.1G Linux filesystem /dev/fake-part3 1920172032 1953458175 33286144 15.9G Linux swap """ partition_id = '/dev/fake-part1' lsblk_output = 'UUID="ABCD-B05B"\n' part_result = 'ABCD-B05B' mock_execute.side_effect = [(fdisk_output, ''), (lsblk_output, '')] result = disk_utils.get_uefi_disk_identifier(self.dev) self.assertEqual(part_result, result) execute_calls = [ mock.call('fdisk', '-l', self.dev, run_as_root=True), mock.call('lsblk', partition_id, '--pairs', '--bytes', '--ascii', '--output', 'UUID', use_standard_locale=True, run_as_root=True) ] mock_execute.assert_has_calls(execute_calls) @mock.patch.object(utils, 'execute', autospec=True) def test_get_uefi_disk_identifier_non_uefi_bootable_image(self, mock_execute): mock_execute.return_value = ('', '') fdisk_output = """ Disk /dev/vda: 50 GiB, 53687091200 bytes, 104857600 sectors Units: sectors of 1 * 512 = 512 bytes Sector size (logical/physical): 512 bytes / 512 bytes I/O size (minimum/optimal): 512 bytes / 512 bytes Disklabel type: dos Disk identifier: 0xb82b9faf Device Boot Start End Sectors Size Id Type /dev/fake-part1 * 2048 104857566 104855519 50G 83 Linux """ partition_id = None mock_execute.side_effect = [(fdisk_output, ''), processutils.ProcessExecutionError()] self.assertRaises(exception.InstanceDeployFailure, disk_utils.get_uefi_disk_identifier, self.dev) execute_calls = [ mock.call('fdisk', '-l', self.dev, run_as_root=True), mock.call('lsblk', partition_id, '--pairs', '--bytes', '--ascii', '--output', 'UUID', use_standard_locale=True, run_as_root=True) ] mock_execute.assert_has_calls(execute_calls) @mock.patch.object(utils, 'execute', autospec=True) class MakePartitionsTestCase(base.IronicLibTestCase): def setUp(self): super(MakePartitionsTestCase, self).setUp() self.dev = 'fake-dev' self.root_mb = 1024 self.swap_mb = 512 self.ephemeral_mb = 0 self.configdrive_mb = 0 self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz" self.efi_size = CONF.disk_utils.efi_system_partition_size self.bios_size = CONF.disk_utils.bios_boot_partition_size def _get_parted_cmd(self, dev, label=None): if label is None: label = 'msdos' return ['parted', '-a', 'optimal', '-s', dev, '--', 'unit', 'MiB', 'mklabel', label] def _add_efi_sz(self, x): return str(x + self.efi_size) def _add_bios_sz(self, x): return str(x + self.bios_size) def _test_make_partitions(self, mock_exc, boot_option, boot_mode='bios', disk_label=None, cpu_arch=""): mock_exc.return_value = ('', '') disk_utils.make_partitions(self.dev, self.root_mb, self.swap_mb, self.ephemeral_mb, self.configdrive_mb, self.node_uuid, boot_option=boot_option, boot_mode=boot_mode, disk_label=disk_label, cpu_arch=cpu_arch) if boot_option == "local" and boot_mode == "uefi": expected_mkpart = ['mkpart', 'primary', 'fat32', '1', self._add_efi_sz(1), 'set', '1', 'boot', 'on', 'mkpart', 'primary', 'linux-swap', self._add_efi_sz(1), self._add_efi_sz(513), 'mkpart', 'primary', '', self._add_efi_sz(513), self._add_efi_sz(1537)] else: if boot_option == "local": if disk_label == "gpt": if cpu_arch.startswith('ppc64'): expected_mkpart = ['mkpart', 'primary', '', '1', '9', 'set', '1', 'prep', 'on', 'mkpart', 'primary', 'linux-swap', '9', '521', 'mkpart', 'primary', '', '521', '1545'] else: expected_mkpart = ['mkpart', 'primary', '', '1', self._add_bios_sz(1), 'set', '1', 'bios_grub', 'on', 'mkpart', 'primary', 'linux-swap', 
self._add_bios_sz(1), self._add_bios_sz(513), 'mkpart', 'primary', '', self._add_bios_sz(513), self._add_bios_sz(1537)] elif cpu_arch.startswith('ppc64'): expected_mkpart = ['mkpart', 'primary', '', '1', '9', 'set', '1', 'boot', 'on', 'set', '1', 'prep', 'on', 'mkpart', 'primary', 'linux-swap', '9', '521', 'mkpart', 'primary', '', '521', '1545'] else: expected_mkpart = ['mkpart', 'primary', 'linux-swap', '1', '513', 'mkpart', 'primary', '', '513', '1537', 'set', '2', 'boot', 'on'] else: expected_mkpart = ['mkpart', 'primary', 'linux-swap', '1', '513', 'mkpart', 'primary', '', '513', '1537'] self.dev = 'fake-dev' parted_cmd = (self._get_parted_cmd(self.dev, disk_label) + expected_mkpart) parted_call = mock.call(*parted_cmd, use_standard_locale=True, run_as_root=True) fuser_cmd = ['fuser', 'fake-dev'] fuser_call = mock.call(*fuser_cmd, run_as_root=True, check_exit_code=[0, 1]) sync_calls = [mock.call('sync'), mock.call('udevadm', 'settle'), mock.call('partprobe', self.dev, attempts=10, run_as_root=True), mock.call('udevadm', 'settle'), mock.call('sgdisk', '-v', self.dev, run_as_root=True)] mock_exc.assert_has_calls([parted_call, fuser_call] + sync_calls) def test_make_partitions(self, mock_exc): self._test_make_partitions(mock_exc, boot_option="netboot") def test_make_partitions_local_boot(self, mock_exc): self._test_make_partitions(mock_exc, boot_option="local") def test_make_partitions_local_boot_uefi(self, mock_exc): self._test_make_partitions(mock_exc, boot_option="local", boot_mode="uefi", disk_label="gpt") def test_make_partitions_local_boot_gpt_bios(self, mock_exc): self._test_make_partitions(mock_exc, boot_option="local", disk_label="gpt") def test_make_partitions_disk_label_gpt(self, mock_exc): self._test_make_partitions(mock_exc, boot_option="netboot", disk_label="gpt") def test_make_partitions_mbr_with_prep(self, mock_exc): self._test_make_partitions(mock_exc, boot_option="local", disk_label="msdos", cpu_arch="ppc64le") def test_make_partitions_gpt_with_prep(self, mock_exc): self._test_make_partitions(mock_exc, boot_option="local", disk_label="gpt", cpu_arch="ppc64le") def test_make_partitions_with_ephemeral(self, mock_exc): self.ephemeral_mb = 2048 expected_mkpart = ['mkpart', 'primary', '', '1', '2049', 'mkpart', 'primary', 'linux-swap', '2049', '2561', 'mkpart', 'primary', '', '2561', '3585'] self.dev = 'fake-dev' cmd = self._get_parted_cmd(self.dev) + expected_mkpart mock_exc.return_value = ('', '') disk_utils.make_partitions(self.dev, self.root_mb, self.swap_mb, self.ephemeral_mb, self.configdrive_mb, self.node_uuid) parted_call = mock.call(*cmd, use_standard_locale=True, run_as_root=True) mock_exc.assert_has_calls([parted_call]) def test_make_partitions_with_iscsi_device(self, mock_exc): self.ephemeral_mb = 2048 expected_mkpart = ['mkpart', 'primary', '', '1', '2049', 'mkpart', 'primary', 'linux-swap', '2049', '2561', 'mkpart', 'primary', '', '2561', '3585'] self.dev = '/dev/iqn.2008-10.org.openstack:%s.fake-9' % self.node_uuid ep = '/dev/iqn.2008-10.org.openstack:%s.fake-9-part1' % self.node_uuid swap = ('/dev/iqn.2008-10.org.openstack:%s.fake-9-part2' % self.node_uuid) root = ('/dev/iqn.2008-10.org.openstack:%s.fake-9-part3' % self.node_uuid) expected_result = {'ephemeral': ep, 'swap': swap, 'root': root} cmd = self._get_parted_cmd(self.dev) + expected_mkpart mock_exc.return_value = ('', '') result = disk_utils.make_partitions( self.dev, self.root_mb, self.swap_mb, self.ephemeral_mb, self.configdrive_mb, self.node_uuid) parted_call = mock.call(*cmd, 
use_standard_locale=True, run_as_root=True) mock_exc.assert_has_calls([parted_call]) self.assertEqual(expected_result, result) def test_make_partitions_with_nvme_device(self, mock_exc): self.ephemeral_mb = 2048 expected_mkpart = ['mkpart', 'primary', '', '1', '2049', 'mkpart', 'primary', 'linux-swap', '2049', '2561', 'mkpart', 'primary', '', '2561', '3585'] self.dev = '/dev/nvmefake-9' ep = '/dev/nvmefake-9p1' swap = '/dev/nvmefake-9p2' root = '/dev/nvmefake-9p3' expected_result = {'ephemeral': ep, 'swap': swap, 'root': root} cmd = self._get_parted_cmd(self.dev) + expected_mkpart mock_exc.return_value = ('', '') result = disk_utils.make_partitions( self.dev, self.root_mb, self.swap_mb, self.ephemeral_mb, self.configdrive_mb, self.node_uuid) parted_call = mock.call(*cmd, use_standard_locale=True, run_as_root=True) mock_exc.assert_has_calls([parted_call]) self.assertEqual(expected_result, result) def test_make_partitions_with_local_device(self, mock_exc): self.ephemeral_mb = 2048 expected_mkpart = ['mkpart', 'primary', '', '1', '2049', 'mkpart', 'primary', 'linux-swap', '2049', '2561', 'mkpart', 'primary', '', '2561', '3585'] self.dev = 'fake-dev' expected_result = {'ephemeral': 'fake-dev1', 'swap': 'fake-dev2', 'root': 'fake-dev3'} cmd = self._get_parted_cmd(self.dev) + expected_mkpart mock_exc.return_value = ('', '') result = disk_utils.make_partitions( self.dev, self.root_mb, self.swap_mb, self.ephemeral_mb, self.configdrive_mb, self.node_uuid) parted_call = mock.call(*cmd, use_standard_locale=True, run_as_root=True) mock_exc.assert_has_calls([parted_call]) self.assertEqual(expected_result, result) @mock.patch.object(utils, 'execute', autospec=True) class DestroyMetaDataTestCase(base.IronicLibTestCase): def setUp(self): super(DestroyMetaDataTestCase, self).setUp() self.dev = 'fake-dev' self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz" def test_destroy_disk_metadata_4096(self, mock_exec): mock_exec.side_effect = iter([ (None, None), ('4096\n', None), ('524288\n', None), (None, None), (None, None), (None, None), (None, None)]) expected_calls = [mock.call('wipefs', '--force', '--all', 'fake-dev', run_as_root=True, use_standard_locale=True), mock.call('blockdev', '--getss', 'fake-dev', run_as_root=True), mock.call('blockdev', '--getsize64', 'fake-dev', run_as_root=True), mock.call('dd', 'bs=4096', 'if=/dev/zero', 'of=fake-dev', 'count=5', 'oflag=direct', run_as_root=True, use_standard_locale=True), mock.call('dd', 'bs=4096', 'if=/dev/zero', 'of=fake-dev', 'count=5', 'oflag=direct', 'seek=123', run_as_root=True, use_standard_locale=True), mock.call('sgdisk', '-Z', 'fake-dev', run_as_root=True, use_standard_locale=True), mock.call('fuser', self.dev, check_exit_code=[0, 1], run_as_root=True)] disk_utils.destroy_disk_metadata(self.dev, self.node_uuid) mock_exec.assert_has_calls(expected_calls) def test_destroy_disk_metadata(self, mock_exec): # Note(TheJulia): This list will get-reused, but only the second # execution returning a string is needed for the test as otherwise # command output is not used. 
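        # The seven iter() entries below line up, in order, with the
        # expected_calls that follow: wipefs, blockdev --getss ('512'),
        # blockdev --getsize64 ('524288'), the two dd wipes, sgdisk -Z and
        # the final fuser check.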
mock_exec.side_effect = iter([ (None, None), ('512\n', None), ('524288\n', None), (None, None), (None, None), (None, None), (None, None)]) expected_calls = [mock.call('wipefs', '--force', '--all', 'fake-dev', run_as_root=True, use_standard_locale=True), mock.call('blockdev', '--getss', 'fake-dev', run_as_root=True), mock.call('blockdev', '--getsize64', 'fake-dev', run_as_root=True), mock.call('dd', 'bs=512', 'if=/dev/zero', 'of=fake-dev', 'count=33', 'oflag=direct', run_as_root=True, use_standard_locale=True), mock.call('dd', 'bs=512', 'if=/dev/zero', 'of=fake-dev', 'count=33', 'oflag=direct', 'seek=991', run_as_root=True, use_standard_locale=True), mock.call('sgdisk', '-Z', 'fake-dev', run_as_root=True, use_standard_locale=True), mock.call('fuser', self.dev, check_exit_code=[0, 1], run_as_root=True)] disk_utils.destroy_disk_metadata(self.dev, self.node_uuid) mock_exec.assert_has_calls(expected_calls) def test_destroy_disk_metadata_wipefs_fail(self, mock_exec): mock_exec.side_effect = processutils.ProcessExecutionError expected_call = [mock.call('wipefs', '--force', '--all', 'fake-dev', run_as_root=True, use_standard_locale=True)] self.assertRaises(processutils.ProcessExecutionError, disk_utils.destroy_disk_metadata, self.dev, self.node_uuid) mock_exec.assert_has_calls(expected_call) def test_destroy_disk_metadata_sgdisk_fail(self, mock_exec): expected_calls = [mock.call('wipefs', '--force', '--all', 'fake-dev', run_as_root=True, use_standard_locale=True), mock.call('blockdev', '--getss', 'fake-dev', run_as_root=True), mock.call('blockdev', '--getsize64', 'fake-dev', run_as_root=True), mock.call('dd', 'bs=512', 'if=/dev/zero', 'of=fake-dev', 'count=33', 'oflag=direct', run_as_root=True, use_standard_locale=True), mock.call('dd', 'bs=512', 'if=/dev/zero', 'of=fake-dev', 'count=33', 'oflag=direct', 'seek=991', run_as_root=True, use_standard_locale=True), mock.call('sgdisk', '-Z', 'fake-dev', run_as_root=True, use_standard_locale=True)] mock_exec.side_effect = iter([ (None, None), ('512\n', None), ('524288\n', None), (None, None), (None, None), processutils.ProcessExecutionError()]) self.assertRaises(processutils.ProcessExecutionError, disk_utils.destroy_disk_metadata, self.dev, self.node_uuid) mock_exec.assert_has_calls(expected_calls) def test_destroy_disk_metadata_wipefs_not_support_force(self, mock_exec): mock_exec.side_effect = iter([ processutils.ProcessExecutionError(description='--force'), (None, None), ('512\n', None), ('524288\n', None), (None, None), (None, None), (None, None), (None, None)]) expected_call = [mock.call('wipefs', '--force', '--all', 'fake-dev', run_as_root=True, use_standard_locale=True), mock.call('wipefs', '--all', 'fake-dev', run_as_root=True, use_standard_locale=True)] disk_utils.destroy_disk_metadata(self.dev, self.node_uuid) mock_exec.assert_has_calls(expected_call) def test_destroy_disk_metadata_ebr(self, mock_exec): expected_calls = [mock.call('wipefs', '--force', '--all', 'fake-dev', run_as_root=True, use_standard_locale=True), mock.call('blockdev', '--getss', 'fake-dev', run_as_root=True), mock.call('blockdev', '--getsize64', 'fake-dev', run_as_root=True), mock.call('dd', 'bs=512', 'if=/dev/zero', 'of=fake-dev', 'count=2', 'oflag=direct', run_as_root=True, use_standard_locale=True), mock.call('sgdisk', '-Z', 'fake-dev', run_as_root=True, use_standard_locale=True)] mock_exec.side_effect = iter([ (None, None), ('512\n', None), ('1024\n', None), # an EBR is 2 sectors (None, None), (None, None), (None, None), (None, None)]) 
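        # With a 1024-byte device and 512-byte sectors there are only two
        # sectors in total, so the leading wipe already covers the whole
        # device: expected_calls above contains a single dd with count=2
        # and no trailing seek'd dd.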
disk_utils.destroy_disk_metadata(self.dev, self.node_uuid) mock_exec.assert_has_calls(expected_calls) def test_destroy_disk_metadata_tiny_partition(self, mock_exec): expected_calls = [mock.call('wipefs', '--force', '--all', 'fake-dev', run_as_root=True, use_standard_locale=True), mock.call('blockdev', '--getss', 'fake-dev', run_as_root=True), mock.call('blockdev', '--getsize64', 'fake-dev', run_as_root=True), mock.call('dd', 'bs=512', 'if=/dev/zero', 'of=fake-dev', 'count=33', 'oflag=direct', run_as_root=True, use_standard_locale=True), mock.call('dd', 'bs=512', 'if=/dev/zero', 'of=fake-dev', 'count=33', 'oflag=direct', 'seek=9', run_as_root=True, use_standard_locale=True), mock.call('sgdisk', '-Z', 'fake-dev', run_as_root=True, use_standard_locale=True)] mock_exec.side_effect = iter([ (None, None), ('512\n', None), ('21504\n', None), (None, None), (None, None), (None, None), (None, None)]) disk_utils.destroy_disk_metadata(self.dev, self.node_uuid) mock_exec.assert_has_calls(expected_calls) @mock.patch.object(utils, 'execute', autospec=True) class GetDeviceBlockSizeTestCase(base.IronicLibTestCase): def setUp(self): super(GetDeviceBlockSizeTestCase, self).setUp() self.dev = 'fake-dev' self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz" def test_get_dev_block_size(self, mock_exec): mock_exec.return_value = ("64", "") expected_call = [mock.call('blockdev', '--getsz', self.dev, run_as_root=True)] disk_utils.get_dev_block_size(self.dev) mock_exec.assert_has_calls(expected_call) @mock.patch.object(utils, 'execute', autospec=True) class GetDeviceByteSizeTestCase(base.IronicLibTestCase): def setUp(self): super(GetDeviceByteSizeTestCase, self).setUp() self.dev = 'fake-dev' self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz" def test_get_dev_byte_size(self, mock_exec): mock_exec.return_value = ("64", "") expected_call = [mock.call('blockdev', '--getsize64', self.dev, run_as_root=True)] disk_utils.get_dev_byte_size(self.dev) mock_exec.assert_has_calls(expected_call) @mock.patch.object(disk_utils, 'dd', autospec=True) @mock.patch.object(qemu_img, 'image_info', autospec=True) @mock.patch.object(qemu_img, 'convert_image', autospec=True) class PopulateImageTestCase(base.IronicLibTestCase): def test_populate_raw_image(self, mock_cg, mock_qinfo, mock_dd): type(mock_qinfo.return_value).file_format = mock.PropertyMock( return_value='raw') disk_utils.populate_image('src', 'dst') mock_dd.assert_called_once_with('src', 'dst', conv_flags=None) self.assertFalse(mock_cg.called) def test_populate_raw_image_with_convert(self, mock_cg, mock_qinfo, mock_dd): type(mock_qinfo.return_value).file_format = mock.PropertyMock( return_value='raw') disk_utils.populate_image('src', 'dst', conv_flags='sparse') mock_dd.assert_called_once_with('src', 'dst', conv_flags='sparse') self.assertFalse(mock_cg.called) def test_populate_qcow2_image(self, mock_cg, mock_qinfo, mock_dd): type(mock_qinfo.return_value).file_format = mock.PropertyMock( return_value='qcow2') disk_utils.populate_image('src', 'dst') mock_cg.assert_called_once_with('src', 'dst', 'raw', True, sparse_size='0') self.assertFalse(mock_dd.called) @mock.patch('time.sleep', lambda sec: None) class OtherFunctionTestCase(base.IronicLibTestCase): @mock.patch.object(os, 'stat', autospec=True) @mock.patch.object(stat, 'S_ISBLK', autospec=True) def test_is_block_device_works(self, mock_is_blk, mock_os): device = '/dev/disk/by-path/ip-1.2.3.4:5678-iscsi-iqn.fake-lun-9' mock_is_blk.return_value = True mock_os().st_mode = 10000 
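        # stat.S_ISBLK is mocked to return True, so st_mode only has to be
        # passed through untouched; the assertions below verify exactly
        # that.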
self.assertTrue(disk_utils.is_block_device(device)) mock_is_blk.assert_called_once_with(mock_os().st_mode) @mock.patch.object(os, 'stat', autospec=True) def test_is_block_device_raises(self, mock_os): device = '/dev/disk/by-path/ip-1.2.3.4:5678-iscsi-iqn.fake-lun-9' mock_os.side_effect = OSError self.assertRaises(exception.InstanceDeployFailure, disk_utils.is_block_device, device) mock_os.assert_has_calls([mock.call(device)] * 3) @mock.patch.object(os, 'stat', autospec=True) def test_is_block_device_attempts(self, mock_os): CONF.set_override('partition_detection_attempts', 2, group='disk_utils') device = '/dev/disk/by-path/ip-1.2.3.4:5678-iscsi-iqn.fake-lun-9' mock_os.side_effect = OSError self.assertRaises(exception.InstanceDeployFailure, disk_utils.is_block_device, device) mock_os.assert_has_calls([mock.call(device)] * 2) @mock.patch.object(os.path, 'getsize', autospec=True) @mock.patch.object(qemu_img, 'image_info', autospec=True) def test_get_image_mb(self, mock_qinfo, mock_getsize): mb = 1024 * 1024 mock_getsize.return_value = 0 type(mock_qinfo.return_value).virtual_size = mock.PropertyMock( return_value=0) self.assertEqual(0, disk_utils.get_image_mb('x', False)) self.assertEqual(0, disk_utils.get_image_mb('x', True)) mock_getsize.return_value = 1 type(mock_qinfo.return_value).virtual_size = mock.PropertyMock( return_value=1) self.assertEqual(1, disk_utils.get_image_mb('x', False)) self.assertEqual(1, disk_utils.get_image_mb('x', True)) mock_getsize.return_value = mb type(mock_qinfo.return_value).virtual_size = mock.PropertyMock( return_value=mb) self.assertEqual(1, disk_utils.get_image_mb('x', False)) self.assertEqual(1, disk_utils.get_image_mb('x', True)) mock_getsize.return_value = mb + 1 type(mock_qinfo.return_value).virtual_size = mock.PropertyMock( return_value=mb + 1) self.assertEqual(2, disk_utils.get_image_mb('x', False)) self.assertEqual(2, disk_utils.get_image_mb('x', True)) def _test_count_mbr_partitions(self, output, mock_execute): mock_execute.return_value = (output, '') out = disk_utils.count_mbr_partitions('/dev/fake') mock_execute.assert_called_once_with('partprobe', '-d', '-s', '/dev/fake', run_as_root=True, use_standard_locale=True) return out @mock.patch.object(utils, 'execute', autospec=True) def test_count_mbr_partitions(self, mock_execute): output = "/dev/fake: msdos partitions 1 2 3 <5 6>" pp, lp = self._test_count_mbr_partitions(output, mock_execute) self.assertEqual(3, pp) self.assertEqual(2, lp) @mock.patch.object(utils, 'execute', autospec=True) def test_count_mbr_partitions_no_logical_partitions(self, mock_execute): output = "/dev/fake: msdos partitions 1 2" pp, lp = self._test_count_mbr_partitions(output, mock_execute) self.assertEqual(2, pp) self.assertEqual(0, lp) @mock.patch.object(utils, 'execute', autospec=True) def test_count_mbr_partitions_wrong_partition_table(self, mock_execute): output = "/dev/fake: gpt partitions 1 2 3 4 5 6" mock_execute.return_value = (output, '') self.assertRaises(ValueError, disk_utils.count_mbr_partitions, '/dev/fake') mock_execute.assert_called_once_with('partprobe', '-d', '-s', '/dev/fake', run_as_root=True, use_standard_locale=True) @mock.patch.object(disk_utils, 'get_device_information', autospec=True) def test_block_uuid(self, mock_get_device_info): mock_get_device_info.return_value = {'UUID': '123', 'PARTUUID': '123456'} self.assertEqual('123', disk_utils.block_uuid('/dev/fake')) mock_get_device_info.assert_called_once_with( '/dev/fake', fields=['UUID', 'PARTUUID']) @mock.patch.object(disk_utils, 
'get_device_information', autospec=True) def test_block_uuid_fallback_to_uuid(self, mock_get_device_info): mock_get_device_info.return_value = {'PARTUUID': '123456'} self.assertEqual('123456', disk_utils.block_uuid('/dev/fake')) mock_get_device_info.assert_called_once_with( '/dev/fake', fields=['UUID', 'PARTUUID']) @mock.patch.object(utils, 'execute', autospec=True) class FixGptStructsTestCases(base.IronicLibTestCase): def setUp(self): super(FixGptStructsTestCases, self).setUp() self.dev = "/dev/fake" self.config_part_label = "config-2" self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz" def test_fix_gpt_structs_fix_required(self, mock_execute): sgdisk_v_output = """ Problem: The secondary header's self-pointer indicates that it doesn't reside at the end of the disk. If you've added a disk to a RAID array, use the 'e' option on the experts' menu to adjust the secondary header's and partition table's locations. Identified 1 problems! """ mock_execute.return_value = (sgdisk_v_output, '') execute_calls = [ mock.call('sgdisk', '-v', '/dev/fake', run_as_root=True), mock.call('sgdisk', '-e', '/dev/fake', run_as_root=True) ] disk_utils._fix_gpt_structs('/dev/fake', self.node_uuid) mock_execute.assert_has_calls(execute_calls) def test_fix_gpt_structs_fix_not_required(self, mock_execute): mock_execute.return_value = ('', '') disk_utils._fix_gpt_structs('/dev/fake', self.node_uuid) mock_execute.assert_called_once_with('sgdisk', '-v', '/dev/fake', run_as_root=True) @mock.patch.object(disk_utils.LOG, 'error', autospec=True) def test_fix_gpt_structs_exc(self, mock_log, mock_execute): mock_execute.side_effect = processutils.ProcessExecutionError self.assertRaisesRegex(exception.InstanceDeployFailure, 'Failed to fix GPT data structures on disk', disk_utils._fix_gpt_structs, self.dev, self.node_uuid) mock_execute.assert_called_once_with('sgdisk', '-v', '/dev/fake', run_as_root=True) self.assertEqual(1, mock_log.call_count) @mock.patch.object(utils, 'execute', autospec=True) class TriggerDeviceRescanTestCase(base.IronicLibTestCase): def test_trigger(self, mock_execute): self.assertTrue(disk_utils.trigger_device_rescan('/dev/fake')) mock_execute.assert_has_calls([ mock.call('sync'), mock.call('udevadm', 'settle'), mock.call('partprobe', '/dev/fake', run_as_root=True, attempts=10), mock.call('udevadm', 'settle'), mock.call('sgdisk', '-v', '/dev/fake', run_as_root=True), ]) def test_custom_attempts(self, mock_execute): self.assertTrue( disk_utils.trigger_device_rescan('/dev/fake', attempts=1)) mock_execute.assert_has_calls([ mock.call('sync'), mock.call('udevadm', 'settle'), mock.call('partprobe', '/dev/fake', run_as_root=True, attempts=1), mock.call('udevadm', 'settle'), mock.call('sgdisk', '-v', '/dev/fake', run_as_root=True), ]) def test_fails(self, mock_execute): mock_execute.side_effect = [('', '')] * 4 + [ processutils.ProcessExecutionError ] self.assertFalse(disk_utils.trigger_device_rescan('/dev/fake')) mock_execute.assert_has_calls([ mock.call('sync'), mock.call('udevadm', 'settle'), mock.call('partprobe', '/dev/fake', run_as_root=True, attempts=10), mock.call('udevadm', 'settle'), mock.call('sgdisk', '-v', '/dev/fake', run_as_root=True), ]) BLKID_PROBE = (""" /dev/disk/by-path/ip-10.1.0.52:3260-iscsi-iqn.2008-10.org.openstack: """ """PTUUID="123456" PTTYPE="gpt" """) LSBLK_NORMAL = ( 'UUID="123" BLOCK_SIZE="512" TYPE="vfat" ' 'PARTLABEL="EFI System Partition" PARTUUID="123456"' ) @mock.patch.object(utils, 'execute', autospec=True) class GetDeviceInformationTestCase(base.IronicLibTestCase): 
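    # The LSBLK_NORMAL and BLKID_PROBE fixtures above stand in for real
    # lsblk/blkid output; these tests only cover the KEY="value" parsing
    # and the exact command lines issued.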
def test_normal(self, mock_execute): mock_execute.return_value = LSBLK_NORMAL, "" result = disk_utils.get_device_information('/dev/fake') self.assertEqual( {'UUID': '123', 'BLOCK_SIZE': '512', 'TYPE': 'vfat', 'PARTLABEL': 'EFI System Partition', 'PARTUUID': '123456'}, result ) mock_execute.assert_called_once_with( 'lsblk', '/dev/fake', '--pairs', '--bytes', '--ascii', '--nodeps', '--output-all', use_standard_locale=True, run_as_root=True) def test_probe(self, mock_execute): mock_execute.return_value = BLKID_PROBE, "" result = disk_utils.get_device_information('/dev/fake', probe=True) self.assertEqual({'PTUUID': '123456', 'PTTYPE': 'gpt'}, result) mock_execute.assert_called_once_with('blkid', '/dev/fake', '-p', use_standard_locale=True, run_as_root=True) def test_fields(self, mock_execute): mock_execute.return_value = LSBLK_NORMAL, "" result = disk_utils.get_device_information('/dev/fake', fields=['UUID', 'LABEL']) # No filtering on our side, so returning all fake fields self.assertEqual( {'UUID': '123', 'BLOCK_SIZE': '512', 'TYPE': 'vfat', 'PARTLABEL': 'EFI System Partition', 'PARTUUID': '123456'}, result ) mock_execute.assert_called_once_with( 'lsblk', '/dev/fake', '--pairs', '--bytes', '--ascii', '--nodeps', '--output', 'UUID,LABEL', use_standard_locale=True, run_as_root=True) def test_empty(self, mock_execute): mock_execute.return_value = "\n", "" result = disk_utils.get_device_information('/dev/fake', probe=True) self.assertEqual({}, result) mock_execute.assert_called_once_with('blkid', '/dev/fake', '-p', use_standard_locale=True, run_as_root=True) @mock.patch.object(utils, 'execute', autospec=True) class GetPartitionTableTypeTestCase(base.IronicLibTestCase): def test_gpt(self, mocked_execute): self._test_by_type(mocked_execute, 'gpt', 'gpt') def test_msdos(self, mocked_execute): self._test_by_type(mocked_execute, 'msdos', 'msdos') def test_unknown(self, mocked_execute): self._test_by_type(mocked_execute, 'whatever', 'unknown') def _test_by_type(self, mocked_execute, table_type_output, expected_table_type): parted_ret = PARTED_OUTPUT_UNFORMATTED.format(table_type_output) mocked_execute.side_effect = [ (parted_ret, None), ] ret = disk_utils.get_partition_table_type('hello') mocked_execute.assert_called_once_with( 'parted', '--script', 'hello', '--', 'print', run_as_root=True, use_standard_locale=True) self.assertEqual(expected_table_type, ret) PARTED_OUTPUT_UNFORMATTED = '''Model: whatever Disk /dev/sda: 450GB Sector size (logical/physical): 512B/512B Partition Table: {} Disk Flags: Number Start End Size File system Name Flags 14 1049kB 5243kB 4194kB bios_grub 15 5243kB 116MB 111MB fat32 boot, esp 1 116MB 2361MB 2245MB ext4 ''' @mock.patch.object(disk_utils, 'list_partitions', autospec=True) @mock.patch.object(disk_utils, 'get_partition_table_type', autospec=True) class FindEfiPartitionTestCase(base.IronicLibTestCase): def test_find_efi_partition(self, mocked_type, mocked_parts): mocked_parts.return_value = [ {'number': '1', 'flags': ''}, {'number': '14', 'flags': 'bios_grub'}, {'number': '15', 'flags': 'esp, boot'}, ] ret = disk_utils.find_efi_partition('/dev/sda') self.assertEqual({'number': '15', 'flags': 'esp, boot'}, ret) def test_find_efi_partition_only_boot_flag_gpt(self, mocked_type, mocked_parts): mocked_type.return_value = 'gpt' mocked_parts.return_value = [ {'number': '1', 'flags': ''}, {'number': '14', 'flags': 'bios_grub'}, {'number': '15', 'flags': 'boot'}, ] ret = disk_utils.find_efi_partition('/dev/sda') self.assertEqual({'number': '15', 'flags': 'boot'}, ret) def 
test_find_efi_partition_only_boot_flag_mbr(self, mocked_type, mocked_parts): mocked_type.return_value = 'msdos' mocked_parts.return_value = [ {'number': '1', 'flags': ''}, {'number': '14', 'flags': 'bios_grub'}, {'number': '15', 'flags': 'boot'}, ] self.assertIsNone(disk_utils.find_efi_partition('/dev/sda')) def test_find_efi_partition_not_found(self, mocked_type, mocked_parts): mocked_parts.return_value = [ {'number': '1', 'flags': ''}, {'number': '14', 'flags': 'bios_grub'}, ] self.assertIsNone(disk_utils.find_efi_partition('/dev/sda')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_exception.py0000664000175000017500000000507500000000000022462 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import re from unittest import mock from oslo_config import cfg from ironic_lib import exception from ironic_lib.tests import base CONF = cfg.CONF class Unserializable(object): def __str__(self): raise NotImplementedError('nostr') class TestException(exception.IronicException): _msg_fmt = 'Some exception: %(spam)s, %(ham)s' class TestIronicException(base.IronicLibTestCase): def test___init___json_serializable(self): exc = TestException(spam=[1, 2, 3], ham='eggs') self.assertIn('[1, 2, 3]', str(exc)) self.assertEqual('[1, 2, 3]', exc.kwargs['spam']) def test___init___string_serializable(self): exc = TestException( spam=type('ni', (object,), dict(a=1, b=2))(), ham='eggs' ) check_str = 'ni object at' self.assertIn(check_str, str(exc)) self.assertIn(check_str, exc.kwargs['spam']) @mock.patch.object(exception.LOG, 'error', autospec=True) def test___init___invalid_kwarg(self, log_mock): CONF.set_override('fatal_exception_format_errors', False, group='ironic_lib') e = TestException(spam=Unserializable(), ham='eggs') message = \ log_mock.call_args_list[0][0][0] % log_mock.call_args_list[0][0][1] self.assertIsNotNone( re.search('spam: .*JSON.* NotImplementedError: nostr', message), message ) self.assertEqual({'ham': '"eggs"', 'code': 500}, e.kwargs) @mock.patch.object(exception.LOG, 'error', autospec=True) def test___init___invalid_kwarg_reraise(self, log_mock): CONF.set_override('fatal_exception_format_errors', True, group='ironic_lib') self.assertRaises(KeyError, TestException, spam=Unserializable(), ham='eggs') message = \ log_mock.call_args_list[0][0][0] % log_mock.call_args_list[0][0][1] self.assertIsNotNone( re.search('spam: .*JSON.* NotImplementedError: nostr', message), message ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_json_rpc.py0000664000175000017500000007213600000000000022303 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import os import tempfile from unittest import mock import fixtures import oslo_messaging import webob from ironic_lib import exception from ironic_lib.json_rpc import client from ironic_lib.json_rpc import server from ironic_lib.tests import base class FakeContext(server.EmptyContext): request_id = 'abcd' class FakeManager(object): def success(self, context, x, y=0): assert isinstance(context, FakeContext) assert context.user_name == 'admin' return x - y def no_result(self, context): assert isinstance(context, FakeContext) return None def no_context(self): return 42 def fail(self, context, message): assert isinstance(context, FakeContext) raise exception.IronicException(message) @oslo_messaging.expected_exceptions(exception.BadRequest) def expected(self, context, message): assert isinstance(context, FakeContext) raise exception.BadRequest(message) def crash(self, context): raise RuntimeError('boom') def copy(self, context, data): return copy.deepcopy(data) def init_host(self, context): assert False, "This should not be exposed" def _private(self, context): assert False, "This should not be exposed" # This should not be exposed either value = 42 class FakeSerializer: def serialize_entity(self, context, entity): return entity def deserialize_entity(self, context, data): return data class TestService(base.IronicLibTestCase): def setUp(self): super(TestService, self).setUp() self.config(auth_strategy='noauth', group='json_rpc') self.server_mock = self.useFixture(fixtures.MockPatch( 'oslo_service.wsgi.Server', autospec=True)).mock self.serializer = FakeSerializer() self.service = server.WSGIService(FakeManager(), self.serializer, FakeContext) self.app = self.service._application self.ctx = {'user_name': 'admin'} def _request(self, name=None, params=None, expected_error=None, request_id='abcd', **kwargs): body = { 'jsonrpc': '2.0', } if request_id is not None: body['id'] = request_id if name is not None: body['method'] = name if params is not None: body['params'] = params if 'json_body' not in kwargs: kwargs['json_body'] = body kwargs.setdefault('method', 'POST') kwargs.setdefault('headers', {'Content-Type': 'application/json'}) request = webob.Request.blank("/", **kwargs) response = request.get_response(self.app) self.assertEqual(response.status_code, expected_error or (200 if request_id else 204)) if request_id is not None: if expected_error: self.assertEqual(expected_error, response.json_body['error']['code']) else: return response.json_body else: return response.text def _check(self, body, result=None, error=None, request_id='abcd'): self.assertEqual('2.0', body.pop('jsonrpc')) self.assertEqual(request_id, body.pop('id')) if error is not None: self.assertEqual({'error': error}, body) else: self.assertEqual({'result': result}, body) def _setup_http_basic(self): with tempfile.NamedTemporaryFile(mode='w', delete=False) as f: f.write('myName:$2y$05$lE3eGtyj41jZwrzS87KTqe6.' 
'JETVCWBkc32C63UP2aYrGoYOEpbJm\n\n\n') self.addCleanup(os.remove, f.name) self.config(http_basic_auth_user_file=f.name, group='json_rpc') self.config(auth_strategy='http_basic', group='json_rpc') # self.config(http_basic_username='myUser', group='json_rpc') # self.config(http_basic_password='myPassword', group='json_rpc') self.service = server.WSGIService(FakeManager(), self.serializer, FakeContext) self.app = self.server_mock.call_args[0][2] def test_http_basic_not_authenticated(self): self._setup_http_basic() self._request('success', {'context': self.ctx, 'x': 42}, request_id=None, expected_error=401) def test_http_basic(self): self._setup_http_basic() headers = { 'Content-Type': 'application/json', 'Authorization': 'Basic bXlOYW1lOm15UGFzc3dvcmQ=' } body = self._request('success', {'context': self.ctx, 'x': 42}, headers=headers) self._check(body, result=42) def test_success(self): body = self._request('success', {'context': self.ctx, 'x': 42}) self._check(body, result=42) def test_success_no_result(self): body = self._request('no_result', {'context': self.ctx}) self._check(body, result=None) def test_notification(self): body = self._request('no_result', {'context': self.ctx}, request_id=None) self.assertEqual('', body) def test_no_context(self): body = self._request('no_context') self._check(body, result=42) def test_non_json_body(self): for body in (b'', b'???', b"\xc3\x28"): request = webob.Request.blank("/", method='POST', body=body) response = request.get_response(self.app) self._check( response.json_body, error={ 'message': server.ParseError._msg_fmt, 'code': -32700, }, request_id=None) def test_invalid_requests(self): bodies = [ # Invalid requests with request ID. {'method': 'no_result', 'id': 'abcd', 'params': {'context': self.ctx}}, {'jsonrpc': '2.0', 'id': 'abcd', 'params': {'context': self.ctx}}, # These do not count as notifications, since they're malformed. {'method': 'no_result', 'params': {'context': self.ctx}}, {'jsonrpc': '2.0', 'params': {'context': self.ctx}}, 42, # We do not implement batched requests. [], [{'jsonrpc': '2.0', 'method': 'no_result', 'params': {'context': self.ctx}}], ] for body in bodies: body = self._request(json_body=body) self._check( body, error={ 'message': server.InvalidRequest._msg_fmt, 'code': -32600, }, request_id=body.get('id')) def test_malformed_context(self): body = self._request(json_body={'jsonrpc': '2.0', 'id': 'abcd', 'method': 'no_result', 'params': {'context': 42}}) self._check( body, error={ 'message': 'Context must be a dictionary, if provided', 'code': -32602, }) def test_expected_failure(self): body = self._request('fail', {'context': self.ctx, 'message': 'some error'}) self._check(body, error={ 'message': 'some error', 'code': 500, 'data': { 'class': 'ironic_lib.exception.IronicException' } }) def test_expected_failure_oslo(self): # Check that exceptions wrapped by oslo's expected_exceptions get # unwrapped correctly. 
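        # FakeManager.expected is decorated with
        # oslo_messaging.expected_exceptions(exception.BadRequest), so the
        # JSON-RPC error must surface BadRequest itself (code 400), not
        # the ExpectedException wrapper.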
body = self._request('expected', {'context': self.ctx, 'message': 'some error'}) self._check(body, error={ 'message': 'some error', 'code': 400, 'data': { 'class': 'ironic_lib.exception.BadRequest' } }) @mock.patch.object(server.LOG, 'exception', autospec=True) def test_unexpected_failure(self, mock_log): body = self._request('crash', {'context': self.ctx}) self._check(body, error={ 'message': 'boom', 'code': 500, }) self.assertTrue(mock_log.called) def test_method_not_found(self): body = self._request('banana', {'context': self.ctx}) self._check(body, error={ 'message': 'Method banana was not found', 'code': -32601, }) def test_no_deny_methods(self): for name in ('__init__', '_private', 'init_host', 'value'): body = self._request(name, {'context': self.ctx}) self._check(body, error={ 'message': 'Method %s was not found' % name, 'code': -32601, }) def test_missing_argument(self): body = self._request('success', {'context': self.ctx}) # The exact error message depends on the Python version self.assertEqual(-32602, body['error']['code']) self.assertNotIn('result', body) def test_method_not_post(self): self._request('success', {'context': self.ctx, 'x': 42}, method='GET', expected_error=405) def test_authenticated(self): self.config(auth_strategy='keystone', group='json_rpc') self.service = server.WSGIService(FakeManager(), self.serializer, FakeContext) self.app = self.server_mock.call_args[0][2] self._request('success', {'context': self.ctx, 'x': 42}, expected_error=401) def test_authenticated_with_allowed_role(self): self.config(auth_strategy='keystone', group='json_rpc') self.config(allowed_roles=['allowed', 'ignored'], group='json_rpc') self.service = server.WSGIService(FakeManager(), self.serializer, FakeContext) self.app = self.server_mock.call_args[0][2] self._request('success', {'context': self.ctx, 'x': 42}, expected_error=401, headers={'Content-Type': 'application/json', 'X-Roles': 'allowed,denied'}) def test_authenticated_no_admin_role(self): self.config(auth_strategy='keystone', group='json_rpc') self._request('success', {'context': self.ctx, 'x': 42}, expected_error=403) def test_authenticated_no_allowed_role(self): self.config(auth_strategy='keystone', group='json_rpc') self.config(allowed_roles=['allowed', 'ignored'], group='json_rpc') self._request('success', {'context': self.ctx, 'x': 42}, expected_error=403, headers={'Content-Type': 'application/json', 'X-Roles': 'denied,notallowed'}) @mock.patch.object(server.LOG, 'debug', autospec=True) def test_mask_secrets(self, mock_log): data = {'ipmi_username': 'admin', 'ipmi_password': 'secret'} node = self.serializer.serialize_entity(self.ctx, data) body = self._request('copy', {'context': self.ctx, 'data': data}) self.assertIsNone(body.get('error')) node = self.serializer.deserialize_entity(self.ctx, body['result']) logged_params = mock_log.call_args_list[0][0][2] logged_node = logged_params['data'] self.assertEqual({'ipmi_username': 'admin', 'ipmi_password': '***'}, logged_node) logged_resp = mock_log.call_args_list[1][0][2] self.assertEqual({'ipmi_username': 'admin', 'ipmi_password': '***'}, logged_resp) # The result is not affected, only logging self.assertEqual(data, node) @mock.patch.object(client, '_get_session', autospec=True) class TestClient(base.IronicLibTestCase): def setUp(self): super(TestClient, self).setUp() self.serializer = FakeSerializer() self.client = client.Client(self.serializer) self.context = FakeContext({'user_name': 'admin'}) self.ctx_json = self.context.to_dict() def test_can_send_version(self, 
mock_session): self.assertTrue(self.client.can_send_version('1.42')) self.client = client.Client(self.serializer, version_cap='1.42') self.assertTrue(self.client.can_send_version('1.42')) self.assertTrue(self.client.can_send_version('1.0')) self.assertFalse(self.client.can_send_version('1.99')) self.assertFalse(self.client.can_send_version('2.0')) def test_call_success(self, mock_session): response = mock_session.return_value.post.return_value response.json.return_value = { 'jsonrpc': '2.0', 'result': 42 } cctx = self.client.prepare('foo.example.com') self.assertEqual('example.com', cctx.host) result = cctx.call(self.context, 'do_something', answer=42) self.assertEqual(42, result) mock_session.return_value.post.assert_called_once_with( 'http://example.com:8089', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'answer': 42, 'context': self.ctx_json}, 'id': self.context.request_id}) def test_call_ipv4_success(self, mock_session): response = mock_session.return_value.post.return_value response.json.return_value = { 'jsonrpc': '2.0', 'result': 42 } cctx = self.client.prepare('foo.192.0.2.1') self.assertEqual('192.0.2.1', cctx.host) result = cctx.call(self.context, 'do_something', answer=42) self.assertEqual(42, result) mock_session.return_value.post.assert_called_once_with( 'http://192.0.2.1:8089', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'answer': 42, 'context': self.ctx_json}, 'id': self.context.request_id}) def test_call_ipv6_success(self, mock_session): response = mock_session.return_value.post.return_value response.json.return_value = { 'jsonrpc': '2.0', 'result': 42 } cctx = self.client.prepare('foo.2001:db8::1') self.assertEqual('2001:db8::1', cctx.host) self.assertEqual(8089, cctx.port) result = cctx.call(self.context, 'do_something', answer=42) self.assertEqual(42, result) mock_session.return_value.post.assert_called_once_with( 'http://[2001:db8::1]:8089', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'answer': 42, 'context': self.ctx_json}, 'id': self.context.request_id}) def test_call_ipv6_success_rfc2732(self, mock_session): response = mock_session.return_value.post.return_value response.json.return_value = { 'jsonrpc': '2.0', 'result': 42 } cctx = self.client.prepare('foo.[2001:db8::1]:8192') self.assertEqual('2001:db8::1', cctx.host) result = cctx.call(self.context, 'do_something', answer=42) self.assertEqual(42, result) mock_session.return_value.post.assert_called_once_with( 'http://[2001:db8::1]:8192', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'answer': 42, 'context': self.ctx_json}, 'id': self.context.request_id}) def test_call_success_with_version(self, mock_session): response = mock_session.return_value.post.return_value response.json.return_value = { 'jsonrpc': '2.0', 'result': 42 } cctx = self.client.prepare('foo.example.com:8192', version='1.42') self.assertEqual('example.com', cctx.host) result = cctx.call(self.context, 'do_something', answer=42) self.assertEqual(42, result) mock_session.return_value.post.assert_called_once_with( 'http://example.com:8192', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'answer': 42, 'context': self.ctx_json, 'rpc.version': '1.42'}, 'id': self.context.request_id}) def test_call_success_with_version_and_cap(self, mock_session): self.client = client.Client(self.serializer, version_cap='1.99') response = mock_session.return_value.post.return_value response.json.return_value = { 'jsonrpc': '2.0', 'result': 42 } cctx = self.client.prepare('foo.example.com', 
version='1.42') self.assertEqual('example.com', cctx.host) result = cctx.call(self.context, 'do_something', answer=42) self.assertEqual(42, result) mock_session.return_value.post.assert_called_once_with( 'http://example.com:8089', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'answer': 42, 'context': self.ctx_json, 'rpc.version': '1.42'}, 'id': self.context.request_id}) def test_call_with_ssl(self, mock_session): self.config(use_ssl=True, group='json_rpc') response = mock_session.return_value.post.return_value response.json.return_value = { 'jsonrpc': '2.0', 'result': 42 } cctx = self.client.prepare('foo.example.com') self.assertEqual('example.com', cctx.host) result = cctx.call(self.context, 'do_something', answer=42) self.assertEqual(42, result) mock_session.return_value.post.assert_called_once_with( 'https://example.com:8089', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'answer': 42, 'context': self.ctx_json}, 'id': self.context.request_id}) def test_cast_success(self, mock_session): cctx = self.client.prepare('foo.example.com') self.assertEqual('example.com', cctx.host) result = cctx.cast(self.context, 'do_something', answer=42) self.assertIsNone(result) mock_session.return_value.post.assert_called_once_with( 'http://example.com:8089', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'answer': 42, 'context': self.ctx_json}}) def test_cast_success_with_version(self, mock_session): cctx = self.client.prepare('foo.example.com', version='1.42') self.assertEqual('example.com', cctx.host) result = cctx.cast(self.context, 'do_something', answer=42) self.assertIsNone(result) mock_session.return_value.post.assert_called_once_with( 'http://example.com:8089', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'answer': 42, 'context': self.ctx_json, 'rpc.version': '1.42'}}) def test_call_failure(self, mock_session): response = mock_session.return_value.post.return_value response.json.return_value = { 'jsonrpc': '2.0', 'error': { 'code': 418, 'message': 'I am a teapot', 'data': { 'class': 'ironic_lib.exception.BadRequest' } } } cctx = self.client.prepare('foo.example.com') self.assertEqual('example.com', cctx.host) # Make sure that the class is restored correctly for expected errors. exc = self.assertRaises(exception.BadRequest, cctx.call, self.context, 'do_something', answer=42) # Code from the body has priority over one in the class. 
self.assertEqual(418, exc.code) self.assertIn('I am a teapot', str(exc)) mock_session.return_value.post.assert_called_once_with( 'http://example.com:8089', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'answer': 42, 'context': self.ctx_json}, 'id': self.context.request_id}) def test_call_unexpected_failure(self, mock_session): response = mock_session.return_value.post.return_value response.json.return_value = { 'jsonrpc': '2.0', 'error': { 'code': 500, 'message': 'AttributeError', } } cctx = self.client.prepare('foo.example.com') self.assertEqual('example.com', cctx.host) exc = self.assertRaises(exception.IronicException, cctx.call, self.context, 'do_something', answer=42) self.assertEqual(500, exc.code) self.assertIn('Unexpected error', str(exc)) mock_session.return_value.post.assert_called_once_with( 'http://example.com:8089', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'answer': 42, 'context': self.ctx_json}, 'id': self.context.request_id}) def test_call_failure_with_foreign_class(self, mock_session): # This should not happen, but provide an additional safeguard response = mock_session.return_value.post.return_value response.json.return_value = { 'jsonrpc': '2.0', 'error': { 'code': 500, 'message': 'AttributeError', 'data': { 'class': 'AttributeError' } } } cctx = self.client.prepare('foo.example.com') self.assertEqual('example.com', cctx.host) exc = self.assertRaises(exception.IronicException, cctx.call, self.context, 'do_something', answer=42) self.assertEqual(500, exc.code) self.assertIn('Unexpected error', str(exc)) mock_session.return_value.post.assert_called_once_with( 'http://example.com:8089', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'answer': 42, 'context': self.ctx_json}, 'id': self.context.request_id}) def test_cast_failure(self, mock_session): # Cast cannot return normal failures, but make sure we ignore them even # if server sends something in violation of the protocol (or because # it's a low-level error like HTTP Forbidden). 
response = mock_session.return_value.post.return_value response.json.return_value = { 'jsonrpc': '2.0', 'error': { 'code': 418, 'message': 'I am a teapot', 'data': { 'class': 'ironic.common.exception.IronicException' } } } cctx = self.client.prepare('foo.example.com') self.assertEqual('example.com', cctx.host) result = cctx.cast(self.context, 'do_something', answer=42) self.assertIsNone(result) mock_session.return_value.post.assert_called_once_with( 'http://example.com:8089', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'answer': 42, 'context': self.ctx_json}}) def test_call_failure_with_version_and_cap(self, mock_session): self.client = client.Client(self.serializer, version_cap='1.42') cctx = self.client.prepare('foo.example.com', version='1.99') self.assertRaisesRegex(RuntimeError, "requested version 1.99, maximum allowed " "version is 1.42", cctx.call, self.context, 'do_something', answer=42) self.assertFalse(mock_session.return_value.post.called) @mock.patch.object(client.LOG, 'debug', autospec=True) def test_mask_secrets(self, mock_log, mock_session): request = { 'redfish_username': 'admin', 'redfish_password': 'passw0rd' } body = """{ "jsonrpc": "2.0", "result": { "driver_info": { "ipmi_username": "admin", "ipmi_password": "passw0rd" } } }""" response = mock_session.return_value.post.return_value response.text = body cctx = self.client.prepare('foo.example.com') cctx.cast(self.context, 'do_something', node=request) mock_session.return_value.post.assert_called_once_with( 'http://example.com:8089', json={'jsonrpc': '2.0', 'method': 'do_something', 'params': {'node': request, 'context': self.ctx_json}}) self.assertEqual(2, mock_log.call_count) node = mock_log.call_args_list[0][0][3]['params']['node'] self.assertEqual(node, {'redfish_username': 'admin', 'redfish_password': '***'}) resp_text = mock_log.call_args_list[1][0][3] self.assertEqual(body.replace('passw0rd', '***'), resp_text) @mock.patch('ironic_lib.json_rpc.client.keystone', autospec=True) class TestSession(base.IronicLibTestCase): def setUp(self): super(TestSession, self).setUp() client._SESSION = None def test_noauth(self, mock_keystone): self.config(auth_strategy='noauth', group='json_rpc') session = client._get_session() mock_keystone.get_auth.assert_called_once_with('json_rpc') auth = mock_keystone.get_auth.return_value mock_keystone.get_session.assert_called_once_with( 'json_rpc', auth=auth) internal_session = mock_keystone.get_session.return_value mock_keystone.get_adapter.assert_called_once_with( 'json_rpc', session=internal_session, additional_headers={ 'Content-Type': 'application/json' }) self.assertEqual(mock_keystone.get_adapter.return_value, session) def test_keystone(self, mock_keystone): self.config(auth_strategy='keystone', group='json_rpc') session = client._get_session() mock_keystone.get_auth.assert_called_once_with('json_rpc') auth = mock_keystone.get_auth.return_value mock_keystone.get_session.assert_called_once_with( 'json_rpc', auth=auth) internal_session = mock_keystone.get_session.return_value mock_keystone.get_adapter.assert_called_once_with( 'json_rpc', session=internal_session, additional_headers={ 'Content-Type': 'application/json' }) self.assertEqual(mock_keystone.get_adapter.return_value, session) def test_http_basic(self, mock_keystone): self.config(auth_strategy='http_basic', group='json_rpc') session = client._get_session() mock_keystone.get_auth.assert_called_once_with('json_rpc') auth = mock_keystone.get_auth.return_value mock_keystone.get_session.assert_called_once_with( 
'json_rpc', auth=auth) internal_session = mock_keystone.get_session.return_value mock_keystone.get_adapter.assert_called_once_with( 'json_rpc', session=internal_session, additional_headers={ 'Content-Type': 'application/json' }) self.assertEqual(mock_keystone.get_adapter.return_value, session) def test_http_basic_deprecated(self, mock_keystone): self.config(auth_strategy='http_basic', group='json_rpc') self.config(http_basic_username='myName', group='json_rpc') self.config(http_basic_password='myPassword', group='json_rpc') session = client._get_session() mock_keystone.get_auth.assert_called_once_with( 'json_rpc', username='myName', password='myPassword') auth = mock_keystone.get_auth.return_value mock_keystone.get_session.assert_called_once_with( 'json_rpc', auth=auth) internal_session = mock_keystone.get_session.return_value mock_keystone.get_adapter.assert_called_once_with( 'json_rpc', session=internal_session, additional_headers={ 'Content-Type': 'application/json' }) self.assertEqual(mock_keystone.get_adapter.return_value, session) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_keystone.py0000664000175000017500000001247000000000000022322 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
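# The test cases below exercise ironic_lib.keystone, a thin wrapper around
# keystoneauth1 option loading. A minimal usage sketch mirroring the fixtures
# below ('test_group' and the 'vikings' service type are test-local names
# chosen by these tests, not library defaults):
#
#     keystone.register_auth_opts(CONF, 'test_group', service_type='vikings')
#     # ... set auth_type, auth_url, username, password, project_name ...
#     session = keystone.get_session('test_group', timeout=20)
#     adapter = keystone.get_adapter('test_group', session=session,
#                                    interface='admin')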
from unittest import mock from keystoneauth1 import loading as ks_loading from oslo_config import cfg from ironic_lib import exception from ironic_lib import keystone from ironic_lib.tests import base class KeystoneTestCase(base.IronicLibTestCase): def setUp(self): super(KeystoneTestCase, self).setUp() self.test_group = 'test_group' self.cfg_fixture.conf.register_group(cfg.OptGroup(self.test_group)) keystone.register_auth_opts(self.cfg_fixture.conf, self.test_group, service_type='vikings') self.config(auth_type='password', group=self.test_group) # NOTE(pas-ha) this is due to auth_plugin options # being dynamically registered on first load, # but we need to set the config before plugin = ks_loading.get_plugin_loader('password') opts = ks_loading.get_auth_plugin_conf_options(plugin) self.cfg_fixture.register_opts(opts, group=self.test_group) self.config(auth_url='http://127.0.0.1:9898', username='fake_user', password='fake_pass', project_name='fake_tenant', group=self.test_group) def test_get_session(self): self.config(timeout=10, group=self.test_group) session = keystone.get_session(self.test_group, timeout=20) self.assertEqual(20, session.timeout) def test_get_auth(self): auth = keystone.get_auth(self.test_group) self.assertEqual('http://127.0.0.1:9898', auth.auth_url) def test_get_auth_fail(self): # NOTE(pas-ha) 'password' auth_plugin is used, # so when we set the required auth_url to None, # MissingOption is raised self.config(auth_url=None, group=self.test_group) self.assertRaises(exception.ConfigInvalid, keystone.get_auth, self.test_group) def test_get_adapter_from_config(self): self.config(valid_interfaces=['internal', 'public'], group=self.test_group) session = keystone.get_session(self.test_group) adapter = keystone.get_adapter(self.test_group, session=session, interface='admin') self.assertEqual('admin', adapter.interface) self.assertEqual(session, adapter.session) @mock.patch('keystoneauth1.service_token.ServiceTokenAuthWrapper', autospec=True) @mock.patch('keystoneauth1.token_endpoint.Token', autospec=True) def test_get_service_auth(self, token_mock, service_auth_mock): ctxt = mock.Mock(spec=['auth_token'], auth_token='spam') mock_auth = mock.Mock() self.assertEqual(service_auth_mock.return_value, keystone.get_service_auth(ctxt, 'ham', mock_auth)) token_mock.assert_called_once_with('ham', 'spam') service_auth_mock.assert_called_once_with( user_auth=token_mock.return_value, service_auth=mock_auth) class AuthConfTestCase(base.IronicLibTestCase): def setUp(self): super(AuthConfTestCase, self).setUp() self.test_group = 'test_group' self.cfg_fixture.conf.register_group(cfg.OptGroup(self.test_group)) keystone.register_auth_opts(self.cfg_fixture.conf, self.test_group) self.config(auth_type='password', group=self.test_group) # NOTE(pas-ha) this is due to auth_plugin options # being dynamically registered on first load, # but we need to set the config before plugin = ks_loading.get_plugin_loader('password') opts = ks_loading.get_auth_plugin_conf_options(plugin) self.cfg_fixture.register_opts(opts, group=self.test_group) self.config(auth_url='http://127.0.0.1:9898', username='fake_user', password='fake_pass', project_name='fake_tenant', group=self.test_group) def test_add_auth_opts(self): opts = keystone.add_auth_opts([]) # check that there is no duplicates names = {o.dest for o in opts} self.assertEqual(len(names), len(opts)) # NOTE(pas-ha) checking for most standard auth and session ones only expected = {'timeout', 'insecure', 'cafile', 'certfile', 'keyfile', 'auth_type', 'auth_url', 
'username', 'password', 'tenant_name', 'project_name', 'trust_id', 'domain_id', 'user_domain_id', 'project_domain_id'} self.assertTrue(expected.issubset(names)) def test_os_service_types_alias(self): keystone.register_auth_opts(self.cfg_fixture.conf, 'barbican') self.assertEqual(self.cfg_fixture.conf.barbican.service_type, 'key-manager') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_mdns.py0000664000175000017500000003621300000000000021423 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket from unittest import mock from oslo_config import cfg import zeroconf from ironic_lib import exception from ironic_lib import mdns from ironic_lib.tests import base CONF = cfg.CONF @mock.patch.object(zeroconf, 'Zeroconf', autospec=True) class RegisterServiceTestCase(base.IronicLibTestCase): def test_ok(self, mock_zc): zc = mdns.Zeroconf() zc.register_service('baremetal', 'https://127.0.0.1/baremetal') mock_zc.assert_called_once_with( interfaces=zeroconf.InterfaceChoice.All, ip_version=zeroconf.IPVersion.All) mock_zc.return_value.register_service.assert_called_once_with(mock.ANY) info = mock_zc.return_value.register_service.call_args[0][0] self.assertEqual('_openstack._tcp.local.', info.type) self.assertEqual('baremetal._openstack._tcp.local.', info.name) self.assertEqual('127.0.0.1', socket.inet_ntoa(info.addresses[0])) self.assertEqual({b'path': b'/baremetal'}, info.properties) def test_with_params(self, mock_zc): CONF.set_override('params', {'answer': 'none', 'foo': 'bar'}, group='mdns') zc = mdns.Zeroconf() zc.register_service('baremetal', 'https://127.0.0.1/baremetal', params={'answer': b'42'}) mock_zc.return_value.register_service.assert_called_once_with(mock.ANY) info = mock_zc.return_value.register_service.call_args[0][0] self.assertEqual('_openstack._tcp.local.', info.type) self.assertEqual('baremetal._openstack._tcp.local.', info.name) self.assertEqual('127.0.0.1', socket.inet_ntoa(info.addresses[0])) self.assertEqual({b'path': b'/baremetal', b'answer': b'42', b'foo': b'bar'}, info.properties) @mock.patch.object(mdns.time, 'sleep', autospec=True) def test_with_race(self, mock_sleep, mock_zc): mock_zc.return_value.register_service.side_effect = [ zeroconf.NonUniqueNameException, zeroconf.NonUniqueNameException, zeroconf.NonUniqueNameException, None ] zc = mdns.Zeroconf() zc.register_service('baremetal', 'https://127.0.0.1/baremetal') mock_zc.return_value.register_service.assert_called_with(mock.ANY) self.assertEqual(4, mock_zc.return_value.register_service.call_count) mock_sleep.assert_has_calls([mock.call(i) for i in (0.1, 0.2, 0.4)]) def test_with_interfaces(self, mock_zc): CONF.set_override('interfaces', ['10.0.0.1', '192.168.1.1'], group='mdns') zc = mdns.Zeroconf() zc.register_service('baremetal', 'https://127.0.0.1/baremetal') mock_zc.assert_called_once_with(interfaces=['10.0.0.1', '192.168.1.1'], ip_version=None) 
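# Unlike test_ok above, an explicit [mdns]interfaces override is expected to
# replace zeroconf.InterfaceChoice.All with the configured address list and
# to leave ip_version unset.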
mock_zc.return_value.register_service.assert_called_once_with(mock.ANY) info = mock_zc.return_value.register_service.call_args[0][0] self.assertEqual('_openstack._tcp.local.', info.type) self.assertEqual('baremetal._openstack._tcp.local.', info.name) self.assertEqual('127.0.0.1', socket.inet_ntoa(info.addresses[0])) self.assertEqual({b'path': b'/baremetal'}, info.properties) @mock.patch.object(mdns.time, 'sleep', autospec=True) def test_failure(self, mock_sleep, mock_zc): mock_zc.return_value.register_service.side_effect = ( zeroconf.NonUniqueNameException ) zc = mdns.Zeroconf() self.assertRaises(exception.ServiceRegistrationFailure, zc.register_service, 'baremetal', 'https://127.0.0.1/baremetal') mock_zc.return_value.register_service.assert_called_with(mock.ANY) self.assertEqual(CONF.mdns.registration_attempts, mock_zc.return_value.register_service.call_count) self.assertEqual(CONF.mdns.registration_attempts - 1, mock_sleep.call_count) class ParseEndpointTestCase(base.IronicLibTestCase): def test_simple(self): endpoint = mdns._parse_endpoint('http://127.0.0.1') self.assertEqual(1, len(endpoint.addresses)) self.assertEqual('127.0.0.1', socket.inet_ntoa(endpoint.addresses[0])) self.assertEqual(80, endpoint.port) self.assertEqual({}, endpoint.params) self.assertIsNone(endpoint.hostname) def test_simple_https(self): endpoint = mdns._parse_endpoint('https://127.0.0.1') self.assertEqual(1, len(endpoint.addresses)) self.assertEqual('127.0.0.1', socket.inet_ntoa(endpoint.addresses[0])) self.assertEqual(443, endpoint.port) self.assertEqual({}, endpoint.params) self.assertIsNone(endpoint.hostname) def test_with_path_and_port(self): endpoint = mdns._parse_endpoint('http://127.0.0.1:8080/bm') self.assertEqual(1, len(endpoint.addresses)) self.assertEqual('127.0.0.1', socket.inet_ntoa(endpoint.addresses[0])) self.assertEqual(8080, endpoint.port) self.assertEqual({'path': '/bm', 'protocol': 'http'}, endpoint.params) self.assertIsNone(endpoint.hostname) @mock.patch.object(socket, 'getaddrinfo', autospec=True) def test_resolve(self, mock_resolve): mock_resolve.return_value = [ (socket.AF_INET, None, None, None, ('1.2.3.4',)), (socket.AF_INET6, None, None, None, ('::2', 'scope')), ] endpoint = mdns._parse_endpoint('http://example.com') self.assertEqual(2, len(endpoint.addresses)) self.assertEqual('1.2.3.4', socket.inet_ntoa(endpoint.addresses[0])) self.assertEqual('::2', socket.inet_ntop(socket.AF_INET6, endpoint.addresses[1])) self.assertEqual(80, endpoint.port) self.assertEqual({}, endpoint.params) self.assertEqual('example.com.', endpoint.hostname) mock_resolve.assert_called_once_with('example.com', 80, mock.ANY, socket.IPPROTO_TCP) @mock.patch('ironic_lib.utils.get_route_source', autospec=True) @mock.patch('zeroconf.Zeroconf', autospec=True) class GetEndpointTestCase(base.IronicLibTestCase): def test_simple(self, mock_zc, mock_route): mock_zc.return_value.get_service_info.return_value = mock.Mock( address=socket.inet_aton('192.168.1.1'), port=80, properties={}, **{'parsed_addresses.return_value': ['192.168.1.1']} ) endp, params = mdns.get_endpoint('baremetal') self.assertEqual('http://192.168.1.1:80', endp) self.assertEqual({}, params) mock_zc.return_value.get_service_info.assert_called_once_with( 'baremetal._openstack._tcp.local.', 'baremetal._openstack._tcp.local.' 
) mock_zc.return_value.close.assert_called_once_with() def test_v6(self, mock_zc, mock_route): mock_zc.return_value.get_service_info.return_value = mock.Mock( port=80, properties={}, **{'parsed_addresses.return_value': ['::2']} ) endp, params = mdns.get_endpoint('baremetal') self.assertEqual('http://[::2]:80', endp) self.assertEqual({}, params) mock_zc.return_value.get_service_info.assert_called_once_with( 'baremetal._openstack._tcp.local.', 'baremetal._openstack._tcp.local.' ) mock_zc.return_value.close.assert_called_once_with() def test_skip_invalid(self, mock_zc, mock_route): mock_zc.return_value.get_service_info.return_value = mock.Mock( port=80, properties={}, **{'parsed_addresses.return_value': ['::1', '::2', '::3']} ) mock_route.side_effect = [None, '::4'] endp, params = mdns.get_endpoint('baremetal') self.assertEqual('http://[::3]:80', endp) self.assertEqual({}, params) mock_zc.return_value.get_service_info.assert_called_once_with( 'baremetal._openstack._tcp.local.', 'baremetal._openstack._tcp.local.' ) mock_zc.return_value.close.assert_called_once_with() self.assertEqual(2, mock_route.call_count) def test_fallback(self, mock_zc, mock_route): mock_zc.return_value.get_service_info.return_value = mock.Mock( port=80, properties={}, **{'parsed_addresses.return_value': ['::2', '::3']} ) mock_route.side_effect = [None, None] endp, params = mdns.get_endpoint('baremetal') self.assertEqual('http://[::2]:80', endp) self.assertEqual({}, params) mock_zc.return_value.get_service_info.assert_called_once_with( 'baremetal._openstack._tcp.local.', 'baremetal._openstack._tcp.local.' ) mock_zc.return_value.close.assert_called_once_with() self.assertEqual(2, mock_route.call_count) def test_localhost_only(self, mock_zc, mock_route): mock_zc.return_value.get_service_info.return_value = mock.Mock( port=80, properties={}, **{'parsed_addresses.return_value': ['::1']} ) self.assertRaises(exception.ServiceLookupFailure, mdns.get_endpoint, 'baremetal') mock_zc.return_value.get_service_info.assert_called_once_with( 'baremetal._openstack._tcp.local.', 'baremetal._openstack._tcp.local.' ) mock_zc.return_value.close.assert_called_once_with() self.assertFalse(mock_route.called) def test_https(self, mock_zc, mock_route): mock_zc.return_value.get_service_info.return_value = mock.Mock( address=socket.inet_aton('192.168.1.1'), port=443, properties={}, **{'parsed_addresses.return_value': ['192.168.1.1']} ) endp, params = mdns.get_endpoint('baremetal') self.assertEqual('https://192.168.1.1:443', endp) self.assertEqual({}, params) mock_zc.return_value.get_service_info.assert_called_once_with( 'baremetal._openstack._tcp.local.', 'baremetal._openstack._tcp.local.' ) def test_with_custom_port_and_path(self, mock_zc, mock_route): mock_zc.return_value.get_service_info.return_value = mock.Mock( address=socket.inet_aton('192.168.1.1'), port=8080, properties={b'path': b'/baremetal'}, **{'parsed_addresses.return_value': ['192.168.1.1']} ) endp, params = mdns.get_endpoint('baremetal') self.assertEqual('https://192.168.1.1:8080/baremetal', endp) self.assertEqual({}, params) mock_zc.return_value.get_service_info.assert_called_once_with( 'baremetal._openstack._tcp.local.', 'baremetal._openstack._tcp.local.' 
) def test_with_custom_port_path_and_protocol(self, mock_zc, mock_route): mock_zc.return_value.get_service_info.return_value = mock.Mock( address=socket.inet_aton('192.168.1.1'), port=8080, properties={b'path': b'/baremetal', b'protocol': b'http'}, **{'parsed_addresses.return_value': ['192.168.1.1']} ) endp, params = mdns.get_endpoint('baremetal') self.assertEqual('http://192.168.1.1:8080/baremetal', endp) self.assertEqual({}, params) mock_zc.return_value.get_service_info.assert_called_once_with( 'baremetal._openstack._tcp.local.', 'baremetal._openstack._tcp.local.' ) def test_with_params(self, mock_zc, mock_route): mock_zc.return_value.get_service_info.return_value = mock.Mock( address=socket.inet_aton('192.168.1.1'), port=80, properties={b'ipa_debug': True}, **{'parsed_addresses.return_value': ['192.168.1.1']} ) endp, params = mdns.get_endpoint('baremetal') self.assertEqual('http://192.168.1.1:80', endp) self.assertEqual({'ipa_debug': True}, params) mock_zc.return_value.get_service_info.assert_called_once_with( 'baremetal._openstack._tcp.local.', 'baremetal._openstack._tcp.local.' ) def test_binary_data(self, mock_zc, mock_route): mock_zc.return_value.get_service_info.return_value = mock.Mock( address=socket.inet_aton('192.168.1.1'), port=80, properties={b'ipa_debug': True, b'binary': b'\xe2\x28\xa1'}, **{'parsed_addresses.return_value': ['192.168.1.1']} ) endp, params = mdns.get_endpoint('baremetal') self.assertEqual('http://192.168.1.1:80', endp) self.assertEqual({'ipa_debug': True, 'binary': b'\xe2\x28\xa1'}, params) mock_zc.return_value.get_service_info.assert_called_once_with( 'baremetal._openstack._tcp.local.', 'baremetal._openstack._tcp.local.' ) def test_invalid_key(self, mock_zc, mock_route): mock_zc.return_value.get_service_info.return_value = mock.Mock( address=socket.inet_aton('192.168.1.1'), port=80, properties={b'ipa_debug': True, b'\xc3\x28': b'value'}, **{'parsed_addresses.return_value': ['192.168.1.1']} ) self.assertRaisesRegex(exception.ServiceLookupFailure, 'Cannot decode key', mdns.get_endpoint, 'baremetal') mock_zc.return_value.get_service_info.assert_called_once_with( 'baremetal._openstack._tcp.local.', 'baremetal._openstack._tcp.local.' ) def test_with_server(self, mock_zc, mock_route): mock_zc.return_value.get_service_info.return_value = mock.Mock( address=socket.inet_aton('192.168.1.1'), port=443, server='openstack.example.com.', properties={}, **{'parsed_addresses.return_value': ['192.168.1.1']} ) endp, params = mdns.get_endpoint('baremetal') self.assertEqual('https://openstack.example.com:443', endp) self.assertEqual({}, params) mock_zc.return_value.get_service_info.assert_called_once_with( 'baremetal._openstack._tcp.local.', 'baremetal._openstack._tcp.local.' ) @mock.patch('time.sleep', autospec=True) def test_not_found(self, mock_sleep, mock_zc, mock_route): mock_zc.return_value.get_service_info.return_value = None self.assertRaisesRegex(exception.ServiceLookupFailure, 'baremetal service', mdns.get_endpoint, 'baremetal') self.assertEqual(CONF.mdns.lookup_attempts - 1, mock_sleep.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_metrics.py0000664000175000017500000001556500000000000022137 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace Hosting # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import types from unittest import mock from oslo_utils import reflection from ironic_lib import metrics as metricslib from ironic_lib import metrics_utils from ironic_lib.tests import base METRICS = metrics_utils.get_metrics_logger(prefix='foo', backend='noop') @METRICS.timer('testing1') def timer_check(run, timer=None): pass @METRICS.counter('testing2') def counter_check(run, counter=None): pass @METRICS.gauge('testing2') def gauge_check(run, gauge=None): pass class MockedMetricLogger(metricslib.MetricLogger): _gauge = mock.Mock(spec_set=types.FunctionType) _counter = mock.Mock(spec_set=types.FunctionType) _timer = mock.Mock(spec_set=types.FunctionType) class TestMetricReflection(base.IronicLibTestCase): def test_timer_reflection(self): # Ensure our decorator is done correctly (functools.wraps) and we can # get the arguments of our decorated function. expected = ['run', 'timer'] signature = reflection.get_signature(timer_check) parameters = list(signature.parameters) self.assertEqual(expected, parameters) def test_counter_reflection(self): # Ensure our decorator is done correctly (functools.wraps) and we can # get the arguments of our decorated function. expected = ['run', 'counter'] signature = reflection.get_signature(counter_check) parameters = list(signature.parameters) self.assertEqual(expected, parameters) def test_gauge_reflection(self): # Ensure our decorator is done correctly (functools.wraps) and we can # get the arguments of our decorated function. 
expected = ['run', 'gauge'] signature = reflection.get_signature(gauge_check) parameters = list(signature.parameters) self.assertEqual(expected, parameters) class TestMetricLogger(base.IronicLibTestCase): def setUp(self): super(TestMetricLogger, self).setUp() self.ml = MockedMetricLogger('prefix', '.') self.ml_no_prefix = MockedMetricLogger('', '.') self.ml_other_delim = MockedMetricLogger('prefix', '*') self.ml_default = MockedMetricLogger() def test_init(self): self.assertEqual(self.ml._prefix, 'prefix') self.assertEqual(self.ml._delimiter, '.') self.assertEqual(self.ml_no_prefix._prefix, '') self.assertEqual(self.ml_other_delim._delimiter, '*') self.assertEqual(self.ml_default._prefix, '') def test_get_metric_name(self): self.assertEqual( self.ml.get_metric_name('metric'), 'prefix.metric') self.assertEqual( self.ml_no_prefix.get_metric_name('metric'), 'metric') self.assertEqual( self.ml_other_delim.get_metric_name('metric'), 'prefix*metric') def test_send_gauge(self): self.ml.send_gauge('prefix.metric', 10) self.ml._gauge.assert_called_once_with('prefix.metric', 10) def test_send_counter(self): self.ml.send_counter('prefix.metric', 10) self.ml._counter.assert_called_once_with( 'prefix.metric', 10, sample_rate=None) self.ml._counter.reset_mock() self.ml.send_counter('prefix.metric', 10, sample_rate=1.0) self.ml._counter.assert_called_once_with( 'prefix.metric', 10, sample_rate=1.0) self.ml._counter.reset_mock() self.ml.send_counter('prefix.metric', 10, sample_rate=0.0) self.assertFalse(self.ml._counter.called) def test_send_timer(self): self.ml.send_timer('prefix.metric', 10) self.ml._timer.assert_called_once_with('prefix.metric', 10) @mock.patch('ironic_lib.metrics._time', autospec=True) @mock.patch('ironic_lib.metrics.MetricLogger.send_timer', autospec=True) def test_decorator_timer(self, mock_timer, mock_time): mock_time.side_effect = [1, 43] @self.ml.timer('foo.bar.baz') def func(x): return x * x func(10) mock_timer.assert_called_once_with(self.ml, 'prefix.foo.bar.baz', 42 * 1000) @mock.patch('ironic_lib.metrics.MetricLogger.send_counter', autospec=True) def test_decorator_counter(self, mock_counter): @self.ml.counter('foo.bar.baz') def func(x): return x * x func(10) mock_counter.assert_called_once_with(self.ml, 'prefix.foo.bar.baz', 1, sample_rate=None) @mock.patch('ironic_lib.metrics.MetricLogger.send_counter', autospec=True) def test_decorator_counter_sample_rate(self, mock_counter): @self.ml.counter('foo.bar.baz', sample_rate=0.5) def func(x): return x * x func(10) mock_counter.assert_called_once_with(self.ml, 'prefix.foo.bar.baz', 1, sample_rate=0.5) @mock.patch('ironic_lib.metrics.MetricLogger.send_gauge', autospec=True) def test_decorator_gauge(self, mock_gauge): @self.ml.gauge('foo.bar.baz') def func(x): return x func(10) mock_gauge.assert_called_once_with(self.ml, 'prefix.foo.bar.baz', 10) @mock.patch('ironic_lib.metrics._time', autospec=True) @mock.patch('ironic_lib.metrics.MetricLogger.send_timer', autospec=True) def test_context_mgr_timer(self, mock_timer, mock_time): mock_time.side_effect = [1, 43] with self.ml.timer('foo.bar.baz'): pass mock_timer.assert_called_once_with(self.ml, 'prefix.foo.bar.baz', 42 * 1000) @mock.patch('ironic_lib.metrics.MetricLogger.send_counter', autospec=True) def test_context_mgr_counter(self, mock_counter): with self.ml.counter('foo.bar.baz'): pass mock_counter.assert_called_once_with(self.ml, 'prefix.foo.bar.baz', 1, sample_rate=None) @mock.patch('ironic_lib.metrics.MetricLogger.send_counter', autospec=True) def 
test_context_mgr_counter_sample_rate(self, mock_counter): with self.ml.counter('foo.bar.baz', sample_rate=0.5): pass mock_counter.assert_called_once_with(self.ml, 'prefix.foo.bar.baz', 1, sample_rate=0.5) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_metrics_collector.py0000664000175000017500000000476200000000000024202 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace Hosting # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from ironic_lib import metrics_collector from ironic_lib.tests import base def connect(family=None, type=None, proto=None): """Dummy function to provide signature for autospec""" pass class TestDictCollectionMetricLogger(base.IronicLibTestCase): def setUp(self): super(TestDictCollectionMetricLogger, self).setUp() self.ml = metrics_collector.DictCollectionMetricLogger( 'prefix', '.') @mock.patch('ironic_lib.metrics_collector.' 'DictCollectionMetricLogger._send', autospec=True) def test_gauge(self, mock_send): self.ml._gauge('metric', 10) mock_send.assert_called_once_with(self.ml, 'metric', 10, 'g') @mock.patch('ironic_lib.metrics_collector.' 'DictCollectionMetricLogger._send', autospec=True) def test_counter(self, mock_send): self.ml._counter('metric', 10) mock_send.assert_called_once_with(self.ml, 'metric', 10, 'c', sample_rate=None) @mock.patch('ironic_lib.metrics_collector.' 'DictCollectionMetricLogger._send', autospec=True) def test_timer(self, mock_send): self.ml._timer('metric', 10) mock_send.assert_called_once_with(self.ml, 'metric', 10, 'ms') def test_send(self): expected = { 'part1.part1': {'count': 2, 'type': 'counter'}, 'part1.part2': {'type': 'gauge', 'value': 66}, 'part1.magic': {'count': 2, 'sum': 22, 'type': 'timer'}, } self.ml._send('part1.part1', 1, 'c') self.ml._send('part1.part1', 1, 'c') self.ml._send('part1.part2', 66, 'g') self.ml._send('part1.magic', 2, 'ms') self.ml._send('part1.magic', 20, 'ms') results = self.ml.get_metrics_data() self.assertEqual(expected, results) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_metrics_statsd.py0000664000175000017500000000733700000000000023517 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace Hosting # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
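# The assertions below pin down the statsd wire format emitted by
# StatsdMetricLogger: one UDP datagram per metric, shaped as
# b'<name>:<value>|<type>', with '@<sample_rate>' appended when a sample
# rate is given. A short sketch using the same constructor arguments as the
# fixture below:
#
#     ml = metrics_statsd.StatsdMetricLogger('prefix', '.',
#                                            'test-host', 4321)
#     ml._send('part1.part2', 5, 'type', sample_rate=0.5)
#     # -> socket.sendto(b'part1.part2:5|type@0.5', ('test-host', 4321))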
import socket from unittest import mock from ironic_lib import metrics_statsd from ironic_lib.tests import base def connect(family=None, type=None, proto=None): """Dummy function to provide signature for autospec""" pass class TestStatsdMetricLogger(base.IronicLibTestCase): def setUp(self): super(TestStatsdMetricLogger, self).setUp() self.ml = metrics_statsd.StatsdMetricLogger('prefix', '.', 'test-host', 4321) def test_init(self): self.assertEqual(self.ml._host, 'test-host') self.assertEqual(self.ml._port, 4321) self.assertEqual(self.ml._target, ('test-host', 4321)) @mock.patch('ironic_lib.metrics_statsd.StatsdMetricLogger._send', autospec=True) def test_gauge(self, mock_send): self.ml._gauge('metric', 10) mock_send.assert_called_once_with(self.ml, 'metric', 10, 'g') @mock.patch('ironic_lib.metrics_statsd.StatsdMetricLogger._send', autospec=True) def test_counter(self, mock_send): self.ml._counter('metric', 10) mock_send.assert_called_once_with(self.ml, 'metric', 10, 'c', sample_rate=None) mock_send.reset_mock() self.ml._counter('metric', 10, sample_rate=1.0) mock_send.assert_called_once_with(self.ml, 'metric', 10, 'c', sample_rate=1.0) @mock.patch('ironic_lib.metrics_statsd.StatsdMetricLogger._send', autospec=True) def test_timer(self, mock_send): self.ml._timer('metric', 10) mock_send.assert_called_once_with(self.ml, 'metric', 10, 'ms') @mock.patch('socket.socket', autospec=connect) def test_open_socket(self, mock_socket_constructor): self.ml._open_socket() mock_socket_constructor.assert_called_once_with( socket.AF_INET, socket.SOCK_DGRAM) @mock.patch('socket.socket', autospec=connect) def test_send(self, mock_socket_constructor): mock_socket = mock.Mock() mock_socket_constructor.return_value = mock_socket self.ml._send('part1.part2', 2, 'type') mock_socket.sendto.assert_called_once_with( b'part1.part2:2|type', ('test-host', 4321)) mock_socket.close.assert_called_once_with() mock_socket.reset_mock() self.ml._send('part1.part2', 3.14159, 'type') mock_socket.sendto.assert_called_once_with( b'part1.part2:3.14159|type', ('test-host', 4321)) mock_socket.close.assert_called_once_with() mock_socket.reset_mock() self.ml._send('part1.part2', 5, 'type') mock_socket.sendto.assert_called_once_with( b'part1.part2:5|type', ('test-host', 4321)) mock_socket.close.assert_called_once_with() mock_socket.reset_mock() self.ml._send('part1.part2', 5, 'type', sample_rate=0.5) mock_socket.sendto.assert_called_once_with( b'part1.part2:5|type@0.5', ('test-host', 4321)) mock_socket.close.assert_called_once_with() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_metrics_utils.py0000664000175000017500000001057400000000000023352 0ustar00zuulzuul00000000000000# Copyright 2016 Rackspace Hosting # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
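# The cases below document how get_metrics_logger composes metric names:
# [global_prefix][host, reversed if prepend_host_reverse][prefix][metric],
# joined with the configured delimiter. For example (option names are from
# the [metrics] group used below):
#
#     # with prepend_host=True, prepend_host_reverse=False and
#     # global_prefix='global_pre':
#     metrics = metrics_utils.get_metrics_logger(
#         prefix='foo', host='host.example.com')
#     metrics.get_metric_name('bar')
#     # -> 'global_pre.host.example.com.foo.bar'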
from oslo_config import cfg from ironic_lib import exception from ironic_lib import metrics as metricslib from ironic_lib import metrics_statsd from ironic_lib import metrics_utils from ironic_lib.tests import base CONF = cfg.CONF class TestGetLogger(base.IronicLibTestCase): def setUp(self): super(TestGetLogger, self).setUp() def test_default_backend(self): metrics = metrics_utils.get_metrics_logger('foo') self.assertIsInstance(metrics, metricslib.NoopMetricLogger) def test_statsd_backend(self): CONF.set_override('backend', 'statsd', group='metrics') metrics = metrics_utils.get_metrics_logger('foo') self.assertIsInstance(metrics, metrics_statsd.StatsdMetricLogger) CONF.clear_override('backend', group='metrics') def test_nonexisting_backend(self): self.assertRaises(exception.InvalidMetricConfig, metrics_utils.get_metrics_logger, 'foo', 'test') def test_numeric_prefix(self): self.assertRaises(exception.InvalidMetricConfig, metrics_utils.get_metrics_logger, 1) def test_numeric_list_prefix(self): self.assertRaises(exception.InvalidMetricConfig, metrics_utils.get_metrics_logger, (1, 2)) def test_default_prefix(self): metrics = metrics_utils.get_metrics_logger() self.assertIsInstance(metrics, metricslib.NoopMetricLogger) self.assertEqual(metrics.get_metric_name("bar"), "bar") def test_prepend_host_backend(self): CONF.set_override('prepend_host', True, group='metrics') CONF.set_override('prepend_host_reverse', False, group='metrics') metrics = metrics_utils.get_metrics_logger(prefix='foo', host="host.example.com") self.assertIsInstance(metrics, metricslib.NoopMetricLogger) self.assertEqual(metrics.get_metric_name("bar"), "host.example.com.foo.bar") CONF.clear_override('prepend_host', group='metrics') CONF.clear_override('prepend_host_reverse', group='metrics') def test_prepend_global_prefix_host_backend(self): CONF.set_override('prepend_host', True, group='metrics') CONF.set_override('prepend_host_reverse', False, group='metrics') CONF.set_override('global_prefix', 'global_pre', group='metrics') metrics = metrics_utils.get_metrics_logger(prefix='foo', host="host.example.com") self.assertIsInstance(metrics, metricslib.NoopMetricLogger) self.assertEqual(metrics.get_metric_name("bar"), "global_pre.host.example.com.foo.bar") CONF.clear_override('prepend_host', group='metrics') CONF.clear_override('prepend_host_reverse', group='metrics') CONF.clear_override('global_prefix', group='metrics') def test_prepend_other_delim(self): metrics = metrics_utils.get_metrics_logger('foo', delimiter='*') self.assertIsInstance(metrics, metricslib.NoopMetricLogger) self.assertEqual(metrics.get_metric_name("bar"), "foo*bar") def test_prepend_host_reverse_backend(self): CONF.set_override('prepend_host', True, group='metrics') CONF.set_override('prepend_host_reverse', True, group='metrics') metrics = metrics_utils.get_metrics_logger('foo', host="host.example.com") self.assertIsInstance(metrics, metricslib.NoopMetricLogger) self.assertEqual(metrics.get_metric_name("bar"), "com.example.host.foo.bar") CONF.clear_override('prepend_host', group='metrics') CONF.clear_override('prepend_host_reverse', group='metrics') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_qemu_img.py0000664000175000017500000001616300000000000022267 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from unittest import mock from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import imageutils from ironic_lib import qemu_img from ironic_lib.tests import base from ironic_lib import utils CONF = cfg.CONF class ImageInfoTestCase(base.IronicLibTestCase): @mock.patch.object(os.path, 'exists', return_value=False, autospec=True) def test_image_info_path_doesnt_exist(self, path_exists_mock): self.assertRaises(FileNotFoundError, qemu_img.image_info, 'noimg') path_exists_mock.assert_called_once_with('noimg') @mock.patch.object(utils, 'execute', return_value=('out', 'err'), autospec=True) @mock.patch.object(imageutils, 'QemuImgInfo', autospec=True) @mock.patch.object(os.path, 'exists', return_value=True, autospec=True) def test_image_info_path_exists(self, path_exists_mock, image_info_mock, execute_mock): qemu_img.image_info('img') path_exists_mock.assert_called_once_with('img') execute_mock.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', 'img', '--output=json', prlimit=mock.ANY) image_info_mock.assert_called_once_with('out', format='json') class ConvertImageTestCase(base.IronicLibTestCase): @mock.patch.object(utils, 'execute', autospec=True) def test_convert_image(self, execute_mock): qemu_img.convert_image('source', 'dest', 'out_format') execute_mock.assert_called_once_with( 'qemu-img', 'convert', '-O', 'out_format', 'source', 'dest', run_as_root=False, prlimit=mock.ANY, use_standard_locale=True, env_variables={'MALLOC_ARENA_MAX': '3'}) @mock.patch.object(utils, 'execute', autospec=True) def test_convert_image_flags(self, execute_mock): qemu_img.convert_image('source', 'dest', 'out_format', cache='directsync', out_of_order=True, sparse_size='0') execute_mock.assert_called_once_with( 'qemu-img', 'convert', '-O', 'out_format', '-t', 'directsync', '-S', '0', '-W', 'source', 'dest', run_as_root=False, prlimit=mock.ANY, use_standard_locale=True, env_variables={'MALLOC_ARENA_MAX': '3'}) @mock.patch.object(utils, 'execute', autospec=True) def test_convert_image_retries(self, execute_mock): ret_err = 'qemu: qemu_thread_create: Resource temporarily unavailable' execute_mock.side_effect = [ processutils.ProcessExecutionError(stderr=ret_err), ('', ''), processutils.ProcessExecutionError(stderr=ret_err), ('', ''), ('', ''), ] qemu_img.convert_image('source', 'dest', 'out_format') convert_call = mock.call('qemu-img', 'convert', '-O', 'out_format', 'source', 'dest', run_as_root=False, prlimit=mock.ANY, use_standard_locale=True, env_variables={'MALLOC_ARENA_MAX': '3'}) execute_mock.assert_has_calls([ convert_call, mock.call('sync'), convert_call, mock.call('sync'), convert_call, ]) @mock.patch.object(utils, 'execute', autospec=True) def test_convert_image_retries_alternate_error(self, execute_mock): ret_err = 'Failed to allocate memory: Cannot allocate memory\n' execute_mock.side_effect = [ processutils.ProcessExecutionError(stderr=ret_err), ('', ''), processutils.ProcessExecutionError(stderr=ret_err), ('', ''), ('', ''), ] qemu_img.convert_image('source', 'dest', 'out_format') convert_call = mock.call('qemu-img', 'convert', '-O', 
'out_format', 'source', 'dest', run_as_root=False, prlimit=mock.ANY, use_standard_locale=True, env_variables={'MALLOC_ARENA_MAX': '3'}) execute_mock.assert_has_calls([ convert_call, mock.call('sync'), convert_call, mock.call('sync'), convert_call, ]) @mock.patch.object(utils, 'execute', autospec=True) def test_convert_image_retries_and_fails(self, execute_mock): ret_err = 'qemu: qemu_thread_create: Resource temporarily unavailable' execute_mock.side_effect = [ processutils.ProcessExecutionError(stderr=ret_err), ('', ''), processutils.ProcessExecutionError(stderr=ret_err), ('', ''), processutils.ProcessExecutionError(stderr=ret_err), ('', ''), processutils.ProcessExecutionError(stderr=ret_err), ] self.assertRaises(processutils.ProcessExecutionError, qemu_img.convert_image, 'source', 'dest', 'out_format') convert_call = mock.call('qemu-img', 'convert', '-O', 'out_format', 'source', 'dest', run_as_root=False, prlimit=mock.ANY, use_standard_locale=True, env_variables={'MALLOC_ARENA_MAX': '3'}) execute_mock.assert_has_calls([ convert_call, mock.call('sync'), convert_call, mock.call('sync'), convert_call, ]) @mock.patch.object(utils, 'execute', autospec=True) def test_convert_image_just_fails(self, execute_mock): ret_err = 'Aliens' execute_mock.side_effect = [ processutils.ProcessExecutionError(stderr=ret_err), ] self.assertRaises(processutils.ProcessExecutionError, qemu_img.convert_image, 'source', 'dest', 'out_format') convert_call = mock.call('qemu-img', 'convert', '-O', 'out_format', 'source', 'dest', run_as_root=False, prlimit=mock.ANY, use_standard_locale=True, env_variables={'MALLOC_ARENA_MAX': '3'}) execute_mock.assert_has_calls([ convert_call, ]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/tests/test_utils.py0000664000175000017500000010513000000000000021615 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # Copyright 2012 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
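# A large part of this module covers utils.parse_root_device_hints. As the
# assertions below show, bare hint values are normalized into operator
# expressions: strings become 's== <value>' with spaces URL-encoded, integers
# become '== <value>', and hints that already carry an operator
# ('>=', 's!=', '<or>', ...) are preserved as-is. A short sketch:
#
#     utils.parse_root_device_hints({'size': 12345, 'model': 'FOO model'})
#     # -> {'size': '== 12345', 'model': 's== foo%20model'}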
import copy import errno import os import os.path from unittest import mock from oslo_concurrency import processutils from oslo_config import cfg from ironic_lib import exception from ironic_lib.tests import base from ironic_lib import utils CONF = cfg.CONF class BareMetalUtilsTestCase(base.IronicLibTestCase): def test_unlink(self): with mock.patch.object(os, "unlink", autospec=True) as unlink_mock: unlink_mock.return_value = None utils.unlink_without_raise("/fake/path") unlink_mock.assert_called_once_with("/fake/path") def test_unlink_ENOENT(self): with mock.patch.object(os, "unlink", autospec=True) as unlink_mock: unlink_mock.side_effect = OSError(errno.ENOENT) utils.unlink_without_raise("/fake/path") unlink_mock.assert_called_once_with("/fake/path") class ExecuteTestCase(base.IronicLibTestCase): # Allow calls to utils.execute() and related functions block_execute = False @mock.patch.object(processutils, 'execute', autospec=True) @mock.patch.object(os.environ, 'copy', return_value={}, autospec=True) def test_execute_use_standard_locale_no_env_variables(self, env_mock, execute_mock): utils.execute('foo', use_standard_locale=True) execute_mock.assert_called_once_with('foo', env_variables={'LC_ALL': 'C'}) @mock.patch.object(processutils, 'execute', autospec=True) def test_execute_use_standard_locale_with_env_variables(self, execute_mock): utils.execute('foo', use_standard_locale=True, env_variables={'foo': 'bar'}) execute_mock.assert_called_once_with('foo', env_variables={'LC_ALL': 'C', 'foo': 'bar'}) @mock.patch.object(processutils, 'execute', autospec=True) def test_execute_not_use_standard_locale(self, execute_mock): utils.execute('foo', use_standard_locale=False, env_variables={'foo': 'bar'}) execute_mock.assert_called_once_with('foo', env_variables={'foo': 'bar'}) def test_execute_without_root_helper(self): CONF.set_override('root_helper', None, group='ironic_lib') with mock.patch.object( processutils, 'execute', autospec=True) as execute_mock: utils.execute('foo', run_as_root=False) execute_mock.assert_called_once_with('foo', run_as_root=False) def test_execute_without_root_helper_run_as_root(self): CONF.set_override('root_helper', None, group='ironic_lib') with mock.patch.object( processutils, 'execute', autospec=True) as execute_mock: utils.execute('foo', run_as_root=True) execute_mock.assert_called_once_with('foo', run_as_root=False) def test_execute_with_root_helper(self): with mock.patch.object( processutils, 'execute', autospec=True) as execute_mock: utils.execute('foo', run_as_root=False) execute_mock.assert_called_once_with('foo', run_as_root=False) def test_execute_with_root_helper_run_as_root(self): with mock.patch.object( processutils, 'execute', autospec=True) as execute_mock: utils.execute('foo', run_as_root=True) execute_mock.assert_called_once_with( 'foo', run_as_root=True, root_helper=CONF.ironic_lib.root_helper) @mock.patch.object(utils, 'LOG', autospec=True) def _test_execute_with_log_stdout(self, log_mock, log_stdout=None): with mock.patch.object( processutils, 'execute', autospec=True) as execute_mock: execute_mock.return_value = ('stdout', 'stderr') if log_stdout is not None: utils.execute('foo', log_stdout=log_stdout) else: utils.execute('foo') execute_mock.assert_called_once_with('foo') name, args, kwargs = log_mock.debug.mock_calls[0] if log_stdout is False: self.assertEqual(1, log_mock.debug.call_count) self.assertNotIn('stdout', args[0]) else: self.assertEqual(2, log_mock.debug.call_count) self.assertIn('stdout', args[0]) def 
test_execute_with_log_stdout_default(self): self._test_execute_with_log_stdout() def test_execute_with_log_stdout_true(self): self._test_execute_with_log_stdout(log_stdout=True) def test_execute_with_log_stdout_false(self): self._test_execute_with_log_stdout(log_stdout=False) @mock.patch.object(utils, 'LOG', autospec=True) @mock.patch.object(processutils, 'execute', autospec=True) def test_execute_command_not_found(self, execute_mock, log_mock): execute_mock.side_effect = FileNotFoundError self.assertRaises(FileNotFoundError, utils.execute, 'foo') execute_mock.assert_called_once_with('foo') name, args, kwargs = log_mock.debug.mock_calls[0] self.assertEqual(1, log_mock.debug.call_count) self.assertIn('not found', args[0]) class MkfsTestCase(base.IronicLibTestCase): @mock.patch.object(utils, 'execute', autospec=True) def test_mkfs(self, execute_mock): utils.mkfs('ext4', '/my/block/dev') utils.mkfs('msdos', '/my/msdos/block/dev') utils.mkfs('swap', '/my/swap/block/dev') expected = [mock.call('mkfs', '-t', 'ext4', '-F', '/my/block/dev', run_as_root=True, use_standard_locale=True), mock.call('mkfs', '-t', 'msdos', '/my/msdos/block/dev', run_as_root=True, use_standard_locale=True), mock.call('mkswap', '/my/swap/block/dev', run_as_root=True, use_standard_locale=True)] self.assertEqual(expected, execute_mock.call_args_list) @mock.patch.object(utils, 'execute', autospec=True) def test_mkfs_with_label(self, execute_mock): utils.mkfs('ext4', '/my/block/dev', 'ext4-vol') utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol') utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol') expected = [mock.call('mkfs', '-t', 'ext4', '-F', '-L', 'ext4-vol', '/my/block/dev', run_as_root=True, use_standard_locale=True), mock.call('mkfs', '-t', 'msdos', '-n', 'msdos-vol', '/my/msdos/block/dev', run_as_root=True, use_standard_locale=True), mock.call('mkswap', '-L', 'swap-vol', '/my/swap/block/dev', run_as_root=True, use_standard_locale=True)] self.assertEqual(expected, execute_mock.call_args_list) @mock.patch.object(utils, 'execute', autospec=True, side_effect=processutils.ProcessExecutionError( stderr=os.strerror(errno.ENOENT))) def test_mkfs_with_unsupported_fs(self, execute_mock): self.assertRaises(exception.FileSystemNotSupported, utils.mkfs, 'foo', '/my/block/dev') @mock.patch.object(utils, 'execute', autospec=True, side_effect=processutils.ProcessExecutionError( stderr='fake')) def test_mkfs_with_unexpected_error(self, execute_mock): self.assertRaises(processutils.ProcessExecutionError, utils.mkfs, 'ext4', '/my/block/dev', 'ext4-vol') class IsHttpUrlTestCase(base.IronicLibTestCase): def test_is_http_url(self): self.assertTrue(utils.is_http_url('http://127.0.0.1')) self.assertTrue(utils.is_http_url('https://127.0.0.1')) self.assertTrue(utils.is_http_url('HTTP://127.1.2.3')) self.assertTrue(utils.is_http_url('HTTPS://127.3.2.1')) self.assertFalse(utils.is_http_url('Zm9vYmFy')) self.assertFalse(utils.is_http_url('11111111')) class ParseRootDeviceTestCase(base.IronicLibTestCase): def test_parse_root_device_hints_without_operators(self): root_device = { 'wwn': '123456', 'model': 'FOO model', 'size': 12345, 'serial': 'foo-serial', 'vendor': 'foo VENDOR with space', 'name': '/dev/sda', 'wwn_with_extension': '123456111', 'wwn_vendor_extension': '111', 'rotational': True, 'hctl': '1:0:0:0', 'by_path': '/dev/disk/by-path/1:0:0:0'} result = utils.parse_root_device_hints(root_device) expected = { 'wwn': 's== 123456', 'model': 's== foo%20model', 'size': '== 12345', 'serial': 's== foo-serial', 'vendor': 's== 
foo%20vendor%20with%20space', 'name': 's== /dev/sda', 'wwn_with_extension': 's== 123456111', 'wwn_vendor_extension': 's== 111', 'rotational': True, 'hctl': 's== 1%3A0%3A0%3A0', 'by_path': 's== /dev/disk/by-path/1%3A0%3A0%3A0'} self.assertEqual(expected, result) def test_parse_root_device_hints_with_operators(self): root_device = { 'wwn': 's== 123456', 'model': 's== foo MODEL', 'size': '>= 12345', 'serial': 's!= foo-serial', 'vendor': 's== foo VENDOR with space', 'name': ' /dev/sda /dev/sdb', 'wwn_with_extension': 's!= 123456111', 'wwn_vendor_extension': 's== 111', 'rotational': True, 'hctl': 's== 1:0:0:0', 'by_path': 's== /dev/disk/by-path/1:0:0:0'} # Validate strings being normalized expected = copy.deepcopy(root_device) expected['model'] = 's== foo%20model' expected['vendor'] = 's== foo%20vendor%20with%20space' expected['hctl'] = 's== 1%3A0%3A0%3A0' expected['by_path'] = 's== /dev/disk/by-path/1%3A0%3A0%3A0' result = utils.parse_root_device_hints(root_device) # The hints already contain the operators, make sure we keep it self.assertEqual(expected, result) def test_parse_root_device_hints_string_compare_operator_name(self): root_device = {'name': 's== /dev/sdb'} # Validate strings being normalized expected = copy.deepcopy(root_device) result = utils.parse_root_device_hints(root_device) # The hints already contain the operators, make sure we keep it self.assertEqual(expected, result) def test_parse_root_device_hints_no_hints(self): result = utils.parse_root_device_hints({}) self.assertIsNone(result) def test_parse_root_device_hints_convert_size(self): for size in (12345, '12345'): result = utils.parse_root_device_hints({'size': size}) self.assertEqual({'size': '== 12345'}, result) def test_parse_root_device_hints_invalid_size(self): for value in ('not-int', -123, 0): self.assertRaises(ValueError, utils.parse_root_device_hints, {'size': value}) def test_parse_root_device_hints_int_or(self): expr = ' 123 456 789' result = utils.parse_root_device_hints({'size': expr}) self.assertEqual({'size': expr}, result) def test_parse_root_device_hints_int_or_invalid(self): expr = ' 123 non-int 789' self.assertRaises(ValueError, utils.parse_root_device_hints, {'size': expr}) def test_parse_root_device_hints_string_or_space(self): expr = ' foo foo bar bar' expected = ' foo foo%20bar bar' result = utils.parse_root_device_hints({'model': expr}) self.assertEqual({'model': expected}, result) def _parse_root_device_hints_convert_rotational(self, values, expected_value): for value in values: result = utils.parse_root_device_hints({'rotational': value}) self.assertEqual({'rotational': expected_value}, result) def test_parse_root_device_hints_convert_rotational(self): self._parse_root_device_hints_convert_rotational( (True, 'true', 'on', 'y', 'yes'), True) self._parse_root_device_hints_convert_rotational( (False, 'false', 'off', 'n', 'no'), False) def test_parse_root_device_hints_invalid_rotational(self): self.assertRaises(ValueError, utils.parse_root_device_hints, {'rotational': 'not-bool'}) def test_parse_root_device_hints_invalid_wwn(self): self.assertRaises(ValueError, utils.parse_root_device_hints, {'wwn': 123}) def test_parse_root_device_hints_invalid_wwn_with_extension(self): self.assertRaises(ValueError, utils.parse_root_device_hints, {'wwn_with_extension': 123}) def test_parse_root_device_hints_invalid_wwn_vendor_extension(self): self.assertRaises(ValueError, utils.parse_root_device_hints, {'wwn_vendor_extension': 123}) def test_parse_root_device_hints_invalid_model(self): self.assertRaises(ValueError, 
utils.parse_root_device_hints, {'model': 123}) def test_parse_root_device_hints_invalid_serial(self): self.assertRaises(ValueError, utils.parse_root_device_hints, {'serial': 123}) def test_parse_root_device_hints_invalid_vendor(self): self.assertRaises(ValueError, utils.parse_root_device_hints, {'vendor': 123}) def test_parse_root_device_hints_invalid_name(self): self.assertRaises(ValueError, utils.parse_root_device_hints, {'name': 123}) def test_parse_root_device_hints_invalid_hctl(self): self.assertRaises(ValueError, utils.parse_root_device_hints, {'hctl': 123}) def test_parse_root_device_hints_invalid_by_path(self): self.assertRaises(ValueError, utils.parse_root_device_hints, {'by_path': 123}) def test_parse_root_device_hints_non_existent_hint(self): self.assertRaises(ValueError, utils.parse_root_device_hints, {'non-existent': 'foo'}) def test_extract_hint_operator_and_values_single_value(self): expected = {'op': '>=', 'values': ['123']} self.assertEqual( expected, utils._extract_hint_operator_and_values( '>= 123', 'size')) def test_extract_hint_operator_and_values_multiple_values(self): expected = {'op': '', 'values': ['123', '456', '789']} expr = ' 123 456 789' self.assertEqual( expected, utils._extract_hint_operator_and_values(expr, 'size')) def test_extract_hint_operator_and_values_multiple_values_space(self): expected = {'op': '', 'values': ['foo', 'foo bar', 'bar']} expr = ' foo foo bar bar' self.assertEqual( expected, utils._extract_hint_operator_and_values(expr, 'model')) def test_extract_hint_operator_and_values_no_operator(self): expected = {'op': '', 'values': ['123']} self.assertEqual( expected, utils._extract_hint_operator_and_values('123', 'size')) def test_extract_hint_operator_and_values_empty_value(self): self.assertRaises( ValueError, utils._extract_hint_operator_and_values, '', 'size') def test_extract_hint_operator_and_values_integer(self): expected = {'op': '', 'values': ['123']} self.assertEqual( expected, utils._extract_hint_operator_and_values(123, 'size')) def test__append_operator_to_hints(self): root_device = {'serial': 'foo', 'size': 12345, 'model': 'foo model', 'rotational': True} expected = {'serial': 's== foo', 'size': '== 12345', 'model': 's== foo model', 'rotational': True} result = utils._append_operator_to_hints(root_device) self.assertEqual(expected, result) def test_normalize_hint_expression_or(self): expr = ' foo foo bar bar' expected = ' foo foo%20bar bar' result = utils._normalize_hint_expression(expr, 'model') self.assertEqual(expected, result) def test_normalize_hint_expression_in(self): expr = ' foo foo bar bar' expected = ' foo foo%20bar bar' result = utils._normalize_hint_expression(expr, 'model') self.assertEqual(expected, result) def test_normalize_hint_expression_op_space(self): expr = 's== test string with space' expected = 's== test%20string%20with%20space' result = utils._normalize_hint_expression(expr, 'model') self.assertEqual(expected, result) def test_normalize_hint_expression_op_no_space(self): expr = 's!= SpongeBob' expected = 's!= spongebob' result = utils._normalize_hint_expression(expr, 'model') self.assertEqual(expected, result) def test_normalize_hint_expression_no_op_space(self): expr = 'no operators' expected = 'no%20operators' result = utils._normalize_hint_expression(expr, 'model') self.assertEqual(expected, result) def test_normalize_hint_expression_no_op_no_space(self): expr = 'NoSpace' expected = 'nospace' result = utils._normalize_hint_expression(expr, 'model') self.assertEqual(expected, result) def 
test_normalize_hint_expression_empty_value(self): self.assertRaises( ValueError, utils._normalize_hint_expression, '', 'size') class MatchRootDeviceTestCase(base.IronicLibTestCase): def setUp(self): super(MatchRootDeviceTestCase, self).setUp() self.devices = [ {'name': '/dev/sda', 'size': 64424509440, 'model': 'ok model', 'serial': 'fakeserial'}, {'name': '/dev/sdb', 'size': 128849018880, 'model': 'big model', 'serial': 'veryfakeserial', 'rotational': 'yes'}, {'name': '/dev/sdc', 'size': 10737418240, 'model': 'small model', 'serial': 'veryveryfakeserial', 'rotational': False}, ] def test_match_root_device_hints_one_hint(self): root_device_hints = {'size': '>= 70'} dev = utils.match_root_device_hints(self.devices, root_device_hints) self.assertEqual('/dev/sdb', dev['name']) def test_match_root_device_hints_rotational(self): root_device_hints = {'rotational': False} dev = utils.match_root_device_hints(self.devices, root_device_hints) self.assertEqual('/dev/sdc', dev['name']) def test_match_root_device_hints_rotational_convert_devices_bool(self): root_device_hints = {'size': '>=100', 'rotational': True} dev = utils.match_root_device_hints(self.devices, root_device_hints) self.assertEqual('/dev/sdb', dev['name']) def test_match_root_device_hints_multiple_hints(self): root_device_hints = {'size': '>= 50', 'model': 's==big model', 'serial': 's==veryfakeserial'} dev = utils.match_root_device_hints(self.devices, root_device_hints) self.assertEqual('/dev/sdb', dev['name']) def test_match_root_device_hints_multiple_hints2(self): root_device_hints = { 'size': '<= 20', 'model': ' model 5 foomodel small model ', 'serial': 's== veryveryfakeserial'} dev = utils.match_root_device_hints(self.devices, root_device_hints) self.assertEqual('/dev/sdc', dev['name']) def test_match_root_device_hints_multiple_hints3(self): root_device_hints = {'rotational': False, 'model': ' small'} dev = utils.match_root_device_hints(self.devices, root_device_hints) self.assertEqual('/dev/sdc', dev['name']) def test_match_root_device_hints_no_operators(self): root_device_hints = {'size': '120', 'model': 'big model', 'serial': 'veryfakeserial'} dev = utils.match_root_device_hints(self.devices, root_device_hints) self.assertEqual('/dev/sdb', dev['name']) def test_match_root_device_hints_no_device_found(self): root_device_hints = {'size': '>=50', 'model': 's==foo'} dev = utils.match_root_device_hints(self.devices, root_device_hints) self.assertIsNone(dev) @mock.patch.object(utils.LOG, 'warning', autospec=True) def test_match_root_device_hints_empty_device_attribute(self, mock_warn): empty_dev = [{'name': '/dev/sda', 'model': ' '}] dev = utils.match_root_device_hints(empty_dev, {'model': 'foo'}) self.assertIsNone(dev) self.assertTrue(mock_warn.called) def test_find_devices_all(self): root_device_hints = {'size': '>= 10'} devs = list(utils.find_devices_by_hints(self.devices, root_device_hints)) self.assertEqual(self.devices, devs) def test_find_devices_none(self): root_device_hints = {'size': '>= 100500'} devs = list(utils.find_devices_by_hints(self.devices, root_device_hints)) self.assertEqual([], devs) def test_find_devices_name(self): root_device_hints = {'name': 's== /dev/sda'} devs = list(utils.find_devices_by_hints(self.devices, root_device_hints)) self.assertEqual([self.devices[0]], devs) class WaitForDisk(base.IronicLibTestCase): def setUp(self): super(WaitForDisk, self).setUp() CONF.set_override('check_device_interval', .01, group='disk_partitioner') CONF.set_override('check_device_max_retries', 2, group='disk_partitioner') 
@mock.patch.object(utils, 'execute', autospec=True) def test_wait_for_disk_to_become_available(self, mock_exc): mock_exc.return_value = ('', '') utils.wait_for_disk_to_become_available('fake-dev') fuser_cmd = ['fuser', 'fake-dev'] fuser_call = mock.call(*fuser_cmd, run_as_root=True, check_exit_code=[0, 1]) self.assertEqual(1, mock_exc.call_count) mock_exc.assert_has_calls([fuser_call]) @mock.patch.object(utils, 'execute', autospec=True, side_effect=processutils.ProcessExecutionError( stderr='fake')) def test_wait_for_disk_to_become_available_no_fuser(self, mock_exc): self.assertRaises(exception.IronicException, utils.wait_for_disk_to_become_available, 'fake-dev') fuser_cmd = ['fuser', 'fake-dev'] fuser_call = mock.call(*fuser_cmd, run_as_root=True, check_exit_code=[0, 1]) self.assertEqual(2, mock_exc.call_count) mock_exc.assert_has_calls([fuser_call, fuser_call]) @mock.patch.object(utils, 'execute', autospec=True) def test_wait_for_disk_to_become_available_device_in_use_psmisc( self, mock_exc): # Test that the device is not available. This version has the 'psmisc' # version of 'fuser' values for stdout and stderr. # NOTE(TheJulia): Looks like fuser returns the actual list of pids # in the stdout output, whereas all other text is returned in # stderr. # The 'psmisc' version has a leading space character in stdout. The # filename is output to stderr. mock_exc.side_effect = [(' 1234 ', 'fake-dev: '), (' 15503 3919 15510 15511', 'fake-dev:')] expected_error = ('Processes with the following PIDs are ' 'holding device fake-dev: 15503, 3919, 15510, ' '15511. Timed out waiting for completion.') self.assertRaisesRegex( exception.IronicException, expected_error, utils.wait_for_disk_to_become_available, 'fake-dev') fuser_cmd = ['fuser', 'fake-dev'] fuser_call = mock.call(*fuser_cmd, run_as_root=True, check_exit_code=[0, 1]) self.assertEqual(2, mock_exc.call_count) mock_exc.assert_has_calls([fuser_call, fuser_call]) @mock.patch.object(utils, 'execute', autospec=True) def test_wait_for_disk_to_become_available_device_in_use_busybox( self, mock_exc): # Test that the device is not available. This version has the 'busybox' # version of 'fuser' values for stdout and stderr. # NOTE(TheJulia): Looks like fuser returns the actual list of pids # in the stdout output, whereas all other text is returned in # stderr. # The 'busybox' version does not have a leading space character in # stdout. Also nothing is output to stderr. mock_exc.side_effect = [('1234', ''), ('15503 3919 15510 15511', '')] expected_error = ('Processes with the following PIDs are ' 'holding device fake-dev: 15503, 3919, 15510, ' '15511. Timed out waiting for completion.') self.assertRaisesRegex( exception.IronicException, expected_error, utils.wait_for_disk_to_become_available, 'fake-dev') fuser_cmd = ['fuser', 'fake-dev'] fuser_call = mock.call(*fuser_cmd, run_as_root=True, check_exit_code=[0, 1]) self.assertEqual(2, mock_exc.call_count) mock_exc.assert_has_calls([fuser_call, fuser_call]) @mock.patch.object(utils, 'execute', autospec=True) def test_wait_for_disk_to_become_available_no_device(self, mock_exc): # NOTE(TheJulia): Looks like fuser returns the actual list of pids # in the stdout output, whereas all other text is returned in # stderr. mock_exc.return_value = ('', 'Specified filename /dev/fake ' 'does not exist.') expected_error = ('Fuser exited with "Specified filename ' '/dev/fake does not exist." while checking ' 'locks for device fake-dev. 
Timed out waiting ' 'for completion.') self.assertRaisesRegex( exception.IronicException, expected_error, utils.wait_for_disk_to_become_available, 'fake-dev') fuser_cmd = ['fuser', 'fake-dev'] fuser_call = mock.call(*fuser_cmd, run_as_root=True, check_exit_code=[0, 1]) self.assertEqual(2, mock_exc.call_count) mock_exc.assert_has_calls([fuser_call, fuser_call]) @mock.patch.object(utils, 'execute', autospec=True) def test_wait_for_disk_to_become_available_dev_becomes_avail_psmisc( self, mock_exc): # Test that initially device is not available but then becomes # available. This version has the 'psmisc' version of 'fuser' values # for stdout and stderr. # The 'psmisc' version has a leading space character in stdout. The # filename is output to stderr mock_exc.side_effect = [(' 1234 ', 'fake-dev: '), ('', '')] utils.wait_for_disk_to_become_available('fake-dev') fuser_cmd = ['fuser', 'fake-dev'] fuser_call = mock.call(*fuser_cmd, run_as_root=True, check_exit_code=[0, 1]) self.assertEqual(2, mock_exc.call_count) mock_exc.assert_has_calls([fuser_call, fuser_call]) @mock.patch.object(utils, 'execute', autospec=True) def test_wait_for_disk_to_become_available_dev_becomes_avail_busybox( self, mock_exc): # Test that initially device is not available but then becomes # available. This version has the 'busybox' version of 'fuser' values # for stdout and stderr. # The 'busybox' version does not have a leading space character in # stdout. Also nothing is output to stderr. mock_exc.side_effect = [('1234 5895', ''), ('', '')] utils.wait_for_disk_to_become_available('fake-dev') fuser_cmd = ['fuser', 'fake-dev'] fuser_call = mock.call(*fuser_cmd, run_as_root=True, check_exit_code=[0, 1]) self.assertEqual(2, mock_exc.call_count) mock_exc.assert_has_calls([fuser_call, fuser_call]) @mock.patch.object(utils, 'execute', autospec=True) class GetRouteSourceTestCase(base.IronicLibTestCase): def test_get_route_source_ipv4(self, mock_execute): mock_execute.return_value = ('XXX src 1.2.3.4 XXX\n cache', None) source = utils.get_route_source('XXX') self.assertEqual('1.2.3.4', source) def test_get_route_source_ipv6(self, mock_execute): mock_execute.return_value = ('XXX src 1:2::3:4 metric XXX\n cache', None) source = utils.get_route_source('XXX') self.assertEqual('1:2::3:4', source) def test_get_route_source_ipv6_linklocal(self, mock_execute): mock_execute.return_value = ( 'XXX src fe80::1234:1234:1234:1234 metric XXX\n cache', None) source = utils.get_route_source('XXX') self.assertIsNone(source) def test_get_route_source_ipv6_linklocal_allowed(self, mock_execute): mock_execute.return_value = ( 'XXX src fe80::1234:1234:1234:1234 metric XXX\n cache', None) source = utils.get_route_source('XXX', ignore_link_local=False) self.assertEqual('fe80::1234:1234:1234:1234', source) def test_get_route_source_indexerror(self, mock_execute): mock_execute.return_value = ('XXX src \n cache', None) source = utils.get_route_source('XXX') self.assertIsNone(source) @mock.patch('shutil.rmtree', autospec=True) @mock.patch.object(utils, 'execute', autospec=True) @mock.patch('tempfile.mkdtemp', autospec=True) class MountedTestCase(base.IronicLibTestCase): def test_temporary(self, mock_temp, mock_execute, mock_rmtree): with utils.mounted('/dev/fake') as path: self.assertIs(path, mock_temp.return_value) mock_execute.assert_has_calls([ mock.call("mount", '/dev/fake', mock_temp.return_value, run_as_root=True, attempts=1, delay_on_retry=True), mock.call("umount", mock_temp.return_value, run_as_root=True, attempts=3, delay_on_retry=True), ]) 
mock_rmtree.assert_called_once_with(mock_temp.return_value) def test_with_dest(self, mock_temp, mock_execute, mock_rmtree): with utils.mounted('/dev/fake', '/mnt/fake') as path: self.assertEqual('/mnt/fake', path) mock_execute.assert_has_calls([ mock.call("mount", '/dev/fake', '/mnt/fake', run_as_root=True, attempts=1, delay_on_retry=True), mock.call("umount", '/mnt/fake', run_as_root=True, attempts=3, delay_on_retry=True), ]) self.assertFalse(mock_temp.called) self.assertFalse(mock_rmtree.called) def test_with_opts(self, mock_temp, mock_execute, mock_rmtree): with utils.mounted('/dev/fake', '/mnt/fake', opts=['ro', 'foo=bar']) as path: self.assertEqual('/mnt/fake', path) mock_execute.assert_has_calls([ mock.call("mount", '/dev/fake', '/mnt/fake', '-o', 'ro,foo=bar', run_as_root=True, attempts=1, delay_on_retry=True), mock.call("umount", '/mnt/fake', run_as_root=True, attempts=3, delay_on_retry=True), ]) def test_with_type(self, mock_temp, mock_execute, mock_rmtree): with utils.mounted('/dev/fake', '/mnt/fake', fs_type='iso9660') as path: self.assertEqual('/mnt/fake', path) mock_execute.assert_has_calls([ mock.call("mount", '/dev/fake', '/mnt/fake', '-t', 'iso9660', run_as_root=True, attempts=1, delay_on_retry=True), mock.call("umount", '/mnt/fake', run_as_root=True, attempts=3, delay_on_retry=True), ]) def test_failed_to_mount(self, mock_temp, mock_execute, mock_rmtree): mock_execute.side_effect = OSError self.assertRaises(OSError, utils.mounted('/dev/fake').__enter__) mock_execute.assert_called_once_with("mount", '/dev/fake', mock_temp.return_value, run_as_root=True, attempts=1, delay_on_retry=True) mock_rmtree.assert_called_once_with(mock_temp.return_value) def test_failed_to_unmount(self, mock_temp, mock_execute, mock_rmtree): mock_execute.side_effect = [('', ''), processutils.ProcessExecutionError] with utils.mounted('/dev/fake', '/mnt/fake') as path: self.assertEqual('/mnt/fake', path) mock_execute.assert_has_calls([ mock.call("mount", '/dev/fake', '/mnt/fake', run_as_root=True, attempts=1, delay_on_retry=True), mock.call("umount", '/mnt/fake', run_as_root=True, attempts=3, delay_on_retry=True), ]) self.assertFalse(mock_rmtree.called) class ParseDeviceTagsTestCase(base.IronicLibTestCase): def test_empty(self): result = utils.parse_device_tags("\n\n") self.assertEqual([], list(result)) def test_parse(self): tags = """ PTUUID="00016a50" PTTYPE="dos" LABEL="" TYPE="vfat" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="EFI System Partition" """ result = list(utils.parse_device_tags(tags)) self.assertEqual([ {'PTUUID': '00016a50', 'PTTYPE': 'dos', 'LABEL': ''}, {'TYPE': 'vfat', 'PART_ENTRY_SCHEME': 'gpt', 'PART_ENTRY_NAME': 'EFI System Partition'} ], result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/utils.py0000664000175000017500000006500600000000000017423 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" import contextlib import copy import errno import ipaddress import logging import os import re import shlex import shutil import tempfile from urllib import parse as urlparse from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import excutils from oslo_utils import specs_matcher from oslo_utils import strutils from oslo_utils import units import tenacity from ironic_lib.common.i18n import _ from ironic_lib import exception utils_opts = [ cfg.StrOpt('root_helper', default='sudo ironic-rootwrap /etc/ironic/rootwrap.conf', help='Command that is prefixed to commands that are run as ' 'root. If not specified, no commands are run as root.'), ] CONF = cfg.CONF CONF.register_opts(utils_opts, group='ironic_lib') LOG = logging.getLogger(__name__) # A dictionary in the form {hint name: hint type} VALID_ROOT_DEVICE_HINTS = { 'size': int, 'model': str, 'wwn': str, 'serial': str, 'vendor': str, 'wwn_with_extension': str, 'wwn_vendor_extension': str, 'name': str, 'rotational': bool, 'hctl': str, 'by_path': str, } ROOT_DEVICE_HINTS_GRAMMAR = specs_matcher.make_grammar() def execute(*cmd, use_standard_locale=False, log_stdout=True, **kwargs): """Convenience wrapper around oslo's execute() method. Executes and logs results from a system command. See docs for oslo_concurrency.processutils.execute for usage. :param cmd: positional arguments to pass to processutils.execute() :param use_standard_locale: Defaults to False. If set to True, execute command with standard locale added to environment variables. :param log_stdout: Defaults to True. If set to True, logs the output. :param kwargs: keyword arguments to pass to processutils.execute() :returns: (stdout, stderr) from process execution :raises: UnknownArgumentError on receiving unknown arguments :raises: ProcessExecutionError :raises: OSError """ if use_standard_locale: env = kwargs.pop('env_variables', os.environ.copy()) env['LC_ALL'] = 'C' kwargs['env_variables'] = env # If root_helper config is not specified, no commands are run as root. 
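    # Illustration (not part of the upstream code; the sgdisk command is
    # hypothetical): with the default root_helper shown above, a call
    # such as
    #     utils.execute('sgdisk', '-Z', '/dev/sda', run_as_root=True)
    # is executed as
    #     sudo ironic-rootwrap /etc/ironic/rootwrap.conf sgdisk -Z /dev/sda
    # while with root_helper unset the run_as_root flag is silently
    # dropped and the command runs unprivileged, as the code below shows.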
run_as_root = kwargs.get('run_as_root', False) if run_as_root: if not CONF.ironic_lib.root_helper: kwargs['run_as_root'] = False else: kwargs['root_helper'] = CONF.ironic_lib.root_helper def _log(stdout, stderr): if log_stdout: try: LOG.debug('Command stdout is: "%s"', stdout) except UnicodeEncodeError: LOG.debug('stdout contains invalid UTF-8 characters') stdout = (stdout.encode('utf8', 'surrogateescape') .decode('utf8', 'ignore')) LOG.debug('Command stdout is: "%s"', stdout) try: LOG.debug('Command stderr is: "%s"', stderr) except UnicodeEncodeError: LOG.debug('stderr contains invalid UTF-8 characters') stderr = (stderr.encode('utf8', 'surrogateescape') .decode('utf8', 'ignore')) LOG.debug('Command stderr is: "%s"', stderr) try: result = processutils.execute(*cmd, **kwargs) except FileNotFoundError: with excutils.save_and_reraise_exception(): LOG.debug('Command not found: "%s"', ' '.join(map(str, cmd))) except processutils.ProcessExecutionError as exc: with excutils.save_and_reraise_exception(): _log(exc.stdout, exc.stderr) else: _log(result[0], result[1]) return result def try_execute(*cmd, **kwargs): """The same as execute but returns None on error. Executes and logs results from a system command. See docs for oslo_concurrency.processutils.execute for usage. Instead of raising an exception on failure, this method simply returns None in case of failure. :param cmd: positional arguments to pass to processutils.execute() :param kwargs: keyword arguments to pass to processutils.execute() :raises: UnknownArgumentError on receiving unknown arguments :returns: tuple of (stdout, stderr) or None in some error cases """ try: return execute(*cmd, **kwargs) except (processutils.ProcessExecutionError, OSError) as e: LOG.debug('Command failed: %s', e) def mkfs(fs, path, label=None): """Format a file or block device :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4' 'btrfs', etc.) :param path: Path to file or block device to format :param label: Volume label to use """ if fs == 'swap': args = ['mkswap'] else: args = ['mkfs', '-t', fs] # add -F to force no interactive execute on non-block device. if fs in ('ext3', 'ext4'): args.extend(['-F']) if label: if fs in ('msdos', 'vfat'): label_opt = '-n' else: label_opt = '-L' args.extend([label_opt, label]) args.append(path) try: execute(*args, run_as_root=True, use_standard_locale=True) except processutils.ProcessExecutionError as e: with excutils.save_and_reraise_exception() as ctx: if os.strerror(errno.ENOENT) in e.stderr: ctx.reraise = False LOG.exception('Failed to make file system. ' 'File system %s is not supported.', fs) raise exception.FileSystemNotSupported(fs=fs) else: LOG.exception('Failed to create a file system ' 'in %(path)s. Error: %(error)s', {'path': path, 'error': e}) def unlink_without_raise(path): try: os.unlink(path) except OSError as e: if e.errno == errno.ENOENT: return else: LOG.warning("Failed to unlink %(path)s, error: %(e)s", {'path': path, 'e': e}) def dd(src, dst, *args): """Execute dd from src to dst. :param src: the input file for dd command. :param dst: the output file for dd command. :param args: a tuple containing the arguments to be passed to dd command. :raises: processutils.ProcessExecutionError if it failed to run the process. 
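    Example (illustrative only; the source image and target device are
    hypothetical)::

        dd('/tmp/image.raw', '/dev/sdb', 'bs=1M', 'oflag=direct')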
""" LOG.debug("Starting dd process.") execute('dd', 'if=%s' % src, 'of=%s' % dst, *args, use_standard_locale=True, run_as_root=True) def is_http_url(url): url = url.lower() return url.startswith('http://') or url.startswith('https://') def list_opts(): """Entry point for oslo-config-generator.""" return [('ironic_lib', utils_opts)] def _extract_hint_operator_and_values(hint_expression, hint_name): """Extract the operator and value(s) of a root device hint expression. A root device hint expression could contain one or more values depending on the operator. This method extracts the operator and value(s) and returns a dictionary containing both. :param hint_expression: The hint expression string containing value(s) and operator (optionally). :param hint_name: The name of the hint. Used for logging. :raises: ValueError if the hint_expression is empty. :returns: A dictionary containing: :op: The operator. An empty string in case of None. :values: A list of values stripped and converted to lowercase. """ expression = str(hint_expression).strip().lower() if not expression: raise ValueError( _('Root device hint "%s" expression is empty') % hint_name) # parseString() returns a list of tokens which the operator (if # present) is always the first element. ast = ROOT_DEVICE_HINTS_GRAMMAR.parseString(expression) if len(ast) <= 1: # hint_expression had no operator return {'op': '', 'values': [expression]} op = ast[0] return {'values': [v.strip() for v in re.split(op, expression) if v], 'op': op} def _normalize_hint_expression(hint_expression, hint_name): """Normalize a string type hint expression. A string-type hint expression contains one or more operators and one or more values: [] [ ]*. This normalizes the values by url-encoding white spaces and special characters. The operators are not normalized. For example: the hint value of " foo bar bar" will become " foo%20bar bar". :param hint_expression: The hint expression string containing value(s) and operator (optionally). :param hint_name: The name of the hint. Used for logging. :raises: ValueError if the hint_expression is empty. :returns: A normalized string. """ hdict = _extract_hint_operator_and_values(hint_expression, hint_name) result = hdict['op'].join([' %s ' % urlparse.quote(t) for t in hdict['values']]) return (hdict['op'] + result).strip() def _append_operator_to_hints(root_device): """Add an equal (s== or ==) operator to the hints. For backwards compatibility, for root device hints where no operator means equal, this method adds the equal operator to the hint. This is needed when using oslo.utils.specs_matcher methods. :param root_device: The root device hints dictionary. """ for name, expression in root_device.items(): # NOTE(lucasagomes): The specs_matcher from oslo.utils does not # support boolean, so we don't need to append any operator # for it. if VALID_ROOT_DEVICE_HINTS[name] is bool: continue expression = str(expression) ast = ROOT_DEVICE_HINTS_GRAMMAR.parseString(expression) if len(ast) > 1: continue op = 's== %s' if VALID_ROOT_DEVICE_HINTS[name] is str else '== %s' root_device[name] = op % expression return root_device def parse_root_device_hints(root_device): """Parse the root_device property of a node. Parses and validates the root_device property of a node. These are hints for how a node's root device is created. The 'size' hint should be a positive integer. The 'rotational' hint should be a Boolean value. :param root_device: the root_device dictionary from the node's property. 
:returns: a dictionary with the root device hints parsed or None if there are no hints. :raises: ValueError, if some information is invalid. """ if not root_device: return root_device = copy.deepcopy(root_device) invalid_hints = set(root_device) - set(VALID_ROOT_DEVICE_HINTS) if invalid_hints: raise ValueError( _('The hints "%(invalid_hints)s" are invalid. ' 'Valid hints are: "%(valid_hints)s"') % {'invalid_hints': ', '.join(invalid_hints), 'valid_hints': ', '.join(VALID_ROOT_DEVICE_HINTS)}) for name, expression in root_device.items(): hint_type = VALID_ROOT_DEVICE_HINTS[name] if hint_type is str: if not isinstance(expression, str): raise ValueError( _('Root device hint "%(name)s" is not a string value. ' 'Hint expression: %(expression)s') % {'name': name, 'expression': expression}) root_device[name] = _normalize_hint_expression(expression, name) elif hint_type is int: for v in _extract_hint_operator_and_values(expression, name)['values']: try: integer = int(v) except ValueError: raise ValueError( _('Root device hint "%(name)s" is not an integer ' 'value. Current value: %(expression)s') % {'name': name, 'expression': expression}) if integer <= 0: raise ValueError( _('Root device hint "%(name)s" should be a positive ' 'integer. Current value: %(expression)s') % {'name': name, 'expression': expression}) elif hint_type is bool: try: root_device[name] = strutils.bool_from_string( expression, strict=True) except ValueError: raise ValueError( _('Root device hint "%(name)s" is not a Boolean value. ' 'Current value: %(expression)s') % {'name': name, 'expression': expression}) return _append_operator_to_hints(root_device) def find_devices_by_hints(devices, root_device_hints): """Find all devices that match the root device hints. Try to find devices that match the root device hints. In order for a device to be matched it needs to satisfy all the given hints. :param devices: A list of dictionaries representing the devices containing one or more of the following keys: :name: (String) The device name, e.g. /dev/sda :size: (Integer) Size of the device in *bytes* :model: (String) Device model :vendor: (String) Device vendor name :serial: (String) Device serial number :wwn: (String) Unique storage identifier :wwn_with_extension: (String): Unique storage identifier with the vendor extension appended :wwn_vendor_extension: (String): Unique vendor storage identifier :rotational: (Boolean) Whether it's a rotational device or not. Useful to distinguish HDDs (rotational) and SSDs (not rotational). :hctl: (String): The SCSI address: Host, channel, target and lun. For example: '1:0:0:0'. :by_path: (String): The alternative device name, e.g. /dev/disk/by-path/pci-0000:00 :param root_device_hints: A dictionary with the root device hints. :raises: ValueError, if some information is invalid. :returns: A generator with all matching devices as dictionaries. """ LOG.debug('Trying to find devices from "%(devs)s" that match the ' 'device hints "%(hints)s"', {'devs': ', '.join([d.get('name') for d in devices]), 'hints': root_device_hints}) parsed_hints = parse_root_device_hints(root_device_hints) for dev in devices: device_name = dev.get('name') for hint in parsed_hints: hint_type = VALID_ROOT_DEVICE_HINTS[hint] device_value = dev.get(hint) hint_value = parsed_hints[hint] if hint_type is str: try: device_value = _normalize_hint_expression(device_value, hint) except ValueError: LOG.warning( 'The attribute "%(attr)s" of the device "%(dev)s" ' 'has an empty value. 
Skipping device.', {'attr': hint, 'dev': device_name}) break if hint == 'size': # Since we don't support units yet we expect the size # in GiB for now device_value = device_value / units.Gi LOG.debug('Trying to match the device hint "%(hint)s" ' 'with a value of "%(hint_value)s" against the same ' 'device\'s (%(dev)s) attribute with a value of ' '"%(dev_value)s"', {'hint': hint, 'dev': device_name, 'hint_value': hint_value, 'dev_value': device_value}) # NOTE(lucasagomes): Boolean hints are not supported by # specs_matcher.match(), so we need to do the comparison # ourselves if hint_type is bool: try: device_value = strutils.bool_from_string(device_value, strict=True) except ValueError: LOG.warning('The attribute "%(attr)s" (with value ' '"%(value)s") of device "%(dev)s" is not ' 'a valid Boolean. Skipping device.', {'attr': hint, 'value': device_value, 'dev': device_name}) break if device_value == hint_value: continue elif specs_matcher.match(device_value, hint_value): continue LOG.debug('The attribute "%(attr)s" (with value "%(value)s") ' 'of device "%(dev)s" does not match the hint %(hint)s', {'attr': hint, 'value': device_value, 'dev': device_name, 'hint': hint_value}) break else: yield dev def match_root_device_hints(devices, root_device_hints): """Try to find a device that matches the root device hints. Try to find a device that matches the root device hints. In order for a device to be matched it needs to satisfy all the given hints. :param devices: A list of dictionaries representing the devices containing one or more of the following keys: :name: (String) The device name, e.g. /dev/sda :size: (Integer) Size of the device in *bytes* :model: (String) Device model :vendor: (String) Device vendor name :serial: (String) Device serial number :wwn: (String) Unique storage identifier :wwn_with_extension: (String): Unique storage identifier with the vendor extension appended :wwn_vendor_extension: (String): Unique vendor storage identifier :rotational: (Boolean) Whether it's a rotational device or not. Useful to distinguish HDDs (rotational) and SSDs (not rotational). :hctl: (String): The SCSI address: Host, channel, target and lun. For example: '1:0:0:0'. :by_path: (String): The alternative device name, e.g. /dev/disk/by-path/pci-0000:00 :param root_device_hints: A dictionary with the root device hints. :raises: ValueError, if some information is invalid. :returns: The first device to match all the hints or None. """ try: dev = next(find_devices_by_hints(devices, root_device_hints)) except StopIteration: LOG.warning('No device found that matches the root device hints %s', root_device_hints) else: LOG.info('Root device found! The device "%s" matches the root ' 'device hints %s', dev, root_device_hints) return dev def wait_for_disk_to_become_available(device): """Wait for a disk device to become available. Waits for a disk device to become available for use by waiting until all process locks on the device have been released. Timeout and iteration settings come from the configuration options used by the in-library disk_partitioner: ``check_device_interval`` and ``check_device_max_retries``. :param device: The path to the device. :raises: IronicException if the disk fails to become available. """ pids = [''] stderr = [''] interval = CONF.disk_partitioner.check_device_interval max_retries = CONF.disk_partitioner.check_device_max_retries def _wait_for_disk(): # A regex is likely overkill here, but variations in fuser # mean we should likely use it. 
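        # Illustration (sample values taken from this library's unit
        # tests, not produced at runtime): the psmisc fuser reports
        # (' 15503 3919 15510 15511', 'fake-dev:') while the busybox
        # fuser reports ('15503 3919 15510 15511', ''), so the regex
        # below extracts the PID list from either stdout format.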
fuser_pids_re = re.compile(r'\d+') # There are 'psmisc' and 'busybox' versions of the 'fuser' program. The # 'fuser' programs differ in how they output data to stderr. The # busybox version does not output the filename to stderr, while the # standard 'psmisc' version does output the filename to stderr. How # they output to stdout is almost identical in that only the PIDs are # output to stdout, with the 'psmisc' version adding a leading space # character to the list of PIDs. try: # NOTE(ifarkas): fuser returns a non-zero return code if none of # the specified files is accessed. # NOTE(TheJulia): fuser does not report LVM devices as in use # unless the LVM device-mapper device is the # device that is directly polled. # NOTE(TheJulia): The -m flag allows fuser to reveal data about # mounted filesystems, which should be considered # busy/locked. That being said, it is not used # because busybox fuser has a different behavior. # NOTE(TheJulia): fuser outputs a list of found PIDs to stdout. # All other text is returned via stderr, and the # output to a terminal is merged as a result. out, err = execute('fuser', device, check_exit_code=[0, 1], run_as_root=True) if not out and not err: return True stderr[0] = err # NOTE: findall() returns a list of matches, or an empty list if # there are no matches. pids[0] = fuser_pids_re.findall(out) except processutils.ProcessExecutionError as exc: LOG.warning('Failed to check the device %(device)s with fuser:' ' %(err)s', {'device': device, 'err': exc}) return False retry = tenacity.retry( retry=tenacity.retry_if_result(lambda r: not r), stop=tenacity.stop_after_attempt(max_retries), wait=tenacity.wait_fixed(interval), reraise=True) try: retry(_wait_for_disk)() except tenacity.RetryError: if pids[0]: raise exception.IronicException( _('Processes with the following PIDs are holding ' 'device %(device)s: %(pids)s. ' 'Timed out waiting for completion.') % {'device': device, 'pids': ', '.join(pids[0])}) else: raise exception.IronicException( _('Fuser exited with "%(fuser_err)s" while checking ' 'locks for device %(device)s. Timed out waiting for ' 'completion.') % {'device': device, 'fuser_err': stderr[0]}) def get_route_source(dest, ignore_link_local=True): """Get the IP address used to send packets to the destination.""" try: out, _err = execute('ip', 'route', 'get', dest) except (EnvironmentError, processutils.ProcessExecutionError) as e: LOG.warning('Cannot get route to host %(dest)s: %(err)s', {'dest': dest, 'err': e}) return try: source = out.strip().split('\n')[0].split('src')[1].split()[0] if (ipaddress.ip_address(source).is_link_local and ignore_link_local): LOG.debug('Ignoring link-local source to %(dest)s: %(rec)s', {'dest': dest, 'rec': out}) return return source except (IndexError, ValueError): LOG.debug('No route to host %(dest)s, route record: %(rec)s', {'dest': dest, 'rec': out}) @contextlib.contextmanager def mounted(source, dest=None, opts=None, fs_type=None, mount_attempts=1, umount_attempts=3): """A context manager for a temporary mount. :param source: A device to mount. :param dest: Mount destination. If not specified, a temporary directory will be created and removed afterwards. An existing destination is not removed. :param opts: Mount options (``-o`` argument). :param fs_type: File system type (``-t`` argument). :param mount_attempts: A number of attempts to mount the device. :param umount_attempts: A number of attempts to unmount the device. :returns: A generator yielding the destination. 
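    Example (a minimal sketch; the device, filesystem type and usage
    are illustrative)::

        with mounted('/dev/sda1', fs_type='ext4') as path:
            contents = os.listdir(path)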
""" params = [] if opts: params.extend(['-o', ','.join(opts)]) if fs_type: params.extend(['-t', fs_type]) if dest is None: dest = tempfile.mkdtemp() clean_up = True else: clean_up = False mounted = False try: execute("mount", source, dest, *params, run_as_root=True, attempts=mount_attempts, delay_on_retry=True) mounted = True yield dest finally: if mounted: try: execute("umount", dest, run_as_root=True, attempts=umount_attempts, delay_on_retry=True) except (EnvironmentError, processutils.ProcessExecutionError) as exc: LOG.warning( 'Unable to unmount temporary location %(dest)s: %(err)s', {'dest': dest, 'err': exc}) # NOTE(dtantsur): don't try to remove a still mounted location clean_up = False if clean_up: try: shutil.rmtree(dest) except EnvironmentError as exc: LOG.warning( 'Unable to remove temporary location %(dest)s: %(err)s', {'dest': dest, 'err': exc}) def parse_device_tags(output): """Parse tags from the lsblk/blkid output. Parses format KEY="VALUE" KEY2="VALUE2". :return: a generator yielding dicts with information from each line. """ for line in output.strip().split('\n'): if line.strip(): try: yield {key: value for key, value in (v.split('=', 1) for v in shlex.split(line))} except ValueError as err: raise ValueError( _("Malformed blkid/lsblk output line '%(line)s': %(err)s") % {'line': line, 'err': err}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334165.0 ironic-lib-6.2.0/ironic_lib/wsgi.py0000664000175000017500000000453300000000000017232 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket from oslo_config import cfg from oslo_service import service from oslo_service import wsgi from ironic_lib import utils CONF = cfg.CONF class WSGIService(service.ServiceBase): def __init__(self, name, app, conf): """Initialize, but do not start the WSGI server. :param name: The name of the WSGI server given to the loader. :param app: WSGI application to run. :param conf: Object to load configuration from. :returns: None """ self.name = name self._conf = conf if conf.unix_socket: utils.unlink_without_raise(conf.unix_socket) self.server = wsgi.Server(CONF, name, app, socket_family=socket.AF_UNIX, socket_file=conf.unix_socket, socket_mode=conf.unix_socket_mode, use_ssl=conf.use_ssl) else: self.server = wsgi.Server(CONF, name, app, host=conf.host_ip, port=conf.port, use_ssl=conf.use_ssl) def start(self): """Start serving this service using loaded configuration. :returns: None """ self.server.start() def stop(self): """Stop serving this API. :returns: None """ self.server.stop() if self._conf.unix_socket: utils.unlink_without_raise(self._conf.unix_socket) def wait(self): """Wait for the service to stop serving this API. :returns: None """ self.server.wait() def reset(self): """Reset server greenpool size to default. 
:returns: None """ self.server.reset() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1724334195.7696767 ironic-lib-6.2.0/ironic_lib.egg-info/0000775000175000017500000000000000000000000017374 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334195.0 ironic-lib-6.2.0/ironic_lib.egg-info/PKG-INFO0000664000175000017500000000345100000000000020474 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: ironic-lib Version: 6.2.0 Summary: Ironic common library Home-page: https://docs.openstack.org/ironic-lib/ Author: OpenStack Ironic Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ========== ironic-lib ========== Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/ironic-lib.svg :target: https://governance.openstack.org/tc/reference/tags/index.html Overview -------- A common library to be used **exclusively** by projects under the `Ironic governance `_. Running Tests ------------- To run tests in virtualenvs (preferred):: $ sudo pip install tox $ tox To run tests in the current environment:: $ sudo pip install -r requirements.txt -r test-requirements.txt $ stestr run Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Requires-Python: >=3.8 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334195.0 ironic-lib-6.2.0/ironic_lib.egg-info/SOURCES.txt0000664000175000017500000000556100000000000021267 0ustar00zuulzuul00000000000000.stestr.conf AUTHORS CONTRIBUTING.rst ChangeLog LICENSE MANIFEST.in README.rst TESTING.rst bindep.txt extra-requirements.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/contributor/index.rst doc/source/reference/index.rst etc/ironic/rootwrap.d/ironic-lib.filters ironic_lib/__init__.py ironic_lib/auth_basic.py ironic_lib/capabilities.py ironic_lib/disk_partitioner.py ironic_lib/disk_utils.py ironic_lib/exception.py ironic_lib/keystone.py ironic_lib/mdns.py ironic_lib/metrics.py ironic_lib/metrics_collector.py ironic_lib/metrics_statsd.py ironic_lib/metrics_utils.py ironic_lib/qemu_img.py ironic_lib/utils.py ironic_lib/wsgi.py ironic_lib.egg-info/PKG-INFO ironic_lib.egg-info/SOURCES.txt ironic_lib.egg-info/dependency_links.txt ironic_lib.egg-info/entry_points.txt ironic_lib.egg-info/not-zip-safe ironic_lib.egg-info/pbr.json ironic_lib.egg-info/requires.txt ironic_lib.egg-info/top_level.txt ironic_lib/common/__init__.py ironic_lib/common/config.py ironic_lib/common/i18n.py ironic_lib/json_rpc/__init__.py ironic_lib/json_rpc/client.py ironic_lib/json_rpc/server.py ironic_lib/tests/__init__.py ironic_lib/tests/base.py ironic_lib/tests/test_base.py ironic_lib/tests/test_basic_auth.py 
ironic_lib/tests/test_capabilities.py ironic_lib/tests/test_disk_partitioner.py ironic_lib/tests/test_disk_utils.py ironic_lib/tests/test_exception.py ironic_lib/tests/test_json_rpc.py ironic_lib/tests/test_keystone.py ironic_lib/tests/test_mdns.py ironic_lib/tests/test_metrics.py ironic_lib/tests/test_metrics_collector.py ironic_lib/tests/test_metrics_statsd.py ironic_lib/tests/test_metrics_utils.py ironic_lib/tests/test_qemu_img.py ironic_lib/tests/test_utils.py releasenotes/notes/add-additional-disk-conversion-retry-dfff93cbdf779f81.yaml releasenotes/notes/add-metrics-collection-b9549ec62ce4feda.yaml releasenotes/notes/add-port-to-rpc-client-2f2f0cd60547843f.yaml releasenotes/notes/basic-auth-middleware-e5af29651b2d7979.yaml releasenotes/notes/conver-str-to-bytes-11a665d0fa8828ec.yaml releasenotes/notes/drop-py-2-7-3c01e37309077c06.yaml releasenotes/notes/erase-tiny-partitions-c408a3a4afe60d44.yaml releasenotes/notes/extend-list-partitions-b71f81c77f6ecfdb.yaml releasenotes/notes/fix-dd-async-gpt-erasure-bbc6b084b0344d30.yaml releasenotes/notes/fix-metadisk-partitioning-32d3fca274290dd6.yaml releasenotes/notes/increase-efi-partition-size-9479d069b17804ce.yaml releasenotes/notes/json_rpc-allowed_roles-3bee50b5936c2be3.yaml releasenotes/notes/remove-iscsi-verify-attempts-ff9eb5b7a28e6143.yaml releasenotes/notes/rescan-for-partition-write-out-3fbb92ae5c2a33c6.yaml releasenotes/notes/restore-centos7-compatibility-bfbe2bcf1d1fb7f0.yaml releasenotes/notes/support-4096-sector-size-aa479b4040399975.yaml releasenotes/notes/wipe-gpt-on-metadata-wipe-ac0a93b16e00893f.yaml zuul.d/ironic-lib-jobs.yaml zuul.d/project.yaml././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334195.0 ironic-lib-6.2.0/ironic_lib.egg-info/dependency_links.txt0000664000175000017500000000000100000000000023442 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334195.0 ironic-lib-6.2.0/ironic_lib.egg-info/entry_points.txt0000664000175000017500000000100000000000000022661 0ustar00zuulzuul00000000000000[oslo.config.opts] ironic_lib.disk_partitioner = ironic_lib.disk_partitioner:list_opts ironic_lib.disk_utils = ironic_lib.disk_utils:list_opts ironic_lib.exception = ironic_lib.exception:list_opts ironic_lib.json_rpc = ironic_lib.json_rpc:list_opts ironic_lib.mdns = ironic_lib.mdns:list_opts ironic_lib.metrics = ironic_lib.metrics_utils:list_opts ironic_lib.metrics_statsd = ironic_lib.metrics_statsd:list_opts ironic_lib.qemu_img = ironic_lib.qemu_img:list_opts ironic_lib.utils = ironic_lib.utils:list_opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334195.0 ironic-lib-6.2.0/ironic_lib.egg-info/not-zip-safe0000664000175000017500000000000100000000000021622 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334195.0 ironic-lib-6.2.0/ironic_lib.egg-info/pbr.json0000664000175000017500000000005600000000000021053 0ustar00zuulzuul00000000000000{"git_version": "1ca3c8c", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1724334195.0 ironic-lib-6.2.0/ironic_lib.egg-info/requires.txt0000664000175000017500000000023000000000000021767 0ustar00zuulzuul00000000000000WebOb>=1.7.1 bcrypt>=3.1.3 oslo.concurrency>=3.26.0 oslo.config>=5.2.0 oslo.i18n>=3.15.3 oslo.utils>=3.34.0 pbr>=2.0.0 tenacity>=6.2.0 zeroconf>=0.24.0 
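The ``oslo.config.opts`` entry points listed above are how oslo-config-generator discovers this library's option definitions; the same hooks can also be called directly. A minimal sketch (assuming ironic-lib and oslo.config are installed) using the ``list_opts`` hook defined in ironic_lib/utils.py:

import ironic_lib.utils

# Each oslo.config.opts hook returns [(group, [opt, ...]), ...].
for group, opts in ironic_lib.utils.list_opts():
    for opt in opts:
        print(group, opt.name, opt.default)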
ironic-lib-6.2.0/ironic_lib.egg-info/top_level.txt
ironic_lib

ironic-lib-6.2.0/releasenotes/notes/add-additional-disk-conversion-retry-dfff93cbdf779f81.yaml
---
fixes:
  - |
    Adds an additional error to look for in the ``qemu-img`` image
    conversion retry logic to automatically retry if 'Cannot allocate
    memory' is encountered. ``qemu-img`` makes a number of memory
    allocation requests; the most likely to fail is the one made when
    creating the conversion thread, resulting in
    'qemu: qemu_thread_create_: Resource temporarily unavailable', but
    other memory allocation failures can result in
    'Failed to allocate memory: Cannot allocate memory'. Both types of
    errors are now checked and automatically retried upon.

ironic-lib-6.2.0/releasenotes/notes/add-metrics-collection-b9549ec62ce4feda.yaml
---
features:
  - |
    Adds a new metrics collection backend, ``collector``, to collect
    counter, gauge, and timer information, enabling the application to
    access these statistics during process runtime. Adds a new metrics
    method ``get_metrics_data`` to allow the dictionary structure
    containing the metrics data to be accessed. This feature may be
    enabled by setting the ``[metrics]backend`` option to ``collector``.

ironic-lib-6.2.0/releasenotes/notes/add-port-to-rpc-client-2f2f0cd60547843f.yaml
---
features:
  - |
    Adds the capability for the ``json_rpc`` client to identify and
    utilize a specific port from the supplied ``topic`` field as opposed
    to the default configured port.

ironic-lib-6.2.0/releasenotes/notes/basic-auth-middleware-e5af29651b2d7979.yaml
---
features:
  - |
    Implements Basic HTTP authentication middleware. This middleware is
    added to ironic-lib so that it can eventually be used by ironic and
    ironic-inspector as an alternative to noauth in standalone
    environments. The middleware is passed the path to a file which
    follows the Apache htpasswd syntax [1]. This file is read for every
    request, so no service restart is required when changes are made.
    The only password digest supported is bcrypt, and the ``bcrypt``
    Python library is used for password checks, since it supports
    ``$2y$``-prefixed bcrypt passwords as generated by the Apache
    htpasswd utility.
    [1] https://httpd.apache.org/docs/current/misc/password_encryptions.html

ironic-lib-6.2.0/releasenotes/notes/conver-str-to-bytes-11a665d0fa8828ec.yaml
---
fixes:
  - |
    Fixes a Python 3 compatibility issue in metrics_statsd where strings
    need to be explicitly converted to bytes before being sent over a
    socket. See `Story 2007537
    <https://storyboard.openstack.org/#!/story/2007537>`_ for details.

ironic-lib-6.2.0/releasenotes/notes/drop-py-2-7-3c01e37309077c06.yaml
---
upgrade:
  - |
    Python 2.7 support has been dropped. The last release of ironic-lib
    to support Python 2.7 is OpenStack Train. The minimum version of
    Python now supported by ironic-lib is Python 3.6.

ironic-lib-6.2.0/releasenotes/notes/erase-tiny-partitions-c408a3a4afe60d44.yaml
---
fixes:
  - |
    Fixes cleaning errors when trying to erase a GPT from a partition
    which is smaller than a GPT (33 sectors).

ironic-lib-6.2.0/releasenotes/notes/extend-list-partitions-b71f81c77f6ecfdb.yaml
---
fixes:
  - |
    Fixes an issue when parsing GPT partitions with names or multiple
    flags. See `story 2005322
    <https://storyboard.openstack.org/#!/story/2005322>`_ for details.

ironic-lib-6.2.0/releasenotes/notes/fix-dd-async-gpt-erasure-bbc6b084b0344d30.yaml
---
fixes:
  - |
    Fixes a bug when erasing a partition table: the corresponding I/O
    needs to be synchronous in order to avoid masking failed write
    requests to broken devices.

ironic-lib-6.2.0/releasenotes/notes/fix-metadisk-partitioning-32d3fca274290dd6.yaml
---
fixes:
  - |
    Fixes an issue where incorrect partition naming was used for
    metadisk (md) devices. See `Story 2006154
    <https://storyboard.openstack.org/#!/story/2006154>`_ for details.

ironic-lib-6.2.0/releasenotes/notes/increase-efi-partition-size-9479d069b17804ce.yaml
---
other:
  - |
    The default size of EFI system partitions created when writing a
    partition image has been increased from 200 to 550 Megabytes. If
    this change is undesirable, please utilize the
    ``efi_system_partition_size`` configuration option. This value is
    now also consistent with the internal default used when creating ESP
    volumes for software RAID with ``ironic-python-agent``, and with the
    default carried by ``diskimage-builder``. The prime driver for the
    increased partition size is to enable OS-driven firmware updates and
    to provide appropriate space to house unikernels, which require
    additional space.
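The ``efi_system_partition_size`` option mentioned above can be overridden like any other oslo.config option; a hedged sketch (the ``disk_utils`` group name is an assumption based on this package's entry points, so verify it against your ironic-lib release):

import ironic_lib.disk_utils  # noqa: F401, registers the options
from oslo_config import cfg

# Assumed option group; the release note only names the option itself.
cfg.CONF.set_override('efi_system_partition_size', 200,
                      group='disk_utils')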
ironic-lib-6.2.0/releasenotes/notes/json_rpc-allowed_roles-3bee50b5936c2be3.yaml

---
features:
  - |
    The new ``[json_rpc] allowed_roles`` parameter has been added. This
    parameter determines the list of roles allowed to use JSON RPC.

ironic-lib-6.2.0/releasenotes/notes/remove-iscsi-verify-attempts-ff9eb5b7a28e6143.yaml

---
upgrade:
  - |
    The configuration option ``[disk_utils]iscsi_verify_attempts`` was
    deprecated in Train and is now removed. Please use the
    ``[disk_utils]partition_detection_attempts`` option instead.

ironic-lib-6.2.0/releasenotes/notes/rescan-for-partition-write-out-3fbb92ae5c2a33c6.yaml

---
fixes:
  - |
    Fixes an issue with the ``disk_utils`` method ``make_partitions``,
    which is used to facilitate the write-out of partition images to
    disk. Previously, when this method was invoked on a ramdisk, the
    partition might not have been found.

ironic-lib-6.2.0/releasenotes/notes/restore-centos7-compatibility-bfbe2bcf1d1fb7f0.yaml

---
fixes:
  - |
    Restores compatibility of the blkid command with CentOS 7. For more
    details see `Story 2009328
    <https://storyboard.openstack.org/#!/story/2009328>`_.

ironic-lib-6.2.0/releasenotes/notes/support-4096-sector-size-aa479b4040399975.yaml

---
fixes:
  - |
    Adds support for disks with a 4096 byte sector size when cleaning
    disk metadata. Previously, only disks with a 512 byte sector size
    were supported.

ironic-lib-6.2.0/releasenotes/notes/wipe-gpt-on-metadata-wipe-ac0a93b16e00893f.yaml

---
fixes:
  - |
    Fixes an issue where CRC errors in the GPT partition information
    would cause cleaning to fail. A similar issue was previously
    encountered with ironic-python-agent. See `Story 1737556
    <https://storyboard.openstack.org/#!/story/1737556>`_ for details.
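In configuration terms, the ``allowed_roles`` and
``partition_detection_attempts`` notes above translate to entries like the
following (values shown are illustrative, not necessarily the defaults)::

    [json_rpc]
    # New option: roles permitted to use the JSON RPC interface.
    allowed_roles = admin

    [disk_utils]
    # Replacement for the removed iscsi_verify_attempts option.
    partition_detection_attempts = 3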
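The 4096-sector note changes the metadata arithmetic: a backup GPT is one
header sector plus a standard 128-entry table of 128 bytes each, so its
total size depends on the sector size and must be computed rather than
hard-coded as '33 sectors'. A sketch of that calculation, assuming the
standard table size::

    def backup_gpt_bytes(sector_size):
        # 1 header sector plus a standard 128-entry table (128 bytes each).
        return sector_size + 128 * 128


    assert backup_gpt_bytes(512) == 16896    # the classic "33 sectors"
    assert backup_gpt_bytes(4096) == 20480   # 5 sectors on a 4K-native disk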
ironic-lib-6.2.0/requirements.txt

pbr>=2.0.0 # Apache-2.0
oslo.concurrency>=3.26.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0
oslo.i18n>=3.15.3 # Apache-2.0
oslo.utils>=3.34.0 # Apache-2.0
zeroconf>=0.24.0 # LGPL
bcrypt>=3.1.3 # Apache-2.0
WebOb>=1.7.1 # MIT
tenacity>=6.2.0 # Apache-2.0

ironic-lib-6.2.0/setup.cfg

[metadata]
name = ironic-lib
summary = Ironic common library
description_file = README.rst
author = OpenStack Ironic
author_email = openstack-discuss@lists.openstack.org
home_page = https://docs.openstack.org/ironic-lib/
python_requires = >=3.8
classifier =
    Environment :: OpenStack
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: Implementation :: CPython
    Programming Language :: Python :: 3 :: Only
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.8
    Programming Language :: Python :: 3.9
    Programming Language :: Python :: 3.10
    Programming Language :: Python :: 3.11

[files]
data_files =
    etc/ironic/rootwrap.d = etc/ironic/rootwrap.d/*
packages =
    ironic_lib

[entry_points]
oslo.config.opts =
    ironic_lib.disk_partitioner = ironic_lib.disk_partitioner:list_opts
    ironic_lib.disk_utils = ironic_lib.disk_utils:list_opts
    ironic_lib.exception = ironic_lib.exception:list_opts
    ironic_lib.json_rpc = ironic_lib.json_rpc:list_opts
    ironic_lib.mdns = ironic_lib.mdns:list_opts
    ironic_lib.metrics = ironic_lib.metrics_utils:list_opts
    ironic_lib.metrics_statsd = ironic_lib.metrics_statsd:list_opts
    ironic_lib.qemu_img = ironic_lib.qemu_img:list_opts
    ironic_lib.utils = ironic_lib.utils:list_opts

[extra]
keystone =
    keystoneauth1>=4.2.0 # Apache-2.0
    os-service-types>=1.2.0 # Apache-2.0
json_rpc =
    keystoneauth1>=4.2.0 # Apache-2.0
    os-service-types>=1.2.0 # Apache-2.0
    oslo.service!=1.28.1,>=1.24.0 # Apache-2.0

[codespell]
quiet-level = 4
ignore-words-list = crypted,assertIn

[egg_info]
tag_build =
tag_date = 0
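The ``oslo.config.opts`` entry points in ``setup.cfg`` exist so that
sample-configuration tooling can discover ironic-lib's option definitions
through each module's ``list_opts`` callable. An illustrative
``oslo-config-generator`` input file consuming some of those namespaces
(file name and selection are assumptions)::

    [DEFAULT]
    output_file = ironic-lib.conf.sample
    namespace = ironic_lib.disk_utils
    namespace = ironic_lib.json_rpc
    namespace = ironic_lib.metrics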
ironic-lib-6.2.0/setup.py

# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import setuptools

setuptools.setup(
    setup_requires=['pbr>=2.0.0'],
    pbr=True)

ironic-lib-6.2.0/test-requirements.txt

coverage>=4.0 # Apache-2.0
stestr>=2.0.0 # Apache-2.0
oslotest>=3.2.0 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD
# used for JSON RPC unit tests
keystonemiddleware>=4.17.0 # Apache-2.0
oslo.messaging>=5.29.0 # Apache-2.0

ironic-lib-6.2.0/tox.ini

[tox]
minversion = 4.4.0
envlist = py3,pep8
ignore_basepython_conflict = true

[testenv]
constrain_package_deps = true
usedevelop = True
setenv = VIRTUAL_ENV={envdir}
         PYTHONDONTWRITEBYTECODE = 1
         LANGUAGE=en_US
         TESTS_DIR=./ironic_lib/tests/
deps =
    -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
    -r{toxinidir}/test-requirements.txt
    -r{toxinidir}/extra-requirements.txt
    -r{toxinidir}/requirements.txt
commands = stestr run {posargs}

[flake8]
show-source = True
# [E129] Visually indented line with same indent as next logical line.
# [W503] Line break occurred before a binary operator. Conflicts with W504.
ignore = E129,W503
exclude = .*,dist,doc,*lib/python*,*egg,build
import-order-style = pep8
application-import-names = ironic_lib
# [H106] Don't put vim configuration in source files.
# [H203] Use assertIs(Not)None to check for None.
# [H204] Use assert(Not)Equal to check for equality.
# [H205] Use assert(Greater|Less)(Equal) for comparison.
# [H210] Require 'autospec', 'spec', or 'spec_set' in mock.patch/mock.patch.object calls
# [H904] Delay string interpolations at logging calls.
enable-extensions = H106,H203,H204,H205,H210,H904

[testenv:pep8]
deps =
    flake8-import-order~=0.18.0 # LGPLv3
    hacking~=6.1.0 # Apache-2.0
    pycodestyle>=2.0.0,<3.0.0 # MIT
    doc8~=1.1.0 # Apache 2.0
    -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
commands =
    flake8 {posargs}
    doc8 README.rst doc/source --ignore D001

[testenv:cover]
setenv = VIRTUALENV={envdir}
         LANGUAGE=en_US
         PYTHON=coverage run --source ironic_lib --omit='*tests*' --parallel-mode
commands =
    coverage erase
    stestr run {posargs}
    coverage combine
    coverage report --omit='*tests*'
    coverage html -d ./cover --omit='*tests*'

[testenv:venv]
commands = {posargs}

[testenv:docs]
deps =
    -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
    -r{toxinidir}/doc/requirements.txt
    -r{toxinidir}/extra-requirements.txt
commands = sphinx-build -W -b html doc/source doc/build/html

[testenv:pdf-docs]
deps = {[testenv:docs]deps}
allowlist_externals = make
commands =
    sphinx-build -b latex doc/source doc/build/pdf
    make -C doc/build/pdf

[testenv:codespell]
description = Run codespell to check spelling
deps = codespell
# note(adamcarthur): {posargs} lets us run `tox -ecodespell -- -w` to get codespell
# to correct spelling issues in our code it's aware of.
commands = codespell {posargs}
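In day-to-day use, the main entry points defined by this ``tox.ini`` are
``tox -e py3`` (unit tests via stestr), ``tox -e pep8`` (flake8/hacking plus
doc8), ``tox -e cover`` (coverage run with HTML output under ``./cover``),
and ``tox -e codespell`` (spell checking; per the inline note,
``tox -e codespell -- -w`` lets codespell write its corrections back).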
ironic-lib-6.2.0/zuul.d/ironic-lib-jobs.yaml

- job:
    name: ironic-lib-base
    parent: ironic-base
    irrelevant-files:
      - ^test-requirements.txt$
      - ^.*\.rst$
      - ^api-ref/.*$
      - ^doc/.*$
      - ^ironic_lib/tests/.*$
      - ^releasenotes/.*$
      - ^setup.cfg$
      - ^tools/.*$
      - ^tox.ini$
    required-projects:
      - openstack/ironic-lib
    vars:
      tempest_test_timeout: 1800
      devstack_localrc:
        BUILD_TIMEOUT: 900
        IRONIC_BUILD_DEPLOY_RAMDISK: True
        IRONIC_TEMPEST_BUILD_TIMEOUT: 900
        SWIFT_ENABLE_TEMPURLS: True
        SWIFT_TEMPURL_KEY: secretkey

- job:
    name: ironic-lib-uefi-ipmi-src
    parent: ironic-lib-base
    timeout: 7200
    vars:
      devstack_services:
        s-account: True
        s-container: True
        s-object: True
        s-proxy: True
      devstack_localrc:
        IRONIC_TEMPEST_WHOLE_DISK_IMAGE: True
        IRONIC_VM_EPHEMERAL_DISK: 0

- job:
    name: ironic-lib-bios-ipmi-src
    parent: ironic-lib-base
    timeout: 7200
    vars:
      devstack_services:
        s-account: True
        s-container: True
        s-object: True
        s-proxy: True
      devstack_localrc:
        IRONIC_BOOT_MODE: bios

- job:
    name: ironic-lib-tox-codespell
    parent: openstack-tox
    timeout: 7200
    vars:
      tox_envlist: codespell

ironic-lib-6.2.0/zuul.d/project.yaml

- project:
    templates:
      - check-requirements
      - openstack-cover-jobs
      - openstack-python3-antelope-jobs
      - publish-openstack-docs-pti
    check:
      jobs:
        - ironic-lib-uefi-ipmi-src
        - ironic-lib-bios-ipmi-src
        - ironic-lib-tox-codespell:
            voting: false
    gate:
      jobs:
        - ironic-lib-uefi-ipmi-src
        - ironic-lib-bios-ipmi-src
    post:
      jobs:
        - ironic-python-agent-build-image-tinyipa
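Taken together, the two Zuul files mean that every proposed change runs the
UEFI and BIOS devstack jobs in both the check and gate pipelines, while the
codespell job runs only in check and, being ``voting: false``, cannot block
a change; the tinyipa ramdisk image build runs post-merge.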