././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108936.1106672 ironic-20.1.0/0000775000175000017500000000000000000000000013100 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/.mailmap0000664000175000017500000000032100000000000014515 0ustar00zuulzuul00000000000000# Format is: # # Joe Gordon Aeva Black ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/.stestr.conf0000664000175000017500000000010200000000000015342 0ustar00zuulzuul00000000000000[DEFAULT] test_path=${TESTS_DIR:-./ironic/tests/unit/} top_dir=./ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108935.0 ironic-20.1.0/AUTHORS0000664000175000017500000005664600000000000014171 0ustar00zuulzuul00000000000000119Vik ANURADHAJHA99 Abhishek Kekane Adam Gandelman Adam Kimball Adam Young Aeva Black Aija Jaunteva Aija Jauntēva Akhila Kishore Akilan Pughazhendi Alberto Planas Alessandro Pilotti Alex Meade Alexander Gordeev Alexandra Settle Alexandra Settle Alexey Galkin Alexis Lee Aline Bousquet Ana Krivokapic Andrea Frittoli Andreas Jaeger Andreas Jaeger Andrew Bogott Andrey Kurilin Andrey Shestakov Angus Thomas Anh Tran Anita Kuno Ankit Kumar Anne Gentle Annie Lezil Anshul Jain Anson Y.W Anton Arefiev Anup Navare Anusha Ramineni Anusha Ramineni Aparna Arata Notsu Armando Migliaccio Armstrong Liu Arne Wiebalck Arne Wiebalck Artem Rozumenko Arun S A G Atsushi SAKAI Bernard Van De Walle Bernd Mueller Bertrand Lallau Bharath kumar Bill Dodd Bob Ball Bob Fournier Boden R Boris Pavlovic Brian Elliott Brian Waldon Bruno Cornec Béla Vancsics Caio Oliveira Cameron.C Cao Shufeng Cao Xuan Hoang Carmelo Ragusa Carol Bouchard Cenne Chang Bo Guo ChangBo Guo(gcb) Charlle Daniel Charlle Dias Chris Behrens Chris Dearborn Chris Jones Chris Krelle Chris Krelle 
Chris Krelle Chris Krelle Chris St. Pierre Christian Berendt Christopher Dearborn Christopher Dearborn Chuck Short Chuck Short Clark Boylan Claudiu Belu Clenimar Filemon Clif Houck Clint Byrum Colleen Murphy Corey Bryant Cuong Nguyen D G Lee Dan Prince Dan Smith Dan Smith Daniel Abad Dao Cong Tien Daryl Walleck Davanum Srinivas Davanum Srinivas David Edery David Hewson David Kang David McNally David Shrewsbury Davide Guerri Debayan Ray Derek Higgins Devananda van der Veen Dhuldev Valekar DhuldevValekar3 Dima Shulyak Dirk Mueller Dmitry Galkin Dmitry Nikishov Dmitry Tantsur Dmitry Tantsur Dmitry Tantsur DongCan Dongcan Ye Dongdong Zhou Doug Hellmann Dr. Jens Harbott Edan David Edwin Zhai Eli Qiao Elizabeth Elwell Ellen Hui Emilien Macchi Erhan Ekici Eric Fried Eric Guo Eric Windisch Faizan Barmawer Fang Jinxing Felix Maurer Fellype Cavalcante Fengqian Gao Feruzjon Muyassarov Flavio Percoco Félix Bouliane Gabriel Assis Bezerra Galyna Zholtkevych Gary Kotton Gauvain Pocentek Gaëtan Trellu Ghanshyam Mann Ghe Rivero Ghe Rivero Ghe Rivero Gleb Stepanov Gonéri Le Bouder Graham Hayes Gregory Haynes Grzegorz Grasza Gábor Antal Ha Van Tu Hadi Bannazadeh Hamdy Khader Han Guangyu Hans Lindgren Haomeng, Wang Harald Jensas Harald Jensås Harshada Mangesh Kakad He Yongli Hervé Beraud Hieu LE Hironori Shiina Hoang Trung Hieu Honza Pokorny Hugo Nicodemos Hugo Nicodemos IWAMOTO Toshihiro Ian Wienand Igor Kalnitsky Ihar Hrachyshka Ilya Etingof Ilya Pekelny Imre Farkas Ionut Balutoiu Iury Gregory Melo Ferreira Iury Gregory Melo Ferreira Jacek Tomasiak Jacob Anders Jakub Libosvar James E. Blair James E. Blair James Slagle Jan Gutter Jan Horstmann Jason Anderson Jason Kölker Javier Pena Jay Faulkner Jeffrey Zhang Jens Harbott Jeremy Stanley Jerry Jesse Andrews Jesse Pretorius Jim Rollenhagen Jing Sun Joanna Taryma Joe Gordon Johannes Erdfelt John Garbutt John Garbutt John L. Villalovos John L. 
Villalovos John Trowbridge Jonathan Provost Josh Gachnang Joshua Harlow Joshua Harlow Juan Antonio Osorio Robles Julia Kreger Julian Edwards Julien Danjou Junya Akahira KATO Tomoyuki Kafilat Adeleke Kaifeng Wang Kamlesh Chauvhan Kan Ken Igarashi Ken'ichi Ohmichi Kobi Samoray Kun Huang Kurt Taylor Kurt Taylor Kyle Stevenson Kyrylo Romanenko Lance Bragstad Lars Kellogg-Stedman Laura Moore Lenny Verkhovsky LiYucai Lilia Lilia Sampaio Lin Tan Lin Tan LinPeiWen <591171850@qq.com> Lokesh S Lucas Alvares Gomes Luong Anh Tuan M V P Nitesh Madhuri Kumari Madhuri Kumari Mahnoor Asghar Manuel Buil MaoyangLiu Marc Methot Marcin Juszkiewicz Marco Morais Marcus Rafael Mario Villaplana Mark Atwood Mark Beierl Mark Goddard Mark Goddard Mark McClain Mark McLoughlin Mark Silence Martin Kletzander Martin Roy Martyn Taylor Mathieu Gagné Mathieu Mitchell Matt Joyce Matt Keeann Matt Riedemann Matt Riedemann Matt Wagner Matthew Gilliard Matthew Thode Matthew Treinish Mauro S. M. Rodrigues Max Lobur Max Lobur Michael Davies Michael Kerrin Michael Krotscheck Michael Still Michael Tupitsyn Michael Turek Michael Turek Michal Arbet Michelle (Shelly) Mutu-Grigg Michey Mehta michey.mehta@hp.com Mike Bayer Mike Turek MikeG451 Mikhail Durnosvistov Mikyung Kang Miles Gould Mitsuhiro SHIGEMATSU Mitsuhiro SHIGEMATSU Mohammed Naser Monty Taylor Moshe Levi Motohiro OTSUKA Motohiro Otsuka Mudit Nam Nguyen Hoai Naohiro Tamura Ngo Quoc Cuong Nguyen Hai Nguyen Hung Phuong Nguyen Phuong An Nguyen Van Duc Nguyen Van Trung Nikolay Fedotov Nisha Agarwal Nisha Agarwal Nisha Brahmankar Noam Angel Noor Muhammad Malik OctopusZhang Oleksiy Petrenko Om Kumar Ondřej Nový OpenStack Release Bot Pablo Fernando Cargnelutti Paul Belanger Pavlo Shchelokovskyy Pavlo Shchelokovskyy Peeyush Gupta Peng Yong Peter Kendall Phil Day Philippe Godin Pierre Riteau Pierre Riteau PollyZ Pradip Kadam Pádraig Brady Qian Min Chen Qianbiao NG Qianbiao.NG R-Vaishnavi Rachit7194 Rafi Khardalian Rakesh H S Ramakrishnan G Ramamani 
Yeleswarapu Raphael Glon Raphael Glon Ricardo Araújo Santos Riccardo Pittau Richard Pioso Rick Harris Robert Collins Robert Collins Rohan Kanade Rohan Kanade Roman Bogorodskiy Roman Dashevsky Roman Podoliaka Roman Prykhodchenko Roman Prykhodchenko Ruby Loo Ruby Loo Ruby Loo Ruby Loo Ruby Loo Rushil Chugh Russell Bryant Russell Haering Ryan Bridges SHIGEMATSU Mitsuhiro Sam Betts Sana Khan Sandeep Yadav Sandhya Balakrishnan Sandy Walsh Sanjay Kumar Singh Sascha Peilicke Sascha Peilicke Sasha Chuzhoy Satoru Moriya Sean Dague Sean Dague Sean McGinnis Sean McGinnis Serge Kovaleff Sergey Lukjanov Sergey Lupersolsky Sergey Lupersolsky Sergey Nikitin Sergey Vilgelm Sergii Golovatiuk Shane Wang Shilla Saebi Shinn'ya Hoshino Shivanand Tendulker Shivanand Tendulker Shuangtai Tian Shuichiro MAKIGAKI Shuquan Huang Sinval Vieira Sirushti Murugesan SofiiaAndriichenko Solio Sarabia Srinivasa Acharya Stanislaw Pitucha Stenio Araujo Stephen Finucane Steve Baker Steven Dake Steven Hardy Stig Telfer Sukhdev Kapur Sukhdev Kapur Surya Seetharaman Tadeas Kot Takashi Kajinami Takashi NATSUME Tan Lin Tang Chen Tao Li Thiago Paiva Thierry Carrez Thomas Bechtold Thomas Goirand Thomas Herve TienDC Tim Burke Tom Fifield Tony Breeds Tran Ha Tuyen Tuan Do Anh TuanLAF Tushar Kalra Tzu-Mainn Chen Vadim Hmyrov Vanou Ishii Varsha Varun Gadiraju Vasyl Saienko Vic Howard Victor Lowther Victor Sergeyev Vikas Jain Vinay B S Vincent S. 
Cojot Vishvananda Ishaya Vladyslav Drok Vu Cong Tuan Wang Jerry Wang Wei Wanghua Wei Du Will Szumski Xavier Xian Dong, Meng Xian Dong, Meng Xiaobin Qu XiaojueGuan XieYingYun Yaguo Zhou Yatin Kumbhare Yibo Cai Yogesh Ramachandra Yolanda Robla Yolanda Robla Mota Yuiko Takada Yuiko Takada Mori Yuiko Takada Mori Yun Mao Yuriy Taraday Yuriy Yekovenko Yuriy Zveryanskyy Yushiro FURUKAWA Zachary Zane Bitter Zenghui Shi Zhang Yang Zhao Lei Zhenguo Niu Zhenguo Niu Zhenzan Zhou ZhiQiang Fan ZhiQiang Fan ZhongShengping Zhongyue Luo Zhongyue Luo Zhou Hao akhiljain23 anascko ankit azvyagintsev baiwenteng baiyuan bin yu blue55 brandonzhao caoyuan chao liu chenaidong1 chenghang chenglch chenjiao chenxiangui chenxing daz dekehn digambar divakar-padiyar-nandavar douyali dparalen dujinxiu e ericxiett fpxie gaoxiaoyong gaozx gecong1973 gengchc2 ghanshyam ghanshyam ghanshyam gugug houming-wang huang.zhiping huth <428437106@qq.com> huwenhui jiang wei jiangfei jiangwt100 jiapei jinxingfang jinxingfang junbo jxiaobin kafilat-adeleke kesper kesper kkillsfirst klyang lei-zhang-99cloud licanwei lijunjie likui lin shengrong linggao liumk liusheng liushuobj liuyuanfeng lukasz lvdongbing maaoyu maelk mallikarjuna.kolagatla max_lobur melanie witt melissaml michaeltchapman mkumari mpardhi23 mvpnitesh nishagbkar noor_muhammad_dell paresh-sao pawnesh.kumar pedh pengyuesheng poojajadhav pradeepcsekar rabi rajinir rajinir ricolin root ryo.kurahashi saripurigopi shangxiaobj shenjiatong shenxindi shuangyang.qian sjing sonu.kumar spranjali srobert stephane suichangyin sunqingliang6 takanorimiyagishi tanlin taoruizhe tianhui tiendc tonybrad vinay50muddu vishal mahajan vmud213 vsaienko wangdequn wanghao wanghongtaozz wangkf wangkf wangqi wangxiyuan wangzhengwei weizhao whaom whitekid whoami-rajat wu.chunyang wu.chunyang wu.shiming wudong xgwang5843 xiexs yangxurong yatin yuan liang yuanliu yufei yuhui_inspur yunhong jiang yushangbin yuyafei zackchen zhang.lei zhangbailin zhangdebo zhangjl zhangyanxian 
zhangyanxian zhangyanying zhu.fanglei zhufl zhurong zouyee zshi 翟小君 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/CONTRIBUTING.rst0000664000175000017500000000076400000000000015550 0ustar00zuulzuul00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored since OpenStack projects use a Gerrit instance hosted on OpenDev. https://review.opendev.org Contributor documentation for the Ironic project can be found in the OpenStack Ironic documentation. https://docs.openstack.org/ironic/latest/contributor/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108935.0 ironic-20.1.0/ChangeLog0000664000175000017500000110132300000000000014653 0ustar00zuulzuul00000000000000CHANGES ======= 20.1.0 ------ * Fix 20.1 prelude releasenote * Prepare Yoga release with 20.1 * Link from deploy interface docs to anaconda docs and move it to advanced * [trivial]Remove is\_pxe\_enabled function doc about ipxe\_enabled option * Create API documentation from docstrings * Improve efficiency of storage cleaning in mixed media envs - documentation * Use pycdlib to extract deploy iso * Suppress Bandit B509 in snmp.py * Fix rebuilds using anaconda deploy interface * Anaconda deploy handles configdrive correctly * Minor updates to rbac doc * Update minimum requirements for ilo drivers * Fix failure of create\_vfat\_image function * Troubleshooting guide: node locked error * [Trivial] Fix formatting in troubleshooting docs * More fixes for anaconda deploy interface * Deprecate instance network boot * Update python-dracclient version * CI: force config drive on the multinode job's subnode * Ensure tox.ini is ASCII * Shorten error messages in commonly used modules 20.0.0 ------ * Build the new 
cirros image even when netboot is the default * Fix Redfish RAID for non-immediate controllers * Fix prepare ramdisk for 'wait' states * CI: use a custom cirros partition image instead of the default * [doc][trivial] Fixing typos in RBAC documentation * Fix release mapping (19.1 -> 20.0) * Update some releasenotes for 19.1 release * Add release mappings for 19.0 and 19.1 bugfix * iRMC: Fix instructions for boot interface * Set correct initrd\_filename for iPXE when using Swift * [trivial] Fix typo in policy error message * Support listening on a Unix socket * Fix the benchmark job * Clean up jobs with legacy names * Re-enable the standalone job * Avoid non-Stream CentOS and temporary disable the standalone job * Support img\_type Glance property * Explicit parameter to distinguish partition/whole-disk images * deploy\_utils: only check glance for image properties kernel/ramdisk * Fix DevStack plugin ipxe-snponly-x86\_64.efi name * Add additional ramdisk tests * Wait for conductor start before notifying systemd * Return non-zero exit code on failures * Make account prefix of Swift confgurable * Recommend various installers in the install guide * Fix resource\_url in the remaining resources * Add idrac-wsman clean steps to not require ramdisk * Add idrac-redfish clean steps to not require ramdisk * Fix Node Console Duplicate Sol Session * Migrates docs from wiki * Inspector: better error message on DiscoveryFailure * Set resource\_url when getting all ports or portgroups * Set resource\_url when getting all nodes * Add known issue for iDRAC Swift firmware update * Add more sources to redfish firmware upgrade * Update idrac-redfish export configuration step * Do not fail inspection on invalid MAC * Fix validating input for redfish update\_firmware * Use mtools mcopy in create\_vfat\_image * Add dhcp options for each ip\_version once * ImageCache: respect Cache-Control: no-store * Ensure 'port' is up2date after binding:host\_id * [doc] Add note about shellinabox 
console enable error * [doc] Fix broken link in "Node Deployment" * Make bootloader installation failures fatal for whole disk images * Fix redfish RAID failed tasks * Use driver\_internal\_info methods for other drivers * Use driver\_internal\_info methods for drac driver * Use driver\_internal\_info methods for ilo driver * Use driver\_internal\_info methods for redfish driver * Use driver\_internal\_info methods for driver utils * Fix Redfish RAID deploy steps * Automatically configure enabled\_\*\*\*\_interfaces * Derive FakeHardware from GenericHardware * Update RAID docs * Fix Mock objects as specs * Enable foreign keys in SQLite * Re-add python 3.6/3.7 in classifier * [doc] Adoption with Nova * Fix redfish update\_firmware for newer Sushy * Document single process Ironic * Adoption: do not validate boot interface when local booting * Write initial grub config on startup * Move place\_loaders\_for\_boot to boot driver \_\_init\_\_ * Ensure desired permissions on all written pxe files * [doc] Add parameters for burn-in log files * Ignore fake nodes in the power sync loop 19.0.0 ------ * CI: Lower test VM memory by 200MB * CI: reduce api worker processes to 1 * Allow enabling fast-track per node * devstack: provide a default for OS\_CLOUD * Use more granular lock in BaseDriverFactory * Add "none" RPC transport that disables the RPC bus * All-in-one Ironic service with a local RPC bus * Trivial: minor follow-up to redfish fix * Prepare for bugfix release * Avoid RPC notify\_conductor\_resume\_{deploy,clean} in agent\_base * Trivial: log current state when continuing cleaning * Add missing mode setting on pxe created directories * Install isolinux on devstack * Fix markup syntax * Refactor driver\_internal\_info updates to methods * Remove redfish cache entry upon errors * Refactor common configuration bits from service commands * Remove isfile check from place\_loaders\_for\_boot * Remove default option from create iso image * Add a unit test job with Sushy from 
source * Do not assume sushy constants are strings - part 2 * Updating yoga tested python versions in classifier * Clarify driver load error message * Use test\_with\_retry to get the tap device name * CI: Fix devstack plugin with RBAC changes * Use default test dir * Fix some of the SRBAC tests * Add an option to create inspector-compatible boot.ipxe * Reduce the number of small functions in pxe\_utils * Stop relying on explicit values of Redfish constants * [iRMC] Convert the type of irmc\_port to int * Fix RedfishManagement.get\_mac\_addresses and related functions * Create node get\_interface method * Enable iPXE boot interface by default * Enable Redfish by default * [Trivial] Clarify conditions under which power recovery is attempted * Avoid handling a deploy failure twice * Fix idrac-wsman deploy with existing non-BIOS jobs * Document commands to leave WAIT states * Add description to the mod\_wsgi part * Add platform:rpm shim, grub packages to bindep * Fix various issues in the anaconda deploy interface * Trivial: do not stop None rpcserver * Fix restricted allocation creation for old policy defaults * changed code for memory burin vm-bytes, 75 to 75% * Add a description of stopping ironic-api.service * Add Xena versions to release notes * There is no aim, we do deploy/manage baremetal * Do not use any parts of image URL in temporary file names * Remove redundant node\_periodic tests * node\_periodics: encapsulate the interface class check * Add a helper for node-based periodics * Add and document high-level helpers for async steps * Remove debian packages file for devstack * SRBAC - Prepare for additional services * Retool devstack plugin to use pxe loaders configuration * Do not append filename parameter to image URL when using local file * Update the list of supported database filters * Follow up to Add support for verify steps * add snmp power action delay * Demote three warning messages * Make iDRAC management steps verify steps * CI: Change CI ipxe 
file to snponly * Yoga: Change default boot mode to uefi * Add support for verify steps * Fix iDRAC configuration mold docs * Document recovery from power faults * Remove legacy rpm install list and use bindep * Update release doc * PXE: avoid trailing dots in exception messages * require\_exclusive\_lock: log traceback that lead to an error * Clean up caches periodically * Update iDRAC doc for idrac-redfish RAID * Devstack: don't scan /opt, /etc looking for isolinux * [iRMC] Set polling after RAID is built * [iRMC] Avoid repeatedly resuming clean after creating raid configuration * Add Python3 yoga unit tests * Update master for stable/xena * Use an ImageCache for provided boot/deploy ISO images 18.2.0 ------ * Set stage for release 18.2 * Remove docker reference from legacy image build * Reno for default\_boot\_mode change in Yoga * Update python-dracclient version * Refactor: move base\_iso handling from create\_boot\_iso * Disable Neutron firewall * Fix idrac-wsman having Completed with Errors jobs * Facilitate asset copy for bootloader ops * Dial back gate job memory allocation * API endpoints to get node history * Fix idrac-wsman set\_power\_state to wait on HW * Support HttpHeaders in create\_subscription * Fix clear foreign config in idrac-redfish RAID * Fix driver task pattern to reduce periodic db load * Fix iDRAC import configuration missing task handling * Record node history and manage events in db * Document eject\_vmedia for Redfish * Implements node history: database * Fix iDRAC import configuration job with errors * Enable parallel downloads and allow tuning concurrency * Always update cache for HTTP images if Last Modified is unknown * Clean step to remove CA certificates from iLO * Use packaged grub efi for network boot * Fix RAID steps for non-RAID in idrac-redfish * Trivial: shorten the deploy/clean step failure message * Fix to unblock oslodb 11.0.0 * Move ramdisk deploy to its own module * Fix in-band cleaning for ramdisk and anaconda deploy 
* Add release note upgrade version check handling change * Remove images from the OS profiler docs * Expand the driver contributor documentation * Fix typo and add subscription docs * Improve edge-case debugging for deployment and cleaning * update irmc document * Redfish RAID: Use identity instead of durable\_name * Split node verification code out of manager.py * Revert "Allow reboot to hard disk following iso ramdisk deploy." * Remove manager param for iDRAC OEM calls * Add support for fields in drivers API * Silence debug logging from oslo\_policy * [doc] Cross-reference the updated power sync docs from tuning * Minor formatting and doc changes to change boot mode feature commit * Add api endpoints for changing boot\_mode and secure\_boot state * Add better error messages for invalid conf molds * rfc3986: Replace deprecated URIReference.is\_valid * [doc] Update power sync documentation * Allow initial versions to not be created yet * Make curl in DevStack Bypass Proxy * [Trivial] Fix typo in burn-in docs * Enable priority overrides to enable/disable steps * Fix upgrade logic to allow for bundled changes * Retry stdlib ssl.SSLError * Fix subscription vendor passthru * Set postgresql password encryption for FIPS compliance * Use shim-signed on Ubuntu, shim is empty now * Add lower-constraints job to current development branch * ci-workarounds/get\_extra\_logging.yaml : avoid grep failure * Increase version of hacking and pycodestyle * Fixes missing argument for log format string * Fix regression in ramdisk deploy kernel parameters * Minor updates to anaconda doc * Expand the IPMI documentation * Document making bugfix releases and branches 18.1.0 ------ * Update vendor\_passthru subscriptions releasenote * Prepare 18.1 and clean up release notes * Add vendor\_passthru method for subscriptions * Set glance limit for baremetal friendly images * Scoped RBAC Devstack Plugin support * Burn-in: Add documentation * Add a section to redfish doc for BIOS registry fields * 
Fix iPXE docs: snponly is not always available * Update requirements * Sanity check object version changes * Allow reboot to hard disk following iso ramdisk deploy * Add ipxe ramdisk kernel append param test * Update iDRAC virtual media boot known issue * Add \`boot\_mode\` and \`secure\_boot\` to node object and expose in api * Add missing BIOSSettings version mapping for older releases * Fix typos in API sanitization change notes * Bring boot\_iso/deploy\_iso handling in iLO closer to Redfish * Use selectinload for all list queries * Clean up images when ejecting an ISO with Redfish * Allow node\_sanitize function to be provided overrides * Avoid double file removal in create\_boot\_iso * Add note regarding configuration drives to tuning docs * Support "swift" for ramdisk\_image\_download\_source * Defer checking image size until instance info is built * Fix oslo policy DeprecatedRule warnings * Add support for configdrive in anaconda interface * Add reno and reset legacy policy deprecation expectation * [doc] Bootloader reinstallations on Software RAID * [doc] Update section on ESP consistency * Fix Redfish RAID interface\_type physical disk hint * Suppress policy deprecation and default change warnings * Skip port create if MAC is blank * Ramdisk: do not require image\_source * Refactor deploy\_utils.validate\_image\_properties * Redfish: Skip non-RAID controllers for RAID * Deprecate [pxe]ip\_version parameter * Upgrade oslo.db version * Update the clear job id's constant * API to pass fields to node object list * Set stage for objects to handle selected field lists * Only return the requested fields from the DB * Cache AgentClient on Task, not globally * Change UEFI ipxe bootloader default * Refactor: untie IloVendor from validate\_image\_properties * Fix handling driver\_info[agent\_verify\_ca] == False * Fix ramdisk boot option handling * Allow ramdisk\_image\_download\_source in instance\_info for ramdisk deploy * Nicer error message when a deploy step 
fails * Don't run the inspector job on changes to inspector tests * CI: change ilo\_deploy\_iso to deploy\_iso * Fix node detail instance\_uuid request handling * Clean up vendor prefixes for iRMC boot * Add documentation for anaconda deploy interface * Fix typos in inspection docs * Remove redundant/legacy is\_admin logic * Split a community page out of the contributor docs * Document managed inspection * dhcp-less: mention how to provide network\_data to instance * Use env to find python3 * Rename ilo\_boot\_iso -> boot\_iso * Clean up vendor prefixes for iLO boot * Handle non-key-value params in [inspector]extra\_kernel\_params * Fix ironic-status db index check * Changes made to enrollment documentation * Update Redfish RAID disk\_type unit test * Redfish: Get only RAID controller's physical disks 18.0.0 ------ * Changes made to release documentation: * Prepare 18.0 and clean up release note * Trivial: Fix version number in comment * Improve agent\_client logging * Add bifrost-benchmark-ironic job * Trivial: fix an outdated link * Move provision states documentation to the user guide * Follow-up to Include bios registry fields in bios API * Include bios registry fields in bios API * Update project conundrum related docs * Add basic tools for benchmarking * Secure RBAC - Efficent node santiziation * Add missing bios\_interface to api-ref for validate API * Trivial: comment why we don't check retired in allocations * Refactor iDRAC OEM extension manager calls * Retrieve BIOS registry from sushy * Delay rendering configdrive * Remove inventory time workaround for WS-man BIOS * Rename redfish\_deploy\_iso -> deploy\_iso * Add additional node indexes * Update min version of tox to use allowlist * Clean up kernel\_append\_params for iRMC * Clean up kernel\_append\_params for Redfish and iLO * Clean up kernel\_append\_params for PXE/iPXE * Add bios\_interface to api-ref spec * Expand the deployment guide * Add iDRAC configuration mold docs * setup.cfg: Replace dashes 
with underscores * [doc] Update documentation about force\_persistent\_boot\_device * Rework the user guide * Upgrade guide: remove ancient versions and group with Installation * Delete unavailable py2 package * Fix deployment when executing a command fails after the command starts * Document the custom-agent deploy interface * Provide an option to not cache bootable iso ramdisks * Avoid unnecessary validation in boot interfaces * Inherit InvalidImageRef from InvalidParameterValue * Bye-bye iSCSI deploy, you served us well * Update refarch with information about image\_download\_source * Clean up deprecated features of the agent deploy * Stop testing the iscsi deploy interface * Deploy interface that fully relies on custom deploy steps * Do not mask configdrive when executing in-band deploy steps * Docs: dhcp-less works with Glean 1.19.0 * Follow up to add iDRAC management via Redfish * Remove temporary cleaning information on starting cleaning * Update CI jobs list and description in docs * [doc][trivial] Fix diagram in agent token admin docs * [doc][trivial] Fix typo in agent token admin docs * Update basic local.conf for quickstart * Read default cirros version from stackrc * Aliases for a few unfortunately named state transitions * Process in-band deploy steps on fast-track * Followup patch for security dashboard clean steps * Remove a pause before cleaning when fast-tracking * Fix fast track with redfish-virtual-media * Update to not use deprecated \`get\_task\_monitor\` * Do not use pregenerated tokens with a pre-built ISO * Wipe agent tokens on inspection start and abort * Fix idrac-wsman BIOS factory\_reset result finding * Update python-dracclient version * Add security dashboard clean steps to ilo drivers * Imported Translations from Zanata * Update release note version header for wallaby * Add Python3 xena unit tests * Update master for stable/wallaby * Followup idrac configuration mold steps 17.0.0 ------ * Add agent\_status and agent\_status\_message 
params to heartbeat * Restrict syncing of boot mode to Supermicro * Follow-up Automaticaly set cipher suite * Add iDRAC management via Redfish to idrac HW type * Generic way to configure clean step priorites * Fix typo in security docs around is\_admin rule * Fix configuration generation for ironic doc pages * Fix Bandit check * Always add 'boot\_method' vmedia in redfish/ilo vmedia boot * Add import, export configuration to idrac-redfish * Move configuration mold utilities * [doc] Warning about out-of-sync ESPs for UEFI software RAID * [doc] Update available software RAID levels * Automaticaly set cipher suite * Fix webserver\_verify\_ca config documentation * Allow using per-site network\_data schema * Add configuration mold storage * DRAC : idrac-redfish inspect updates pxe port * update grub2 file name * redfish boot\_interfaces, ipmitool -> pxe * redfish-virtual-media: allow USB devices instead of floppies * Allow overriding an external URL for virtual media * Fix idrac-wsman BIOS step async error handling * Follow up of Use OOB inspection to fetch MACs for IB inspection * Validate the kickstart template and file before use * Add anaconda support in the pxe boot driver * Allow running RAID cleaning steps with in-band cleaning * Support pre-built deploy/rescue ISO in Redfish * Version 17.0 mapping/doc updates * Add prelude for Ironic 17.0 * Add known iDRAC virtual media boot issue * Update idrac-redfish RAID release note * Remove extra/vif\_port\_id * Revise release notes for 17.0 release * Allow unsupported redfish set\_boot\_mode * Add anaconda configuration and template * Add Redfish RAID interface to idrac HW type * Follow-up to RBAC allocation changes * Increment API version for Secure RBAC * Add runtime gpu capabilities to ilo inspection * Deprecate legacy policies, update project scoped docs * Enable Reuse of Zuul Job in 3rd Party CI Environment * Add iRMC Driver Support to DevStack Code * API to force manual cleaning without booting IPA * Allow ansible 
deploys to be fast-tracked * Trivial: add a missing argument to an exception * Allocation support for project scoped RBAC * Mark multinode non-voting due to high failure rate * Don't try to use attempts=None with tenacity * [doc] Add initial system-scoped text to secure-rbac * Switch to JSON RPC from ironic-lib * Update dev quickstart docs * Update Redfish RAID release note * Allow users to configure priority for {create,delete}\_configuration * Add CentOS7 for supported ramdisk for dhcpless deploy * RBAC Follow-up: Review follow-up * Enforce autospec in test\_console\_utils * Rework the standalone guide * docs: move overriding interfaces to the standalone documentation * Update iDRAC doc with idrac-redfish-virtual-media * Update iDRAC doc on Redfish vendor passthru interface * Allow instance\_info to override node interface * Allow support for multipath volumes * Enforce autospec in test\_portgroup * Revert "Update iDRAC doc with missing interfaces" * Follow-up on project scoped trait tests * Project scope driver vendor pass-through * Volume targets/connectors Project Scoped RBAC * Enforce autospec in test\_port * Enforce autospec in test\_volume\_connector * Enforce autospec in test\_volume\_target * Add Redfish RAID management to Ironic * Port/Portgroup project scoped access * Project Scoping Node endpoint * Add support for using NVMe specific cleaning * Prepare to use tinycore 12 for tinyipa * Lazy-load node details from the DB * Adds config parameter kernel\_append\_param for iLO * secure-rbac - minor follow-up for project scoped tests * Add both IPv4 and IPv6 DHCP options if interface has both * [Trivial] Fix testing of volume connector exception * Switch iLO and iRMC to the new secure boot framework * devstack: a safeguard for disabled tempurls * Enable swift temporary URLs in grenade and provide a good error message * [trivial] Remove default parameter from execute * Initial Project scoped tests * RBAC System Scope: observer -> reader * Implement system 
scoped RBAC for the deploy templates APIs * Implement system scoped RBAC for the event APIs * [trivial] fix typos in conductor * Enforce autospec in test\_driver * Review feedback follow-up on Node System Scoped RBAC * Implement system scoped RBAC for the allocation APIs * Implement system scoped RBAC for conductor APIs * Implement system scoped RBAC for volume APIs * Implement system scoped RBAC for utility APIs * Implement system scoped RBAC for node and driver passthru * Implement system scoped RBAC for baremetal drivers * Implement system scoped RBAC for chassis * Implement system scoped RBAC for port groups * Implement "system" scoped RBAC for ports * Implement "system" scoped RBAC for the node endpoint * Add support for using NVMe specific cleaning * Trivial: fix incorrect ordering in iLO tests * Enforce autospec in test\_notification\_utils module * Enforce autospec in test\_node * Correct release mappings for 16.2 * Fix broken configdrive\_use\_object\_store * Replace pysendfile with os.sendfile * Validate configdrive string format * Enforce autospec in test\_chassis * Replace retrying with tenacity * Switch multinode jobs to 512M RAM * Add some tuning documentation 16.2.0 ------ * Move the IPv6 job to the experimental pipeline * Trivial: update version for deploy steps * Address some rbac review feedback in merged patches * Introduce common personas for secure RBAC * Duplicate testing for system scoped ACL testing * Populate existing policy tests * devstack: support installing ironic-lib from source in DIB IPA * Prepare 16.2 and clean up release note * Report the slowest tests after a test run * Use OOB inspection to fetch MACs for IB inspection * Generate policy.yaml.sample * Add support to manage certificates in iLO * Update oslo.policy requirement to version 3.6.2 * Prevent redfish-virtual-media from being used with Dell nodes * Don't mark an agent as alive if rebooted * Add 'deploy steps' parameter for provisioning API * Trivial: log the newly detected 
vendor * Swap Metalsmith job out for centos8-uefi * More GPU support in idrac-wsman inspect interface * Make boot\_mode more consistent with other capabilities * ilo: do not change deploy\_boot\_mode in instance\_info * Apply force\_persistent\_boot\_device to all boot interfaces * Add release version to release notes * Fix Mis-Ordering of Bash Variable Definition in DevStack * Fixes issue of redfish firmware update * Update python packages to python3 in quickstart.rst * Set default to prevent out of memory conditions * Guard conductor from consuming all of the ram * For Supermicro BMCs set enable when changing boot device * Refactor vendor detection and add Redfish implementation * Add a few words about UEFI user images * Redfish secure boot management * Add centralized secure boot documentation * Pass context objects directly to policy enforcement * redfish-virtual-media: allow a link to raw configdrive image * Update minversion of tox * Attempt to slim down protection test base class * Write stub ACL test for every existing API call * Update iDRAC doc with missing interfaces * Raw image size estimation improved * Bump oslo.log requirement to 4.3.0 * Common framework for configuring secure boot * redfish-virtual-media: make fewer calls when preparing boot * Add a delay/retry if vmedia insert fails * Fix redfish-virtual-media boot mode handling ordering * Enable testing to dynamically leverage ACL roles * CI: Collect a snapshot of network connections * Follow-up for ramdisk deploy configdrive support * Register all hardware\_interfaces together * Do not enter maintenance if cleaning fails before running the 1st step * Policy json to yaml migration * Add troubleshooting on changing ironic.conf default interfaces * Modify port group document for ironic * add openstack-python3-wallaby-jobs-arm64 job * Mark the iSCSI deploy as deprecated in the docs * update python packages to python3 in quickstart.rst * Support configdrive when doing ramdisk deploy with
redfish-virtual-media 16.1.0 ------ * Update outdated description for \`default\_boot\_option\` * remove lower-constraints in tox * Rewrite existing ACL tests with ddt, yaml * Consistently use utils functions for policy auth * Document that DHCP-less deploy does work with debian-minimal * Document using ramdisks with the ramdisk deploy interface * Include HeartbeatMixin in the ramdisk deploy * Remove detect\_vendor decorator * Complete the REST API POST documentation * [doc-only] Add BFV basic flow and networking context * Remove lower-constraints job * Revert "devstack: build DIB images with CentOS Stream by default" * Document the current status of the DHCP-less deploy * Rewrite DHCP-less documentation * Inject TLS certificate when using virtual media * Fix release guide and include intermediate branches * Avoid a full install in tox environments that do not need it * Prepare release 16.1 * IPMI: Handle vendor set boot device differences * CI: switch the multinode job to tempest-multinode-full-base * Do not pin Python version in tox config * Use ddt for existing policy tests * Fix lower-constraints with the new pip resolver * devstack: build DIB images with CentOS Stream by default * Add TLS troubleshooting guide entry * Support port name * Use openstack-tox for ironic-tox-unit-with-driver-libs * Test patching booleans with string values * Improve object\_to\_dict arguments * CI: add a non-voting bifrost-vmedia-uefi job * Add secure boot support to ilo-uefi-https * JSON conversion followup change * Allow disabling automated\_clean per node * Always retry locking when performing task handoff * Add vendor\_passthru method for virtual media * Make standalone jobs voting again * Expunge the internal version of WSME * Convert volume/targets endpoint to plain JSON * Convert volume/connectors endpoint to plain JSON * Don't allow patching port internal\_info * [doc][trivial] Fix spelling error * Fix disk label to account for UEFI * Minor follow-up doc change * Remove
from\_dict function from context * Update .rst files * Convert volume endpoint to plain JSON * Convert ramdisk endpoint to plain JSON * Convert portgroups endpoint to plain JSON * Convert ports endpoint to plain JSON * Convert nodes endpoint to plain JSON * Convert event endpoint to plain JSON * Convert drivers endpoint to plain JSON * Convert deploy\_templates endpoint to plain JSON * Convert conductors endpoint to plain JSON * Convert chassis endpoint to plain JSON * Convert bios endpoint to plain JSON * Convert allocations endpoint to plain JSON * Utility functions for REST API JSON handling * Duplicate trait validation with jsonschema * Add expose body decorator, status\_code argument * New argument validate decorator * Update \`cleaning\_error\_handler\` * Convert last bionic jobs to focal * Simplify injecting network data into an ISO image * Fix incorrect network\_data.json location * Retrieve BIOS configuration when moving node to \`\`manageable\`\` * Document how to build an ESP image for redfish-virtual-media * Fix DHCP-less operations with the noop network interface * Make driver documentation more prominent on the landing page * Fixes the issue that instance bond port can't get IP address * Docs: a more complete example of ramdisk boot with redfish-virtual-media * Limit the default value of [api]api\_workers to 4 * Enforce autospec in some api controllers modules * Update TOX\_CONSTRAINTS\_FILE * Fix idrac-wsman RAID step async error handling * Fix redfish BIOS apply config error handling * Enforce autospec in some api controllers modules * Fix typo in ipxe\_config.template * Prevent timeouts when using fast-track with redfish-virtual-media * Handle agent still doing the prior command * Make redfish-virtual-media respect default\_boot\_mode * devstack: log all requests to sushy-emulator * Remove the support to create port in API * Mark standalone job non-voting/remove from gate * json-rpc: surround IPv6 address with [] in conductor URL * Changes to add 
exception in "default\_interface()" function * Move the multinode grenade job to the experimental pipeline * CI: increase cleaning timeout and tie it to PXE boot timeout * devstack: remove no longer required UEFI hacks * Fixes empty physical\_network is not guarded * Add node name to ironic-conductor ramdisk log filename * Add timeout to image operations in the direct deploy * Allow passing rootfs\_uuid for the standalone case * Sync boot mode when changing the boot device via Redfish * Revert patching \_syscmd\_uname * Python 3.9: base64.{en,de}codestring function is removed * Document by\_path root device hint * Change auth type to none in dev-quickstart * Update test requirements * Update .rst files * Fix ipmitool timing argument calculation * Run bifrost integration job on focal * Use centos as base element for dib images * Do not pass BOOTIF=None if no BOOTIF can be guessed * Remove obsolete trusted boot doc * Update checking reno script to use python3 * Remove root device hint after delete\_configuration * docs: Add information on post-branch release tasks for bifrost * Log the ports we bind in flat * Updates ironic documentation * Refactoring: split away continue\_node\_deploy/clean * Trivial: use the correct error handler for do\_next\_clean\_step * Wiping agent tokens on reboot via API - take 2 * migrate testing to ubuntu focal * CI: update bindep for centos-8 py36 job changes * Imported Translations from Zanata * Fix inspection for idrac * Clarify power state logging * Add Python3 wallaby unit tests * Update master for stable/victoria 16.0.0 ------ * Add GPU reporting to idrac-wsman inspect interface * Don't migrate away from iscsi if it is the default * Minor agent version code cleanup * Support iRMC hardware type again * devstack: do not default to swift if SWIFT\_ENABLE\_TEMPURLS is False * Trivial: fix formatting in the prelude * Fix handling OctetString for pysnmp * Handle patching node /protected value with None * Remove compatibility with 
conductors that do not support groups * Handle conductor\_affinity earlier in the deployment process * Update 'idrac' driver requirements for Victoria * Redfish driver firmware update * Release notes prelude for the Victoria release * Do not silently ignore exceptions when running next steps * Fix a race condition in the hash ring code * Follow-up patch for iso less vmedia support * Route conductor notification RPC to the same conductor * Fix invalid assert\_has\_calls * Adding changes for iso less vmedia support * Use correct error handler instead of calling process\_event('fail') * Fix deprecated 'idrac' interface '\_\_init\_\_'s * Amend the agent\_verify\_ca release note to mention the configuration option * Add documentation for ISO boot * Update release mappings and API history for 16.0 * autospec for classmethods and staticmethods for Python < 3.7.4 * Release note updates for Victoria release * Update tests for Redfish BIOS apply\_configuration * Add Redfish BIOS interface to idrac HW type * Expose agent\_verify\_ca in configuration and correctly handle booleans * Use tempdirs for /tftpboot, /httpboot creation tests * Fix redfish BIOS to use @Redfish.SettingsApplyTime * Make the standalone-redfish job voting * Deprecate the iscsi deploy interface * Reduce VMs for multinode and standalone jobs * Reduce grenade node count * Limit inspector jobs to 1 testing VM * CI: Remove the build check for pre-build ramdisks only * Do not retry locking when heartbeating * Adds ilo-uefi-https boot interface to ilo5 * Native zuulv3 grenade multinode multitenant * Allow configuring IPMI cipher suite * Clarify HPE Edgeline support * Add redfish options to the generated docs * Also wipe agent token on manual power off or reboot * Remove install unnecessary packages * Fix lower-constraints for Ubuntu Focal * Add release note for dhcp-less deploy * Handle default\_boot\_mode during cleaning/inspection with PXE/iPXE * Add 'agent\_token' to heartbeat request * Follow-up patch for One 
Button Secure Erase clean step * Accept and use a TLS certificate from the agent * clean up mac address with the pxe configuration files * Change [agent]image\_download\_source=http * Add an option to require TLS for agent callback\_url * Allow HttpImageService to accept custom certificate * Adds few of the security dashboard parameters to capabilities * Remove token-less agent support * Detail iPXE + LACP troubleshooting information * Do not assume that prepare\_image is the last command to run * OOB one button secure erase for iLO5 based HPE Proliant servers * Add missing log for clean failed * Trivial: fix minor typo on RFC number * Update deploy steps documentation * documentation: follow-up to file:// support in direct deploy * Enhance certificate verification for ilo harware type * Add L3 boot section to the docs * Allow setting image\_download\_source per node * Support caching http:// images locally in the direct deploy * Support file:/// images for the direct deploy * Explicitely do not allocate initial space for virtual volumes * Feat: add ibmc hardware info support for devstack * [trivial] remove emacs config from devstack script * Remove absolute path with iptables when L3 enabled * Update nodes in notifications documentation * Suffix the ISO names with '.iso' * Remove the unused coding style modules * driver\_internal\_info in provision notifications * Enforce autospec in ilo tests * Enforce autospec in common tests * Decouple the ISO creation logic from redfish * Ansible deploy - Ignore invalid devices * Move redfish-virtual-media to the back of supported\_boot\_interfaces * Increase memory of tinyipa vms * Ensure in-band deploy steps are present in time for fast-track deployments * Fix network\_data path for dhcpless deployments * Switch Ironic to openstacksdk for Neutron * Fix: port attribute name propagate\_uplink\_status * Remove qemu-img rootwrap filter * ISO ramdisk virtual media test enablement * Documentation update for ilo hardware type 
15.2.0 ------ * Fix time usage in unit tests for agent power interface * Enforce autospec in irmc tests * Pass global-request-id to ironic-python-agent * [Trivial]Fix some typos in docs * Handle an older agent with agent\_token * Follow up to I44336423194eed99f026c44b6390030a94ed0522 * Adds support SUM based firmware update as deploy step * Enforce autospec in xclarity tests * Enforce autospec in test\_cinder * agent\_client: support custom TLS certificates * Use property plus abstractmethod for abstractproperty * Fix console auto port allocation under IPv6 * Fix iscsi deploy steps priorities * Wipe agent token and URL on rescue and unrescue * Fix error word presistent in docs * Prevents power sync with ADOPTFAIL nodes * Change UEFI PXE job to use tinyipa * Enable deploy-time software RAID in standalone jobs * Use TLS for json\_rpc when configured * Log the traceback of unexpected errors when verifying power credentials * Add cirros-specific FAQ item for troubleshooting * Fix idrac-wsman RAID apply\_configuration * Break out collection functions for json usage * Convert v1 controller to plain, return JSON * Convert root controller to plain controller * Enforce autospec in test\_iscsi\_deploy * Fix invalid assertTrue which should be assertEqual * Trivial: fix a minor issue in standalone docs and improve formatting * AgentRAID: Account for empty results in post-configuration checks * Remove locks before RPC bus is started * Mark IPv6 job as non-voting to unblock the gate * Reset power state upon adoption failure * Make the final deploy step validation actually fail deploy * Enforce autospec in some api tests * Add subsections to the standalone documentation * Deprecate http\_basic\_username and http\_basic\_password in [json\_rpc] * Adds raid validation for in-band AgentRAID deploy step * Allow node lessee to see node's ports * Force RAX hosts to run tinyipa * Update how to release section * Extend PXE boot retry timeout for RAX hosts * Add an option to choose the 
hash ring algorithm * Extend base build timeouts * Stop running test\_schedule\_to\_all\_nodes in the multinode job * Document fast-track and the agent power interface * Add agent power interface * Account for power interfaces that cannot power on * Convert root path / to use plain JSON * Rename Response to PassthruResponse * Remove Link type * Remove File type * Update number of VM on ironic-base * Replace oslo\_utils.netutils type compares with ipaddress * Auto extend the timeout for RAX hosts * Allow disabling retries in AgentClient.get\_command\_statuses * Explicitly set jobs to ML2/OVS * Enforce autospec in test\_notification\_utils * Enforce autospec in test\_deployments * iPXE ISO Ramdisk booting * Follow-up on blocking port deletions * Remove old driver name from cross-gating job * Stop wiping driver\_internal\_info on node.driver updates * Ironic to use DevStack's neutron"-legacy" module * Fixes to skip validation of in-band deploy steps before agent boot * Implement get\_deploy\_steps for AgentRAID * Set min version of tox to 3.2.1 * Use default timeout for all jobs * Wipe agent token during reboot or power off * Add missing agent RAID compatibility for ilo5 and idrac * Add wsme core types, remove WSME * Add json and param parsing to args * Change non-tinyipa jobs to use multiple cores * Add knob for read-only and "erase\_devices" * Decompose the core deploy step on iscsi and ansible deploy * Iso booting via redfish virtual media * Use min\_command\_interval when ironic does IPMI retries * fix error word presistent in docs * Remove non-inclusive language * fix error word confiuration to configuration in docs * add tempest boot\_mode config 15.1.0 ------ * Decompose the core deploy step of the direct deploy * agent\_base: support inserting in-band deploy steps * Add documentation on how to propose a release * Provide a path to set explicit ipxe bootloaders * Do not verify error messages from jsonpatch in unit tests * Use assertCountEqual instead of 
assertItemsEqual, part 2 * Enforce autospec in test\_task\_manager * Stop using md5 for \_\_repr\_\_ of objects * Follow up of enforce autospec in test\_manager * CI: Make ipv6 job to voting * Allow deleting nodes with a broken driver * Do not validate driver on changing non-driver fields * Follow up of fix uefi jobs with ovmf native ubuntu package * Fix missing print format in log message * Use native oslo.concurrency execution timeout in ipmitool * [doc] Describe how to extract an fs UUID from an image * Do not put the whole JSON error from the agent in last\_error * Use unittest mock instead of 3rd party mock in test\_raid * Correct Redfish boot once fallback conditional * Fix the error message when reached max number of traits * Software RAID: don't try to set local\_gb to MAX * Clean up nodes in DELETING on conductor restart * Log when a node should be fast-track-able but it's not * Fix uefi jobs with native ubuntu ovmf package * Enforce autospec in test\_manager * Prepare release notes and docs for release 15.1 * Add ironic 15.1 release mapping * Use getfullargspec to inspect functions * Fix fast track following managed inspection * Add \`get\_node\_network\_data\` to Neutron NetworkInterface * Adds boot mode support to iLO management interface * Explicitly use python3 for ironic\_lib\_prefix * Fix hacking min version to 3.0.1 * devstack: get rid of python3\_enabled * Use IRONIC\_VM\_SPECS\_RAM from ironic-base in ipv6 job * Update git URLs * Minor agent token log handling * DRAC: Fix a failure to create virtual disk * Use virtio bus with uefi * Untie the ramdisk deploy from AgentDeploy * Enforce autospec in test\_utils * Enforce autospec in test\_flat * Enforce autospec in test\_common * Update [console]kill\_timeout description * Trivial: clarify usage of AgentCommandTimeout in \_wait\_for\_command * Add a bug reporting and triaging guide * Make test-setup.sh compatible with mysql8 * agent: poll long-running commands till completion * Networking boot 
fallback for local boot * Fix fast-track with the direct deploy interface * Add api-ref for indicators API * ironic-standalone, use http basic auth for json-rpc * Use configure\_keystone\_authtoken\_middleware * Stop allocating double space for raw images * Enforce autospec in test\_neutron * Enforce autospec in test\_base\_manager * Enable HTTP Basic authentication for JSON-RPC * Enforce autospec in test\_rpc\_api and test\_steps * Fix mock callable for Python 3.6 and precedent * Fix: review from dtantsur of 728123 * Fix internal server error on deleting nodes with allocations * Fix agent token and URL handling during fast-track deployment * Fix Redfish handle no continuous override boot src * Allow node vif attach to specify port\_uuid or portgroup\_uuid * CI: make grenade voting again * devstack: Use uwsgi binary from path; temporary disable grenade * Document http\_basic auth strategy * doc: fix endpoint\_override for inspector * Use assertCountEqual instead of assertItemsEqual * Use unittest.mock instead of mock * Fix requirements check * Add virtualbmc as an extra dependency * [doc] Fix indentation in admin/troubleshooting * Feature: Add raid configuration support for ibmc driver * [doc] Extend trouble shooting docs for node stuck in wait states * devstack: increase concurrency and worker timeout for sushy-tools * Enforce autospec in test\_snmp * Enforce autospec in test\_ipxe and test\_pxe * Use the new extension call for getting partition UUIDs * Enforce autospec in test\_deploy\_utils module * Follow-up patch for ilo out-of-band \`erase\_devices\` * Add troubleshooting docs on -ing state failures * Enable Basic HTTP authentication middleware * [doc] Harmonize the prompt for bash commands * [doc] Check placement in case of "No valid host found" * Fix fast track when exiting cleaning * Block port deletions where vif is present * Enforce autospec in test\_agent\_base module * Enforce autospec in test\_agent module * Move introspection timeout option to base 
job * Add function definition handling * Add validate decorator to expose * Remove ironic-grenade-dsvm * Fix redfish-virtual-media file permission * Enforce autospec in ansible tests * Change default to \`\`False\`\` for \`\`use\_ipmitool\_retries\`\` * Enforce autospec in drac tests * Extend vmedia job timeout * agent: do not hard power off if soft power off actually succeeds * New configuration parameter to use ipmitool retries * Add IPv6 ci Job * Increase callback timeout * Set default tempest\_test\_timeout for ironic-base * CI: use the default devstack cirros version * Cap jsonschema 3.2.0 as the minimal version * change devstack ipa-builder default to BRANCHLESS\_TARGET\_BRANCH * Add the noop management interface to the redfish hardware type * Add agent\_token param to docstrings * Make redfish boot interface to use \`network\_data\` * Add \`get\_node\_network\_data\` to non-Neutron NetworkInterface * Add \`get\_node\_network\_data\` method to NetworkInterface * Change IRONIC\_VM\_SPECS\_RAM default * Do not fail when raw image is larger than memory for http * Add py38 package metadata * [Trivial]Add missing white space between words * Fix pygments style * Remove iDRAC Redfish boot device known issue * Revert "Deprecate ibmc" * Increase timeout for introspection in ironic-inspector-tempest * Collect tftpd info from journald * Switch to newer openstackdocstheme and reno versions * Fix image type reference * rm duplicate word 'that' * RAID docs: fix an invalid json example * Change ibmcclient compatible version * Raise the JSON RPC connection timeout to 2 minutes and disable retries * CI: add test-requirements to irrelevant-files * Encapsulate auth\_token middleware * Pin the python-ibmcclient upper bounds due to six usage * Collect ramdisk logs also during cleaning * Doc building: Silence automated\_steps * Remove deprecated [cinder]url * Remove deprecated [DEFAULT]fatal\_exception\_format\_errors * Remove translation sections from setup.cfg * Add ussuri 
release notes version * Fix the wrong description * Cap pycodestyle explicitly to be < 2.6.0 * Update lower-constraints.txt * Fix pep8 * Increase timeout value for ironic-base job * CI: raise DIB jobs memory to 3 GiB * If the "[conductor]XXX\_timeout" is less than 0,disable periodic task * [Trivial] Fix awkward mocking in redfish boot tests * redfish: handle hardware that is unable to set persistent boot * Change the name of the inspector job * redfish: split reboot into power off followed by power on * Enforce autospec in some unit tests modules * Add timeout and retries to JSON RPC client * Fix 3rd party driver mocks and related tests * Temporary add per-file-ignores to flake8 tests * Mark more configuration options as reloadable * Silence debug messages from oslo\_messaging * Do all serialization in the expose decorator * Use tempest\_plugins Zuul CI role * Add \`network\_data\` field to ironic node object * Fix tempest plugin location * Update grenade job * Remove testscenarios, testrepository and Babel from test-requirements * Monkey patch original current\_thread \_active * Native zuulv3 grenade job for ironic * Remove [conductor]api\_url * Imported Translations from Zanata * Switch to unittest mock * Refactor RedfishVirtualMediaBoot * Add RPC objects for deployment API * Add Python3 victoria unit tests * Update master for stable/ussuri 15.0.0 ------ * CI: exclude the inspection abort tests from the inspector-redfish-vmedia job * CI: repair the SNMP job and make it voting * Add ironic-python-agent-builder to grenade projects and use netboot * Update python-dracclient version * Fix configure-vm.py and xml file for centos8 * Add PXE reset known issue to the docs * Don't break UEFI install with older IPAs * Fix supported sushy-oem-idrac version * Implements: Reactive HUAWEI ibmc driver * Fix agent\_client handling of embedded errors * In-band deploy steps: correctly wipe driver\_internal\_info * Add ironic-standalone-redfish * CI: migrate UEFI jobs to Cirros 
0.5.1 * Restore missing node.save() in agent\_base.py * Add link to other Redfish parms to iDRAC doc * Log when IPA fallback occurs on bootloader install * Delay validating deploy templates until we get all steps * Support executing in-band deploy steps * Upgrade flake8-import-order version to 0.17.1 * Stop configuring install\_command in tox * Prepare release notes/docs for 15.0 release * Ironic 15.0 prelude * DRAC: Added redfish management interface issue * Fix SpanLength calculation for DRAC RAID configuration * Fix RAID configuration with idrac-wsman interface * Revert "Generalize ISO building for virtual media driver" * Add ironic 15.0 release mapping * Fixes unusable Guru meditation report * Don't use wsme test webapp for patch tests * Centralise imports of wsme types * Update iDRAC doc about soft power off timeout * Implement the bios-interface for idrac-wsman driver * Improve the command status checks in the agent's process\_next\_step * Change [deploy]/default\_boot\_option to local * Update iDRAC doc about vendor passthru timeout * Use trailing slash in the agent command URL * Fix missing print format in log messages * Extend timeout on CI job with automated cleaning * Fix issue where server fails to reboot * Add my new address to .mailmap * "dual stack" support for PXE/iPXE * Generalize ISO building for virtual media driver * Remove six minions * Increase VM RAM value in local.conf example * Release reservation when stopping the ironic-conductor service * Update jobs description * Change default ram value * Added node multitenancy doc * Support burning configdrive into boot ISO * [doc] Remove the device selection limitation for Software RAID * Add sushy-cli to client libraries release list * Fix AttributeError in check allowed port fields * Fix gunicorn name on Py3@CentOS7 in devstack * Add node lessee field * Software RAID: Pass the boot mode to the IPA * Refactor AgentBase.heartbeat and process\_next\_step * [doc] Images need some metadata for software
RAID * Drop netaddr - use netutils.is\_valid\_ipv6() * Allow INSPECTWAIT state for lookup * Improve \`redfish\` set-boot-device behaviour * Improve \`redfish\` set-boot-mode implementation * Change multinode job to voting * Cleanup Python 2.7 support * Use auth values from neutron conf when managing Neutron ports * Fetch netmiko session log * Doc - IPv6 Provisioning * Additional IP addresses to IPv6 stateful ports * Add network\_type to port local\_link\_connection * Make oslo.i18n an optional dependency * Make oslo.reports an optional dependency * Do not autoescape all Jinja2 templates * Make deploy step failure logging indicate the error * Fix the remaining hacking issues * Bump hacking to 3.0.0 * Extend install\_bootloader command timeout * Document deploy\_boot\_mode and boot\_option for standalone deployments * Remove future usage * Fix enabled\_hardware\_types from idrac-wsman to idrac * Document our policies for stable branches * Retry agent get\_command\_status upon failures * Add troubleshooting on IPMI section * Default IRONIC\_RAMDISK\_TYPE to dib * Generalize clean step functions to support deploy steps * Raise human-friendly messages on attempt to use pre-deploy steps drivers * Hash the rescue\_password * DRAC: Fix a failure to create virtual disk bug * [doc] Add documentation for retirement support * Add info on how to enable ironic-tempest-plugin * Follow-up releasenote use\_secrets * Add indicators REST API endpoints * Do not use random to generate token * Signal agent token is required * Support centos 7 rootwrap data directory * Refactoring: split out wrap\_ipv6 * Refactoring: move iSCSI deploy code to iscsi\_deploy.py * Clean up nits from adding additional node update policies * Allow specifying target devices for software RAID * Documentation clarifications for software RAID * Drop rootwrap.d/ironic-lib.filters file * Expand user-image doc * Move ipmi logging to a separate option * Change readfp to read\_file * Make image\_checksum optional if 
other checksum is present * Remove compatibility with pre-deploy steps drivers * Extend power sync timeout for Ericsson SDI * Skip clean steps from 'fake' interfaces in the documentation * Rename ironic-tox-unit-with-driver-libs-python3 * Send our token back to the agent * Enable agent\_token for virtual media boot * Add separate policies for updating node instance\_info and extra * Follow up to console port allocation * Change force\_raw\_images to use sha256 if md5 is selected * Make reservation checks caseless * [doc] Missing --name option * Bump minimum supported ansible version to 2.7 * Set abstract for ironic-base * Refactoring: move generic agent clean step functions to agent\_base * Docs: split away user image building and highlight whole disk images * Redfish: Add root\_prefix to Sushy * Cleanup docs building * Rename \`create\_isolinux\_image\_for\_uefi\` function as misleading * Finalize removal of ipxe\_enabled option * Start removing ipxe support from the pxe interface * Pre-shared agent token * DRAC: Fix RAID create\_config clean step * Expose allocation owner to additional policy checks * Project Contributing updates for Goal * Refactoring: rename agent\_base\_vendor to agent\_base * Use FIPS-compatible SHA256 for comparing files * Revert "Move ironic-standalone to non-voting" * Move ironic-standalone to non-voting * Make \`redfish\_system\_id\` property optional * Lower tempest concurrency * Refactoring: finish splitting do\_node\_deploy 14.0.0 ------ * Fix up release notes for 14.0.0 * Actually use ironic-python-agent from source in source builds * Update release mappings for Ussuri * Automatic port allocation for the serial console * Remove the [pxe]ipxe\_enabled configuration option * tell reno to ignore the kilo branch * Update API version history for v1.61 * [Trivial] Remove redundant brackets * Split cleaning-related functions from manager.py into a new module * Split deployment-related functions from manager.py into a new module * Disable 
debug output in doc building * Fix bash comparisons for grenade multinode switch * Fix jsonpatch related tests * Fix ipxe interface to perform ipxe boot without ipxe\_enabled enabled * Fix typo in setup-network.sh script * Support node retirement * Make ironic-api compatible with WSGI containers other than mod\_wsgi * Don't require root partition when installing a whole disk image * Clean up api controller base classes * Deprecate irmc hardware type * Subclass wsme.exc.ClientSideError * Use str type instead of wsme.types.text * Use bionic job for bifrost integration * Follow up to root device hints in instance\_info * Deprecate ibmc * Fix incorrect ibmc\_address parsing on Python 3.8 * Fix entry paths for cleaning and deployment * Nodes in maintenance didn't fail, when they should have * Fix API docs for target\_power\_state response * Document using CentOS 8 DIB IPA images for Ussuri and newer * Lower RAM for DIB jobs to 2 GiB * Remove reference to deprecated [disk\_utils]iscsi\_verify\_attempts * Add node info and exc name when getting rootfs info from Glance * Fix fast\_track + agent\_url update fix * CI: make the metalsmith job voting and gating * devstack: install bindep for diskimage-builder * Allow reading root\_device from instance\_info * Implement managed in-band inspection boot for ilo-virtual-media * Add a missing versionadded for configdrive[vendor\_data] * Make qemu hook running with python3 * Refactor glance retry code to use retrying lib * Fix duplicated words issue like "are are placed" * devstack: switch to using CentOS 8 DIB ramdisks by default * Remove the deprecated [glance]glance\_num\_retries * Fix missing job\_id parameter in the log message * Fix get\_boot\_option logic for software raid * Allow node owners to administer associated ports * Explicitly use ipxe as boot interface for iPXE testing * Replace disk-image-create with ironic-python-agent-builder * Remove those switches for python2 * Fix invalid assertIsNone statements * Add 
librsvg2\* to bindep * Stop using six library * Add notes on the pxe template for aarch64 * Enforce running tox with correct python version based on env * Tell the multinode subnode and grenade to use /opt * Disable automated clean on newer jobs * Extend service timeout * Tune down multinode concurrency * Restrict ability to change owner on provisioned or allocated node * Correct power state handling for managed in-band inspection * Implement managed in-band inspection boot for redfish-virtual-media * redfish-vmedia: correctly pass ipa-debug * Add a CI job to UEFI boot over Redfish virtual media * Fix use of urlparse.urljoin * Import importlib directly * Increasing BUILD\_TIMEOUT value for multinode job * Remove deprecated ironic-agent element * Add owner to allocations and create relevant policies * CI: do not enable rescue on indirect jobs * Update nova os-server-external-events response logic * DRAC: Drives conversion from raid to jbod * Changed to bug fix to follow-on idrac job patch * Fixes issue with checking whether ISO is passed * docs: add a missing heading * Add a CI job to legacy boot over Redfish virtual media * Fix UEFI NVRAM collision in devstack * Remove references to 'firewall\_driver' * Make redfish CI jobs pulling sushy-tools from git * Prevent localhost from being used as ironic-inspector callback URL * Add an ironic-inspector job with managed boot * Add timeout when querying agent's command statuses * docs: update the local development quickstart to use JSON RPC * Drop python 2.7 support and testing * Remove unused migration tests * Wire in in-band inspection for PXE boot and neutron-based networking * Foundation for boot/network management for in-band inspection * Add \`instance\_info/kernel\_append\_params\` to \`redfish\` * Add indicator management to redfish hw type * Mock out the correct greenthread sleep method * Don't install syslinux-nonlinux on rhel7 * Ensure text-only console in devstack * Pass correct flags during PXE cleanup in 
iPXEBoot * Drop [agent]heartbeat\_timeout * Remove old online migration codes * Block ability update callback\_url * Stop supporting incompatible heartbeat interfaces * Allow node owners to administer nodes * Fix variable name in cleanup\_baremetal\_basic\_ops func * Switch legacy jobs to Py3 * Ensure \`isolinux.bin\` is present and configured in devstack * Fix \`snmp\` unit test * Backward compatibility for the ramdisk\_params change * Allow vendor\_data to be included in a configdrive dict * Improve iDrac Documentation * Correct handling of ramdisk\_params in (i)PXE boot * Software RAID: Identify the root fs via its UUID from image metadata * Change integration jobs to run under Python3 * Using loop instead of with\_X * CI: add ironic-python-agent-builder to the multinode job * Update release with information about zuul job * Add virtual media boot section to the docs * CI: limit rescue testing to only two jobs * Mask secrets when logging in json\_rpc * Use new shiny Devices class instead of old ugly Device * Switch to ussuri job * Do not ignore 'fields' query parameter when building next url * Update sushy library version * Minor string formatting follow-up to idrac jbod patch * Document systemd-nspawn as a nice trick for patching a ramdisk * DRAC: Drives conversion from JBOD to RAID * Setup ipa-builder before building ramdisk * Fix EFIBOOT image upload in devstack * Fix drive sensors collection in \`redfish\` mgmt interface * Add Redfish vmedia boot interface to idrac HW type * Change MTU logic to allow for lower MTUs automatically * DRAC: Fix a bug for clear\_job\_queue clean step with non-BIOS pending job * Documentation for iLO hardware type deploy steps * ironic-tempest-functional-python3 unused variables * docs: use openstackdocstheme extlink extension * grub configuration should use user kernel & ramdisk * Raising minimum version of oslo.db * DRAC: Fix a bug for delete\_config with multiple controllers * Use correct function to stop service * Fix devstack 
installation failure * DRAC: Fix a bug for job creation when only required * Add a CI job with a DIB-built ramdisk * Remove old online migrations and new models * Remove earliest version from releasing docs, update examples * Change log level based on node status * enable\_python3\_package should not be necessary anymore * Update doc for CI * Add versions to release notes series * Document pre-built ramdisk images (including DIB) * Run DIB with tracing enabled and increase the DHCP timeout * Improve documentation about releasing deliverables * Update master for stable/train 13.0.0 ------ * Update release mappings for Train * Release notes cleanup for 13.0.0 (mk2) * Document PXE retries * Update env. variables in the documentation * Add iDRAC RAID deploy steps * Don't resume deployment or cleaning on heartbeat when polling * Make multinode jobs non-voting * devstack: wait for conductor to start and register itself * Allow retrying PXE boot if it takes too long * Lower MTU override * Devstack: Fix iPXE apache log location bug * Serve virtual media boot images from ironic conductor * Add Redfish inspect interface to idrac HW type * Add deploy steps for iLO Management interface * Do not log an error on heartbeat in deploying/cleaning/rescuing * Add an option to abort cleaning and deployment if node is in maintenance * CI: move libvirt images to /opt for standalone and multinode jobs * Add first idrac HW type Redfish interface support * Remove cisco references and add release note * Add \`FLOPPY\` boot device constant * Combined gate fixes * Read in non-blocking fashion when starting console * Release notes cleanup for 13.0.0 * CI: move the fast-track job to the experimental pipeline * Remove support for CoreOS images * Fix gate failure related to jsonschema * Minor: change a misleading InvalidState error message * Build pdf doc * iLO driver doc update * Use openstack cli in image creation guide * iLO driver doc update * devstack: save iPXE httpd logs * Prelude for 
13.0.0 * Add a release note for iscsi\_verify\_attempts deprecation * Fix typo in handling of exception FailedToGetIPAddressOnPort * Add iLO RAID deploy steps * add table of available cleaning steps to documentation * Prepare for deprecation of iscsi\_verify\_attempts in ironic-lib * Add software raid release note to ironic * Add ironic-specs link to readme.rst * Fixed problem with UEFI iSCSI boot for nic adapters * DRAC : clear\_job\_queue clean step to fix pending bios config jobs * Add deploy steps for iLO BIOS interface * Follow-up for deploy steps for Redfish BIOS interface * Adding file uri support for ipa image location * Adjust placement query for reserved nodes * Add indicator management harness to ManagementInterface * Adds dhcp-all-interfaces element * Do not wait for console being started on timeout * Out-of-band \`erase\_devices\` clean step for Proliant Servers * Pass target\_raid\_config field to ironic variable * Allow deleting unbound ports on active node * Follow up to Option to send all portgroup data * Lower standalone concurrency to 3 from 4 * Make ironic\_log Ansible callback Python 3 ready * Remove ironic command bash completion * devstack: Fix libvirtd/libvirt-bin detection * Add iPXE boot interface to 'ilo' hardware type * Move to unsafe caching * Allow to configure additional ipmitool retriable errors * Fix exception on provisioning with idrac hw type * Add logic to determine Ironic node is HW or not into configure\_ironic\_dirs * Install sushy if redfish is a hardware type * Add \`filename\` parameter to Redfish virtual media boot URL * Add set\_boot\_device hook in \`redfish\` boot interface * Add Redfish Virtual Media Boot support * Follow-up to power sync reno * Add new method 'apply\_configuration' to RAIDInterface * Do not tear down node upon cleaning failure * Switch non-multinode jobs to new-style neutron services * Add deploy steps for Redfish BIOS interface * Ansible: fix partition\_configdrive for logical root\_devices * Support 
power state change callbacks to nova using ksa\_adapter * Docu: Fix broken link * Fixing broken links * DRAC : Fix issue for RAID-0 creation for multiple disks for PERC H740P * Uses IPA-B to build in addition to CoreOS * Asynchronous out of band deploy steps fails to execute * Clean up RAID documentation * Enable testing software RAID in the standalone job * devstack: allow creating more than one volume for a VM * Allow configuring global deploy and rescue kernel/ramdisk * Fix missing print format error * Update software RAID configuration documentation * Use HTTPProxyToWSGI middleware from oslo * RAID creation fails with 'ilo5' RAID interface * RAID create fails if 'controller' is missing in 'target\_raid\_config' * Use openstacksdk for accessing ironic-inspector * CI Documentation * Enable no IP address to be returned * Change debug to error for heartbeats * CI: stop using pyghmi from git master * Fixes power-on failure for 'ilo' hardware type * Creation of UEFI ISO fails with efiboot.img * Remove deprecated Neutron authentication options * Follow-up to the IntelIPMIHardware patch * Ansible driver: fix deployment with serial specified as root device hint * Enable testing adoption in the CI * Fix serial/wwn gathering for ansible+python3 * Update api-ref location * IPA does not boot up after cleaning reboot for 'redfish' bios interface * Revert "Add logic to determine Ironic node is HW or not into configure\_ironic\_dirs" * Filter security group list on the ID's we expect * Clean lower-constraints.txt * [Trivial] Fix is\_fast\_track parameter doc string * Failure in get\_sensor\_data() of 'redfish' management interface * Abstract away pecan.request/response * Fix potential race condition on node power on and reboot * iLO firmware update fails with 'update\_firmware\_sum' clean step * Bump keystonauth and warlock versions * Don't install ubuntu efi debs on cent * Remove the PXE driver page * Ansible module: fix deployment for private and/or shared images * Add logic 
to determine Ironic node is HW or not into install\_ironic * Add logic to determine Ironic node is HW or not into configure\_ironic\_dirs * Deal with iPXE boot interface incompatibility in Train * Bump openstackdocstheme to 1.20.0 * Remove deprecated app.wsgi script * devstack: Install arch specific debs only when deploying to that arch * DRAC: Upgraded RAID delete\_config cleaning step * Fix invalid assert state * CI: remove quotation marks from TEMPEST\_PLUGINS variable * Remove CIMC/UCS drivers * Add IntelIPMIHardware * Collect sensor data in \`\`redfish\`\` hardware type * [Trivial] Software RAID: Documentation edits * Software RAID: Add documentation * Blacklist sphinx 2.1.0 (autodoc bug) * Follow-up on UEFI/Grub2 job * Adds bandit template and exclude some of tests * Add documentation for IntelIPMI hardware * Add check on get\_endpoint returning None * Option to send all portgroup data 12.2.0 ------ * Replace deprecated with\_lockmode with with\_for\_update * Spruce up release notes for 12.2.0 release * Update API history and release mapping for 12.2.0 * Refactoring: flatten the glance service module * Remove the deprecated glance authentication options * DRAC: Adding reset\_idrac and known\_good\_state cleaning steps * devstack: add missing variables for ironic-python-agent-builder * Remove ipxe tags when ipx6 is in use * Update qemu hook to facilitate Multicast * redfish: handle missing Bios attribute * Fix :param: in docstring * Updates ironic for using ironic-python-agent-builder * Do not log an exception if Allocation is deleted during handling * Add release note updating status of smartnics * Switch to use exception from ironic-lib * Change constraints opendev.org to release.openstack.org * Incorporate bandit support in CI * Remove elilo support * Ansible module: fix configdrive partition creation step * Remove deprecated option [DEFAULT]enabled\_drivers * Fix regex string in the hacking check * Add api-ref for allocation update * Add a pxe/uefi/grub2 
CI job * Bump lower mock version to 3.0.0 * Start using importlib for Python 3.x * Remove XML support in parsable\_error middleware * Fix binary file upload to Swift * fix typo in code comment * Software RAID: Trigger grub installation on the holder disks * Move stray reno file * Trivial: correct configuration option copy-pased from inspector * Remove commit\_required in iDRAC hardware type * Make the multinode grenade job voting again * devstack: configure rabbit outside of API configuration * Blacklist python-cinderclient 4.0.0 * Publish baremetal endpoint via mdns * Fix inaccurate url links * Update sphinx requirements * Allocation API: correct setting name to None * Allocation API: backfilling allocations * Fix GRUB config path when building EFI ISO * Add DHCP server part to make the document more detail * Do not try to return mock as JSON in unit tests * Remove deprecated option [ilo]power\_retry * Add API to allow update allocation name and extra field * Update Python 3 test runtimes for Train * Replace hardcoded "stack" user to $STACK\_USER * Run vbmcd as stack user in devstack * Adding enabled\_boot\_interface attribute in tempest config * Add openstack commands in node deployment guide * Add a high level vision reflection document * Add iDRAC driver realtime RAID creation and deletion * Correct spelling errors * Replace git.openstack.org URLs with opendev.org URLs * Direct bridge to be setup * Fix pyghmi path * OpenDev Migration Patch * Removes \`hash\_distribution\_replicas\` configuration option * Truncate node text fields when too long * Add note for alternative checksums * Make the JSON RPC server work with both IPv4 and IPv6 * Jsonschema 3.0.1: Binding the schema to draft-04 * Place upper bound on python-dracclient version * devstack: Remove syslinux dependency * Do not try to create temporary URLs with zero lifetime * Ansible module: fix partition\_configdrive.sh file * Use the PUBLIC\_BRIDGE for vxlan * Move devstack emulators configs under 
/etc/ironic * Uncap jsonschema in requirements * Split ibmc power/reboot classes * Temporarily mark grenade multinode as non-voting * Improve VirtualBMC use in Devstack * Run IPMI, SNMP and Redfish BMC emulators as stack * Add UEFI firmware to Redfish emulator config * Add systemd unit for sushy emulator in devstack * Ansible module: fix clean error handling * [Trivial] Fix typo in agent\_base\_vendor unit test * Fix exception generation errors * Add a request\_timeout to neutron * doc: update ibmc driver support servers document * Ansible module fix: stream\_url * Make it possible to send sensor data for all nodes * Slightly rephrase note in tenant networking docs * Bump sphinxcontrib-pecanwsme to 0.10.0 * ipmi: Ignore sensor debug data * Make 'noop' the explicit default of default\_storage\_interface * Docs: correct expected host format for drac\_address * Check for deploy.deploy deploy step in heartbeat * Workaround for sendfile size limit * Workaround for uefi job with ubuntu bionic * Replace openstack.org git:// URLs with https:// * Remove vbmc log file in devstack * Add versions to release notes series * Imported Translations from Zanata * Update master for stable/stein 12.1.0 ------ * Fix capabilities passed as string in agent prepare * Respect $USE\_PYTHON3 settings for gunicorn * Add systemd unit for vbmcd in devstack * Workaround for postgres job with ubuntu bionic * Add release note on conntrack issue on bionic * Update release-mappings and api version data for Stein release * Pass kwargs to exception to get better formatted error message * Advance python-dracclient version requirement * Add prelude and update release notes for 12.1.0 * Optimize: HUAWEI iBMC driver utils * Set boot\_mode in node properties during OOB Introspection * Fix idrac driver unit test backwards compat issue * Deploy Templates: factor out ironic.conductor.steps * Make metrics usable * Kg key for IPMIv2 authentication * Add fast-track testing * fast tracked deployment support * 
Update doc for UEFI first * Fix lower-constraints job * Fix idrac Job.state renamed to Job.status * Deprecates \`hash\_distribution\_replicas\` config option * Add Huawei iBMC driver support * Fix misuse of assertTrue * Allow methods to be both deploy and clean steps * Adding ansible python interpreter as driver\_info * Return 405 for old versions in allocation and deploy template APIs * honor ipmi\_port in serial console drivers * Follow up to available node protection * Migrate ironic-grenade-dsvm-multinode-multitenant job to Ubuntu Bionic * Deploy templates: conductor and API nits * Deploy Templates: documentation * Fixing a bash test in devstack ironic lib * Deploy Templates: API reference * Fix formatting issue in doc * Update dist filter for devstack ubuntu * Add a non-voting metalsmith job for local boot coverage * Document building configdrive on the server side * Check microversions before validations for allocations and deploy templates * Add python3 unit test with drivers installed * Fix missing print format error * Fix typo and docstring in pxe/ipxe * Stop requiring root\_gb for whole-disk images * driver-requirements: mark UcsSdk as Python 2 only * Set boot\_mode in node properties during Redfish introspection * Add option to set python interpreter for ansible * Document using a URL for image\_checksum * [docs] IPv6 support for iLO * Temporary marking ironic-standalone non-voting * Allow building configdrive from JSON in the API * Allocation API: optimize check on candidate nodes * Fix TypeError: \_\_str\_\_ returned non-string (type ImageRefValidationFailed) * Deploy templates: API & notifications * Deploy templates: conductor * Drop installing python-libvirt system package * Test API max version is in RELEASE\_MAPPINGS * Update the log message for ilo drivers * Deploy templates: fix updating steps in Python 3 * Fix pysendfile requirement marker * Add option to protect available nodes from accidental deletion * Deploy Templates: add 'extra' field to 
DB & object * Trivial: Fix error message when waiting for power state * Allocation API: fix minor issues in the API reference * Allocation API: reference documentation * Adding bios\_interface reference to api docs * Set available\_nodes in tempest conf * Update the proliantutils version in documentation * [trivial] Removing python 3.5 template jobs * Deploy Templates: Fix DB & object nits * Add check for object versions * [Trivial] Fix incorrect logging in destroy\_allocation * Allocation API: taking over allocations of offline conductors * Allocation API: resume allocations on conductor restart * Devstack - run vbmc as sudo * Documentation update for iLO Drivers * Follow up - API - Implement /events endpoint * Follow up to node description * ensure that socat serial proxy keeps running * Deprecate Cisco drivers * Follow up to ISO image build patch * API - Implement /events endpoint * Add a requisite for metadata with BFV * [Follow Up] Add support for Smart NICs * Support using JSON-RPC instead of oslo.messaging * Deploy templates: data model, DB API & objects * [Follow Up] Expose is\_smartnic in port API * Prioritize sloppy nodes for power sync * Expose conductors: api-ref * Remove duplicated jobs and refactor jobs * Allocation API: fix a small inconsistency * Expose is\_smartnic in port API * [Trivial] Allocation API: correct syntax in API version history docs * Allocation API: REST API implementation * Make power sync unit test operational * Allow case-insensitivity when setting conductor\_group via API * Optionally preserve original system boot order upon instance deployment * Add support for Smart NICs * Add a voting CI job running unit tests with driver-requirements * [Refactor] Make caching BIOS settings explicit * [docs] OOB RAID implementation for ilo5 based HPE Proliant servers * Make iLO BIOS interface clean steps asynchronous * Provides mount point as cinder requires it to attach volume * Add description field to node: api-ref * Add description field 
to node * Fix test for 'force\_persistent\_boot\_device' (i)PXE driver\_info option * Fix iPXE boot interface with ipxe\_enabled=False * Allocation API: conductor API (without HA and take over) * Removing deprecated drac\_host property * Add is\_smartnic to Port data model * Remove uses of logger name "oslo\_messaging" * [Trivial] Fix typo in noop interface comment * Remove duplicated fault code * Fix listing nodes with conductor could raise * Parallelize periodic power sync calls follow up * Build ISO out of EFI system partition image * Make versioned notifications topics configurable * Build UEFI-only ISO for UEFI boot * Parallelize periodic power sync calls * Limit the timeout value of heartbeat\_timeout * Replace use of Q\_USE\_PROVIDERNET\_FOR\_PUBLIC * Make ipmi\_force\_boot\_device more user friendly * Follow-up logging change * Remove dsvm from zuulv3 jobs * Allocation API: allow picking random conductor for RPC topic * Fix updating nodes with removed or broken drivers * Fix ironic port creation after Redfish inspection * Allocation API: minor fixes to DB and RPC * Allocation API: allow skipping retries in TaskManager * Allocation API: database and RPC * Allow missing \`\`local\_gb\`\` property * Fix typo in release note * Fix IPv6 iPXE support * OOB RAID implementation for ilo5 based HPE Proliant servers * Fix SushyError namespacing in Redfish inspection * Allow disabling TFTP image cache * Add pxe template per node * Fix the misspelling of "configuration" * Switch to cirros 0.4.0 * Update tox version to 2.0 * Disable metadata\_csum when creating ext4 filesystems * Switch the default NIC driver to e1000 * Change openstack-dev to openstack-discuss * Fix XClarity driver management defect * Ignore newly introduced tables in pre-upgrade versions check * Switch CI back to xenial 12.0.0 ------ * Add "owner" information field * Introduce configuration option [ipmi]ipmi\_disable\_timeout * Enroll XClarity machines in Ironic's devstack setting * spelling error * 
api-ref: update node.resource\_class description * Add a note regarding IPA multidevice fix * Allow disabling instance image cache * Add a prelude for ironic 12.0 * Set proper version numbering * Change multinode jobs to default to local boot * Follow-up Retries and timeout for IPA command * Fix "import xxx as xxx" grammar * Kill misbehaving \`ipmitool\` process * Fix OOB introspection to use pxe\_enabled flag in idrac driver * Add configurable Redfish client authentication * Expose conductors: api * Fix node exclusive lock not released on console start/restart * Fix IPv6 Option Passing * Let neutron regenerate mac on port unbind * Slim down grenade jobs * Extend job build timeout * Mark several tests to not test cleaning * Add BIOS interface to Redfish hardware type * Avoid cpu\_arch None values in iscsi deployments * Expose conductors: db and rpc * Fix Chinese quotes * Add ipmi\_disable\_timeout to avoid problematic IPMI command * Correct author email address * Ensure we unbind flat network ports and clear BM mac addresses * Retries and timeout for IPA command * Support for protecting nodes from undeploying and rebuilding * Add download link apache configuration with mod\_wsgi * spelling error * Add Redfish inspect interface follow up * Add the noop management interface to the manual-management hardware type * Add missing ws separator between words * Switch ironic-tempest-...-tinyipa-multinode to zuulv3 * Add a non-voting bifrost job to ironic * Increase RAM for the ironic node in UEFI job * Reuse Redfish sessions follow up * Improve logs when hard linking images fails * Don't fail when node is in CLEANFAIL state * Fix ipv6 URL formatting for pxe/iPXE * Fix redfish test\_get\_system\_resource\_not\_found test * Improve sushy mocks * Recommend to set boot mode explicitly * Add Redfish inspect interface * Fix CPU count returned by introspection in Ironic iDRAC driver * Add ironic-status upgrade check command framework * Passing thread pool size to IPA for parallel 
erasure * Change BFV job to use ipxe interface * [devstack] Allow setting TFTP max blocksize * Reuse Redfish sessions * Migration step to update objects to latest version * Cleanup of remaining pxe focused is\_ipxe\_enabled * Remove the xclarity deprecation * Follow-up to fix not exist deploy image of patch 592247 * Remove pywsman reference * Fix DHCPv6 support * Revert "Add openstack/placement as a required project for ironic-grenade\*" * Add api-ref for conductor group * Follow-up patch for I71feefa3d0593fd185a286bec4ce38607203641d * Fix ironic developer quickstart document * Add note to pxe configuration doc * Create base pxe class * Wrap up PXE private method to pxe\_utils move * Enhanced checksum support * Enable configuration of conversion flags for iscsi * Document how to implement a new deploy step * Refactor API code for checking microversions * Allow streaming raw partition images * Remove Vagrant * ipxe boot interface * Remove oneview drivers * Completely remove support for deprecated Glance V1 * Avoid race with nova on power sync and rescue * Log a warning for Gen8 Inspection * Doc: Adds cinder as a service requires creds * Fix unit test run on OS X * Fixes a race condition in the hash ring code * Add automated\_clean field to the API * Stop console at tearing down without unsetting console\_enabled * Add functionality for individual cleaning on nodes * Documentation for 'ramdisk' deploy with 'ilo-virtual-media' boot * Add documentation for soft power for ilo hardware type * Add documentation for 'inject nmi' for ilo hardware type * Remove unnecessary checks in periodic task methods * Remove token expiration * Adds support for soft power operations to 'ilo' power interface * Add openstack/placement as a required project for ironic-grenade\* * Remove tox checkconfig * Add admin documentation for rescue mode in iLO driver * Correct headings in README.rst * Minor fixes for docs on changing hardware types * Add admin documentation for rescue interface * 
pxe/ipxe: Move common calls out pxe.py * Switch ironic-tempest-dsvm-functional-python3 to zuulv3 * Switch ironic-tempest-dsvm-functional-python2 to zuulv3 * Switch grenade nic driver to e1000 * Remove ironic experimental jobs * Restore the nova-api redirect * Update docs to portgroup with creating windows images * Use templates for cover and lower-constraints * Remove wrong install-guide-jobs in zuul setup * Fix grenade tests * Add a more detailed release note for Dell BOSS RAID1 fix * Honors return value from BIOS interface cleansteps * Reuse checksum calculation from oslo * Adds support for 'ramdisk' deploy with 'ilo-virtual-media' boot * Remove inspecting state support from inspect\_hardware * Adds support for 'Inject NMI' to 'ilo' management interface * Docs for agent http provisioning * Ensure pagination marker is always set * Direct deploy serve HTTP images from conductor * Fix doc builds for ironic * Fix async keyword for Python 3.7 * Add vendor step placement suggestion * Prevent HTML from appearing in API error messages * Replace assertRaisesRegexp with assertRaisesRegex * Add version discovery information to the /v1 endpoint * Replace assertRaisesRegexp with assertRaisesRegex * Fix provisioning failure with \`ramdisk\` deploy interface * Minor fixes to contributor vision * Add automated\_clean field * Use HostAddressOpt for opts that accept IP and hostnames * Remove the duplicated word * add python 3.6 unit test job * switch documentation job to new PTI * import zuul job settings from project-config * Prevents deletion of ports for active nodes * Disable periodic tasks if interval set to 0 * Reformat instructions related with various OS * Imported Translations from Zanata * Add conductor\_group docs * Switch ironic-tempest-dsvm-ironic-inspector too zuulv3 * Switch ironic-tempest-dsvm-bfv too zuulv3 * A minor update to documentation of \`ilo\` hardware type * Imported Translations from Zanata * Update reno for stable/rocky * Fix not exist deploy image 
within irmc-virtual-media booting 11.1.0 ------ * Switch the "snmp" hardware type to "noop" management * Add "noop" management and use it in the "ipmi" hardware type * Update docs on ironic boot mode management * Follow-up to always link MAC address files * Simplify subclasses for PXERamdiskDeploy * Node gets stuck in ING state when conductor goes down * Add notes on Redfish boot mode management * Prepare for Rocky release * Update the reno for the reset\_interfaces feature * Use max version of an object * A vision * Improve the "Ironic behind mod wsgi" documentation * Deploy steps documentation * Mark the ZeroMQ driver deprecated * Remove rabbit\_max\_retries option * Fix iDRAC hardware type does not work with UEFI * Pass prep\_boot\_part\_uuid to install\_bootloader for ppc64\* partition images * Remove redundant swift vars * Document locale requirement for local testing * Switch ironic-tempest-dsvm-ipa-partition-pxe\_ipmitool-tinyipa-python3 * Improve doc of Node serial console * Follow-up patch to ramdisk interface * Ramdisk deploy driver doc * Change PXE logic to always link macs with UEFI * Add documentation for BIOS settings * Fix for failure of cleaning for iRMC restore\_bios\_config * Refactor RAID configuration via iRMC driver * Adds ramdisk deploy driver * Follow-up patch for 7c5a04c1149f14900f504f32e000a7b4e69e661f * Switch ironic-tempest-dsvm-ipa-partition-uefi-pxe\_ipmitool-tinyipa * Switch ironic-tempest-dsvm-ipa-wholedisk-bios-pxe\_snmp-tinyipa * Switch ironic-tempest-dsvm-ipa-wholedisk-bios-agent\_ipmitool-tinyipa * Switch ironic-tempest-dsvm-pxe\_ipmitool-postgres * Documentation update of iLO BIOS settings * Follow-up to improve pep8 checking with hacking * Fix for failure in cleaning in iRMC driver * Add deploy\_step to NodePayload.SCHEMA * Add conductor\_group to node notifications * Deprecate xclarity hardware type * Be more precise with conductor group API tests * Simplify hash ring tests * Add documentation for changing node's hardware type 
* Fix the list of irrelevant-files * snmp: Keep get\_next method backward-compatible * Fix for failure in cleaning * Expose node.conductor\_group in the REST API * Use conductor group for hash ring calculations * Expose BIOS interface * Ignore bashate E044 * Remove deprecated option [ipmi]retry\_timeout * iLO BIOS interface implementation * Make pxelinux.cfg folder configurable * Use openstack client instead of neutron client * Replace port 35357 with 5000 for "auth\_url" * Add conductor\_group field to config, node and conductor objects * Add reset\_interfaces parameter to node's PATCH * Don't handle warnings as errors * Follow up Add CUSTOM\_CPU\_FPGA Traits value to ironic inspection * Follow-up changes to iRMC bios interface * Minor changes for deploy\_steps framework * Caching of PDU autodiscovery * Migrate ironic \`snmp\` driver to the latest pysnmp API * Add conductor\_group field to nodes and conductors tables * Add mock object for get\_bios\_settings * Fix bug to doc:configdrive * Add notes for future job migrations * Assert a build timeout for zuul templated CI jobs * Fixed link to Storyboard instead of launchpad * Update CI jobs for rescue mode * Fix bug to doc:kernel-boot-parameters * Deploy steps - API & notifications * Deploy steps - conductor & drivers * Add CUSTOM\_CPU\_FPGA Traits value to ironic inspection * Implement iRMC BIOS configuration * Deploy steps - versioned objects * Deploy steps - DB model * Follow-up to RAID configuration via iRMC driver patch * Poweroff server after 10 tries * Make the lower-constraints tox env actually use lower-constraints * Fix typo of function naming conventions in test\_deploy\_utils.py * Update the doc regarding the removal of calssic drivers * Update boot-from-volume feature docs * [doc] Use openstack client commands to replace neutron client * Detect skip version upgrades from version earlier than Pike * Update API version history with release 11.0.0 * Bump osprofiler minimum requirement to 1.5.0 * Add 11.0 
to release mapping * Add read&write SNMP community names to \`snmp\` driver * Add unit tests that "remove" is acceptable on /XXX\_interface node fields * Fix 11.0 prelude formatting * Change docs bug link to storyboard 11.0.0 ------ * Support RAID configuration for BM via iRMC driver * Fix list node vifs api error * Remove support for creating and loading classic drivers * Ensure we allow Ironic API traffic from baremetal network * Add a prelude for version 11 * iDRAC RAID10 creation with greater than 16 drives * Remove doc of classic drivers from the admin guide * Modifying 'whole\_disk\_image\_url' and 'whole\_disk\_image\_checksum' variable * Follow-up to update doc for oneview driver * Small change of doc title for the drivers * Fix wrong in apidoc\_excluded\_paths * Switch ironic-tempest-dsvm-ipa-partition-redfish-tinyipa * Switch ironic-dsvm-standalone to zuulv3 native * Follow-up to update doc for ilo driver * Add BayTech MRP27 snmp driver type * Improve pep8 checking along with hacking * Follow-up to update doc for irmc driver * DevStack: Tiny changes following iRMC classic driver removal * include all versions of Node in release\_mappings * Deprecate [inspector]enabled option * Do not disable inspector periodic tasks if [inspector]enabled is False * Remove the ipmitool classic drivers * Add snmp driver auto discovery * During cleaning, use current node.driver\_internal\_info * Rename test class * Remove the iRMC classic drivers * Remove the OneView classic drivers * Remove the deprecated pxe\_snmp driver * Remove the deprecated classic drivers for Cisco UCS hardware * Remove the iDRAC classic drivers * Separate unit tests into different classes * Add helper method for testing node fields * Fix conductor manager unit tests * Remove the ilo classic drivers * Move parse\_instance\_info\_capabilities() to common utils.py * Fix error when deleting a non-existent port * BIOS Settings: update admin doc * BIOS Settings: add bios\_interface field in NodePayload * 
BIOS Settings: update default BIOS setting version in db utils * Add documentation for XClarity Driver * Release note clean-ups for ironic release * Move boot-related code to boot\_mode\_utils.py * Raise TemporaryFailure if no conductors are online * BIOS Settings: add sync\_node\_setting * Fix for Unable to create RAID1 on Dell BOSS card * Add an external storage interface * fix typos * fix typos * Add detail=[True, False] query string to API list endpoints * Adds enable\_ata\_secure\_erase option * Remove the remaining fake drivers * Document that nova-compute attaches VIF to active nodes on start up * Added Redfish boot mode management * iRMC: Support ipmitool power interface with irmc hardware * Doc: Remove -r option for running a specific unit test * Fix stestr has no lower bound in test-requirements * Adds boot mode support to ManagementInterface * Modify the Ironic api-ref's parameters in parameters.yaml * rectify 'a image ID' to 'an image ID' * change 'a ordinary file ' to 'an ordinary file' * Validating fault value when querying with fault field * change 'a optional path' to 'an optional path' * Update links in README * Remove the fake\_ipmitool, fake\_ipmitool\_socat and fake\_snmp drivers * Add release notes link to README * BIOS Settings: add admin doc * Remove deprecated [keystone] config section * Make method public to support out-of-band cleaning * Remove the fake\_agent, fake\_pxe and fake\_inspector drivers * Consolidate the setting of ironic-extra-vars * Remove deprecated ansible driver options * Remove dulicate uses for zuul-cloner * Comply with PTI for Python testing * fix tox python3 overrides * Remove the "fake" and "fake\_soft\_power" classic drivers * Completely stop using the "fake" classic driver in unit tests * Power fault recovery follow up * Adds more \`ipmitool\` errors as retryable * Stop using pxe\_ipmitool in grenade * Fix FakeBIOS to allow tempest testing * Power fault recovery: Notification objects * Power fault recovery: API 
implementation * Add mock to doc requirements to fix doc build * Fix task\_manager process\_event docstring * Implements baremetal inspect abort * Add the ability to setup enabled bios interfaces in devstack * [Doc] Scheduling needs validated 'management' interface * Fix authentication issues along with add multi extra volumes * Stop passing IP address to IPA by PXE * Add Node BIOS support - REST API * Follow up to power fault recovery db tests * Power fault recovery: apply fault * Reraise exception with converting node ID * Gracefully handle NodeLocked exceptions during heartbeat * SNMPv3 security features added to the \`snmp\` driver * Allow customizing libvirt NIC driver * Convert conductor manager unit tests to hardware types * Remove excessive usage of mock\_the\_extension\_manager in unit tests - part 2 * Improve exception handling in agent\_base\_vendor * Check pep8 without ignoring D000 * Missing import of "\_" * Remove endpoint\_type from configuration * Power fault recovery: db and rpc implementation * Change exception msg of BIOS caching * Remove excessive usage of mock\_the\_extension\_manager in unit tests - part 1 * Mark xclarity password as secret * Fix E501 errors * Fix tenant DeprecationWarning from oslo\_context * update "auth\_url" in documents * Fix tenant DeprecationWarning from oslo\_context * Tear down console during unprovisioning * Fix XClarity parameters discrepancy * Follow up to inspect wait implementation * Silence F405 errors * Fix W605 Errors * Fix E305 Errors * Fix W504 errors * Gate fix: Cap hacking to avoid gate failure * Preserve env when running vbmc * Make validation failure on node deploy a 4XX code * Install OSC during quickstart * Ignore new errors until we're able to fix them * BIOS Settings: Add BIOS caching * BIOS Settings: Add BIOSInterface * Remove ip parameter from ipxe command line * Clarify image\_source with BFV * Update install guide to require resource classes * Fix error thrown by logging in common/neutron.py * 
Add note to oneview docs re: derprecation * Deprecate Oneview * Switch to the fake-hardware hardware type for API tests * Remove the Keystone API V2.0 endpoint registration * Move API (functional) tests to separate jobs * Add unit test for check of glance image status * Devstack plugin support for Redfish and Hardware * Collect periodic tasks from all enabled hardware interfaces * Stop verifying updated driver in creating task * BIOS Settings: Add RPC object * fix a typo * Trivial: Update pypi url to new url * Add more parameter explanation when create a node * Fix test\_get\_nodeinfo\_list\_with\_filters * Install reno to venv for creating release note * Stop removing root uuid in vendor interfaces * Fix \`\`agent\`\` deploy interface to call \`\`boot.prepare\_instance\`\` * Update wording used in removal of VIFs * [devstack] Switch ironic to uWSGI * Make ansible error message clearer * BIOS Settings: Add DB API * BIOS Settings: Add bios\_interface db field * BIOS Settings: Add DB model * Clean up driver\_internal\_info after tear\_down * Run jobs if requirements change * Remove vifs upon teardown * uncap eventlet * Update auth\_uri option to www\_authenticate\_uri * Resolve pep8 E402 errors and no longer ignore E402 * Remove pycodestyle version pin. 
Add E402 and W503 to ignore * Pin pycodestyle to <=2.3.1 * Check for PXE-enabled ports when creating neutron ports * Implementation of inspect wait state * Update Launchpad references to Storyboard * Add reno for new config [disk\_utils]partprobe\_attempts * Implement a function to check the image status * Fix callback plugin for Ansible 2.5 compatability * Follow the new PTI for document build * Clarify deprecation of "async" parameter * Fix incompatible requirement in lower-constraints * Reference architecture: small cloud with trusted tenants * Update and replace http with https for doc links * Assume node traits in instance trait validation * Adding grub2 bootloader support to devstack plugin * Describe unmasking fields in security document * Copy port[group] VIF info from extra to internal\_info * DevStack: Enroll node with iRMC hardware * Stop overriding tempdir in unit test * Uniformly capitalize parameter description * Gate: run ironic tests in the regular multinode job * Do not use async parameter * Remove the link to the old drivers wiki page * add lower-constraints job * Test driver-requirements changes on standalone job * Updated from global requirements * Exclude Ansible 2.5 from driver-reqs * Fix typos There are two 'the', delete one of them * fix typos in documentation * Fix nits in the XClarity Driver codebase * Validate instance\_info.traits against node traits * Prevent overwriting of last\_error on cleaning failures * Infiniband Port Configuration update[1] * Rework Bare Metal service overview in the install guide * Gate: stop setting IRONIC\_ENABLED\_INSPECT\_INTEFACES=inspector * Follow-up patch for rescue mode devstack change * devstack: enabled fake-hardware and fake interfaces * Updated from global requirements * Add descriptions for config option choices * devstack: add support for rescue mode * Updated from global requirements * Implements validate\_rescue() for IRMCVirtualMediaBoot * Updated from global requirements * Update config option 
for collecting sensor data * Use node traits during upgrade * multinode, multitenant grenade votes in gate * zuul: Remove duplicated TEMPEST\_PLUGIN entry * Use more granular mocking in test\_utils * change python-libguestfs to python-guestfs for ubuntu * Update links in README * Updated from global requirements * Remove useless variable * Don't validate local\_link\_connection when port has client-id * Updated from global requirements * Update docstring to agent client related codes * Move execution of 'tools/check-releasenotes.py' to pep8 * reloads mutable config values on SIGHUP * Make grenade-mulinode voting again * tox.ini: flake8: Remove I202 from ignore list * fix a typo in driver-property-response.json: s/doman/domain/ * Trivial: Remove the non ascii codes in tox.ini * Register traits on nodes in devstack * [devstack] block iPXE boot from HTTPS TempURLs * Fix issue with double mocking of utils.execute functions * Updates boot mode on the baremetal as per \`boot\_mode\` * Support nested objects and object lists in as\_dict * Revert "Don't try to lock for vif detach" * Rework logic handling reserved orphaned nodes in the conductor * Set 'initrd' to 'rescue\_ramdisk' for rescue with iPXE * Update iLO documentation for deprecating classical drivers * Increase the instance\_info column size to LONGTEXT on MySQL/MariaDB * Update release instructions wrt grenade * [ansible] use manual-mgmt hw type in unit tests * Use oslo\_db.sqlalchemy.test\_fixtures * Disable .pyc files for grenade multinode * Add docs for ansible deploy interface * Update comment and mock about autospec not working on staticmethods * Build instance PXE options for unrescue * Updated from global requirements * Fix default object versioning for Rocky * Allow sqalchemy filtering by id and uuid * Fix rare HTTP 400 from port list API * Clean nodes stuck in CLEANING state when ir-cond restarts * Imported Translations from Zanata * tox: stop validating locale files * Switch contributor documentation 
to hardware types * Stop using --os-baremetal-api-version in devstack by default * Conductor version cannot be null in Rocky * Add 'Other considerations' to security doc * Updated from global requirements * Implements validate\_rescue() for IloVirtualMediaBoot * Update to standalone ironic doc * Remove too large configdrive for handling error * Added known issue to iDRAC driver docs * Add missing noop implementations to fake-hardware * Stop running standalone tests for classic drivers * Stop running non-voting jobs in gate * Add optional healthcheck middleware * releasing docs: document stable jobs for the tempest plugin * Add meaningful exception in Neutron port show * Clean up CI playbooks * Fix broken log message * Add validate\_rescue() method to boot interface * Empty commit to bump minor pre-detected version * Remove test\_contains\_current\_release\_entry * Fix grammar errors * Clean up RPC versions and database migrations for Rocky * Remove validate\_boot\_option\_for\_trusted\_boot metric * Update reno for stable/queens 10.1.0 ------ * Add some missed test cases in node object tests * [reno] timeout parameter worked * Remove unnecessary lines from sample local.conf * Stop guessing mime types based on URLs * Clean up release notes before a release * Don't try to lock for vif detach * Revert grenade jobs to classic drivers * Handle case when a glance image contains no data * Add 10.1 and queens to the release mapping * Do not pass credentials to the ramdisk on cleaning * correct grammar, duplicate the found * Update iRMC document for classic driver deprecation * correct grammar, duplicate the found * Correct grammar, duplicate the found * Only set default network interface flat if enabled in config * Fix handling of 'timeout' parameter to power methods * Fixed some typos in test code * Replace chinese quotes to English quotes * Zuul: Remove project name * Modify error quotation marks * cleanup: Remove usage of some\_dict.keys() * Use zuul.override\_checkout 
instead of custom branch\_override var * Add validate\_rescue() method to network interface * [docs] Firmware based boot from volume for iLO drivers * Follow-up patch for api-ref documentation for rescue * Remove sample policy and config files * correct referenced url in comments * Remove unused code in unittest * Fix configure-networking docs * Migrate the remaining classic drivers to hardware types * Remove mode argument from boot.(prepare|clean\_up)\_ramdisk * Do not use asserts with business logic * Add option to specify mac adress in devstack/.../create-node.sh * Updated from global requirements * [api-ref] clarify what /v1/lookup returns * Update FAQ about updates of release notes * Add documentation for baremetal mech * Flat networks use node.uuid when binding ports * Add missing ilo vendor to the ilo hardware types * Follow-up for Switch OneView driver to hpOneView and ilorest libraries * Soft power operations for OneView hardware type * Deprecate classic drivers * Declare support for Python 3.5 in setup.cfg * Add api-ref and ironic state documentation for rescue * Mock check\_dir in ansible interface tests * Add documentation for node traits * Fix nits found in node traits * Follow-up for Implementation for UEFI iSCSI boot for ILO * Explicitly mark earliest-version for release notes * Remove unused code in common/neutron.py * Correct link address * Wait for ironic-neutron-agent to report state * Devstack - use neutron segments (routed provider networks) * Zuul: Remove project name * Add traits field to node notifications * Update description for config params of 'rescue' interface * Add rescue interface field to node-related notifications * Follow-up for API methods for rescue implementation * Add support for preparing rescue ramdisk in iLO PXE * Automatically migrate nodes to hardware types * Add API methods for [un]rescue * Fix unit tests for UEFI iSCSI boot for ILO * Follow-up for agent rescue implementation * iRMC:Support preparing rescue ramdisk in 
iRMC PXE * Redundant alias in import statement * Agent rescue implementation * Allow data migrations to accept options * Resolve race in validating neutron networks due to caching * Update api-ref for port group create * Implementation for UEFI iSCSI boot for ILO * Add node traits to API reference * Add a timeout for powering on/off a node on oneview * Fix persistent information when getting boot device * Remove python-oneviewclient from oneview hardware type * API: Node Traits API * Add RPC API and conductor manager for traits * Be more sane about cleaning * Fix node update with PostgreSQL * Switch the CI to hardware types * Migrate python-oneviewclient validations to oneview hardware type * Updated from global requirements * Add RPC object for traits * Allow setting {provisioning,cleaning,rescuing}\_network in driver\_info * Migrate oneview hardware type to use python-hpOneView * remeber spelling error * Add rescuewait timeout periodic task * Add rescue related methods to network interface * Add XClarity Driver * [docs] mention new nova scheduler option * Add a version argument to traits DB API * Mark multinode job as non-voting * Updated from global requirements * Fix docs for Sphinx 1.6.6 * fix a typo in ilo.rst: s/fimware/firmware/ * Do not send sensors data for nodes in maintenance mode 10.0.0 ------ * Adds RPC calls for rescue interface * Make the Python 3 job voting * Add additional context to contribution guide * node\_tag\_exists(): raise exception if bad node * Setup ansible interface in devstack * Remove the deprecated "giturl" option * Join nodes with traits * Update links * Node traits: Add DB API & model * Add release 10.0 to release mappings * Remove ironic\_tempest\_plugin/ directory * Do not validate root partition size for whole disk images in iscsi deploy * Switch non-vendor parts admin guide to hardware types * Clean up release notes before a release * Add Error Codes * Remove ironic\_tempest\_plugin/ directory * Fix initialization of auth 
token AuthProtocol * Rework exception handling on deploy failures in conductor * Add a provisioning target:adopt * Devstack: install qemu-system-x86 on RHEL * Add uWSGI support * Fix ironic node create cli * zuul: Update TLSPROXY based on branch * Run in superconductor cellsv2 mode for non-grenade jobs * Updated from global requirements * Add documentation covering storage multi-attach * Adds rescue\_interface to base driver class * Document the check done in "ironic-dbsync upgrade" * zuul: Add ability to specify a 'branch\_override' value * zuul: Remove some redundancy by consolidating the 'post.yaml' files * Use openstack port create instead of neutron port-create * ansible: handle mount of /sys the same way IPA does it * [ansible] add defaults to config * Prevent changes to the ironic\_tempest\_plugin/ directory * Finalize migration to keystoneauth adapters * Updated from global requirements * Follow up Add additional capabilities discovery for iRMC driver * Use NamedExtensionManager for drivers * Use the tempest plugin from openstack/ironic-tempest-plugin * Switch emphasis to hardware types in the installation guide * Use adapters for neutronclient * Remove deprecated ironic.common.policy.enforce() * Introduce hpOneView and ilorest to OneView * Auto-detect the defaults for [glance]swift\_{account,temp\_url\_key,endpoint\_url} * Add 'nova hypervisor-list' in example set of commands to compare the resources in Compute service and Bare Metal service * Receive and store agent version on heartbeat * tox: Use the default version of Python 3 for tox tests * Remove unused methond \_get\_connect\_string * Update comment on location of webapi-version-history.rst * Updated from global requirements * Do not access dbapi attributes on dbsync import * Fix swiftclient creation * Update docs to include API version pinning * Add networking-fujitsu ML2 driver to multitenacy doc * Updated from global requirements * 9.2.0 is the ironic version with rebuild configdrive * Pin API 
version during rolling upgrade * devstack to \`git pull sushy-tools\` if required * Add spec & priorities links to contributor doc * Fix HPE headers for oneview * Updated from global requirements * Fix the format command-line * Add information about neutron ML2 drivers to multitenancy docs * Apply pep8 check to app.wsgi * ironic.conf.sample includes default\_resource\_class * Add a configuration option for the default resource class * Rework drivers page in the admin documentation * Update bindep.txt for doc builds * Don't collect logs from powered off nodes * Add additional capabilities discovery for iRMC driver * Use adapters for inspectorclient * Use adapters for cinderclient * Imported Translations from Zanata * Followup to I07fb8115d254e877d8781207eaec203e3fdf8ad6 * Add missing gzip call to FAQ item on how to repack IPA * Rework keystone auth for glance * Remove setting of version/release from releasenotes * zuul.d: Remove unneeded required-projects * Updated from global requirements * Add 9.2 to release mappings * Remove provisioning network ports during tear down * Fix image type for partition-pxe\_ipmitool-tinyipa-python3 job 9.2.0 ----- * update description for Change Node Power State * Add no-vendor interface to the idrac hardware types * Updated from global requirements * Fail deploy if agent returns >= 400 * Don't run multinode jobs for changes to driver-requirements.txt * Revert "Introduce hpOneView and ilorest to OneView" * Revert "Migrate oneview driver to use python-hpOneView" * Revert "Fix persistent information when getting boot device" * Revert "Add a timeout for powering on/off a node on HPE OneView Driver" * Revert "Migrate python-oneviewclient validations to Ironic OneView drivers" * Revert "Remove python-oneviewclient from Ironic OneView drivers" * Revert "Get a new OneView client when needed" * Revert "Update python-ilorest-library to hardware type OneView" * Add missing 'autospec' to unit tests - /unit/objects/ * Add ansible deploy 
interface * Clean up release notes from the upcoming release * Fix misplaced reno note * Make the api format correctly * [devstack] stop setting or relying on standard properties * Remove some deprecated glance options * zuul.d/projects.yaml: Sort the job list * project.yaml: Remove 'branches:' & jobs that don't run on master * Miss node\_id in devstack lib * Update idrac hardware type documentation * Update Zuul 'gate' job * Rolling upgrades related dev documentation * Update python-ilorest-library to hardware type OneView * Add rescue\_interface to node DB table * Get a new OneView client when needed * Run tempest jobs when update requirements * Updated from global requirements * Remove unused IronicObjectIndirectionAPI from ironic-api * Add release note for fix to port 0 being valid * Simplify the logic of validate\_network\_port * Follow up Secure boot support for irmc-virtual-media driver * devstack: Clean up some of the devstack code * Remove python-oneviewclient from Ironic OneView drivers * Allow to set default ifaces in DevStack * Reword interface information in multitenancy docs * Ensure ping actually succed * Fix minor documentation missing dependency * Small fixes in the common reference architecture docs * [reno] Update ironic-dbsync's check object version * Migrate python-oneviewclient validations to Ironic OneView drivers * Remove unnesessary description for config parameters in cinder group * Update ironic.sample.conf * Fix the format issues of User guide * Zuul: add file extension to playbook path * Add I202 to flake ignore list * Revise deploy process documentation * Add a timeout for powering on/off a node on HPE OneView Driver * ironic-dbsync: check object versions * Update validating node information docs * Use jinja rendering from utils module * Add ability to provide configdrive when rebuilding * Finish the guide on upgrading to hardware types * Move ironic legacy jobs into the ironic tree * Fix missing logging format error * Add missing 
'autospec' to unit tests - /unit/common/ * [bfv] Set the correct iqn for pxe * Fix "import xx as xx" grammer * Secure boot support for irmc-virtual-media driver * Change pxe dhcp options name to codes * Updated from global requirements * [docs] describe vendor passthru in hw types * Add bindep.txt file * Fix some mis-formatted log messages in oneview driver * Disallow rolling upgrade from Ocata to Queens * Add online data migrations for conductor version * [Devstack] Replace tap with veth * Support SUM based firmware update as clean step for iLO drivers * Add missing 'autospec' to unit tests - /unit/dhcp/ * Fix mis-formatted log messages * Use oslotest for base test case * Update tests to do not use deprecated test.services() * Follow-up patch 'Cleanup unit tests for ipmittool' * Makes ironic build reproducible * Remove 'next' for GET /nodes?limit=1&instance\_uuid= * ListType preserves the order of the input * Stop passing raw Exceptions as the reasons for ironic Image exceptions * Update after recent removal of cred manager aliases * ipmitool: reboot: Don't power off node if already off * Reduce complexity of node\_power\_action() function * Add default configuration files to data\_files * Documentation for 'oneview' hardware type * Cleanup unit tests for ipmittool * Use DocumentedRuleDefault instead of RuleDefault * main page: add links to docs on Upgrade to HW Types * Add documentation describing each Ironic state * Cleanup test-requirements * Fix API VIF tests when using flat network * Updated from global requirements * Migrate to stestr as unit tests runner * [reno] update for MAC address update fix * Revert "Change pxe dhcp options name to codes." 
* Drop neutron masking exception in vif\_attach * Rework update\_port\_address logic * api-ref portgroup\_id should be portgroup\_ident * Document setting discover\_hosts\_in\_cells\_interval in nova.conf * Adds more exception handling for ironic-conductor heartbeat * Updated from global requirements * Change pxe dhcp options name to codes * Updated from global requirements * Updated from global requirements * Reference architecture: common bits * Stop using Q\_PLUGIN\_EXTRA\_CONF\_{PATH|FILES} variables * Put unit test file in correct directory * Update vif\_attach from NeutronVIFPortIDMixin * Replace http with https for doc links * flake8: Enable some off-by-default checks * Update upgrade guide to use new pike release * [install docs] ironic -> openstack baremetal CLI * Using devstack configure\_rootwrap to configure ironic rootwrap * Use newer location for iso8601 UTC * reformat REST API Version History page * Fix persistent information when getting boot device * Migrate oneview driver to use python-hpOneView * [reno] Clarify fix for missing boot.prepare\_instance * [doc] Non-word updates to releasing doc * Introduce hpOneView and ilorest to OneView * Fix race condition in backfill\_version\_column() * Switch API ref to use versionadded syntax throughout * Replace DbMigrationError with DBMigrationError * [reno] Clarify fix for BFV & image\_source * Fix unit test for new fields in invaid API version * Put tests in correct location for ironic/api/controllers/v1/ * Troubleshooting docs: explain disabled compute services * Update documentation for \`\`ilo\`\` hardware type * Updated from global requirements * Boot from volume fails with 'iscsi' deploy interface * Boot from volume fails with 'iscsi' deploy interface * [contributor docs] ironic -> OSC baremetal CLI * Minor improvements to the resource classes documentation * Update Nova configuration documentation * Build docs with Python 2 for now * [doc] add FAQ about updating release notes * Follow-up for commit 
cb793d013610e6905f58c823e68580714991e2df * [docs] Update Releasing Ironic Projects * Add doc/source/\_static to .gitignore * Fix indentation in few of the documentation pages * Upgrade guide for \`snmp\` hardware type * tox.ini: Add 'py36' to the default envlist * devstack: Comment variables related to multi-tenant networking * Test ironic-dbsync online\_data\_migrations * Add a comment about default devstack images * Fix to use "." to source script files * Add #!/bin/bash to devstack/common\_settings * Add Sem-Ver flag to increment master branch version * conductor saves version in db * Update Pike release title to include version range * Updated from global requirements * remove REST API examples from RAID doc * [admin docs] ironic -> openstack baremetal CLI * [doc] change absolute to relative URL * Configuration documentation migrated * fix a typo in agent.py: s/doman/domain/ * Documentation for irmc hardware type * correct URLs in contributor docs & main index * Correct URLs in install docs * correct URLs in admin docs * Documentation for 'snmp' hardware type * Fix incorrect documentation urls * Updated from global requirements * Partially revert "Set resource class during upgrade" * Introduce keystoneauth adapters for clients * [doc] Replace http with https * Follow-up to \`\`ilo\`\` hardware type documentation * Set explicit default to enabled driver interfaces * Set resource class during upgrade * Fix names of capabilities for FibreChannel volume boot * iRMC: Follow-up: volume boot for virtual media boot interface * Do not restart n-cpu during upgrade * Make SNMP UDP transport settings configurable * Enable OSProfiler support in Ironic - follow-up * Wait for cleaning is completed after base smoke tests * Add 'hardware type' for Dell EMC iDRACs * Fix DRAC classic driver double manage/provide * [devstack] use resource classes by default * Add 9.1 to release\_mappings * Imported Translations from Zanata * Add 'force\_persistent\_boot\_device' to pxe props * 
devstack: Remove unused variable IRONIC\_VM\_NETWORK\_RANGE * Adds 9.0 to release\_mappings * Get rid of sourcing stackrc in grenade settings * Update reno for stable/pike * Revert "[reno] Add prelude for Pike release" 9.0.0 ----- * Add the new capabilities to the iLO InspectInterface * [docs] update irmc boot-from-volume * [releasenotes] update irmc's boot-from-volume support * [reno] Add prelude for Pike release * Add storage interface to enabling-drivers doc * Add admin guide for boot from volume * iRMC: Add documentation for remote volume boot * Remove ensure\_logs\_exist check during upgrade * Add functional API tests for volume connector and volume target * Follow-up to rolling upgrade docs * Update proliantutils version for Pike release * [reno] update * Documetation for 'ilo' hardware type * Follow up Secure boot support for irmc-pxe driver * Update the documentation links - code comments * Update the documentation links - install guide * Remove translator assignments from i18n * Add hardware types to support Cisco UCS Servers * Remove setting custom http\_timeout in grenade * Upgrade to hardware types: document changing interfaces for active nodes * Update the resource classes documentation based on recent progress * [devstack] switch to the latest API version and OSC commands * Prevent changes of a resource class for an active node * Guide on upgrading to hardware types * iRMC: Support volume boot for iRMC virtual media boot interface * Rolling upgrade procedure documentation * Release notes clean up for the next release * Fix missing print format error * Secure boot support for irmc-pxe driver * Adds hardware type for SNMP powered systems * Add a guide for Devstack configuration for boot-from-volume * Add a flag to always perform persistent boot on PXE interface * Put tests in correct location for ironic/api/controllers/v1/ * [tempest] also catch BadRequest in negative tests with physical\_network in old API * Use more specific asserts in tests * 
[Trivialfix]Fix typos in ironic * Remove WARNING from pin\_release\_version's help * Update ironic.conf.sample due to non-ironic code * Add new dbsync command with first online data migration * BFV Deploy skip minor logging, logic, and test fixes * Add hardware type for HPE OneView * [doc-migration] Add configuration folder for documentation * Add storage interface to api-ref * Add API for volume resources to api-ref * Disable automated cleaning for single node grenade * Optimize node locking on heartbeat * Remove file RELEASE-NOTES * Removed unnecessary setUp() call in unit tests * Adds doc for restore\_irmc\_bios\_config clean step * Remove SSH-based driver interfaces and drivers * [Tempest] fix negative tests on old API versions * Remove install-guide env which is no longer effective * Address review feedback for ipxe boot file fix * Change ramdisk log filename template * Remove usage of some of the deprecated methods * Updated from global requirements * grenade: Use test\_with\_retry to check if route is up * Don't use multicell setup for ironic & increase timeout * Tempest scenario test for boot-from-volume * Refactor VIFPortIDMixin: factor out common methods * Add negative attribute to negative port tests * Rolling upgrades support for create\_port RPCAPI * Fixes hashing issues for py3.5 * Generate iPXE boot script on start up * grenade: For multi-node grenade, do not upgrade nova * Changes log level of a message * Fix small issues in the installation documentation * Removes agent mixin from oneview drivers * Fix docstring and default value for local\_group\_info * [doc] update ironic's landing page * Adding note for ironic virt driver nova-compute changes * Added a condition for 'ilo' hardware type * Updated from global requirements * py3.5:Workaround fix for forcing virtualbmc installation with pip2 * [devstack] add support for running behind tls-proxy * Start passing portgroup information to Neutron * Add tempest tests for physical networks * Updated from 
global requirements * Refactor VIFPortIDMixin: rename * Doc for disk erase support in iLO drivers * DevStack: Add configuration for boot-from-volume * Refactor get\_physnets\_by\_portgroup\_id * Rolling upgrades support for port.physical\_network * Allow updating interfaces on a node in available state * replace 'interrace' with 'interface' * Improve port update API unit tests * Improve ports API reference * Expose ports' physical network attribute in API * Rename 'remove\_unavail\_fields' parameter * Updated from global requirements * Add missing parameter descriptions * Updated from global requirements * Generate iPXE boot script when deploying with boot from volume * Add Driver API change in 1.33 to history * Update URL home-page in documents according to document migration * Using non-persistent boot in PXE interface * Modifications for rolling upgrades * Update comments related to ipmi & old BMCs * Follow-up to fix for power action failure * Fix copy/paste error in VIF attach note * [reno] Clarify fix for inspect validation failures * [trivial] Fix argument descriptions * Remove \_ssh drivers from dev-quickstart * Fix broken links in tempest plugin README * Remove future plan from portgroup document * Enable OSProfiler support in Ironic * Revert "Wait until iDRAC is ready before out-of-band cleaning" * Force InnoDB engine on interfaces table * Add storage interface field to node-related notifications * Removed nonexistent option from quickstart snippet * Enable cinder storage interface for generic hardware * Mock random generator for BackoffLoopingCall in IPMI unittests * Raise HTTP 400 rather than 500 error * Make IP address of socat console configurable * Set nomulticell flag for starting nova-compute in grenade * Physical network aware VIF attachment * Update README to point at new doc location * Move ironic dbsync tool docs into doc/source/cli * Move doc/source/dev to doc/source/contributor * Move operator docs into into doc/source/admin * Move install 
guide into new doc/source/install location * Improve graceful shutdown of conductor process * switch from oslosphinx to openstackdocstheme * Fix quotes in documentation and schema description * Follow-up for bugfix 1694645 patch * Add REST API for volume connector and volume target operation * Add node power state validation to volume resource update/deletion * Make redfish power interface wait for the power state change * Refactor common keystone methods * Adds clean step 'restore\_irmc\_bios\_config' to iRMC drivers * Add CRUD notification objects for volume connector and volume target * Updated from global requirements * Don't retry power status if power action fails * Fix VIF list for noop network interface * Fetch Glance endpoint from Keystone if it's not provided in the configuration * Replace the usage of 'manager' with 'os\_primary' * Logic for skipping deployment with BFV * iPXE template support for iSCSI * Move \_abort\_attach\_volumes functionality to detach\_volumes * Allow to load a subset of object fields from DB * Unit test consistency: DB base and utils prefix * Updated from global requirements * Updated from global requirements * Remove unnecessary line in docstring * Validate portgroup physical network consistency * Wire in storage interface attach/detach operations * Wait until iDRAC is ready before out-of-band cleaning * Minor changes to object version-related code * Remove times.dbm prior to test run * Discover hosts while waiting for hypervisors to show up in devstack * Add docs for node.resource\_class and flavor creation * Updated from global requirements * Move port object creation to conductor * Make default\_boot\_option configurable in devstack * Trigger interface attach tests * Support setting inbound global-request-id * Follow-up docstring revision * Runs the script configure\_vm.py in py3.5 * Replace get\_transport with get\_rpc\_transport * Add version column * Add ldlinux.c32 to boot ISO for virtual media * Remove legacy auth 
loading * Add a note for specifying octal value of permission * Improve driver\_info/redfish\_verify\_ca value validation * Updated from global requirements * Stop sending custom context values over RPC * Replace assertTrue(isinstance()) with assertIsInstance() * Change volume metadata not to use nested dicts * Add physical network to port data model * Move deploy\_utils warnings to conductor start * Remove unused methods from GlanceImageService * [install-guide] explain the defaults calculation for hardware types * Improve driver\_info/redfish\_system\_id value validation * Add guru meditation report support * Adds parameters to run CI with hardware types * Fix description for [cinder] action\_retries option * Deprecate elilo support * Updated from global requirements * Update ipmitool installation and usage documentation * Replace test.attr with decorators.attr * Updated from global requirements * Replace test.attr with decorators.attr * remove explicit directions for release notes on current branch * Use cfg.URIOpt for URLs with required schemes * Updated from global requirements * Remove unneeded lookup policy check * Add Cinder storage driver * Add ipmitool vendor interface to the ipmi hardware type * Replace test.attr with decorators.attr * Fix directories permission for tftpboot * Comment the default values in policy.json.sample * Replace deprecated .assertRaisesRegexp() * Updated from global requirements * Remove remaining vendor passthru lookup/heartbeat * Prevent tests from using utils.execute() * Remove unit tests that test oslo\_concurrency.processutils.execute * Remove single quoted strings in json sample * Refactor install-guide: update node enrollment * Refactor install-guide: driver and hardware types configuration * Minor clean up in iLO drivers unit tests * Remove translation of log messages * Enable getting volume targets by their volume\_id * Check if sort key is allowed in API version * Updated from global requirements * Remove logging 
translation calls from ironic.common * [install-guide] add section on Glance+Swift config * Fix attribute name of cinder volume * Update reno for new ilo hardware type * Remove log translations from ironic/drivers Part-1 * Update developer quickstart doc about required OS version * Add 'iscsi' deploy support for 'ilo' hardware type * Trivial fix typos while reading doc * Fix docstrings in conductor manager * [devstack] start virtualpdu using full path * [Devstack] Increase default NIC numbers for VMs to 2 * Remove usage of parameter enforce\_type * Properly allow Ironic headers in REST API * Updated from global requirements * Fix a typo * DevStack: Install gunicorn and sushy based on g-r constraints * Fix keystone.py 'get\_service\_url' method parameter * Add functional api tests for node resource class * Refactor install-guide: integration with other services * Remove references to EOLed version of Ironic from the install guide * DevStack: Setup a Redfish environment * Add hardware type for HPE ProLiant servers based on iLO 4 * Bring the redfish driver address parameter closer to one of other drivers * [Grenade]: Do not run ir-api on primary node after upgrade * Validate outlet index in SNMP driver * [Devstack] Rework VMs connection logic * Fix oslo.messaging log level * Add context to IronicObject.\_from\_db\_object() * Add release notes for 8.0.0 * [api-ref] remove reference to old lookup/heartbeat * Follow-up patch to redfish documentation * [devstack] use the generic function to setup logging * Fix cleaning documents * Remove obsolete sentence from comment * TrivialFix: Remove logging import unused * Remove translation of log messages from ironic/drivers/modules/irmc * Run db\_sync after upgrade * Remove translation of log messages from ironic/drivers/modules/ucs * Start enforcing config variables type in tests * Add documentation for the redfish driver * Read disk identifier after config drive setup * Add a paragraph about image validation to Install Guide * 
Make terminal timeout value configurable * Remove nova mocks from documentation configuration * Remove fake\_ipmitool\_socat driver from the documentation * Add redfish driver * Ensure we install latest libivrt * Set env variables when all needed files are source * save\_and\_reraise\_exception() instead of raise * Follow-up patch of 7f12be1b14e371e269464883cb7dbcb75910e16f * VirtualPDU use libvirt group instead of libvirtd * Fix unit tests for oslo.config 4.0 * Always set host\_id when adding neutron ports * Add /baremetal path instead of port 6385 * Add SUSE instructions to the install guide * Remove pre-allocation model for OneView drivers * Remove log translations from iLO drivers * Follow-up patch of 565b31424ef4e1441cae022486fa6334a2811d21 * Setup logging in unit tests * Remove deprecated DHCP provider methods * Make config generator aware of 'default\_log\_levels' override * [Devstack] Fix libvirt group usage * Common cinder interface additional improvements * Config drive support for ceph radosgw * Improve error message for deleting node from error state * Updated from global requirements * Add comments re RPC versions being in sync * Help a user to enable console redirection * Fix some reST field lists in docstrings * Avoid double ".img" postfix of image file path in devstack installation * add portgroups in the task\_manager docstrings * Remove unneeded exception handling from agent driver * Updated from global requirements * Remove translation of log messages from ironic/dhcp and ironic/cmd * Updated from global requirements * Bypassing upload deploy ramdisk/kernel to glance when deploy iso is given * Drop commented import * Enforce releasenotes file naming * Remove unused methods in common/paths and common/rpc * Remove translation of log messages from ironic/api * Fix access to CONF in dhcp\_options\_for\_instance * Add string comparison for 'IRONIC\_DEPLOY\_DRIVER' * Modify the spelling mistakes Change explictly to explicitly 8.0.0 ----- * Revert 
"[Devstack] Rework VMs connection logic" * Fix base object serialization checks * Node should reflect what was saved * Changes 'deploy' and 'boot' interface for 'pxe\_ilo' driver * Use standard deploy interfaces for iscsi\_ilo and agent\_ilo * Refactor iLO drivers code to clean 'boot' and 'deploy' operations * Updated from global requirements * Add base cinder common interface * Updates to RPC and object version pinning * Add release note for messaging alias removal * Remove deprecated method build\_instance\_info\_for\_deploy() * Remove deprecated, untested ipminative driver * [Devstack] Rework VMs connection logic * Docs: bump tempest microversion caps after branching * Add assertion of name to test\_list\_portgroups test * Skip PortNotFound when unbinding port * Remove unnecessary setUp function in testcase * Remove deprecated [ilo]/clean\_priority\_erase\_devices config * Remove extra blank space in ClientSide error msg * Updated from global requirements * Convert BaseDriver.\*\_interfaces to tuples * [Devstack] cleanup upgrade settings * [doc] Update examples in devstack section * devstack: install python-dracclient if DRAC enabled * Call clean\_up\_instance() during node teardown for Agent deploy * Don't pass sqlite\_db in db\_options.set\_defaults() * Fix some api field lists in docstrings * Copy and append to static lists * Define minimum required API ver for portgroups * Add RPC and object version pinning * Updated from global requirements * Fix docstrings for creating methods in baremetal api tests * Extend tests and checks for node VIFs * Remove translation of log messages from ironic/conductor * Add functional API tests for portgroups * Revert the move of the logger setup * [devstack] Use global requirements for virtualbmc * Updates documentation to install PySqlite3 * Remove log translation function calls from ironic.db * Fix local copy of scenario manager * Add standalone tests using direct HTTP links * devstack: When Python 3 enabled, use Python 3 * 
Remove old oslo.messaging transport aliases * Fix file\_has\_content function for Py3 * Fix usage of various deprecated methods * Prune local copy of tempest.scenario.manager.py * devstack: Don't modprobe inside containers * Include a copy of tempest.scenario.manager module * flake8: Specify 'ironic' as name of app * Updated from global requirements * Fix API doc URL in GET / response * Add ironic standlaone test with ipmi dynamic driver * Update new proliantutils version to 2.2.1 * Add Ironic standalone tests * Fix typos of filename in api-ref * Updated from global requirements * Fix the exception message in tempest plugin * Speed up test\_touch\_conductor\_deadlock() * Cleanup hung iscsi session * Refactor waiters in our tempest plugin * Deprecate support for glance v1 * This adds a tempest test for creating a chassis with a specific UUID * Address a shell syntax mistake * Update ironic.conf.sample * grenade: Only 'enable\_plugin ironic' if not already in conf * Remove overwriting the default value of db\_max\_retries * Do not load credentials on import in tempest plugin clients.py * Update the Ironic Upgrade guide * Validation before perform node deallocation * Add wsgi handling to ironic-api in devstack * Fix updating node.driver to classic * devstack: Make sentry \_IRONIC\_DEVSTACK\_LIB a global variable * Use Sphinx 1.5 warning-is-error * Fixed release note for DBDeadLock handling * Remove references to py34 from developer guide * Delete release note to fix build * Correct typos in doc files * Clean up eventlet monkey patch comment and reno * Moved fix-socat-command release note * Allow to attach/detach VIFs to active ironic nodes * Move eventlet monkey patch code * Updated from global requirements * doc: update FAQ for release notes * Update test requirement * Add tempest plugin API tests for driver * Updated from global requirements * Remove gettext.install() for unit tests * Fix missing \_ import in driver\_factory * Add support for DBDeadlock handling * 
Fix BaseBaremetalTest.\_assertExpected docstring * Updated ramdisk API docstrings * Trivial: Change hardcoded values in tempest plugin * Developer guide should not include Python 3.4 * Add testcases for iLO drivers * Deduplicate \_assertExpected method in tests * Remove unused logging import * Use specific end version since liberty is EOL * Use flake8-import-order * Document PXE with Spanning Tree in troubleshooting FAQ * Skip VIF tests for standalone ironic * Switch to new location for oslo.db test cases * Explicitly use python 2 for the unit-with-driver-libs tox target * Add ironic port group CRUD notifications * Remove logging import unused * Update release nodes for Ocata * reno 'upgrades' should be 'upgrade' * Updated from global requirements * Update docs create port group 7.0.0 ----- * Clean up release notes for 7.0.0 * Add a summary release note for ocata * Walk over all objects when doing VIF detach * Fix unit tests with UcsSdk installed * Mock client initializations for irmc and oneview * Follow up patch for SNMPv3 support * Add a tox target for unit tests with driver libraries * Fix missed '\_' import * Change misc to test\_utils for tempest test * Source lib/ironic in grenade settings * Update api-ref for dynamic drivers * Switch to use test\_utils.call\_until\_true * Add port groups configuration documentation * Remove most unsupported drivers * SNMP agent support for OOB inspection for iLO Drivers * No node interface settings for classic drivers * Unbind tenant ports before rebuild * Remove a py34 environment from tox * Fix object save after refresh failure * Pass session directly to swiftclient * Adds network check in upgrade phase in devstack * Fix log formating in ironic/common/neutron * Follow-up iRMC power driver for soft reboot/poff * Use https instead of http for git.openstack.org * Validate the network interface before cleaning * log if 'flat' interface and no cleaning network * exception from driver\_factory.default\_interface() * devstack: 
Adding a README for ironic-bm-logs directory * [devstack] Allow using "ipmi" hardware type * Remove trailing slash from base\_url in tempest plugin * Improve enabled\_\*\_interfaces config help and validation * Prepare for using standard python tests * [Devstack] fix waiting resources on subnode * Log an actual error message when failed to load new style credentials * Speed up irmc power unit tests * Add bumping sem-ver to the releasing docs * Make \_send\_sensors\_data concurrent * [devstack] remove deprecated IRONIC\_IPMIINFO\_FILE * Fail conductor startup if invalid defaults exist * Add dynamic interfaces fields to base node notification * Improve conductor driver validation at startup * Remove iSCSI deploy support for IPA Mitaka * Do not change admin\_state for tenant port * Use delay configoption for ssh.SSHPower drivers * Add the timeout parameter to relevant methods in the fake power interface * Adding clean-steps via json string examples * Allow duplicate execution of update node DB api method * Remove deprecated heartbeat policy check * Add sem-ver flag so pbr generates correct version * Fix a few docstring warnings * Remove deprecated [deploy]erase\_devices\_iterations * Remove support for driver object periodic tasks * Log reason for hardware type registration failure * Duplicated code in ..api.get\_active\_driver\_dict() * Add hardware type 'irmc' for FUJITSU PRIMERGY servers * Allow using resource classes * DevStack: Only install edk2-ovmf on Fedora * [Devstack] Add stack user to libvirt group * Add soft reboot, soft power off and power timeout to api-ref * Add dynamic interfaces fields to nodes API * Add dynamic driver functionality to REST API * [Devstack] Download both disk and uec images * [Devstack] Set DEFAULT\_IMAGE\_NAME variable * Update the outdated link in user-guide * Add Inject NMI to api-ref * Don't override device\_owner for tenant network ports * Validate port info before assume we may use it * Switch to decorators.idempotent\_id * 
Updated from global requirements * Minor updates to multi-tenancy documentation * Follow-up iRMC driver doc update * Devstack: Create a "no ansi" logfile for the baremetal console logs * Add hardware type for IPMI using ipmitool * [Devstack] enable only pxe|agent\_ipmitool by default * Update iRMC driver doc for soft reboot and soft power off * Fix broken link in the iLO driver docs * DevStack: Fix cleaning up nodes with NVRAM (UEFI) * iRMC power driver for soft reboot and soft power off * Update proliantutils version required for Ocata release * Fix rel note format of the new feature Inject NMI * iRMC management driver for Inject NMI * Revert "Revert "Remove ClusteredComputeManager"" * Use context manager for better file handling * Updated from global requirements * Fix typo in the metrics.rst file * Allow to use no nova installation * Fix api-ref warnings * Turn NOTE into docstring * Updated from global requirements * Correctly cache "abortable" flag for manual clean steps * Use global vars for storing image deploy path's * Ipmitool management driver for Inject NMI * Generic management I/F for Inject NMI * Clean up driver\_factory.enabled\_supported\_interfaces * Add hardware types to the hash ring * Default ironic to not use nested KVM * Do not use user token in neutron client * Use only Glance V2 by default (with a compatibility option) * Enable manual-management hardware type in devstack * Register/unregister hardware interfaces for conductors * Validate the generated swift temp url * Move to tooz hash ring implementation * Add VIFs attach/detach to api-ref * DevStack: Configure nodes/environment to boot in UEFI mode * Add tests for Payloads with SCHEMAs * make sure OVS\_PHYSICAL\_BRIDGE is up before bring up vlan interface * Update troubleshooting docs on no valid host found error * Expose default interface calculation from driver\_factory * Add default column to ConductorHardwareInterfaces * Do not fail in Inspector.\_\_init\_\_ if [inspector]enabled is 
False * Use TENANT\_VIF\_KEY constant everywhere * Updated from global requirements * Allow to attach/detach VIF to portgroup * Refactor DRAC driver boot-device tests * Updated from global requirements * Remove check for UEFI + Whole disk images * Updated from global requirements * Update validate\_ports from BaremetalBasicOps * Ipmitool power driver for soft reboot and soft power off * Allow to set min,max API microversion in tempest * Skip VIF api tests for old api versions * Fix assertEqual parmeters position in unittests * Ensures that OneView nodes are free for use by Ironic * Move default image logic from DevStack to Ironic * Document HCTL for root device hints * Removes unnecessary utf-8 encoding * Move heartbeat processing to separate mixin class * Add Virtual Network Interface REST APIs * Fix logging if power interface does not support timeout * Add lsblk to ironic-lib filters * Fix setting persistent boot device does not work * Updated from global requirements * Add docs about creating release note when metrics change * Fix take over of ACTIVE nodes in AgentDeploy * Fix take over for ACTIVE nodes in PXEBoot * Don't translate exceptions w/ no message * Correct logging of loaded drivers/hardware types/interfaces * Move baremetal tempest config setting from devstack * Change object parameter of swift functions * Remove greenlet useless requirement * Fixes grammar in the hash\_partition\_exponent description * Revert "Disable placement-api by default" * Remove service argument from tempest plugin client manager * Fix the comma's wrong locations * Remove netaddr useless requirement * Generic power interface for soft reboot and soft power off * Create a table to track loaded interfaces * Remove trailing backtick * Updated from global requirements * Remove 'fork' option from socat command * Add Virtual Network Interface RPC APIs * Catch unknown exceptions in validate driver ifaces * Disable placement-api by default * Update regenerate-samples.sh api-ref script * 
Updated from global requirements * Add Virtual Network Interface Driver APIs * 'updated\_at' field value after node is updated * Add node console notifications * Add node maintenance notifications * Add ironic resources CRUD notifications * Auto-set nullable notification payload fields when needed * Update dev-quickstart: interval value cannot be -1 * Fix wrong exception message when deploy failed * Add storage\_interface to base driver class * Update multi-tenancy documentation * Add storage\_interface to node DB table * Add API reference for portgroup's mode and properties * Set access\_policy for messaging's dispatcher * Add a NodePayload test * Add test to ensure policy is always authorized * Fix bashate warning in devstack plugin * Forbid removing portgroup mode * Configure tempest for multitenancy/flat network * Wrap iscsi portal in []'s if IPv6 * Fix policy dict checkers * Updated from global requirements * Introduce generic hardware types * Remove grenade config workaround * Add portgroup configuration fields * Onetime boot when set\_boot\_device isn't persistent * Revert "Change liberty's reno page to use the tag" * Update multitenancy docs * Use oslo\_serialization.base64 to follow OpenStack Python3 * Updated from global requirements * Support defining and loading hardware types * Change liberty's reno page to use the tag * DevStack: Make $IRONIC\_IMAGE\_NAME less dependent of the name in DevStack * Fix error when system uses /usr/bin/qemu-kvm, as in CentOS 7.2 * Adds another validation step when using dynamic allocation * Fix return values in OneView deploy interface * Clarify the comment about the object hashes * Reusing oneview\_client when possible * Enhance wait\_for\_bm\_node\_status waiter * Use polling in set\_console\_mode tempest test * Make CONF.debug also reflect on IPA * Fail ironic startup if no protocol prefix in ironic api address * Remove agent vendor passthru completely * Remove iBoot, WoL and AMT drivers * Remove agent vendor passthru 
from OneView drivers * Move CONF.service\_available.ironic to our plugin * devstack: add vnc listen address * Autospec ironic-lib mocks, fix test error string * Remove deprecation of snmp drivers * Allow setting dhcp\_provider in devstack * Fix default value of "ignore\_req\_list" config option * Add unit test for create\_node RPC call * Documentation for Security Groups for baremetal servers * Remove agent vendor passthru from iLO drvers * Updated from global requirements * Add release names & numbers to API version history * Remove the VALID\_ROOT\_DEVICE\_HINTS list * Make "enabled\_drivers" config option more resilient to failures * Fix double dots at the end of a message to single dot * Clean up object code * Use IronicObject.\_from\_db\_object\_list method * Update help for 'provisioning\_network' option * Updated from global requirements * Add virtualpdu to ironic devstack plugin * Auto enable the deploy driver * Add volume\_connectors and volume\_targets to task * Renaming audit map conf sample file * Support names for {cleaning,provisioning}\_network * Allow use \*\_ipmitool with vbmc on multinode * Add RPCs to support volume target operations * Fix import method to follow community guideline * Add VolumeTarget object * Unneeded testing in DB migration of volume connector * Add volume\_targets table to database * Cleanup adding Ironic to cluster on upgrade case * Move interface validation from API to conductor side * Update the links in iLO documentation * Turn off tempest's multitenant network tests * Make all IronicExceptions RPC-serializable * Do not source old/localrc twise in grenade * Fix docs error about OOB RAID support * Remove agent vendor passthru from most drivers * Follow-up for volume connector db\_id * Remove file prefix parameter from lockutils methods * Install syslinux package only for Wheezy / Trusty * Show team and repo badges on README * Drac: Deprecate drac\_host property * Update keystone\_authtoken configuration sample in the 
install guide * Add RPCs to support volume connector operation * Add VolumeConnector object * Add volume\_connectors table to save connector information * Minor changes to neutron security groups code * Drop bad skip check in tempest plugin * Correct DB Interface migration test * Updated from global requirements * Add support for Security Groups for baremetal servers * mask private keys for the ssh power driver * Remove deprecated Neutron DHCP provider methods * Add notification documentation to install guide * Fix the message in the set\_raid\_config method * Convert iPXE boot script to Jinja template * Fix PXE setup for fresh Ubuntu Xenial * Add node (database and objects) fields for all interfaces * Move \`deploy\_forces\_oob\_reboot\` to deploy drivers * Add route to Neutron private network * Rely on portgroup standalone\_ports\_supported * Add node provision state change notification * Update the alembic migration section in the developer FAQ * Add notification documentation to administrator's guide * Revert "Remove ClusteredComputeManager" * Remove ClusteredComputeManager * Followup to 0335e81a8787 * Update iptables rules and services IPs for multinode * Add devstack setup\_vxlan\_network() * Skip some steps for multinode case * Timing metrics: iRMC drivers * Use function is\_valid\_mac from oslo.utils * Docs: Document using operators with root device hints * Add portgroup to api-ref * Updated from global requirements * Add user and project domains to ironic context * Bring configurations from tempest to ironic\_tempest\_plugin * Do not pass ipa-driver-name as kernel parameter * Timing metrics: OneView drivers * Add unit test for microversion validator * Update ironic node names for multinode case * Update devstack provision net config for multihost * Add CI documentation outline * Add possibility to remove chassis\_uuid from a node * Create dummy interfaces for use with hardware types * [install-guide] describe service clients auth * Simplify base interfaces 
in ironic.drivers.base * Integrate portgroups with ports to support LAG * Updated from global requirements * Increase verbosity of devstack/lib/ironic * Update to hacking 0.12.0 and use new checks * Add PS4 for better logfile information of devstack runs * Update guide section for messaging setup * Updated from global requirements * Replaces uuid.uuid4 with uuidutils.generate\_uuid() * Enable PXE for systems using petitboot * Fix typo of 'authenticaiton' * Add a unit test for microversion validation V1.22 * Clean up unit test of API root test * DevStack: Fix standard PXE on Ubuntu Xenial * Skip db configuration on subnodes * Ignore required\_services for multinode topology * Add PortGroups API * DevStack: Support for creating UEFI VMs * Updated from global requirements * Clarify ironic governance requirements and process * API: lookup() ignore malformed MAC addresses * TrivialFix: Fix typo in config file * DRAC get\_bios\_config() passthru causes exception * Fix exception handling in iscsi\_deploy.continue\_deploy * Log currently known iSCSI devices when we retry waiting for iSCSI target * Use kvm for ironic VMs when possible * Correct log the node UUID on failure * Updated from global requirements * Change 'writeable' to 'writable' * Add the way to get the deploy ram disks * Remove use of 'vconfig' command in devstack ironic script * Imported Translations from Zanata * Updated from global requirements * Revert "Set SUBNETPOOL\_PREFIX\_V4 to FIXED\_RANGE" * Fix typo in release note filename * Use function import\_versioned\_module from oslo.utils * Updated from global requirements * Remove "dhcp" command from the iPXE template * Fixes a small documentation typo in snmp * IPMI command should depend on console type * Trivial fix of notifications doc * Mock ironic-lib properly in test\_deploy\_utils * Remove ..agent.build\_instance\_info\_for\_deploy() in Pike * Trivial: fix typo in docstring * Add a missing error check in ipmitool driver's reboot * Adding Timing 
metrics for DRAC drivers * Remove 'agent\_last\_heartbeat' from node.driver\_internal\_info * Add power state change notifications * Skip create\_ovs\_taps() for multitenancy case * Remove unnecessary '.' before ':' in ironic rst * Updated from global requirements * Imported Translations from Zanata * Replace parse\_root\_device\_hints with the ironic-lib version one * Fixes parameters validation in SSH power manager * Fix API docs to include API version history * fix a typo in document * Updated from global requirements * Update guide for PXE multi-architecture setup * Remove "agent\_last\_heartbeat" internal field from agent drivers * No need to clear "target\_provision\_state" again from conductor * Trivial: fix warning message formatting * Updated from global requirements * Fix some typos * Add docs about releasing ironic projects * Fix unit tests failing with ironic-lib 2.1.1 * Do not hide unexpected exceptions in inspection code * Avoid name errors in oneview periodics * A few fixes in Multitenancy document * Introduce default\_boot\_option configuration option * Fix broken xenial job * Fix setting custom IRONIC\_VM\_NETWORK\_BRIDGE * Update configure\_tenant\_networks * Remove wrong check from conductor periodic task * Remove reservation from sync power states db filter * Fix a typo in deploy.py * Updated from global requirements * Fix some PEP8 issues and Openstack Licensing * Clarify when oneview node can be managed by ironic * Add tense guide to release note FAQ * Refactor \_test\_build\_pxe\_config\_options tests * Imported Translations from Zanata * OneView driver docs explaining hardware inspection * Enable release notes translation * Clean up provision ports when reattempting deploy * Remove unnecessary option from plugin settings * Cleanup unused (i)PXE kernel parameters * Set SUBNETPOOL\_PREFIX\_V4 to FIXED\_RANGE * Enable DeprecationWarning in test environments * Fix \_lookup() method for node API routing * Log node state transitions at INFO level 
* Update ironic config docs for keystone v3 * Clean exceptions handling in conductor manager * Move build\_instance\_info\_for\_deploy to deploy\_utils * Fix undisplayed notes in Quick-Start * Keep numbering of list in Install Guide * Add description for vendor passthru methods * [install-guide] describe pxe.ipxe\_swift\_tempurl * Fix docstrings in tempest plugin baremetal json client * Add entry\_point for oslo policy scripts * Remove unneeded exception handling from conductor * Remove unused methods in common/utils.py * Do not use mutable object as func default param * Trivial: Fix some typos in comments and docstring * doc: Add oslo.i18n usage link * Replace assertTrue(isinstance()) with assertIsInstance() * Fix typo: remove redundant 'the' * Support multi arch deployment * Updated from global requirements * Use method delete\_if\_exists from oslo.utils * Use assertRaises() instead of fail() * Cleanup get\_ilo\_license() * Fix grenade jobs * Add a missing whitespace to an error message * Invalid URL and Typo in enrollment.rst * Update configuration reference link to latest draft * Update external links to developer documentation * Fail test if excepted error was not raised * Add inspection feature for the OneView drivers * Use correct option value for standalone install * Move flavor create under 'VIRT\_DRIVER == ironic' * Change links to point to new install guide * Fix inexact config option name in multitenancy.rst * Fix typos in docstring/comments * Have bashate run for entire project * Change 'decom' to clean/cleaning * Fix docstring typo in test\_common.py * Fix invalid git url in devstack/local.conf sample * Fix absolute links to install-guide.rst in developer docs * Update developer's guide "Installation Guide" link * Add link to new guide in old install guide * Fixing Typo * [install-guide] Import "Setup the drivers for the Bare Metal service" * [install-guide] Import "Trusted boot with partition image" * [install-guide] Import "Building or downloading a 
deploy ramdisk image" * [install-guide] Import "Appending kernel parameters to boot instances" * [install-guide] Import configdrive * [install-guide] Import HTTPS, standalone and root device hints * [install-guide] Import "Enrollment" and "Troubleshooting" sections * [install-guide] Import "Local boot with partition images" * [install-guide] Import "Flavor creation" * [install-guide] Import "Image requirements" * [install-guide] Import "integration with other OpenStack components" * [install-guide] Import Install and configure sections * [install-guide] Import "Bare Metal service overview" * Remove unused method is\_valid\_ipv6\_cidr * Support https in devstack plugin * Use six.StringIO instead of six.moves.StringIO * Remove unneeded try..except in heartbeat * Fix a typo in helper.py * Add more details to MIGRATIONS\_TIMEOUT note * Fixes wrong steps to perform migration of nodes * Increase timeout for migration-related tests * Update reno index for Newton * Add i18n \_() to string * Change the logic of selecting image for tests * Always return chassis UUID in node's API representation * Updated from global requirements * Fix iLO drivers to not clear local\_gb if its not detected 6.2.0 ----- * Clean up release notes for 6.2.0 * Fix DRAC passthru 'list\_unfinished\_jobs' desc * DevStack: Use Jinja2 for templating when creating new VMs * DRAC: list unfinished jobs * Fix broken unit tests for get\_ilo\_object * Sync ironic-lib.filters from ironic-lib * Documentation change for feature updates in iLO drivers * Remove websockify from requirements * Add a note about security groups in install guide * Remove unnecessary setUp * Adds a missing space in a help string * Remove duplicated line wrt configdrive * Notification event types have status 'error' * Refactor common checks when instantiating the ipmitool classes * Grub2 by default for PXE + UEFI * Support configdrive in iscsi deploy for whole disk images * Remove NotificationEventTypeError as not needed * Mark untested 
drivers as unsupported * [trivial] Fix typo in docstring * Replace "phase" with "status" in notification base * Updated from global requirements * Fix test syntax error in devstack/lib/ironic * Separate WSGIService from RPCService * Fix link from doc index to user guide * Update proliantutils version required for Newton release * Remove unused argument in Tempest Plugin * Fix docstrings in Tempest Plugin REST client for Ironic API * Fix docstrings to match with method arguments * Remove cyclic import between rpcapi and objects.base * Fix nits on DRAC OOB inspection patch * Fix DRAC failure during automated cleaning * Replace six iteration methods with standard ones * Timing metrics: iLO drivers * Use assertEqual() instead of assertDictEqual() * Configure clean network to provision network * Updated from global requirements * \_\_ne\_\_() unit tests & have special methods use (self, other) * Add metrics to administrator guide * Add \_\_ne\_\_() function for API Version object * Update unit tests for neutron interface * Update ironic/ironic.conf.sample * Allow using TempURLs for deploy images * Log a warning for unsupported drivers and interfaces * Add a basic install guide * [api-ref] Remove temporary block in conf.py * Deny chassis with too long description * Update the string format * [api-ref] Correcting type of r\_addresses parameter * Remove unused file: safe\_utils.py * DRAC OOB inspection * Remove neutron client workarounds * Update driver requirement for iRMC * Refresh fsm in task when a shared lock is upgraded * Updated from global requirements * Fix exception handling in NodesController.\_lookup * Remove unused LOG and CONF * Fix updating port.portgroup\_uuid for node * Add a newline at the end of release note files * Replace DOS line endings with Unix * Fix ironic-multitenant-network job * Update test\_update\_portgroup\_address\_no\_vif\_id test * Use assertIsInstance/assertNotIsInstance in tests * Add standalone\_ports\_supported to portgroup - DB * 
Config logABug feature for Ironic api-ref * DevStack: Configure retrieving logs from the deploy ramdisk * DRAC RAID configuration * Metrics for ConductorManager * Option to enroll nodes with drac driver * Allow suppressing ramdisk logs collection * Fix pep8 on Python3.5 * Fix incorrect order of params of assertEqual() * Updated from global requirements * Fix for check if dynamic allocation model is enabled * Add multi-tenancy section to security doc * Fix formatting strings in LOG.error * Mask instance secrets in API responses * Update documentation for keystone policy support * Fix typo in policy.json.sample * Add node serial console documentation * Prevent URL collisions with sub-controllers: nodes/ports * Centralize Config Options - patch merge, cleanup * Update the webapi version history reference * Fix fall back to newer keystonemiddleware options * OneView test nodes to use dynamic allocation * Updated from global requirements * Fix issues in dev-quickstart and index * Updated from global requirements * Add notification base classes and docs * Update hacking test-requirement * Documentation update * Removed unneeded vlan settings from neutron config * iLO drivers documentation update * Move console documentation to separate file * Switch Inspector interface to pass keystoneauth sessions * Adds instructions to perform nodes migration * Replace DB API call to object's method in iLO drivers * Move "server\_profile\_template\_uri" to REQUIRED\_ON\_PROPERTIES * Using assertIsNone() is preferred over assertEqual() * Updated from global requirements * Update api-ref for v1.22 * Updated from global requirements * Pass swiftclient header values as strings * Get ready for os-api-ref sphinx theme change * Log node uuid rather than id when acquiring node lock * Allow changing lock purpose on lock upgrade * Fix typo: interations -> iterations * Update code to use Pike as the code name * Operator documentation for multitenancy * Always set DEFAULT/host in devstack * Fix 
AgentDeploy take\_over() docstring * Clean imports in code * Copy iPXE script over only when needed * Fix incorrect order of params of assertEqual() * Fix iLO drivers inconsistent boot mode default value * Update readme file * Bring upgrade documentation up to date * Fix test\_find\_node\_by\_macs test * Use memory mode for sqlite in db test * Fix key word argument interface\_type -> interface * Use upper-constraints for all tox targets * Add nova scheduler\_host\_subset\_size option to docs * Fix the description of inspection time fields * DevStack: No need to change the ramdisk filesystem type * Fix incorrect order of params of assertEqual() in test\_objects.py * Fix assertEqual(10, 10) in unit/api/v1/test\_utils.py * Adding InfiniBand Support * Doc: Recommend users to update their systems * Centralize config options - [iscsi] * Centralize config options - [pxe] * Add "erase\_devices\_metadata\_priority" config option * Updated from global requirements * Update renos for fix to ipmi's set-boot-device * Remove unused [pxe]disk\_devices option * IPMINative: Check the boot mode when setting the boot device * IPMITool: Check the boot mode when setting the boot device * Fix ssh credential validation message * Remove CONF.import\_opt() from api/controllers/v1/node.py * Document retrieving logs from the deploy ramdisk * Fix updating port MAC address for active nodes * Remove incorrect CONF.import\_opt() from test\_ipmitool.py 6.1.0 ----- * Rename some variables in test\_ipminative.py * Update proliantutils version required for Newton release * Refactor OneView dynamic allocation release notes * Clean up release notes for 6.1.0 * Refactor multitenant networking release notes * DevStack guide: Bump IRONIC\_VM\_SPECS\_RAM to 1280 * Deprecate ClusteredComputeManager * 'As of' in documentation is incorrect * Updated Dev quickstart for viewing doc changes * Remove duplicate parameters from local.conf example * Check keyword arguments * Deprecate putting periodic tasks on a 
driver object * Updated from global requirements * Add metrics for the ipminative driver * test\_console\_utils: using mock\_open for builtin open() * Update devstack configure\_ironic\_ssh\_keypair * Trivial: Remove useless function call in glance service test * Simplify code by using mask\_dict\_password (again) * Officially deprecate agent passthru classes and API * Timing metrics: pxe boot and iscsi deploy driver * Fix the mistakes in Installation Guide doc * Use devstack test-config phase * Rename BaseApiTest.config to app\_config * Documentation fixes for iLO SSL Certificate feature * Metrics for agent client * Simplify code by using mask\_dict\_password * OneView driver docs explaining Dynamic Allocation * Docs: Run py34 tox test before py27 * Collect deployment logs from IPA * Fix typo * Remove oslo-incubator references * Promote agent vendor passthru to core API * Update add nova user to baremetal\_admin behaviour * Fix typo in Install-guide.rst file * Replacing generic OneViewError w/ InvalidNodeParameter * Add Dynamic Allocation feature for the OneView drivers * Fix \_\_all\_\_ module attributes * Fix tempest realted exceptions during docs build * Add keystone policy support to Ironic * Follow up to keystoneauth patch * Add a data migration to fill node.network\_interface * Test that network\_interface is explicitly set on POST/PATCH * Updated from global requirements * Create a custom StringField that can process functions * Revert "Devstack should use a prebuilt ramdisk by default" * Fix for "db type could not be determined" error message * Update devstack plugin with new auth options * Migrate to using keystoneauth Sessions * Updating dev quickstart to include compatiblity for newest distros * Update nova scheduler\_host\_manager config docs * Extend the "configuring ironic-api behind mod\_wsgi" guide * Add metrics for the ipmitool driver * Timing metrics for agent deploy classes * Pass agent metrics config via conductor * Minor docstring and 
unittests fixes for IPMIConsole * Move default network\_interface logic in node object * Updated from global requirements * Devstack should use a prebuilt ramdisk by default * Updated tests for db migration scripts * Centralize config options - [agent] * Log full config only once in conductor * Add node.resource\_class field * Add api-ref for new port fields * Add support for the audit middleware * Change comment regarding network\_interface * Fix rendering for version 1.14 * Use 'UUID', not 'uuid' in exception strings * IPMITool: add IPMISocatConsole and IPMIConsole class * Use assertEqual() instead of assertDictEqual() * Remove unused code when failing to start console * Trivial: Fix a trivial flake8 error * Centralize config options - [deploy] * Centralize config options - [api] * Added note to local.conf addressing firewall/proxy blocking Git protocol * Bug fixes and doc updates for adoption * Do the VM setup only when requested * Remove unused import * Remove duplicate copyright * Add build-essential to required packages for development * Implement new heartbeat for AgentDeploy * Add Python 3.5 tox venv * Updated from global requirements * Doc update for in-band cleaning support on more drivers * Updated from global requirements * Support to validate iLO SSL certificate in iLO drivers * Update {configure|cleanup}ironic\_provision\_network * Add test to verify ironic multitenancy * Add multitenancy devstack configuration examples * Following the hacking rule for string interpolation at logging * Centralize config options - [DEFAULT] * Add py35 to tox environments * Metric chassis, driver, node, and port API calls * Fix fake.FakeBoot.prepare\_ramdisk() signature * Follow-up to 317392 * Follow-up patch of 0fcf2e8b51e7dbbcde6d4480b8a7b9c807651546 * Updated from global requirements * Expose node's network\_interface field in API * Update devstack section of quickstart to use agent\_ipmitool * Grammar fix in code contribution guide * Deprecate 
[ilo]/clean\_priority\_erase\_devices config * Add configure\_provision\_network function * Update Ironic VM network connection * Centralize config options - [neutron] * Follow-up fixes to 206244 * Nova-compatible serial console: socat console\_utils * Updated from global requirements * Add multitenancy-related fields to port API object * Update the deploy drivers with network flipping logic * Add 'neutron' network interface * Fix docstring warnings * Add and document the "rotational" root device hint * Add network interface to base driver class * Increase devstack BM VM RAM for coreos to boot * Config variable to configure [glance] section * Add support for building ISO for deploy ramdisk * Add a doc about appending kernel parameters to boot instances * Trivial grammar fixes to the upgrade guide * Remove unused expected\_filter in the unit test * Updated from global requirements * Remove white space between print and () * Remove IBootOperationError exception * Delete bios\_wsman\_mock.py from DRAC driver * Correct reraising of exception * Allow to enroll nodes with oneview driver * Add internal\_info field to ports and portgroups * Centralize config options - [glance] * Document API max\_limit configuration option * Fix two types in ironic.conf.sample * Remove unused LOG * Remove iterated form of side effects * Improve the readability of configuration drive doc part * Drop IRONIC\_DEPLOY\_DRIVER\_ISCSI\_WITH\_IPA from documentation * Allow to use network interfaces in devstack * Updated from global requirements * Centralize config options - [virtualbox] * Centralize config options - [swift] * Centralize config options - [ssh] * Centralize config options - [snmp] * Add Ironic specs process to the code contribution guide * Add network\_interface node field to DB and object * Fix typo in inspection.rst * Add missing translation marker to clear\_node\_target\_power\_state * Throwing an exception when creating a node with tags * Follow-up patch of 
9a1aeb76da2ed53e042a94ead8640af9374a10bf * Fix releasenotes formatting error * Improve tests for driver's parse\_driver\_info() * Centralize config options - [seamicro] * Centralize config options - [oneview] * Centralize config options - [keystone] * Centralize config options - [irmc] * Centralize config options - [ipmi] * Centralize config options - [inspector] * Centralize config options - [ilo] * Introduce new driver call and RPC for heartbeat * Remove unnecessary calls to dict.keys() * Fail early if ramdisk type is dib, and not building * Add dbapi and objects functions to get a node by associated MAC addresses * Drop references to RPC calls from user-visible errors * Centralize config options - [iboot] * Updated from global requirements * Replace dict.get(key) in api & conductor tests * Use PRIVATE\_NETWORK\_NAME for devstack plugin * Create common neutron module * Updated from global requirements * Properly set ephemeral size in agent drivers * Add validation of 'ilo\_deploy\_iso' in deploy.validate() * Restore diskimage-builder install 6.0.0 ----- * Updated from global requirements * Mask password on agent lookup according to policy * Clear target\_power\_state on conductor startup * Replace assertRaisesRegexp with assertRaisesRegex * Fix test in test\_agent\_client.py * Replace dict.get(key) in drivers unit tests * Docs: Fix some typos in the documentation * Removes the use of mutables as default args * Follow-up to Active Node Creation * Fix parameter create-node.sh * Replace dict.get(key) in drivers/modules/\*/ tests * Change port used for Ironic static http to 3928 * Centralize config options - [dhcp] * Centralize config options - [database] * Centralize config options - [conductor] * Centralize config options - [cisco\_ucs] * Centralize config options - [cimc] * Centralize config options - [console] * No need for 'default=None' in config variable * Fix typo in agent driver * Use assertIn and assertNotIn * Document testing an in-review patch with 
devstack * Replace vif\_portgroup\_id with vif\_port\_id * Use assert\_called\_once\_with in test\_cleanup\_cleanwait\_timeout * Trivial comments fix * Add Link-Local-Connection info to ironic port * Remove workaround for nova removing instance\_uuid during cleaning * Document support for APC AP7921 * Updated from global requirements * Add cleanwait timeout cleanup process * Add restrictions for changing portgroup-node association * Imported Translations from Zanata * Support for APC AP7922 * fix sed strings in developer doc * Replace dict.get(key) with dict[key] in unit tests * Fix JSON error in documentation * Remove support for the old ramdisk (DIB deploy-ironic element) * Updated from global requirements * Document packing and unpacking the deploy ramdisk * Fix nits related to Ports api-ref * Gracefully degrade start\_iscsi\_target for Mitaka ramdisk * Update the api-ref documentation for Drivers * Update comment from NOTE to TODO * Active Node Creation via adopt state * Update resources subnet CIDR * remove neutron stuff from devstack deb packages * Keep original error message when cleaning tear down fails * Add config option for ATA erase fallback in agent * Fix markup in documentation * Imported Translations from Zanata * Updated from global requirements * Add debug environment to tox * Correct RAID documentation JSON * Added ironic-ui horizon dashboard plugin to ironic docs * Updated from global requirements * Disable disk\_config compute-feature-enabled in tempest * Make sure create\_ovs\_taps creates unique taps * NOTIFICATION\_TRANSPORT should be global * Remove links to github for OpenStack things * Update the api-ref documentation for Ports * Add one use case for configdrive * Updated from global requirements * Remove hard-coded keystone version from setup * Use a single uuid parameter in api-ref * Use correct iscsi portal port in continue\_deploy * Fix raises to raise an instance of a class * Fix formatting of a release note * Remove support for 
'hexraw' iPXE type * Use messaging notifications transport instead of default * Updated from global requirements * tempest: start using get\_configured\_admin\_credentials * Fix signature for request method * Remove backward compatibility code for agent url * Add 'How to get a decision on something' to FAQ * Follow-up patch of 8e5e69869df476788b3ccf7e5ba6c2210a98fc8a * Introduce provision states: AVAILABLE, ENROLL * minor changes to security documentation * Add support for API microversions in Tempest tests * Make use of oslo-config-generator * Mention RFEs in README * Make the ssh driver work on headless VirtualBox machines * Allow to specify node arch * Remove unused is\_valid\_cidr method * Updated from global requirements * Restart n-cpu after Ironic install * Move all cleanups to cleanup\_ironic * Keep backward compatibility for openstack port create * Revert "Run smoke tests after upgrade" * Add some docs about firmware security * Change HTTP\_SERVER's default value to TFTPSERVER\_IP * Update the api-ref documentation for Root and Nodes * Read the Sphinx html\_last\_updated\_fmt option correctly in py3 * devstack: Configure console device name * Updated from global requirements * Replace project clients calls with openstack client * Stop unit-testing processutils internals * Fix start order for Ironic during upgrade * Run smoke tests after upgrade * Add ironic to enabled\_services * Remove link to Liberty configs * Updated from global requirements * Fix shutdown.sh & upgrade.sh for grenade * add mitaka configuration reference link to the index page * Remove "periodic\_interval" config option * Remove verbose option * Updated from global requirements * Eliminate warnings about rm in api-ref build * Remove deprecated driver\_periodic\_task * Remove backward compat for Liberty cleaning * Remove [conductor]/clean\_nodes config option * Remove "message" attribute support from IronicException * Setup for using the Grenade 'early\_create' phase * Add support for dib 
based agent ramdisk in lib/ironic * Remove deprecated [pxe]/http\_\* options * Remove [agent]/manage\_tftp option * Remove "discoverd" configuration group * Regenerate sample config * Doc: Replace nova image-list * Migrate to os-api-ref library * Add require\_exclusive\_lock decorators to conductor methods * Fix syntax error in devstack create-node script * Updated from global requirements * Fix formatting error in releasenotes * Allow vendor drivers to acquire shared locks * Modify doc for RAID clean steps in manual cleaning * Make iPXE + TinyIPA the defaults for devstack * Only install DIB if going to use DIB * Add some docs/comments to devstack/plugin.sh * devstack: Fetch tarball images via https * DevStack: Support to install virtualbmc from source * Regenerate sample configuration * Allow configuring shred's final overwrite with zeros * Updated from global requirements * Deployment vmedia operations to run when cleaning * Extend IRONIC\_RAMDISK\_TYPE to support 'dib' * Cleanup unused conf variables * Adds RAID interface for 'iscsi\_ilo' * Pass environment through to create-node.sh * DevStack: Support to install pyghmi from source * RAID interface to support JBOD volumes * Remove ClusteredComputeManager docs * API: Check for reserved words when naming a node * File download fails with swift pseudo folder * Migrate api-ref into our tree * Updating dev-quickstart.rst file links * Devstack: allow extra PXE params * Updated from global requirements * Update resources only for specific node during deletion * Fix tox cover command * Fix VirtualBox cannot set boot device when powered on * Set root hints for disks less than 4Gb and IPA * Use Ironic node name for VM * Allow to sepecify VM disk format * Update compute\_driver in documentation * Replace logging constants with oslo.log * iscsi: wipe the disk before deployment * Joined 'tags' column while getting node * FIX: IPMI bmc\_reset() always executed as "warm" * Fix API node name updates * DevStack: Parametrize 
automated\_clean * Very important single character typo fix * Remove two DEPRECATED config options from [agent] * Allow to set Neutron port setup delay from config * Update ironic.config.sample * Fix usage of rest\_client expected\_success() in tests * Fixed nits in the new inspection doc page * Imported Translations from Zanata * Updated from global requirements * Document how to run the tempest tests * Update the inspection documentation * ipxe: retry on failure * Add note on prerequisite of 'rpm' file extraction * Follow-up patch of 0607226fc4b4bc3c9e1738dc3f78ed99e5d4f13d * Devstack: Change to use 'ovs-vsctl get port tag' * Restart consoles on conductor startup * Remove backwards compat for CLEANING * Make sure Cisco drivers are documented on IRONIC\_DEPLOY\_DRIVER * Remove two deprecated config option names from [agent] section * Updated from global requirements * Add support for Cisco drivers in Ironic devstack * Updated from global requirements * [docstring] Update ironic/api/controllers/v1/\_\_init\_\_.py comment * add new portal\_port option for iscsi module * Fix tinyipa initrd tarballs.openstack.org file name * Remove description of 'downgrade' for ironic-dbsync * In node\_power\_action() add node.UUID to log message * Rename juno name state modification method * Prepare for transition to oslo-config-generator * Updated from global requirements * Reduce amount of unhelpful debug logging in the API service * Correct api version check conditional for node.name * Updated from global requirements * Enable download of tinyipa prebuilt image * Follow-up to I244c3f31d0ad26194887cfb9b79f96b5111296c6 * Use get\_admin\_context() to create the context object * Updated from global requirements * Don't power off non-deploying iLO nodes in takeover * deployment vmedia ops should not be run when not deploying * Fix NamedTemporaryFile() OSError Exception * Updated from global requirements * Fix \_do\_next\_clean\_step\_fail\_in\_tear\_down\_cleaning() * Make tox respect 
upper-constraints.txt * Adopt Ironic's own context * Allow fetching IPA ramdisk with branch name * Tune interval for node provision state check * Fix typo in devstack script * Note on ilo firmware update swift url scheme * Force iRMC vmedia boot from remotely connected CD/DVD * Normalize MAC OctetString to fix InvalidMAC exception * Enable Grenade usage as a plugin * Readability fixes for cleaning\_reboot code * Support reboot\_requested bool on agent clean\_steps * Update tempest compute flavor\_ref/flavor\_ref\_alt * Move testcases related to parse\_instance\_info() * Improve check for ssh-key to include public and private files * Assign valid values to UUIDFields in unit tests * Fix typos in some source files * Follow up patch of 843ce0a16160f2e2710ef0901028453cd9a0357c * Clean up test node post data * Fix: Duplicated driver causes conductor to fail * Use trueorfalse function instead of specific value * Update reno for stable/mitaka * Doc update to enable HTTPS in Glance and Ironic comm * Fix race in hash ring refresh unit test * Addressing nits on I2984cd9d469622a65201fd9d50f964b144cce625 * Config to stop powering off nodes on failure 5.1.0 ----- * Documentation update for partition image support * Delete bridge "brbm" in devstack/unstack.sh * Remove unneeded use of task.release\_resources() * [Devstack]Add ability to enable shellinabox SSL certificate * Append 'Openstack-Request-Id' header to the response * Add disk\_label and node\_uuid for agent drivers * Fix sphinx docs build * Update authorized\_keys with new key only * Agent: Out-of-band power off on deploy * Document partition image support with agent\_ilo * Add support for partition images in agent drivers * Update the text in user guide of ironic * Translate requests exception to IronicException * Extend the Conductor RPC object * Make sure target state is cleared on stable states * Removes redundant "to" * Install apparmor b/c Docker.io has undeclared dep * Don't depend on existing file perm for qemu 
hook * Move \_normalize\_mac to driver utils * Devstack: add check of chassis creating * Allow user to specify cleaning network * Update ironic\_ssh\_check method * Adds doc - firmware update(iLO) manual clean step * Add ensure\_thread\_contain\_context() to task\_manager * [devstack] Do not die if neutron is disabled * Follow-up of firmware update(iLO) as manual cleaning step * Updating driver docs with DL hardwares requirements * Remove unneeded 'wait=False' to be more clean and consistent * Pass region\_name to SwiftAPI * Uses jsonschema library to verify clean steps * Fix important typo in the ipmitool documentation * DevStack: Allow configuring the authentication strategy * Add documentation for RAID 5.0.0 ----- * Add documentation about the disk\_label capability * SSH driver: Remove pipes from virsh's list\_{all, running} * Add documentation for the IPMITool driver * Fix error in cleaning docs * Replace depricated tempest-lib with tempest.lib * Add new 'disk\_label' capability * Fix JSON string in example of starting manual cleaning * Remove 'grub2' option in creating whole-disk-images * Update iRMC driver doc for inspection * Don't use token for glance & check for some unset vars * Use 'baremetal' flavor in devstack * [devstack] Fix IPA source build on Fedora * DevStack: Enable VirtualBMC logs * Support for passing CA certificate in Ironic Glance Communication * Updated from global requirements * Firmware update(iLO) as manual cleaning step * Updated from global requirements * Remove code duplication * Update iLO documentation for clean step 'reset\_ilo' * Refactor the management verbs check to utils * Updated from global requirements * Remove duplicate doc in ironic.conf.sample * Prep for 5.0 release * Fix unittests after new releases of libraries * Updating docs with support for DL class servers * Update CIMC driver docs to install ImcSdk from PyPi * Add returns to send\_raw() ipmitool function * Add function for dump SDR to ipmitool driver * Add clean 
step in iLO drivers to activate iLO license * Update proliantutils version to 2.1.7 for Mitaka release * ipxe: add --timeout parameter to kernel and initrd * Updated iLO driver documentation to recommend ipmitool version * Refactor driver loading to load a driver instance per node * Clean up driver loading in init\_host * add wipefs to ironic-lib.filters * Updated from global requirements * Use assertEqual/Greater/Less/IsNone * Follow up nits of 3429e3824c060071e59a117c19c95659c78e4c8b * API to list nodes using the same driver * [devstack] set ipa-debug=1 for greater debugability * Loose python-oneviewclient version requirement * Set node last\_error in TaskManager * Add possible values for config options * Follow up nits of irmc oob inspection * Enable removing name when updating node * Make some agent functions require exclusive lock * Add db api layer for CRUD operations on node tags * Update proliantutils version required for Mitaka release * Add deprecated\_for\_removal config info in ironic.conf.sample * Update ironic.conf.sample * Tolerate roles in context.RequestContext * Switch to Futurist library for asynchronous execution and periodic tasks * Move \_from\_db\_object() into base class * Add ironic\_tempest\_plugin to the list of packages in setup.cfg * Fix gate broken by sudden remove of SERVICE\_TENANT\_NAME variable * Add manual cleaning to documentation * Import host option in base test module * Fixes automated cleaning failure in iLO drivers * Updated from global requirements * DevStack: Add support for deploying nodes with pxe\_ipmitool * Change the libvirt NIC driver to virtio * DevStack: Support to install diskimage-builder from source * [Devstack]Add ability to enable ironic node pty console * Use 'node' directly in update\_port() * Add links to the standalone configdrive documentation * DevStack: Install squashfs-tools * [DevStack] fix restart of nova compute * Use http\_{root, url} config from "deploy" instead of "pxe" * During cleaning, store 
clean step index * Use oslo\_config.fixture in unit tests * Introduce driver\_internal\_info in code-contribution-guide * Updated from global requirements * Correct instance parameter description * Add node.uuid to InstanceDeploy error message * Set existing ports pxe\_enabled=True when adding pxe\_enabled column * Augmenting the hashing strategy * Add hardware inspection module for iRMC driver * Document possible access problems with custom IRONIC\_VM\_LOG\_DIR path * Add documentation for proxies usage with IPA * Updated from global requirements * Devstack: create endpoint in catalog unconditionally * Comment out test options that already exists on tempest's tree * Replace config 'clean\_nodes' with 'automated\_clean' * Remove 'zapping' from code * Cache agent clean steps on node * API to manually clean nodes * Replace ifconfig with ip * Updated iLO documentation for boot mode capability * Agent vendor handles manual cleaning * Remove downgrade support from migrations * Enable tinyipa for devstack Ironic * Disable clean step 'reset\_ilo' for iLO drivers by default * Add proxy related parameters to agent driver * Update ironic.conf.samle * Fix genconfig "tempdir" inconsistency * Update the home page * Follow-up on dracclient refactor * Log warning if ipmi\_username/ipmi\_password missing * Add portgroups to support LAG interfaces - net * Add portgroups to support LAG interfaces - RPC * Add portgroups to support LAG interfaces - objs * Add portgroups to support LAG interfaces - DB * Fix missing lookup() vendor method error for pxe\_drac * Refresh ssh verification mechanism * Refactor install-guide to configure API/Conductor seperately * Enable Ironic Inspector for Cisco Drivers * Fix doc8's "duplicated target names" (D000) error * Remove conditional checking the auth\_strategy values * Extend root device hints to support device name * Fix spawn error hook in "continue\_node\_clean" RPC method * Enable doc8 style checker for \*.rst files * Updated from global 
requirements * Show transitions initiated by API requests * Remove hard-coded DEPLOYWAIT timeout from Baremetal Scenario * Fix tiny format issue with install\_guide * Add priority to manual clean step example * Use node uuid in some exception log * Fix error message in devstack * Updated from global requirements * [devstack] Restart nova compute before checking hypervisor stats * Imported Translations from Zanata * Fix minor typo * DRAC: cleanup after switch to python-dracclient * API service logs access requests again * Updated from global requirements * Correct port\_id parameter description * Remove duplicate words in API version history * Remove unneeded enable\_service in dev-quickstart.rst * Clarify that size in root device hints and local\_gb are often different * Update ImcSdk requirement to use PyPi * Clean up 'no\_proxy' unit tests * Add more unit tests for NO\_PROXY validation * Add ability to cache swift temporary URLs * DRAC: switch to python-dracclient on vendor-passthru * Migrate Tempest tests into Ironic tree * Use Tempest plugin interface * Fix issues with uefi-ipxe booting * Update links to OpenStack manuals * Fix issue where system hostname can impact genconfig * Add choices option to several options * Add xinetd and its TFTP configuration in Install Guide * Reorganize the developer's main page * Document backwards compat for passthru methods * Drop MANIFEST.in - it's not needed pbr * Clean up unneeded deprecated\_group * Devstack: replace 'http' with SERVICE\_PROTOCOL * Clarify rejected status in RFE contribution docs * Bring UP baremetal bridge * Adjust ipminative.\_reboot to comply with pyghmi contract * Document the process of proposing new features * Updated from global requirements * Use assertTrue/False instead of assertEqual(T/F) * devstack 'cleanup-node' script should delete OVS bridges * Change default IRONIC\_VM\_SPECS\_RAM to 1024 * Remove release differences from flavor creation docs * Add documentation for standalone ilo drivers * 
Devstack: Make sure libvirt's hooks directory exists * Update the ironic.conf.sample file * Follow-up on refactor DRAC management interface * Allow user to set arch for the baremetal flavor and ironic node * tox: make it possible to run pep8 on current patch only * Devstack: Use [deploy] erase\_devices\_priority config option * Remove bashate from envlist * Use ironic-lib's util methods * Refactor objects into a magic registry * Don't return tracebacks in API response in debug mode * Updated from global requirements * Change assertTrue(isinstance()) by optimal assert * Remove \*/openstack/common\* in tox * Remove vim headers in source files * Trival: Remove unused logging import * Use ironic-lib's qemu\_img\_info() & convert\_image() * Update "Developer Quick-Start" guide for Fedora 23+ * Enable ironic devstack plugin in local.conf sample * Correct a tiny issue in install-guide * Install 'shellinabox' package for Ironic * Fix translations in driver base * Run flake8 against the python scripts under tools/ and devstack/tools * Add UEFI support for iPXE * Add console feature to ssh driver * Conductor handles manual cleaning * Add extensions to the scripts at devstack/tools/ironic/scripts * Fix "No closing quotation" error when building with tox * Devstack: Remove QEMU hook at ./unstack * Run bashate as part of the pep8 command * Fix bashate errors in grenade plugin * Fix syntax errors in the shell scripts under devstack/tools * Use the apache-ironic.template from our tree * Fix typo in ironic/conductor/manager.py * genconfig: Debug info for unknown config types * Keep the console logs for all boots * Use imageutils from oslo.utils * Add documentation for user inputs as HTTPS URLs * Add bashate tox command * Updated from global requirements * Add documentation for swiftless intermediate images * DRAC: switch to python-dracclient on management interface * DRAC: switch to python-dracclient on power interface * Follow up nits of Exception to str type conversion * Clean 
up variables in plugin.sh * Replace assertEqual(None, \*) with assertIsNone in tests * Add utility function to validate NO\_PROXY * Add bifrost as an option projects in Service overview * Sequence diagrams for iLo driver documentation * Refactor ilo documentation for duplicate information * Update swift HTTPs information in ilo documentation * Updated from global requirements * Deprecated tox -downloadcache option removed * Remove override-defaults * Use 'service\_type' of 'network'. Not 'neutron' * Update ironic.conf.sample by applying the bug fix #1522841 * Add grenade plugin * Follow up patch to correct code-contribute-guide * Fix iPXE template for whole disk image * Add devstack plugin * Copy devstack code to ironic tree * Add FSM.is\_stable() method * Explicitly depend on WebTest>=2.0 * Always pass keystone credentials to neutronclient * Remove extra space in 'host' config comment * Add oslo\_config.Opt support in Ironic config generator * Refactor disk partitioner code from ironic and use ironic-lib * Simplifies exception message assurance for oneview.common tests * Use node.uuid directly in stop\_console() * Correct NotImplemented to NotImplementedError in rpcapi.py * Adding oneview.common tests for some method not well tested * Add port option support for ipmitool * Numerous debug messages due to iso8601 log level * Handle deprecated opts' group correctly * Updated from global requirements * Clarify what changes need a release note * Remove wsgi reset\_pool\_size\_to\_default test * Add Mitaka release notes page * Update python-scciclient version number * Add release notes from Icehouse to Liberty * Add Code Contribution Guide for Ironic * Replace HTTP 'magic numbers' with constants * Documentation points to official release notes 4.3.0 ----- * Fix awake AMT unit test * Fix bug where clean steps do not run * Add reno for AMT wakeup patch * Updating OneView driver requirements and docs * Correct the db connection string in dev-quickstart * Split 
BaseConductorManager from ConductorManager * Validate arguments to clean\_step() decorator * test: Remove \_BaseTestCase * Wake up AMT interface before send request * Fall back to old boot.ipxe behaviour if inc command is not found * Only mention IPA in the quick start and user guides for DevStack * Improve options help for image caching * Add troubleshooting docs for "no valid host found" * change mysql url in dev-quickstart doc * Extend FAQ with answer of how to create a new release note * Sync ironic.conf sample * Comment spelling error in ironic-images.filters file * Updated from global requirements * Add a developer FAQ * Add tests for RequestContextSerializer * Add a test to enforce object version bump correctly * force releasenotes warnings to be treated as errors * Avoid RequestContextSerializer from oslo.messaging * Follow up patch for the first commit of iRMC new boot I/F * Move iso8601 as a test dependency only * Catch up release notes for Mitaka * Move common code from ironic.conductor.manager to ironic.conductor.utils * Add deprecated config info in ironic.conf.sample * Add switch to enable/disable streaming raw images for IPA * SwiftAPI constructor should read CONF variables at runtime * Take over console session if enabled * Drop some outdated information from our quick start guide * Refactor IRMCVirtualMediaAgentDeploy by applying new BootInterface * Refactor IRMCVirtualMediaIscsiDeploy by applying new BootInterface * Updated from global requirements * Fix: Next cleaning hangs if the previous cleaning was aborted * Add clean up method for the DHCP factory * Add missing packages to dev-quickstart * Support arguments for clean step methods * Validate all tcp/udp port numbers * Add manual cleaning to state machine * Specifying target provision states in fsm * Use server\_profile\_template\_uri at scheduling * Check shellinabox started successfully or not * Add SSL support to the Ironic API * Updated from global requirements * Use wsgi from oslo.service 
for Ironic API * Remove duplicated unit tests in test\_manager * Get mandatory patch attrs from WSME properties * Add and document two new root device hints: wwn\_{with, vendor}\_extension * Sort root device hints when parsing * add "unreleased" release notes page * Follow up patch for 39e40ef12b016a1aeb37a3fe755b9978d3f9934f * Document 'erase\_devices\_iterations' config option * Update iLO documentation * Adds test case for the iscsi\_ilo recreate boot iso * Refactor agent\_ilo driver to use new boot interface * Updated from global requirements * Refactor iLO driver console interface into new module * Add reno for release notes management * Add choices to temp\_url\_endpoint\_type config option * Fix oslo namespace in default log level * Remove \_\_name\_\_ attribute from WSME user types * refine the ironic installation guide * Revert "Add Pillow to test-requirements.txt" * Update etc/ironic/ironic.conf.sample * Make task parameter mandatory in get\_supported\_boot\_devices * Follow up patch for Ib8968418a1835a4131f2f22fb3e4df5ecb9b0dc5 * Check shellinabox process during stopping console * Add whole disk image creation command to Installation Guide * Fix docker.io bug in the Install Guide * Updated from global requirements * Node's last\_error to show the actual error from sync\_power\_state * Updated from global requirements * Rename test\_conductor\_utils.py to test\_utils.py * Follow up patch for 8c3e102fc5736bfcf98525ebab59b6598a69b428 * Add agent\_iboot entrypoint * Validate console port number in a valid range * iboot: add wait loop for pstate to activate * Don't reraise the exception in \_set\_console\_mode * Check seamicro terminal port as long as it specified * Add missing unit tests for some PXE drivers * Validate the input of properties of nodes * Add documentation for Ceph Object Gateway support * Refactor iscsi\_ilo driver to use new boot interface * Fix comments on DRAC BIOS vendor\_passthru * cautiously fail on unhandled heartbeat exception * Add 
"agent\_wol" (AgentAndWakeOnLanDriver) * Added unit tests for CORS middleware * Use oslo\_config new type PortOpt for port options * Fix markup error in deploy/drivers.rst * Update the Configuration Reference to Liberty in doc * Updated from global requirements * Use self.\_\_class\_\_.X instead of self.X * Rename utils.py to mgr\_utils.py to avoid namespace collision * XenAPI: Add support for XenServer VMs * Add PortOpt to config generator * Imported Translations from Zanata * Move hash\_ring refresh logic out of sync\_local\_state * Move ironic.tests.unit.base to ironic.tests.base * Change required version of ImcSdk to 0.7.2 * Add an iboot reboot\_delay setting * iPXE document about the existence of prebuilt images * Fix a typo * Switched order of CORS middleware * DRAC BIOS vendor\_passthru: enable rebooting the node * Replace deprecated LOG.warn with warning * Add db migration and model for tags table * Add OneView driver documentation * Fix snmp property descriptions * Updated from global requirements * Slightly reword README * Remove unused functions from agent driver * mocking syscalls to make the tests run on OS X * Enable cmd/api & cmd/conductor to be launched directly * Add reboot\_delay option to snmp driver * Add self.raid for iSCSI based drivers * Move test\_pxe.py inside unit/drivers/modules directory * Move pxe.\_parse\_instance\_info() to deploy\_utils * Add note about driver API breakage * Fix a missing detail in install guide * Enable radosgw support in ironic * Updated from global requirements * Add agent\_amt docs * Add release notes for 4.2.1 * Convert set() to list in ListType * remove lxml requirement * Update python-oneviewclient version * Fix an annoying detail in the developer quick-start * Updated from global requirements * Expose versioning information on GET / endpoint * Fixes logging of failure in deletion of swift temporary object * ucs\_hostname changed to ucs\_address * Updated from global requirements * Remove functions: 
\_cleanse\_dict & format\_message * Move FakeOneViewDriver to the fake.py module * Add testresources and testscenarios used by oslo.db fixture * Add agent\_amt driver * Imported Translations from Zanata * Stop adding translation function to builtins * Fix tests giving erroneous output during os-testr run * OneView Driver for Ironic * Fix agent\_ilo to remove temporary images * Updated from global requirements * iPXE: Fix assumption that ${mac} is the MAC of the NIC it's booting * Prevent iRMC unit test from potential failure at the gate * Add secret=True to password option * Fix a bug error by passwords only includes numbers * Add support for in-band cleaning in ISCSIDeploy * Fix typo in document * Remove unused import of oslo\_log * Use power manager to reboot in agent deployments * Add retries to ssh.\_get\_hosts\_name\_for\_node * Refactor deploy\_utils methods * Fix irmc driver unit test * PXE: Support Extra DHCP Options for IPv6 * Use standard locale when executing 'parted' command * Updated from global requirements * To run a specific unit test with ostestr use -r * Add .eggs to gitignore * Fix log formatting issue in agent base * Add notes to functions which are in ironic-lib * Allow empty password for ipmitool console * Update help string on tftp\_root option * Updated from global requirements * Fix conductor deregistration on non init conductor * Imported Translations from Zanata * Add Pillow to test-requirements.txt * Add agent inspection support for IPMI and SSH drivers * Python 3.4 unit tests fail with LANG=C * Fix ubuntu install command in install guide * Move unit tests to correct directory * Add 'whitelist\_externals = bash' for two testenvs * Rename 'message' attribute to '\_msg\_fmt' in IronicException * Follow up for: Prepare for functional testing patch * Fix documentation for installing mariaDB * Update help strings for DRAC configs * Switch tox unit test command to use ostestr * Use standard locale when executing 'dd' command * Imported 
Translations from Zanata * Fix typo: add a missing white space * Prepare for functional testing * Fix some iBoot strings * Replace six.iteritems() with .items() * Make generation of ironic.conf.sample deterministic * Cached file should not be deleted if time equal to master 4.2.0 ----- * Cleanup of Translations * Update architecture docs to mention new driver interfaces * Add 4.2.0 release notes * Update docs for Fedora 22 * Add i18n \_ import to cimc common * Update proliantutils version required for L release * Use of 'the Bare Metal service' in guide * Update install guide to reflect latest code * Implement indirection\_api * Add 'abort' to state machine diagram * Unit test environment setup clarification * Make end-points discoverable via Ironic API * Updated from global requirements * Allow unsetting node.target\_raid\_config * Allow abort for CLEANWAIT states * Clean up CIMC driver docs and comments * Add Cisco IMC PXE Driver * Fix final comments in RAID commits * Refactor agent {prepare,tear\_down}\_cleaning into deploy\_utils * Handle unquoted node names from virt types * Fix iRMC vmedia deploy failure due to already attached image * Implement take\_over for iscsi\_ilo driver * Fix typo in vendor method dev documentation * Fix incorrect urls * Check image size before provisioning for agent driver * Help patch authors to remember to update version docs * Add constraint target to tox.ini * Add IPMINative vendor methods to \*IPMINative drivers * Fix string formatting issues * Remove DictMatches custom matcher from unit tests * Imported Translations from Zanata * Remove unused object function * Use oslo.versionedobjects remotable decorators * Base IronicObject on VersionedObject * Update descriptions in RAID config schema * Document GET ...raid/logical\_disk\_properties * Convert functools.wraps() usage to six.wraps() * Remove comment about exception decorator * Replace metaclass registry with explicit opt-in registry from oslo * Add config option to override 
url for links * Fix iBoot test\_\_switch\_retries test to not waste time sleeping * Allow tftpd usage of '--secure' by using symlinks * Add support for inband raid configuration agent ramdisk * Agent supports post-clean-step operations * Update 'Installation Guide' for RHEL7/CentOS7/Fedora * Fix docs about --is-public parameter for glance image-create * Fix indentation of the console docs * Fix heading levels in the install-guide * Cache the description of RAID properties * Remove the hard dependency of swift from ilo drivers * Fix mistakes in comments * Updated from global requirements * Fix object field type calling conventions * Add version info for pyghmi in driver-requirements.txt 4.1.0 ----- * Add 4.1.0 release notes * Try to standardize retrieval of an Exception's description * Add description how to restart ironic services in Fedora/RHEL7/CentOS7 * Improve the ability to resolve capability value * Add supported environment 'VMware' to comments * Updated from global requirements * Remove policy 'admin' rule support * Handle missing is\_whole\_disk\_image in pxe.\_build\_pxe\_config\_options * Raise InvalidPrameterValue when ipmi\_terminal\_port is '' * Fix doc typo * Remove executable permission from irmc.py * Add APIs for RAID configuration * agent\_ilo fails to bring up instance * Updated from global requirements * Remove 'is\_valid\_event' method * Set boot device in PXE Boot interface method prepare\_instance() * Revert "Do not overwrite the iPXE boot script on every deployment" * Add vendor interface to ipminative driver * When boot option is not persisted, set boot on next power on * Document nodes in enroll state, in install guide * Added CORS support middleware to Ironic * Refactor map\_color() * Removes unused posix-ipc requirement * Add retry options to iBoot power driver * Trusted boot doc * Prevent ilo drivers powering off active nodes during take over * Add release notes for 4.0.0 * Clean up cleaning error handling on heartbeats * Use vendor 
mixin in IPMITool drivers * Use oslo.messaging serializers * Add RPC APIs for RAID configuration * Add new method validate\_raid\_config to RAIDInterface * Fix docker package name in Ubuntu 14.04 in Install Guide * Updated from global requirements * Do not overwrite the iPXE boot script on every deployment * Reset tempdir config option after NestedTempfile fixture applied * Remove unused dep discover from test reqs * Add deprecation warning to periodic tasks with parallel=False * Use six.text\_type in parse\_image\_ref * Ensure that pass\_deploy\_info() always calls boot.prepare\_instance() * Add minimum and maximum on port option * Update ironic.conf.sample with tox -egenconfig * Update documentation to install grub2 when creating the user image * Fix logging and exceptions messages in ipminative driver * Fix minor spelling/grammar errors * Put py34 first in the env order of tox * format links in the readme to work with the release notes tools * Periodically checks for nodes being cleaned * Add links for UEFI secure boot support to iLO driver documentation * Add cleanup in console utils tests * Follow up the nits in iRMC vmedia driver merged patch * Refactor agent driver with pxe boot interface * Update tests to reflect WSME 0.8 fixes * Remove ObjectListBase * Remove broken workaround code for old mock * Create a versions.py file * Improve comparison operators for api/controllers/base.py * Switch to post-versioning 4.0.0 ----- * Fix improper exception catching * Fix nits from 'HTTP constants' patch * Use JsonEncoded{Dict,List} from oslo\_db * Move tests into correct directories * Fix logging levels in do\_node\_deploy * Fix misspelling from "applicatin" to "application" * Updated from global requirements * Remove unneeded module variable '\_\_all\_\_' * Updated from global requirements * Change and edit of Ironic Installation Guide * Remove the --autofree option from boot.ipxe * Switch from deprecated timeutils.isotime * Fix "tox -egenconfig" by avoiding the 
MODULEPATH env variable * Improve logging for agent driver * Refactor the essential prop list of inspect driver * Reset clean\_step if error occurs in CLEANWAIT * Fix bug sending sensor data for drivers w/o management * Replace HTTP 'magic numbers' with constants * Address final comments on update image cache based on update time * 'updated\_at' field shows old value after resource is saved * Increase size of nodes.driver column * Add better dbapi support for querying reservation * Allow digits in IPA driver names * Updated from global requirements * Add documentation for iRMC virtual media driver * Add copyright notice to iRMC driver source code * Remove CONF.agent.agent\_pxe\_bootfile\_name * Update single letter release names to full names * Enforce flake8 E711 * Update docstring for agent deploy's take\_over * Update cached images based on update time * Updated from global requirements * Add RAIDInterface for RAID configuration * get\_supported\_boot\_devices() returns static device list * add ironic client and ironic inspector projects into contribution list * Updated from global requirements * Use the oslo\_utils.timeutils 'StopWatch' class * Update the documentation to use IPA as deploy ramdisk * Inspector inspection fails due to node locked error * Prevent power actions when the node is in CLEANWAIT state * Imported Translations from Transifex * Remove unnecessary trailing backslash in Installation Guide * Refactor some minor issues to improve code readability * Fix misspelling in comment * Make app.wsgi more like ironic.cmd.api * Migrate IronicObjectSerializer to subclass from oslo * Updated from global requirements * Fix warnings on doc builds * Change vagrant.yml to vagrant.yaml * Developer quickstart documentation fixes * Document configuring ironic-api behind mod\_wsgi * Updated from global requirements * Add deprecation messages on the bash ramdisk endpoints * Document API versioning * Log configuration values as DEBUG, not INFO * Update 
ironic.conf.sample * Update ironic.conf.sample * Add information 'node\_uuid' in debug logs to facilitate the reader's life * Clean up instance\_uuid as part of the node's tear down * Fix a trusted boot test bug * Add more info level log to deploy\_utils.work\_on\_disk() method * Fix broken agent virtual media drivers * Updated from global requirements * Fix apache wsgi import * Add raises docstring tag into object.Ports methods * Only take exclusive lock in sync\_power\_state if node is updated * Secure boot support for pxe\_ilo driver * UCS: node-get-boot-device is failing for Cisco servers * grub2 bootloader support for uefi boot mode * Add Nova scheduler\_tracks\_instance\_changes config to docs * Use automaton's converters/pydot * enroll/verify/cleanwait in state machine diagram * Save and re-raise exception * Cache Keystone client instance * Refactor pxe - New PXEBoot and ISCSIDeploy interfaces * Don't prevent updates if power transition is in progress * Follow-on to b6ed09e297 to fix docstrings/comments * Make inspector driver test correctly * Allow inspector driver to work in standalone mode * Remove outdated TODO.rst file * Updated from global requirements * Introduce support for APC MasterSwitchPlus and Rack PDU * Allow agent lookup to directly accept node UUID * Add CLEANWAIT state * Allow updates in VERIFYING state * Allow deleting nodes in ENROLL state * Updated from global requirements * Fixes a testcase related to trusted boot in UEFI boot mode * Clarify inspection upgrade guide * Refactor refresh method in objects for reuse * Imported Translations from Transifex * Use utils.mkfs directly in deploy\_utils * Updated from global requirements * Migrate ObjectListBase to subclass from the Oslo one * Clean up tftp files if agent deployed disk image * Don't do a premature reservation check in the provision API * Move the http\_url and http\_root to deploy config * Allow upgrading shared lock to an exclusive one * Fix the DEPLOYWAIT check for agent\_\* 
drivers * Add a missing comma in Vendor Methods of Developer Guide * Replacing dict.iteritems() with dict.items() * Updated from global requirements * db: use new EngineFacade feature of oslo.db * Address minor comments on the ENROLL patch * Remove requirements.txt from tox.ini deps * Updated from global requirements * Replace common.fileutils with oslo\_utils.fileutils * Updated from global requirements * Switch to the oslo\_utils.fileutils * Start using new ENROLL state * Add .idea to .gitignore * Periodically checks the status of nodes in DEPLOYING state * Add IPA support for iscsi\_irmc driver * Updated from global requirements * Vagrant configuration generation now uses pymysql * Remove deprecated code for driver vendor passthru * Add DRAC BIOS config vendor passthru API * Use DEPLOYWAIT while waiting for agent to write image * Fix unittests due mock 1.1.0 release * Migrate RPC objects to oslo.versionedobjects Fields * Imported Translations from Transifex * Updated from global requirements * Mock the file creation for the GetConfigdriveTestCase tests * Address follow-up comments * Clear ilo\_boot\_iso before deploy for glance images * Enable translation for config option help messages * Replace is\_hostname\_safe with a better check * Initial oslo.versionedobjects conversion * Add whole disk image support for iscsi\_irmc driver * Add localboot support for iscsi\_irmc driver * Add iRMC Virtual Media Deploy module for iRMC Driver * add python-scciclient version number requirement * Remove db connection string env variable from tox.ini * Make use of tempdir configuration * Updated from global requirements * Fix failing unit tests under py34 * Allow vendor methods to serve static files * Allow updates when node is on ERROR provision state * Add sequence diagrams for pxe\_ipmi driver * Fix logging for soft power off failures * Mute ipmi debug log output * Validate IPMI protocol version for IPMIShellinaboxConsole * Image service should not be set in ImageCache 
constructor * Clean nodes stuck in DEPLOYING state when ir-cond restarts * Add ability to filter nodes by provision\_state via API * Refactor check\_allow\_management\_verbs * Add node fields for raid configuration * Switch to oslo.service * Fix "boot\_mode\_support" hyper link in Installation Guide * Log configuration options on ironic-conductor startup * Allow deleting even associated and active node in maintenance mode * Use oslo\_log * Replace self.assertEqual(None,\*) to self.assertIsNone() * Improve warning message in conductor.utils.node\_power\_action() * Add a new boot section 'trusted\_boot' for PXE * use versionutils from oslo\_utils * Make task\_manager logging more helpful * Add IPMI 1.5 support for the ipmitool power driver * Add iBoot driver documentation * Updated from global requirements * Add unit test for ilo\_deploy \_configure\_vmedia\_boot() * Do not use "private" attribute in AuthTokenMiddleware * API: Get a subset of fields from Ports and Chassis * Save disk layout information when deploying * Add ENROLL and related states to the state machine * Refactor method to add or update capability string * Use LOGDIR instead of SCREEN\_LOGDIR in docs * Always allow removing instance\_uuid from node in maintenance mode * API: Get a subset of fields from Nodes * Switch from MySQL-python to PyMySQL * Updated from global requirements * copy editing of ironic deploy docs * Transition state machine to use automaton oslo lib * Finish switch to inspector and inspector-client * Rename ilo\_power.\_attach\_boot\_iso to improve readability * Expose current clean step in the API * Fix broken ACL tests * Add option to configure passes in erase\_devices * Refactor node's and driver's vendor passthru to a common place * Change return value of [driver\_]vendor\_passthru to dict * Add Wake-On-Lan driver documentation * Fixes a bug on the iLO driver tutorial * Address follow-up comments on ucs drivers * Added documentation to Vagrantfile * Updated from global 
requirements * Addresses UcsSdk install issue * Don't raise exception from set\_failed\_state() * Add disk layout check on re-provisioning * Add boot interface in Ironic * Fix Cisco UCS slow tests * Validate capability in properties and instance\_info * Pass environment variables of proxy to tox * DRAC: fix set/get boot device for 11g * Enable flake8 checking of ironic/nova/\* * Remove tools/flakes.py * Wake-On-Lan Power interface * IPA: Do a soft power off at the end of deployment * Remove unnecessary validation in PXE * Add additional logging around cleaning * remove unneeded sqlalchemy-migrate requirement * Add vendor-passthru to attach and boot an ISO * Updated from global requirements * Sync with latest oslo-incubator * Add pxe\_ucs and agent\_ucs drivers to manage Cisco UCS servers * Doc: Use --notest for creating venv * Updated from global requirements * Fix DRAC driver job completion detection * Add additional required RPMs to dev instructions * Update docs for usage of python-ironicclient * Install guide reflects changes on master branch * Remove auth token saving from iLO driver * Don't support deprecated drivers' vendor\_passthru * Updated from global requirements * Enforce flake8 E123/6/7/8 in ironic * Change driver\_info to driver\_internal\_info in conductor * Use svg as it looks better/scales better than png * Updated from global requirements * Use oslo config import methods for Keystone options * Add documentation for getting a node's console * fix node-get-console returns url always start with http * Update the config drive doc to replace deprecated value * Updated from global requirements * Remove bogus conditional from node\_update * Prevent node delete based on provision, not power, state * Revert "Add simplegeneric to py34 requirements" * Do not save auth token on TFTP server in PXE driver * Updated from global requirements * Update iLO documentation for UEFI secure boot * ironic-discoverd is being renamed to ironic-inspector * Update doc 
"install from packages" section to include Red Hat * Improve strictness of iLO test cases error checking * Remove deprecated pxe\_deploy\_{kernel, ramdisk} * Get admin auth token for Glance client in image\_service * Fix: iSCSI iqn name RFC violation * Update documentation index.rst * Update AMT Driver doc * Refactor ilo.common.\_prepare\_floppy\_image() * Do not add auth token in context for noauth API mode * DRAC: config options for retry values * Disable meaningless sort keys in list command * Update pyremotevbox documentation * Fix drac implementation of set\_boot\_device * Update to hacking 0.10.x * Prepare for hacking 0.10.x * Rename gendocs tox environment * Add simplegeneric to py34 requirements * Reduce AMT Driver's dependence on new release of Openwsman * Fixes some docstring warnings * Slight changes to Vagrant developer configs * Delete neutron ports when the node cleaning fails * Update docstring DHCPNotFound -> DHCPLoadError * Wrap all DHCP provider load errors * Add partition number to list\_partitions() output fields * Added vagrant VM for developer use * Execute "parted" from root in list\_partitions() * Remove unused CONF variable in test\_ipminative.py * Ironic doesn't use cacert while talking to Swift * Fix chainloading iPXE (undionly.kpxe) * Updated from global requirements * Improve root partition size check in deploy\_partition\_image * ironic/tests/drivers: Add autospec=True and spec\_set= * Fix and enhance "Exercising the Services Locally" docs * Fix typos in Ironic docs * Fix spelling error in docstring * Remove deprecated exceptions * Check temp dir is usable for ipmitool driver * Improve strictness of AMT test cases error checking * Improve strictness of iRMC test cases error checking * Fix Python 3.4 test failure * Remove unneeded usage of '# noqa' * Drop use of 'oslo' namespace package * Updated from global requirements * Specify environment variables needed for a standalone usage * Adds OCS Power and Management interfaces * Run tests 
in py34 environment * Adds docstrings to some functions in ironic/conductor/manager.py * Add section header to state machines page * Update config generator to use oslo released libs * Use oslo\_log lib * Include graphviz in install prerequisites * Link to config reference in our docs * Adopt config generator * Remove cleanfail->cleaning from state diagram * Imported Translations from Transifex * Return HTTP 400 for invalid sort\_key * Update the Vendor Passthru documentation * Add maintenance mode example with reason * Add logical name example to install-guide * Improve strictness of DRAC test cases error checking * Add a venv that can generate/write/update the states diagram * Log attempts while trying to sync power state * Disable clean\_step if config option is set to 0 * Improve iSCSI deployment logs * supports alembic migration for db2 * Updated from global requirements * Update iLO documentation for capabilities 2015.1.0 -------- * ironic/tests/drivers/amt: Add autospec=True to mocks * ironic/tests/drivers/irmc: Add spec\_set & autospec=True * Updated from global requirements * ironic/tests/drivers/drac: Add spec\_set= or autospec=True * Create a 3rd party mock specs file * Release Import of Translations from Transifex * Document how to configure Neutron with iPXE * Remove state transition: CLEANFAIL -> CLEANING * Remove scripts for migrating nova baremetal * Add a missing comma and correct some typos * Remove API reboot from cleaning docs * Remove scripts for migrating nova baremetal * Fixed is\_glance\_image(image\_href) predicate logic * Rearrange some code in PXEDeploy.prepare * Fixes typo in ironic/api/hooks.py and removes unnecessary parenthesis * update .gitreview for stable/kilo * Add cleaning network docs * Remove ironic compute driver and sched manager * ironic/tests/drivers/ilo: Add spec= & autospec=True to mocks * Replace 'metrics' with 'meters' in option * Update some config option's help strings * document "scheduler\_use\_baremetal\_filters" 
option in nova.conf * Fix heartbeat when clean step in progress * Fix heartbeat when clean step in progress * Update ilo drivers documentation for inspection * Open Liberty development 2015.1.0rc1 ----------- * Local boot note about updated deploy ramdisk * Convert internal RPC continue\_node\_cleaning to a "cast" * iLO driver documentation for node cleaning * Fix typos in vendor-passthru.rst * Add Ceilometer to Ironic's Conceptual Architecture * Improve AMT driver doc * iLO driver documentation for UEFI secure boot * Fix for automated boot iso issue with IPA ramdisk * Update session headers during initialization of AgentClient * Agent driver fails without Ironic-managed TFTP * Add notes about upgrading juno->kilo to docs * Address comments on I5cc41932acd75cf5e9e5b626285331f97126932e * Use mock patch decorator for eventlet.greenthread.sleep * Cleanup DHCPFactory.\_dhcp\_provider after tests * Follow-up to "Add retry logic to \_exec\_ipmitool" * Nit fixes for boot\_mode being overwritten * Update installation service overview * Don't pass boot\_option: local for whole disk images * Fixup post-merge comments on cleaning document * Use hexhyp instead of hexraw iPXE type * Fix exception handling in Glance image service * Update proliantutils version required for K release * Fix type of value in error middleware response header * Imported Translations from Transifex * Fix mocks not being stopped as intended * Add maintenance check before call do\_node\_deploy * Fix VM stuck when deploying with pxe\_ssh + local boot * Fix bad quoting in quickstart guide * Set hash seed to 0 in gendocs environment * boot\_mode is overwritten in node properties * Add retry logic to \_exec\_ipmitool * Check status of bootloader installation for DIB ramdisk * Add missing mock for test\_create\_cleaning\_ports\_fail * Shorten time for unittest test\_download\_with\_retries * Disable XML now that we have WSME/Pecan support * tests/db: Add autospec=True to mocks * Sync with oslo.incubator * 
Enable cleaning by default * Improve error handling when JSON is not returned by agent * Fix help string for glance auth\_strategy option * Document ports creating configuration for in-band inspection * Remove DB tests workarounds * Fix formatting issue in install guide * Add missing test for DB migration 2fb93ffd2af1 * Regenerate states diagram after addition of CLEANING * Fix UnicodeEncodeError issue when the language is not en\_US * pxe deploy fails for whole disk images in UEFI * Remove setting language to en\_US for 'venv' * Add config drive documentation * Refactor test code to reduce duplication * Mock time.sleep() for two unittests * Clarify message for power action during cleaning * Add display-name option to example apache2 configuration * New field 'name' not supported in port REST API * Update doc for test database migrations * Add PXE-AMT driver's support of IPA ramdisk * Fix cleaning nits * Update docs: No power actions during cleaning * Prevent power actions on node in cleaning * Followup to comments on Cleaning Docs * Remove inspect\_ports from ilo inspection * Removed hardcoded IDs from "chassis" test resources * Fix is\_hostname\_safe for RFC compliance * Enable pxe\_amt driver with localboot * Improve backwards compat on API behaviour * Use node UUID in logs instead of node ID * Add IPA to enable drivers doc's page * Top level unit tests: Use autospec=True for mocks * DRAC: power on during reboot if powered off * Update pythonseamicroclient package version * A wrong variable format used in msg of ilo: * Add documentation for Cleaning * Explictly state that reboot is expected to work with powered off nodes * Prevent updating the node's driver if console is enabled * Agent driver: no-op heartbeat for maintenanced node * Deploys post whole disk image deploy fails * Allow node.instance\_uuid to be removed during cleaning * Attach ilo\_boot\_iso only if node is active * Ensure configdrive isn't mounted for ilo drivers * Ensure configdrive isn't 
mounted for ipxe/elilo * Correct update\_dhcp\_opts methods * Fix broken unittests usage of sort() * Add root device hints documentation * Ensure configdrive isn't mounted in CoreOS ramdisks * Add local boot with partition images documentation * Add a return after saving node power state * Fix formatting error in states\_to\_dot * pxe partition image deploy fails in UEFI boot mode * Updated from global requirements * Fix common misspellings * Ilo drivers sets capabilities:boot\_mode in node * Add whole disk image support for iscsi\_ilo using agent ramdisk * Fixed nits for secure boot support for iLO Drivers * Fix typos in ironic/ironic/drivers/modules * fix invalid asserts in tests * Fail deploy if root uuid or disk id isn't available * Hide new fields via single method * Update "Ironic as a standalone service" documentation * DRAC: add retry capability to wsman client operations * Secure boot support for agent\_ilo driver * Secure boot support for iscsi\_ilo driver * Changes for secure boot support for iLO drivers 2015.1.0b3 ---------- * follow up patch for ilo capabilities * Support agent\_ilo driver to perform cleaning * Implement cleaning/zapping for the agent driver * Add Cleaning Operations for iLO drivers * Automate uefi boot iso creation for iscsi\_ilo driver * Generate keystone\_authtoken options in sample config file * Use task.spawn\_after to maintain lock during cleaning * is\_whole\_disk\_image might not exist for previous instances * Hide inspection\_\*\_at fields if version < 1.6 * Disable cleaning by default * Suppress urllib3.connection INFO level logging * Allow periods (".") in hostnames * iscsi\_ilo driver do not validate boot\_option * Sync from oslo.incubator * Common changes for secure boot support * Add pxe\_irmc to the sending IPMI sensor data driver list * iLO driver updates node capabilities during inspection * iLO implementation for hardware inspection * Address nits in uefi agent iscsi deploy commit * Raise exception for Agent Deploy 
driver when using partition images * Add uefi support for agent iscsi deploy * Enable agent\_ilo for uefi-bios switching * Fixup log message for discoverd * Update unittests and use NamedTemporaryFile * Rename \_continue\_deploy() to pass\_deploy\_info() * Write documentation for hardware inspection * Start using in-band inspection * Log message is missing a blank space * Address comments on cleaning commit * IPA: Add support for root device hints * Use Mock.patch decorator to handle patching amt management module * iscsi\_ilo driver to support agent ramdisk * Enhance AMT driver documentation, pt 2 * Implement execute clean steps * Add missing exceptions to destroy\_node docstrings * Force LANGUAGE=en\_US in test runs * Add validations for root device hints * Add localboot support for uefi boot mode * ironic port deletion fails even if node is locked by same process * Add whole disk image support in iscsi\_ilo driver * Enhance AMT driver documentation * Use oslo\_policy package * Use oslo\_context package * Adds support for deploying whole disk images * Add AMT-PXE driver doc * Fix two typos * Add node UUID to deprecated log message * Fix wrong chown command in deployment guide * PXE driver: Deprecate pxe\_deploy\_{ramdisk, kernel} * Add label to virtual floppy image * Make sure we don't log the full content of the config drive * Update API doc to reflect node uuid or name * Fix typo agaist->against * Use strutils from oslo\_utils * Updated from global requirements * Add AMT-PXE-Driver Power&Management&Vendor Interface * Fix wrong log output in ironic/ironic/conductor/manager.py * Refactor agent iscsi deploy out of pxe driver * Tiny improvement of efficient * Make try block shorter for \_make\_password\_file * Add module for in-band inspection using ironic-discoverd * Fix take over for agent driver * Add server-supported min and max API version to HTTPNotAcceptable(406) * Updated from global requirements * Add tftp mapfile configuration in install-guide * Fix nits 
in cleaning * Fix nits for supporting non-glance images * Follow-up patch for generic node inspection * Add a note to dev-quickstart * Add iter\_nodes() helper to the conductor manager * Implement Cleaning in DriverInterfaces * Update install-guide for Ubuntu 14.10 package changes * Use mock instead of fixtures when appropriate * Generic changes for Node Inspection * Fix typo in "Enabling Drivers" * Support for non-Glance image references * Create new config for pecan debug mode * Local boot support for IPA * PXE drivers support for IPA * Update documentation on VirtualBox drivers * Add localboot support for iscsi\_ilo driver * Improve last\_error for async exceptions * Fix IPMI support documentation * Root partition should be bootable for localboot * Updated from global requirements * Add iRMC Management module for iRMC Driver * Spelling error in Comment * Remove unused code from agent vendor lookup() * Add documentation for VirtualBox drivers * Implement Cleaning States * Missing mock causing long tests * Add support for 'latest' in microversion header * Add tests for ilo\_deploy driver * Fix reboot logic of iRMC Power Driver * Update the states generator and regenerate the image * Ensure state values are 15 characters or less * Minor changes to InspectInterface * INSPECTFAIL value is more readable * Disable n-novnc, heat, cinder and horizon on devstack * Return required properties for agent deploy driver * Remove unused modules from ironic/openstack/common * Use functions from oslo.utils * Update Ilo drivers to use REST API interface to iLO * Add dhcp-all-interfaces to get IP to NIC other than eth0 * Log exception on tear\_down failure * Fix PEP8 E124 & E125 errors * Mock sleep function for OtherFunctionTestCase * Log node UUID rather than node object * Updated from global requirements * Add InspectInterface for node-introspection * Correctly rebuild the PXE file during takeover of ACTIVE nodes * Fix PEP8 E121 & E122 errors * Add documentation for the IPMI retry 
timeout option * Use oslo\_utils replace oslo.utils * Avoid deregistering conductor following SIGUSR1 * Add states required for node-inspection * For flake8 check, make the 'E12' ignore be more granular * add retry logic to is\_block\_device function * Imported Translations from Transifex * Move oslo.config references to oslo\_config * Add AMT-PXE-Driver Common Library * Fix typos in documentation: Capabilities * Removed unused image file * Address final comments of a4cf7149fb * Add concept of stable states to the state machine * Fix ml2\_conf.ini settings * Vendorpassthru doesn't get correct 'self' * Remove docs in proprietary formats * Fix file permissions in project * Imported Translations from Transifex * Updated from global requirements * Remove deploy\_is\_done() from AgentClient * AgentVendorInterface: Move to a common place * Stop console at first if console is enabled when destroy node * fixed typos from eligable to eligible and delition to deletion * Add logical name support to Ironic * Add support for local boot * Fix chown invalid option -- 'p' * ipmitool drivers fail with integer passwords * Add the subnet creation step to the install guide 2015.1.0b2 ---------- * improve iSCSI connection check * Remove min and max from base.Version * Add list of python driver packages * Add policy show\_password to mask passwords in driver\_info * Conductor errors if enabled\_drivers are not found * Add MANAGEABLE state and associated transitions * Raise minimum API version to 1.1 * Correct typo in agent\_client * Fix argument value for work\_on\_disk() in unit test * Documentation: Describe the 'spacing' argument * update docstring for driver\_periodic\_task's parallel param * Use prolianutils module for ilo driver tests * Add documentation on parallel argument for driver periodic tasks * Rename provision\_state to power\_state in test\_manager.py * Refactor ilo.deploy.\_get\_single\_nic\_with\_vif\_port\_id() * Update agent driver with new field 
driver\_internal\_info * Updated from global requirements * Add support for driver-specific periodic tasks * Partial revert of 4606716 until we debug further * Clean driver\_internal\_info when changes nodes' driver * Add Node.driver\_internal\_info * Move oslo.config references to oslo\_config * Move oslo.db references to oslo\_db * Revert "Do not pass PXE net config from bootloader to ramdisk" * Bump oslo.rootwrap to 1.5.0 * Drop deprecated namespace for oslo.rootwrap * Add VirtualBox drivers and its modules * region missing in endpoint selection * Add :raises: for Version constructor docstring * Improve testing of the Node's REST API * Rename NOSTATE to AVAILABLE * Add support for API microversions * Address final comments of edf532db91 * Add missing exceptions into function docstring * Fix typos in commit I68c9f9f86f5f113bb111c0f4fd83216ae0659d36 * Add logic to store the config drive passed by Nova * Do not POST conductor\_affinity in tests * Add 'irmc\_' prefix to optional properties * Actively check iSCSI connection after login * Updated from global requirements * Add iRMC Driver and its iRMC Power module * Fix drivers.rst doc format error * Improve test assertion for get\_glance\_image\_properties * Do not pass PXE net config from bootloader to ramdisk * Adds get\_glance\_image\_properties * Fix filter\_query in drac/power interface * Updated from global requirements * Simplify policy.json * Replace DIB installation step from git clone to pip * Add a TODO file * Updated from global requirements * Fix function docstring of \_get\_boot\_iso\_object\_name() * Improve ironic-dbsync help strings * Clear locks on conductor startup * Remove argparse from requirements * Use oslo\_serialization replace oslo.serialization * Agent driver fails with Swift Multiple Containers * Add ipmitool to quickstart guide for Ubuntu * Allow operations on DEPLOYFAIL'd nodes * Allow associate an instance independent of the node power state * Improve docstrings about TaskManager's 
spawning feature * DracClient to handle ReturnValue validation * Fix instance\_info parameters clearing * DRAC: Fix wsman host verification * Updated from global requirements * Clean up ilo's parse\_driver\_info() * Fix ssh \_get\_power\_status as it returned status for wrong node * Fix RPCService and Ironic Conductor so they shut down gracefully * Remove jsonutils from openstack.common * Remove lockfile from dependencies * Remove IloPXEDeploy.validate() * Force glance recheck for kernel/ramdisk on rebuild * iboot power driver: unbound variable error * Remove unused state transitions * PXE: Add configdrive support * Rename localrc for local.conf * DracClient to handle ClientOptions creation * Ensure we don't have stale power state in database after power action * Remove links autogenerated from module names * Make DD block size adjustable * Improve testing of state transitions * Convert drivers to use process\_event() * Update service.py to support graceful Service shutdown * Ensure that image link points to the correct image * Raise SSH failure messages to the error level * Make 'method' explicit for VendorInterface.validate() * Updated from global requirements * Provided backward compat for enforcing admin policy * Allow configuration of neutronclient retries * Convert check\_deploy\_timeout to use process\_event * Add requests to requirements.txt * Enable async callbacks from task.process\_event() * Document dependency on \`fuser\` for pxe driver * Distinguish between prepare + deploy errors * Avoid querying the power state twice * Add state machine to documentation * Updated from global requirements * Adjust the help strings to better reflect usage * Updated from global requirements * Updated from global requirements * Update etc/ironic/ironic.conf.sample * Fix policy enforcement to properly detect admin * Minor changes to state model * Add documentation to create in RegionOne * Delete unnecessary document files * Updated from global requirements * display 
error logging should be improved * Refactor async helper methods in conductor/manager.py * Hide oslo.messaging DEBUG logs by default * add comments for NodeStates fields * Stop conductor if no drivers were loaded * Fix typo in install-guide.rst * Reuse methods from netutils * Use get\_my\_ipv4 from oslo.utils * improve the neutron configuration in install-guide * Refactoring for Ironic policy * PXE: Pass root device hints via kernel cmdline * Extend API multivalue fields * Add a fsm state -> dot diagram generator * Updated from global requirements * Update command options in the Installation Guide 2015.1.0b1 ---------- * Improve Agent deploy driver validation * Add new enrollment and troubleshooting doc sections * Begin using the state machine for node deploy/teardown * Add base state machine * Updated from global requirements * Get rid of set\_failed\_state duplication * Remove Python 2.6 from setup.cfg * Updated from global requirements * Update dev quick-start for devstack * Updated from global requirements * Correct vmware ssh power manager * rename oslo.concurrency to oslo\_concurrency * Remove duplicate dependencies from dev-quickstart docs * Do not strip 'glance://' prefix from image hrefs * Updated from global requirements * Fix image\_info passed to IPA for image download * Use Literal Blocks to write code sample in docstring * Workflow documentation is now in infra-manual * Add tests to iscsi\_deploy.build\_deploy\_ramdisk\_options * Fix for broken deploy of iscsi\_ilo driver * Updated from global requirements * Add info on creating a tftp map file * Add documentation for SeaMicro driver * Fixed typo in Drac management driver test * boot\_devices.PXE value should match with pyghmi define * Add decorator that requires a lock for Drac management driver * Remove useless deprecation warning for node-update maintenance * Ilo tests refactoring * Change some exceptions from invalid to missing * Add decorator that requires a lock for Drac power driver * Change 
methods from classmethod to staticmethod * iLO Management Interface * Improve docs for running IPA in Devstack * Update 'Introduction to Ironic' document * Avoid calling \_parse\_driver\_info in every test * Updated from global requirements * Correct link in user guide * Minor fix to install guide for associating k&r to nodes * Add serial console feature to seamicro driver * Support configdrive in agent driver * Add driver\_validate() * Update drivers VendorInterface validate() method * Adds help for installing prerequisites on RHEL * Add documentation about Vendor Methods * Make vendor methods discoverable via the Ironic API * Fix PXEDeploy class docstring * Updated from global requirements * Vendor endpoints to support different HTTP methods * Add ipmitool as dependency on RHEL/Fedora systems * dev-quickstart.rst update to add required packages * Add gendocs tox job for generating the documentation * Add gettext to packages needed in dev quickstart * Convert qcow2 image to raw format when deploy * Update iLO driver documentation * Disable IPMI timeout before setting boot device * Updated from global requirements * ConductorManager catches Exceptions * Remove unused variable in agent.\_get\_interfaces() * Enable hacking rule E265 * Add sync and async support for passthru methods * Fix documentation on Standard driver interfaces * Add a mechanism to route vendor methods * Remove redundant FunctionalTest usage in API tests * Use wsme.Unset as default value for API objects * Fix traceback on rare agent error case * Make \_send\_sensor\_data more cooperative * Updated from global requirements * Add logging to driver vendor\_passthru functions * Support ipxe with Dnsmasq * Correct "returns" line in PXE deploy method * Remove all redundant setUp() methods * Update install guide to install tftp * Remove duplicated \_fetch\_images function * Change the force\_raw\_image config usage * Clear maintenance\_reason when setting maintenance=False * Removed hardcoded IDs from 
"port" test resources * Switch to oslo.concurrency * Updated from global requirements * Use docstrings for attributes in api/controllers * Put nodes-related API in same section * Fix get\_test\_node attributes set incorrectly * Get new auth token for ramdisk if old will expire soon * Delete unused 'use\_ipv6' config option * Updated from global requirements * Add maintenance to RESTful web API documentation * Updated from global requirements * Iterate over glance API servers * Add API endpoint to set/unset the node maintenance mode * Removed hardcoded IDs from "node" test resources * Add maintenance\_reason when setting maintenance mode * Add Node.maintenance\_reason * Fix F811 error in pep8 * Improve hash ring value conversion * Add SNMP driver for Aten PDU's * Update node-validate error messages * Store image disk\_format and container\_format * Continue heartbeating after DB connection failure * TestAgentVendor to use the fake\_agent driver * Put a cap on our cyclomatic complexity * More helpful failure for tests on noexec /tmp * Update doc headers at end of Juno * Fix E131 PEP8 errors 2014.2 ------ * Add the PXE VendorPassthru interface to PXEDracDriver * Add documentation for iLO driver(s) * Enable E111 PEP8 check * Updated from global requirements * Fix F812 PEP8 error * Enable H305 PEP8 check * Enable H307 PEP8 check * Updated from global requirements * Enable H405 PEP8 check * Enable H702 PEP8 check * Enable H904 PEP8 check * Migration to oslo.serialization * Add the PXE VendorPassthru interface to PXEDracDriver * Adds instructions for deploying instances on real hardware * Fix pep8 test * Add missing attributes to sample API objects * Fix markup-related issues in documentation * Add documentation for PXE UEFI setup 2014.2.rc2 ---------- * Clear hash ring cache in get\_topic\_for\* * Fix exceptions names and messages for Keystone errors * Remove unused change\_node\_maintenance\_mode from rpcapi * Imported Translations from Transifex * Clear hash ring cache 
in get\_topic\_for\* * Move database fixture to a separate test case * KeyError from AgentVendorInterface.\_heartbeat() * Validate the power interface before deployment * Cleans up some Sphinx rST warnings in Ironic * Remove kombu as a dependency for Ironic 2014.2.rc1 ---------- * Make hash ring mapping be more consistent * Add periodic task to rebuild conductor local state * Open Kilo development * Add "affinity" tracking to nodes and conductors * ilo\* drivers to use only ilo credentials * Update hacking version in test requirements * Add a call to management.validate(task) * Replace custom lazy loading by stevedore * Updated from global requirements * Remove useless variable in migration * Use DbTestCase as test base when context needed * For convention rename the first classmethod parameter to cls * Always reset target\_power\_state in node\_power\_action * Imported Translations from Transifex * Stop running check\_uptodate in the pep8 testenv * Add HashRingManager to wrap hash ring singleton * Fix typo in agent validation code * Conductor changes target\_power\_state before starting work * Adds openSUSE support for developer documentation * Updated from global requirements * Remove untranslated PO files * Update ironic.conf.sample * Remove unneeded context initialization in tests * Force the SSH commands to use their default language * Add parameter to override locale to utils.execute * Refactor PXE clean up tests * Updated from global requirements * Don't reraise Exceptions from agent driver * Add documentation for ironic-dbsync command * Do not return 'id' in REST API error messages * Separate the agent driver config from the base localrc config * pxe\_ilo driver to call iLO set\_boot\_device * Remove redundant context parameter * Update docs with new dbsync command * Update devstack docs, require Ubuntu 14.04 * Do not use the context parameter on refresh() * Pass ipa-driver-name to agent ramdisk * Do not set the context twice when forming RPC objects * Make 
context mandatory when instantiating a RPC object * Neutron DHCP implementation to raise exception if no ports have VIF * Do not cache auth token in Neutron DHCP provider * Imported Translations from Transifex * add\_node\_capability and rm\_node\_capability unable to save changes to db * Updated from global requirements * Handle SNMP exception error.PySnmpError * Use standard locale in list\_partitions * node\_uuid should not be used to create test port * Revert "Revert "Search line with awk itself and avoid grep"" * Fix code error in pxe\_ilo driver * Add unit tests for SNMPClient * Check whether specified FS is supported * Sync the doc with latest code * Add a doc note about the vendor\_passthru endpoint * Remove 'incubated' documentation theme * Import modules for fake IPMINative/iBoot drivers * Allow clean\_up with missing image ref * mock.called\_once\_with() is not a valid method * Fix Devstack docs for zsh users * Fix timestamp column migration * Update ironic states and documentation * Stop using intersphinx * Updated from global requirements * Remove the objectify decorator * Add reserve() and release() to Node object * Add uefi boot mode support in IloVirtualMediaIscsiDeploy * Don't write python bytecode while testing * Support for setting boot mode in pxe\_ilo driver * Remove bypassing of H302 for gettextutils markers * Revert "Search line with awk itself and avoid grep" * Search line with awk itself and avoid grep * Add list\_by\_node\_id() to Port object * Remove unused modules from openstack-common.conf * Sync the document with the current implementation * Unify the sensor data format * Updated from global requirements * Deprecate Ironic compute driver and sched manager * Log ERROR power state in node\_power\_action() * Fix compute\_driver and scheduler\_host\_manager in install-guide * Use oslo.utils instead of ironic.openstack.common * Use expected, actual order for PXE template test * Fix agent PXE template * Translator functions cleanup part 3 * 
Translator functions cleanup part 2 * Imported Translations from Transifex * Updated from global requirements * Remove XML from api doc samples * Update ironic.conf.sample * Fix race conditions running pxe\_utils tests in parallel * Switch to "incubating" doc theme * Minor fixes for ipminative console support * Translator functions cleanup part 4 * Translator functions cleanup part 1 * Remove unnecessary mapping from Agent drivers * mock.assert\_called\_once() is not valid method * Use models.TimestampMixin from oslo.db * Updated from global requirements 2014.2.b3 --------- * Driver merge review comments from 111425 * Nova review updates for \_node\_resource * Ignore backup files * IloVirtualMediaAgent deploy driver * IloVirtualMediaIscsi deploy driver * Unbreak debugging via testr * Interactive console support for ipminative driver * Add UEFI based deployment support in Ironic * Adds SNMP power driver * Control extra space for images conversion in image\_cache * Use metadata.create\_all() to initialise DB schema * Fix minor issues in the DRAC driver * Add send-data-to-ceilometer support for pxe\_ipminative driver * Reduce redundancy in conductor manager docstrings * Fix typo in PXE driver docstrings * Update installation guide for syslinux 6 * Updated from global requirements * Imported Translations from Transifex * Avoid deadlock when logging network\_info * Implements the DRAC ManagementInterface for get/set boot device * Rewrite images tests with mock * Add boot\_device support for vbox * Remove gettextutils \_ injection * Make DHCP provider pluggable * DRAC wsman\_{enumerate, invoke}() to return an ElementTree object * Remove futures from requirements * Script to migrate Nova BM data to Ironic * Imported Translations from Transifex * Updated from global requirements * Fix unit tests with keystoneclient master * Add support for interacting with swift * properly format user guide in RST * Updated from global requirements * Fix typo in user-guide.rst * Add 
console interface to agent\_ipmitool driver * Add support for creating vfat and iso images * Check ERROR state from driver in \_do\_sync\_power\_state * Set PYTHONHASHSEED for venv tox environment * Add iPXE Installation Guide documentation * Add management interface for agent drivers * Add driver name on driver load exception * Take iSCSI deploy out of pxe driver * Set ssh\_virt\_type to vmware * Update nova driver's power\_off() parameters * return power state ERROR instead of an exception * handle invalid seamicro\_api\_version * Imported Translations from Transifex * Nova ironic driver review update requests to p4 * Allow rebuild of node in ERROR and DEPLOYFAIL state * Use cache in node\_is\_available() * Query full node details and cache * Add in text for text mode on trusty * Add Parallels virtualisation type * IPMI double bridging functionality * Add DracDriver and its DracPower module * use MissingParameterValue exception in iboot * Update compute driver macs\_for\_instance per docs * Update DevStack guide when querying the image UUID * Updated from global requirements * Fix py3k-unsafe code in test\_get\_properties() * Fix tear\_down a node with missing info * Remove d\_info param from \_destroy\_images * Add docs for agent driver with devstack * Removes get\_port\_by\_vif * Update API document with BootDevice * Replace incomplete "ilo" driver with pxe\_ilo and fake\_ilo * Handle all exceptions from \_exec\_ipmitool * Remove objectify decorator from dbapi's {get, register}\_conductor() * Improve exception handling in console code * Use valid exception in start\_shellinabox\_console * Remove objectify decorator from dbapi.update\_\* methods * Add list() to Chassis, Node, Port objects * Raise MissingParameterValue when validating glance info * Mechanism to cleanup all ImageCaches * Driver merge review comments from 111425-2-3 * Raise MissingParameterValue instead of Invalid * Import fixes from the Nova driver reviews * Imported Translations from Transifex * 
Use auth\_token from keystonemiddleware * Make swift tempurl key secret * Add method for deallocating networks on reschedule * Reduce running time of test\_different\_sizes * Remove direct calls to dbapi's get\_node\_by\_instance * Add create() and destroy() to Port object * Correct \`op.drop\_constraint\` parameters * Use timeutils from one place * Add create() and destroy() to Chassis object * Add iPXE support for Ironic * Imported Translations from Transifex * Add posix\_ipc to requirements * backport reviewer comments on nova.virt.ironic.patcher * Move the 'instance\_info' fields to GenericDriverFields * Migration to oslo.utils library * Fix self.fields on API Port object * Fix self.fields on API Chassis object * Sync oslo.incubator modules * Updated from global requirements * Expose {set,get}\_boot\_device in the API * Check if boot device is persistent on ipminative * Sync oslo imageutils, strutils to Ironic * Add charset and engine settings to every table * Imported Translations from Transifex * Remove dbapi calls from agent driver * Fix not attribute '\_periodic\_last\_run' * Implements send-data-to-ceilometer * Port iBoot PDU driver from Nova * Log exception with translation * Add ironic-python-agent deploy driver * Updated from global requirements * Imported Translations from Transifex * Clean up calls to get\_port() * Clean up calls to get\_chassis() * Do not rely on hash ordering in tests * Update\_port should expect MACAlreadyExists * Imported Translations from Transifex * Adding swift temp url support * Push the image cache ttl way up * Imported Translations from Transifex * SSH virsh to use the new ManagementInterface * Split test case in ironic.tests.conductor.test\_manager * Tune down node\_locked\_retry\_{attempts,interval} config for tests * Add RPC version to test\_get\_driver\_properties 2014.2.b2 --------- * Import fixes from the Nova driver reviews * Generalize exception handling in Nova driver * Fix nodes left in an incosistent state if no 
workers * IPMINative to use the new ManagementInterface * Backporting nova host manager changes into ironic * Catch oslo.db error instead of sqlalchemy error * Add a test case for DB schema comparison * remove ironic-manage-ipmi.filters * Implement API to get driver properties * Add drivers.base.BaseDriver.get\_properties() * Implement retry on NodeLocked exceptions * SeaMicro to use the new ManagementInterface * Import fixes from Nova scheduler reviews * Rename/update common/tftp.py to common/pxe\_utils.py * Imported Translations from Transifex * Factor out deploy info from PXE driver * IPMITool to use the new ManagementInterface * Use mock.assert\_called\_once\_with() * Add missing docstrings * Raise appropriate errors on duplicate Node, Port and Chassis creation * Add IloDriver and its IloPower module * Add methods to ipmitool driver * Use opportunistic approach for migration testing * Use oslo.db library * oslo.i18n migration * Import a few more fixes from the Nova driver * Set a more generous default image cache size * Fix wrong test fixture for Node.properties * Make ComputeCapabilitiesFilter work with Ironic * Add more INFO logging to ironic/common/service.py * Clean up nova virt driver test code * Fix node to chassis and port to node association * Allow Ironic URL from config file * Imported Translations from Transifex * Update webapi doc with link and console * REST API 'limit' parameter to only accept positive values * Update docstring for api...node.validate * Document 'POST /v1/.../vendor\_passthru' * ManagementInterface {set, get}\_boot\_device() to support 'persistent' * Use my\_ip for neutron URL * Updated from global requirements * Add more INFO logging to ironic/conductor * Specify rootfstype=ramfs deploy kernel parameter * Add set\_spawn\_error\_hook to TaskManager * Imported Translations from Transifex * Updates the Ironic on Devstack dev documentation * Simplify error handling * Add gettextutils.\_L\* to import\_exceptions * Fix workaround for 
the "device is busy" problem * Allow noauth for Neutron * Minor cleanups to nova virt driver and tests * Update nova rebuild to account for new image * Updated from global requirements * pep8 cleanup of Nova code * PEP fixes for the Nova driver * Fix glance endpoint tests * Update Nova's available resources at termination * Fix the section name in CONTRIBUTING.rst * Add/Update docstrings in the Nova Ironic Driver * Update Nova Ironic Driver destroy() method * Nova Ironic driver get\_info() to return memory stats in KBytes * Updates Ironic Guide with deployment information * Add the remaining unittests to the ClientWrapper class * Wait for Neutron port updates when using SSHPower * Fix 'fake' driver unable to finish a deploy * Update "Exercising the Services Locally" doc * Fixing hardcoded glance protocol * Remove from\_chassis/from\_nodes from the API doc * Prevent updating UUID of Node, Port and Chassis on DB API level * Imported Translations from Transifex * Do not delete pxe\_deploy\_{kernel, ramdisk} on tear down * Implement security groups and firewall filtering methods * Add genconfig tox job for sample config file generation * Mock pyghmi lib in unit tests if not present * PXE to pass hints to ImageCache on how much space to reclaim * Add some real-world testing on DiskPartitioner * Eliminate races in Conductor \_check\_deploy\_timeouts * Use temporary dir for image conversion * Updated from global requirements * Move PXE instance level parameters to instance\_info * Clarify doc: API is admin only * Mock time.sleep for the IPMI tests * Destroy instance to clear node state on failure * Add 'context' parameter to get\_console\_output() * Cleanup virt driver tests and verify final spawn * Test fake console driver * Allow overriding the log level for ironicclient * Virt driver logging improvements * ipmitool driver raises DriverLoadError * VendorPassthru.validate()s call \_parse\_driver\_info * Enforce a minimum time between all IPMI commands * Remove 'node' 
parameter from the validate() methods * Test for membership should be 'not in' * Replace mknod() with chmod() * Factoring out PXE and TFTP functions * Let ipmitool natively retry commands * Sync processutils from oslo code * Driver interface's validate should return nothing * Use .png instead of .gif images * Fix utils.execute() for consistency with Oslo code * remove default=None for config options 2014.2.b1 --------- * Stop ipmitool.validate from touching the BMC * Set instance default\_ephemeral\_device * Add unique constraint to instance\_uuid * Add node id to DEBUG messages in impitool * Remove 'node' parameter from the Console and Rescue interfaces * TaskManager: Only support single node locking * Allow more time for API requests to be completed * Add retry logic to iscsiadm commands * Wipe any metadata from a nodes disk * Rework make\_partitions logic when preserve\_ephemeral is set * Fix host manager node detection logic * Add missing stats to IronicNodeState * Update IronicHostManager tests to better match how code works * Update Nova driver's list\_instance\_uuids() * Remove 'fake' and 'ssh' drivers from default enabled list * Work around iscsiadm delete failures * Mock seamicroclient lib in unit tests if not present * Cleanup mock patch without \`with\` part 2 * Add \_\_init\_\_.py for nova scheduler filters * Skip migrations test\_walk\_versions instead of pass * Improving unit tests for \_do\_sync\_power\_state * Fix AttributeError when calling create\_engine() * Reuse validate\_instance\_and\_node() Nova ironic Driver * Fix the logging message to identify node by uuid * Fix concurrent deletes in virt driver * Log exceptions from deploy and tear\_down * PXE driver to validate the requested image in Glance * Return the HTTP Location for accepted requestes * Return the HTTP Location for newly created resources * Fix tests with new keystoneclient * list\_instances() to return a list of instances names * Pass kwargs to ClientWrapper's call() method * 
Remove 'node' parameter from the Power interface * Set the correct target versions for the RPC methods * Consider free disk space before downloading images into cache * Change NodeLocked status code to a client-side error * Remove "node" parameter from methods handling power state in docs * Add parallel\_image\_downloads option * Synced jsonutils from oslo-incubator * Fix chassis bookmark link url * Remove 'node' parameter from the Deploy interface * Imported Translations from Transifex * Remove all mostly untranslated PO files * Cleanup images after deployment * Fix wrong usage of mock methods * Using system call for downloading files * Run keepalive in a dedicated thread * Don't translate debug level logs * Update dev quickstart guide for ephemeral testing * Speed up Nova Ironic driver tests * Renaming ironicclient exceptions in nova driver * Fix bad Mock calls to assert\_called\_once() * Cleanup mock patch without \`with\` part 1 * Corrects a typo in RESTful Web API (v1) document * Updated from global requirements * Clean up openstack-common.conf * Remove non-existent 'pxe\_default\_format' parameter from patcher * Remove explicit dependency on amqplib * Pin RPC client version min == max * Check requested image size * Fix 'pxe\_preserve\_ephemeral' parameter leakage * RPC\_API\_VERSION out of sync * Simplify calls to ImageCache in PXE module * Implement the reboot command on the Ironic Driver * Place root partition last so that it can always be expanded * Stop creating a swap partition when none was specified * Virt driver change to use API retry config value * Implement more robust caching for master images * Decouple state inspection and availability check * Updated from global requirements * Fix ironic node state comparison * Add create() and destroy() to Node * Fix typo in rpcapi.driver\_vendor\_passthru * Support serial console access * Remove 'node' parameter from the VendorPassthru interface * Updated from global requirements * Synced jsonutils from 
oslo-incubator * Fix chassis-node relationship * Implement instance rebuild in nova.virt.driver * Sync oslo logging * Add ManagementInterface * Clean oslo dependencies files * Return error immediately if set\_console\_mode is not supported * Fix bypassed reference to node state values * Updated from global requirements * Port to oslo.messaging * Drivers may expose a top-level passthru API * Overwrite instance\_exists in Nova Ironic Driver * Update Ironic User Guide post landing for 41af7d6b * Spawn support for TaskManager and 2 locking fixes * Document ClusteredComputeManager * Clean up calls to get\_node() * nova.virt.ironic passes ephemeral\_gb to ironic * Implement list\_instance\_uuids() in Nova driver * Modify the get console API * Complete wrapping ironic client calls * Add worker threads limit to \_check\_deploy\_timeouts task * Use DiskPartitioner * Better handling of missing drivers * Remove hardcoded node id value * cleanup docstring for drivers.utils.get\_node\_mac\_addresses * Update ironic.conf.sample * Make sync\_power\_states yield * Refactor sync\_power\_states tests to not use DB * Add DiskPartitioner * Some minor clean up of various doc pages * Fix message preventing overwrite the instance\_uuid * Install guide for Ironic * Refactor the driver fields mapping * Imported Translations from Transifex * Fix conductor.manager test assertion order * Overwriting node\_is\_available in IronicDriver * Sync oslo/common/excutils * Sync oslo/config/generator * Cherry pick oslo rpc HA fixes * Add Ironic User Guide * Remove a DB query for get\_ports\_by\_node() * Fix missed stopping of conductor service * Encapsulate Ironic client retry logic * Do not sync power state for new invalidated nodes * Make tests use Node object instead of dict * Sync object list stuff from Nova * Fix Node object version * Cleanup running conductor services in tests * Factor hash ring management out of the conductor * Replace sfdisk with parted * Handling validation in conductor 
consistently * JsonPatch add operation on existing property * Updated from global requirements * Remove usage of Glance from PXE clean\_up() * Fix hosts mapping for conductor's periodic tasks * Supports filtering port by address * Fix seamicro power.validate() method definition * Update tox.ini to also run nova tests * Updated from global requirements * Fix messages formatting for \_sync\_power\_states * Refactor nova.virt.ironic.driver get\_host\_stats * Use xargs -0 instead of --null * Change admin\_url help in ironic driver * Sync base object code with Nova's * Add Node.instance\_info field * Fix self.fields on API Node object * Show maintenance field in GET /nodes * Move duplicated \_get\_node(s)\_mac\_addresses() * Fix grammar in error string in pxe driver * Reduce logging output from non-Ironic libraries * Open Juno development 2014.1.rc1 ---------- * Fix spelling error in conductor/manager * Improved coverage for ironic API * Manually update all translated strings * Check that all po/pot files are valid * If no swap is specified default to 1MB * Fix Nova rescheduling tear down problem * Remove obsolete po entries - they break translation jobs * Add note to ssh about impact on ci testing * Adds exact match filters to nova scheduler * Clean up IronicNodeStates.update\_from\_compute\_node * ironic\_host\_manager was missing two stats * Imported Translations from Transifex * Fix seamicro validate() method definition * Remove some obsolete settings from DevStack doc * Raise unexpected exceptions during destroy() * Start using oslosphinx theme for docs * Provide a new ComputeManager for Ironic * Nova Ironic driver to set pxe\_swap\_mb in Ironic * Fix strings post landing for c63e1d9f6 * Run periodic\_task in a with a dynamic timer * Update SeaMicro to use MixinVendorInterface * Run ipmi power status less aggressively * Avoid API root controller dependency on v1 dir * Update Neutron if mac address of the port changed * Replace fixtures with mock in 
test\_keystone.py * Decrease running time of SeaMicro driver tests * Remove logging of exceptions from controller's methods * Imported Translations from Transifex * Fix missed exception raise in \_add\_driver\_fields * Speed up ironic tests * Pass no arguments to \_wait\_for\_provision\_state() * Adds max retry limit to sync\_power\_state task * Updated from global requirements * Imported Translations from Transifex * Stop incorrectly returning rescue: supported * Correct version.py and update current version string * Documentation for deploying DevStack /w Ironic * Hide rescue interface from validate() output * Change set\_console\_mode() to use greenthreads * Fix help string for a glance option * Expose API for fetching a single driver * Change JsonEncodedType.impl to TEXT * Fix traceback hook for avoid duplicate traces * Fix 'spacing' parameters for periodic tasks * Permit passing SSH keys into the Ironic API * Better instance-not-found handling within IronicDriver * Make sure auth\_url exists and is not versionless * Conductor de-registers on shutdown * Change deploy validation exception handling * Suppress conductor logging of expected exceptions * Remove unused method from timeutils * Add admin\_auth\_token option for nova driver * Remove redundant nova virt driver test * Process public API list as regular expressions * Enable pep8 tests for the Nova Ironic Driver * Fix typo tenet -> tenant * Stop logging paramiko's DEBUG and INFO messages * Set boot device to PXE when deploying * Driver utils should raise unsupported method * Delete node while waiting for deploy * Check BMC availability in ipmitool 'validate' method * SeaMicro use device parameter for set\_boot\_device * Make the Nova Ironic driver to wait for ACTIVE * Fix misspelled impi to ipmi * Do not use \_\_builtin\_\_ in python3 * Use range instead xrange to keep python 3.X compatibility * Set the database.connection option default value * PXE validate() to fail if no Ironic API URL * Improve Ironic 
Conductor threading & locks * Generic MixinVendorInterface using static mapping * Conductor logs better error if seamicroclient missing * Add TaskManager lock on change port data * Nova ironic driver to retry on HTTP 503 * Mark hash\_replicas as experimental * do\_node\_deploy() to use greenthreads * Move v1 API tests to separate v1 directory * Pin iso8601 logging to WARN * Only fetch node once for vif actions * Fix how nova ironic driver gets flavor information * Imported Translations from Transifex * API: Add sample() method to remaining models * Import Nova "ironic" driver * Remove errors from API documentation * Add libffi-dev(el) dependency to quickstart * Updated from global requirements * Remove redundant default value None for dict.get 2014.1.b3 --------- * Refactor vendor\_passthru to use conductor async workers * Fix wrong exception raised by conductor for node * Fix params order in assertEqual * Sync the log\_handler from oslo * Fix SeaMicro driver post landing for ba207b4aa0 * Implements SeaMicro VendorPassThru functionality * Implement the SeaMicro Power driver * Fix provision\_updated\_at deserialization * Remove jsonutils from test\_rpcapi * Do not delete a Node which is not powered off * Add provision\_updated\_at to node's resource * Prevent a node in maintenance from being deployed * Allow clients to mark a node as in maintenance * Support preserve\_ephemeral * Updated from global requirements * API: Expose a way to start/stop the console * Add option to sync node power state from DB * Make the PXE driver understand ephemeral disks * Log deploy\_utils.deploy() erros in the PXE driver * Removing get\_node\_power\_state, bumping RPC version * Add timeout for waiting callback from deploy ramdisk * Prevent GET /v1/nodes returning maintenance field * Suggested improvements to \_set\_boot\_device * Move ipminative \_set\_boot\_device to VendorPassthru * Sync common db code from Oslo * PXE clean\_up() to remove the pxe\_deploy\_key parameter * Add 
support for custom libvirt uri * Python 3: replace "im\_self" by "\_\_self\_\_" * Fix race condition when deleting a node * Remove extraneous vim configuration comments for ironic * Do not allow POST ports and chassis internal attributes * Do not allow POST node's internal attributes * Unused 'pxe\_key\_data' & 'pxe\_instance\_name' info * Add provision\_updated\_at field to nodes table * Exclude nodes in DEPLOYWAIT state from \_sync\_power\_states * Sync common config module from Oslo * Get rid object model \`dict\` methods part 4 * Sync Oslo rpc module to Ironic * Clarify and fix the dev-quickstart doc some more * Do not use CONF as a default parameter value * Simplify locking around acquiring Node resources * Improve help strings * Remove shebang lines from code * Use six.moves.urllib.parse instead of urlparse * Add string representation method to MultiType * Fix test migrations for alembic * Sync Oslo gettextutils module to Ironic * NodeLocked returns 503 error status * Supports OPERATOR priv level for ipmitool driver * Correct assertEqual order from patch e69e41c99fb * PXE and SSH validate() method to check for a port * Task object as paramater to validate() methods * Fix dev-quick-start.rst post landing for 9d81333fd0 * API validates driver name for both POST and PATCH * Sync Oslo service module to Ironic * Move ipmitool \_set\_boot\_device to VendorPassthru * Use six.StringIO/BytesIO instead of StringIO.StringIO * Add JSONEncodedType with enforced type checking * Correct PXEPrivateMethodsTestCase.setUp * Don't raise MySQL 2013 'Lost connection' errors * Use the custom wsme BooleanType on the nodes api * Add wsme custom BooleanType type * Fix task\_manager acquire post landing for c4f2f26ed * Add common.service config options to sample * Removes use of timeutils.set\_time\_override * Replace assertEqual(None, \*) with assertIsNone in tests * Replace nonexistent mock assert methods with real ones * Log IPMI power on/off timeouts * Remove None as default value 
for dict get() * Fix autodoc formatting in pxe.py * Fix race condition when changing node states * Use StringType from WSME * Add testing and doc sections to docs/dev-quickstart * Implement \_update\_neutron in PXE driver * Remove \_load\_one\_plugin fallback * SSHPower driver support VMware ESXi * Make ironic-api not single threaded * Remove POST calls in tests for resource creation * Add topic to the change\_node\_maintenance\_mode() RPC method * Fix API inconsistence when changing node's states * Add samples to serve API through Apache mod\_wsgi * Add git dependency to quickstart docs * Add get\_console() method * Remove unnecessary json dumps/loads from tests * Add parameter for filtering nodes by maintenance mode * Rename and update ironic-deploy-helper rootwrap * Remove tox locale overrides * Updated from global requirements * Move eventlent monkeypatch out of cmd/ * Fix misspellings in ironic * Ensure parameter order of assertEqual correct * Return correct HTTP response codes for create ops * Fix broken doc links on the index page * Allow to tear-down a node waiting to be deployed * Improve NodeLocked exception message * Expose 'reservation' field of a node via API * Implement a multiplexed VendorPassthru example * Fix log and test for NeutronAPI.update\_port\_dhcp\_opts * Fix 'run\_as\_root' parameter check in utils * Handle multiple exceptions raised by jsonpatch * API tests to check for the return codes * Imported Translations from Transifex * Move test\_\_get\_nodes\_mac\_addresses * Removed duplicated function to create a swap fs * Updated from global requirements * Add futures to requirements * Fix missing keystone option in ironic.conf.sample * Adds Neutron support to Ironic * Replace CONF.set\_default with self.config * Fix ssh\_port type in \_parse\_driver\_info() from ssh.py * Improve handling of invalid input in HashRing class * Sync db.sqlalchemy code from Oslo * Add lockfile>=0.8 to requirements.txt * Remove net\_config\_template options * 
Remove deploy kernel and ramdisk global config * Update docstrings in ssh.py * SSHPower driver raises IronicExceptions * mock's return value for processutils.ssh\_execute * API: Add sample() method on Node * Update method doc strings in pxe.py * Minor documentation update * Removed unused exceptions * Bump version of sphinxcontrib-pecanwsme * Add missing parameter in call to \_load\_one\_plugin * Docstrings for ipmitool * alembic with initial migration and tests * Update RPC version post-landing for 9bc5f92fb * ipmitool's \_power\_status raises IPMIFailure 2014.1.b2 --------- * Add [keystone\_authtoken] to ironic.conf.sample * Updated from global requirements * Add comment about node.instance\_uuid * Run mkfs as root * Remove the absolute paths from ironic-deploy-helper.filters * PXE instance\_name is no longer mandatory * Remove unused config option - pxe\_deploy\_timeout * Delete the iscsi target * Imported Translations from Transifex * Fix non-unique tftp dir instance\_uuid * Fix non-unique pxe driver 'instance\_name' * Add missing "Filters" section to the ironic-images.filters * Use oslo.rootwrap library instead of local copy * Replace assertTrue with explicit assertIsInstance * Disallow new provision for nodes in maintenance * Add RPC method for node maintenance mode * Fix keystone get\_service\_url filtering * Use same MANAGER\_TOPIC variable * Implement consistent hashing of nodes to conductors * PXEAndSSH driver lacked vendor\_passthru * Use correct auth context inside pxe driver * sync\_power\_states handles missing driver info * Enable $pybasedir value in pxe.py * Correct SSHPowerDriver validate() exceptions * API to check the requested power state * Improve the node driver interfaces validation output * Remove copyright from empty files * Make param descriptions more consistent in API * Imported Translations from Transifex * Fix wrong message of pxe validator * Remove unused dict BYTE\_MULTIPLIERS * Implement API for provisioning * API to validate UUID 
parameters * Make chassis\_uuid field of nodes optional * Add unit tests for get\_nodeinfo\_list * Improve error handling in PXE \_continue\_deploy * Make param names more consistent in API * Sync config module from oslo * Fix wrong message of MACAlreadyExists * Avoid a race when associating instance\_uuid * Move and rename ValidTypes * Convert trycmd() to oslo's processutils * Improve error handling in validate\_vendor\_action * Passing nodes more consistently * Add 'next' link when GET maximum number of items * Check connectivity in SSH driver 'validate' method * GET /drivers to show a list of active conductors * Improve method to get list of active conductors * Refactor /node//state * Reworks Chassis validations * Reworks Node validations * Developer doc index page points to correct API docs * Fix auto-generated REST API formatting * Method to generate PXE options for Neutron ports * Strip '/' from api\_url string for PXE driver * Add driver interfaces validation * Command call should log the stdout and stderr * Add prepare, clean\_up, take\_over methods to deploy * PEP8-ify imports in test\_ipmitool * API: Add sample() method on Port and PortCollection * API: Validate and normalize address * Handle DBDuplicateEntry on Ports with same address * Imported Translations from Transifex * removed wrap\_exception method from ironic/common/exception.py * Rework patch validation on Ports * Add JsonPatchType class * Change default API auth to keystone-based * Clean up duplicated change-building code in objects * Add -U to pip install command in tox.ini * Updated from global requirements * Add config option for # of conductor replicas * Port StringType class from WSME trunk * Add tools/conf/check\_uptodate to tox.ini 2014.1.b1 --------- * Correct error with unicode mac address * Expose created\_at/updated\_at properties in the REST API * Import heartbeat\_interval opt in API * Add power control to PXE driver * Implement sync\_power\_state periodic task * Set the 
provision\_state to DEPLOYFAIL * Save PKI token in a file for PXE deploy ramdisk * API ports update for WSME 0.5b6 compliance * Add heartbeat\_interval to new 'conductor' cfg group * Add missing hash\_partition\_exponent config option * If no block devices abort deployment * Add missing link for drivers resource * Apply comments to 58558/4 post-landing * Replace removed xrange in Python3 * Imported Translations from Transifex * Use addCleanup() in test\_deploy\_utils * Allow Pecan to use 'debuginfo' response field * Do not allow API to expose error stacktrace * Add port address unique constraint for sqlite * Implement consistent hashing common methods * Sync some db changes from Oslo * Bump required version of sqlalchemy-migrate * Update ironic.conf.sample * Import uuidutils unit tests from oslo * Allow FakePower to return node objects power\_state * Adds doc strings to API FunctionalTest class * Use oslo's execute() and ssh\_execute() methods * Remove openstack.common.uuidutils * Sync common.context changes from olso * Remove oslo uuidutils.is\_uuid\_like call * Remove oslo uuidutils.generate\_uuid() call * Add troubleshoot option to PXE template * Imported Translations from Transifex * Add tftp\_server pattern in ironic.conf * Import HasLength object * ipmitool SHOULD accept empty username/password * Imported Translations from Transifex * Add missing ConfigNotFound exception * Imported Translations from Transifex * Add hooks to auto-generate REST API docs * Imported Translations from Transifex * Redefined default value of allowed\_rpc\_exception\_modules * Add last\_error usage to deploy and teardown methods * Support building wheels (PEP-427) * Import missing gettext \_ to fix Sphinx error * sync common.service from oslo * sync common.periodic\_task from oslo * sync common.notifier.\* from oslo * sync common.log from oslo * sync common.local from oslo * Sync common utils from Oslo * Rename parameters * Accessing a subresource that parent does not exist * 
Imported Translations from Transifex * Changes power\_state and adds last\_error field * Update openstack/common/lockutils * sync common.context from oslo * sync common.config.generator from oslo * Remove sqlalchemy-migrate 0.7.3 patching * Fix integer division compatibility in middleware * Fix node lock in PXE driver * Imported Translations from Transifex * Register API options under the 'api' group * Supporting both Python 2 and Python 3 with six * Supports get node by instance uuid in API * Imported Translations from Transifex * Check invalid uuid for get-by-instance db api * Fix error handling in ssh driver * Replace \_\_metaclass\_\_ * Supporting both Python 2 and Python 3 with six * Pass Ironic API url to deploy ramdisk in PXE driver * Remove 'basestring' from objects utils * Allows unicode description for chassis * Fix a typo in the name of logger method exception * Don't use deprecated module commands * Comply with new hacking requirements * Improve the API doc spec for chassis * Improve the API doc spec for node * Updated from global requirements * Fix i18N compliance * Add wrapper for keystone service catalog * Fix test node manager * Expose /drivers on the API * Update mailmap for Joe Gordon * Add mailmap file * Implement /nodes/UUID/vendor\_passthru in the API * Add context to TaskManager * Regenerate the sample config file * Conductors maintan driver list in the DB * Group and unify ipmi configurations * Fix a few missing i18n * Fix status codes in node controller * Fix exceptions handling in controllers * Updated from global requirements * Support uniform MAC address with colons * Remove redundant test stubs from conductor/manager * Remove several old TODO messages * Supports paginate query for two get nodes DB APIs * Remove \_driver\_factory class attribute * Fixes RootController to allow URL without version tag * Don't allow deletion of associated node * Remove duplicated db\_api.get\_instance() from tests * Updated from global requirements * Do not 
use string concatenation for localized strings * Remove the NULL state * Add DriverFactory * Adjust native ipmi default wait time * Be more patient with IPMI and BMC * Implement db get\_[un]associated\_nodes * Remove unused nova specific files * Removes unwanted mox and fixture files * Removes stubs from unit tests * Remove unused class/file * Remove driver validation on node update * Consolidates TestCase and BaseTestCase * Fix policies * Improve error message for ssh * Fix datetime format in FakeCache * Fix power\_state set to python object repr * Updated from global requirements * Replaces mox with mock for test\_deploy\_utils * Replaces mox with mock in api's unit tests * Replaces mox with mock in objects' unit tests * Replaces mox with mock for conductor unit tests * fix ssh driver exec command issues * Fix exceptions error codes * Remove obsolete redhat-eventlet.patch * Replaces mox with mock for test\_utils * Replaces mox with mock for ssh driver unit tests * Remove nested 'ipmi' dict from driver\_info * Replace tearDown with addCleanup in unit tests * Remove nested 'ssh' dict from driver\_info * Remove nested 'pxe' dict from driver\_info * Save and validate deployment key in PXE driver * Implement deploy and tear\_down conductor methods * Use mock to do unit tests for pxe driver * Code clean in node controller * Use mock to do unit tests for ipminative driver * Replaces mox with mock for ipmitool driver unit tests * Fix parameter name in wsexpose * Rename start\_power\_state\_change to change\_node\_power\_state * Mount iSCSI target and 'dd' in PXE driver * Add tests for api/utils.py * Check for required fields on ports * Replace Cheetah with Jinja2 * Update from global requirements * Upgrade tox to 1.6 * Add API uuid <-> id mapping * Doc string and minor clean up for 41976 * Update error return code to match new Pecan release * Add vendor\_passthru method to RPC API * Integer types support in api * Add native ipmi driver * API GET to return only minimal 
data * Fix broken links * Collection named based on resource type * Remove nova specific tests * Changes documentation hyperlinks to be relative * Replace OpenStack LLC with OpenStack Foundation * Force textmode consoles * Implemented start\_power\_state\_change In Conductor * Updates documentation for tox use * Drop setuptools\_git dependency * Fix tests return codes * Fix misused assertTrue in unit tests * Prevent updates while state change is in progress * Use localisation where user visible strings are used * Update only the changed fields * Improve parameters validate in PXE driver * Rename ipmi driver to ipmitool * Remove jsonutils from PXE driver * Expose the vendor\_passthru resource * Driver's validation during node update process implemented * Public API * Remove references for the 'task\_state' property * Use 'provision\_state' in PXE driver * Updating resources with PATCH * Add missing unique constraint * Fix docstring typo * Removed templates directory in api config * Added upper version boundry for six * Sync models with migrations * Optimization reserve and release nodes db api methods * Add missing foreign key * Porting nova pxe driver to ironic * API Nodes states * Fix driver loading * Move glance image service client from nova and cinder into ironic * Implement the root and v1 entry points of the API * Expose subresources for Chassis and Node * Add checks locked nodes to db api * Update the dev docs with driver interface description * Add missing tests for chassis API * Delete controller to make code easy to read and understood * Disable deleting a chassis that contains nodes * Update API documentation * Add Pagination of collections across the API * Fix typo in conductor manager * Remove wsme validate decorator from API * Add missing tests for ports API * Modify is\_valid\_mac() for support unicode strings * Add DB and RPC method doc strings to hook.py * Delete unused templates * Use fixture from Oslo * Move "opportunistic" db migrations tests 
from Nova * Build unittests for nodes api * make api test code more readable * Add links to API Objects * Delete Ironic context * Add tests for existing db migrations * Add common code from Oslo for db migrations test * Remove extra pep8/flake8/pyflakes requirements * Sync requirements with OpenStack/requirements * Fix up API tests before updating hacking checks * Add RPC methods for updating nodes * Run extract\_messages * Keystone authentiation * Add serializer param to RPC service * Import serialization and nesting from Nova Objects * Implement chassis api actions * update requires to prevent version cap * Change validate() to raise instead of returning T/F * Add helpers for single-node tasks * Implement port api action * Modify gitignore to ignore sqlite * Update resource manager for fixed stevedore issue * Add dbapi functions * Remove suds requirement * Sync install\_venv\_common from oslo * Move mysql\_engine option to [database] group * Re-define 'extra' as dict\_or\_none * Added Python-2.6 to the classifier * Rename "manager" to "conductor" * Port from nova: Fix local variable 'root\_uuid' ref * Created a package for API controllers V1 * Sync requirements with OpenStack/requirements * Remove unused APICoverage class * Sync fileutils from oslo-incubator * Sync strutils from oslo-incubator * Add license header * Update get\_by\_uuid function doc in chassis * Fix various Python 2.x->3.x compat issues * Improve unit tests for API * Add Chassis object * Add Chassis DB model and DB-API * Delete associated ports after deleting a node * Virtual power driver is superceded by ssh driver * Add conf file generator * Refactored query filters * Add troubleshoot to baremetal PXE template * Add err\_msg param to baremetal\_deploy\_helper * Retry the sfdisk command up to 3 times * Updated API Spec for new Drivers * Improve IPMI's \_make\_password\_file method * Remove spurious print statement from update\_node * Port middleware error handler from ceilometer API * Add 
support for GET /v1/nodes to return a list * Add object support to API service * Remove the unused plugin framework * Improve tests for Node and Port DB objects * SSH driver doesn't need to query database * Create Port object * Add uuid to Port DB model * Delete Flask Dependence * Writing Error: nodess to nodes * Create the Node object * Restructuring driver API and inheritance * Remove explicit distribute depend * Bump version of PBR * Remove deleted[\_at] from base object * Make object actions pass positional arguments * Fix relative links in architecture doc * Reword architecture driver description * Remove duplication from README, add link to docs * Port base object from Nova * Fix ironic-rootwrap capability * Add ssh power manager * Prevent IPMI actions from colliding * Add TaskManager tests and fix decorator * Mocked NodeManager can load and mock real drivers * Add docs for task\_manager and tests/manager/utils * Fix one typo in index.rst * Add missing 'extra' field to models.nodes * More doc updates * Remove the old README * More doc updates * Minor fixes to sphinx docs * Added API v1 Specification * Add initial sphinx docs, based on README * Initial skeleton for an RPC layer * Log configuration values on API startup * Don't use pecan to configure logging * Move database.backend option import * Remove unused authentication CLI options * Rename TestCase.flags() to TestCase.config() * Copy the RHEL6 eventlet workaround from Oslo * Sync new database config group from oslo-incubator * Minor doc change for manager and resorce\_manager * Add support for Sphinx Docs * Update IPMI driver to work with resource manager * Add validate\_driver\_info to driver classes * Implement Task and Resource managers * Update [reserve|release]\_nodes to accept a tag * More updates to the README * Reimplement reserve\_nodes and release\_nodes * Rename the 'ifaces' table to 'ports' * Change 'nodes' to use more driver-specific JSON * Update driver names and base class * Stop creating 
a new db IMPL for every request * Fix double "host" option * Sync safe changes from oslo-incubator * Sync rpc changes from oslo-incubator * Sync log changes from oslo-incubator * Sync a rootwrap KillFilter fix from oslo-incubator * Sync oslo-incubator python3 changes * Add steps to README.rst * Fix fake bmc driver * move ironic docs to top level for ease of discovery * Update the README file development section * Add some API definitions to the README * Update the distribute dependency version * Add information to the project README * Fixes test\_update\_node by testing updated node * Fix pep8 errors and make it pass Jenkins tests * Update IPMI driver for new base class * Add new base and fake driver classes * Delete old base and fake classes * Add a few fixes for the API * Move strong nova depenencies into temporary dir * Update IPMI for new DB schema * Add unit tests for DB API * Remove tests for old DB * Add tests for ironic-dbsync * Remove ironic\_manage * Implement GET /node/ifaces/ in API * Update exception.py * Update db models and API * Implement skeleton for a new DB backend * Remove the old db implementation * Implement initial skeleton of a manager service * Implement initial draft of a Pecan-based API * Fix IPMI tests * Move common things to ironic.common * Fix failing db and deploy\_helper tests * un-split the db backend * Rename files and fix things * Import add'l files from Nova * update openstack-common.conf and import from oslo * Added .testr.conf * Renamed nova to ironic * Fixed hacking, pep8 and pyflakes errors * Added project infrastructure needs * Fix baremetal get\_available\_nodes * Improve Python 3.x compatibility * Import and convert to oslo loopingcall * baremetal: VirtualPowerDriver uses mac addresses in bm\_interfaces * baremetal: Change input for sfdisk * baremetal: Change node api related to prov\_mac\_address * Remove "undefined name" pyflake errors * Remove unnecessary LOG initialisation * Define LOG globally in 
baremetal\_deploy\_helper * Only call getLogger after configuring logging * baremetal: Integrate provisioning and non-provisioning interfaces * Move console scripts to entrypoints * baremetal: Drop unused columns in bm\_nodes * Remove print statements * Delete tests.baremetal.util.new\_bm\_deployment() * Adds Tilera back-end for baremetal * Change type of ssh\_port option from Str to Int * Virtual Power Driver list running vms quoting error * xenapi: Fix reboot with hung volumes * Make bm model's deleted column match database * Correct substring matching of baremetal VPD node names * Read baremetal images from extra\_specs namespace * Compute manager should remove dead resources * Add ssh port and key based auth to VPD * Add instance\_type\_get() to virt api * Don't blindly skip first migration * BM Migration 004: Actually drop column * Update OpenStack LLC to Foundation * Sync nova with oslo DB exception cleanup * Fix exception handling in baremetal API * BM Migrations 2 & 3: Fix drop\_column statements * Remove function redefinitions * Move some context checking code from sqlalchemy * Baremetal driver returns accurate list of instance * Identify baremetal nodes by UUID * Improve performance of baremetal list\_instances * Better error handling in baremetal spawn & destroy * Wait for baremetal deploy inside driver.spawn * Add better status to baremetal deployments * Use oslo-config-2013.1b4 * Delete baremetal interfaces when their parent node is deleted * VirtualPowerDriver catches ProcessExecutionError * Don't modify injected\_files inside PXE driver * Remove nova.db call from baremetal PXE driver * Add a virtual PowerDriver for Baremetal testing * Recache or rebuild missing images on hard\_reboot * Use oslo database code * Fixes 'not in' operator usage * Make sure there are no unused import * Enable N302: Import modules only * Correct a format string in virt/baremetal/ipmi.py * Add REST api to manage bare-metal nodes * Baremetal/utils should not log certain 
exceptions * PXE driver should rmtree directories it created * Add support for Option Groups in LazyPluggable * Remove obsolete baremetal override of MAC addresses * PXE driver should not accept empty kernel UUID * Correcting improper use of the word 'an' * Export the MAC addresses of nodes for bare-metal * Break out a helper function for working with bare metal nodes * Keep self and context out of error notification payload * Tests for PXE bare-metal provisioning helper server * Change ComputerDriver.legacy\_nwinfo to raise by default * fix new N402 errors * Remove unused baremetal PXE options * Move global service networking opts to new module * Fix N402 for nova/virt * Cope better with out of sync bm data * Fix baremetal VIFDriver * CLI for bare-metal database sync * attach/detach\_volume() take instance as a parameter * Convert short doc strings to be on one line * Check admin context in bm\_interface\_get\_all() * Provide a PXE NodeDriver for the Baremetal driver * Refactor periodic tasks * Add helper methods to nova.paths * Move global path opts in nova.paths * Removes unused imports * Improve baremetal driver error handling * baremetal power driver takes \*\*kwargs * Implement IPMI sub-driver for baremetal compute * Fix tests/baremetal/test\_driver.py * Move baremetal options to [BAREMETAL] OptGroup * Remove session.flush() and session.query() monkey patching * Remove unused imports * Removed unused imports * Parameterize database connection in test.py * Baremetal VIF and Volume sub-drivers * New Baremetal provisioning framework * Move baremetal database tests to fixtures * Add exceptions to baremetal/db/api * Add blank nova/virt/baremetal/\_\_init\_\_.py * Move sql options to nova.db.sqlalchemy.session * Use CONF.import\_opt() for nova.config opts * Remove nova.config.CONF * remove old baremetal driver * Remove nova.flags * Fix a couple uses of FLAGS * Added separate bare-metal MySQL DB * Switch from FLAGS to CONF in tests * Updated scheduler and compute 
for multiple capabilities * Switch from FLAGS to CONF in nova.virt * Make ComputeDrivers send hypervisor\_hostname * Introduce VirtAPI to nova/virt * Migrate to fileutils and lockutils * Remove ComputeDriver.update\_host\_status() * Rename imagebackend arguments * Move ensure\_tree to utils * Keep the ComputeNode model updated with usage * Don't stuff non-db data into instance dict * Making security group refresh more specific * Use dict style access for image\_ref * Remove unused InstanceInfo class * Remove list\_instances\_detail from compute drivers * maint: remove an unused import in libvirt.driver * Fixes bare-metal spawn error * Refactoring required for blueprint xenapi-live-migration * refactor baremetal/proxy => baremetal/driver * Switch to common logging * Make libvirt LoopingCalls actually wait() * Imports cleanup * Unused imports cleanup (folsom-2) * convert virt drivers to fully dynamic loading * cleanup power state (partially implements bp task-management) * clean-up of the bare-metal framework * Added a instance state update notification * Update pep8 dependency to v1.1 * Alphabetize imports in nova/tests/ * Make use of openstack.common.jsonutils * Alphabetize imports in nova/virt/ * Replaces exceptions.Error with NovaException * Log instance information for baremetal * Improved localization testing * remove unused flag: baremetal\_injected\_network\_template baremetal\_uri baremetal\_allow\_project\_net\_traffic * Add periodic\_fuzzy\_delay option * HACKING fixes, TODO authors * Add pybasedir and bindir options * Only raw string literals should be used with \_() * Remove unnecessary setting up and down of mox and stubout * Remove unnecessary variables from tests * Move get\_info to taking an instance * Exception cleanup * Backslash continuations (nova.tests) * Replace ApiError with new exceptions * Standardize logging delaration and use * remove unused and buggy function from baremetal proxy * Backslash continuations (nova.virt.baremetal) * Remove 
the last of the gflags shim layer * Implements blueprint heterogeneous-tilera-architecture-support * Deleting test dir from a pull from trunk * Updated to remove built docs * initial commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/LICENSE0000664000175000017500000002363700000000000014120 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108936.1106672 ironic-20.1.0/PKG-INFO0000664000175000017500000000536600000000000014207 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: ironic Version: 20.1.0 Summary: OpenStack Bare Metal Provisioning Home-page: https://docs.openstack.org/ironic/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ====== Ironic ====== Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/ironic.svg :target: https://governance.openstack.org/tc/reference/tags/index.html Overview -------- Ironic consists of an API and plug-ins for managing and provisioning physical machines in a security-aware and fault-tolerant manner. It can be used with nova as a hypervisor driver, or standalone service using bifrost. By default, it will use PXE and IPMI to interact with bare metal machines. Ironic also supports vendor-specific plug-ins which may implement additional functionality. Ironic is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. 
Project resources ~~~~~~~~~~~~~~~~~ * Documentation: https://docs.openstack.org/ironic/latest * Source: https://opendev.org/openstack/ironic * Bugs: https://storyboard.openstack.org/#!/project/943 * Wiki: https://wiki.openstack.org/wiki/Ironic * APIs: https://docs.openstack.org/api-ref/baremetal/index.html * Release Notes: https://docs.openstack.org/releasenotes/ironic/ * Design Specifications: https://specs.openstack.org/openstack/ironic-specs/ Project status, bugs, and requests for feature enhancements (RFEs) are tracked in StoryBoard: https://storyboard.openstack.org/#!/project/943 For information on how to contribute to ironic, see https://docs.openstack.org/ironic/latest/contributor Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Requires-Python: >=3.6 Provides-Extra: devstack Provides-Extra: guru_meditation_reports Provides-Extra: i18n Provides-Extra: test ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/README.rst0000664000175000017500000000270400000000000014572 0ustar00zuulzuul00000000000000====== Ironic ====== Team and repository tags ------------------------ .. 
image:: https://governance.openstack.org/tc/badges/ironic.svg :target: https://governance.openstack.org/tc/reference/tags/index.html Overview -------- Ironic consists of an API and plug-ins for managing and provisioning physical machines in a security-aware and fault-tolerant manner. It can be used with nova as a hypervisor driver, or standalone service using bifrost. By default, it will use PXE and IPMI to interact with bare metal machines. Ironic also supports vendor-specific plug-ins which may implement additional functionality. Ironic is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. Project resources ~~~~~~~~~~~~~~~~~ * Documentation: https://docs.openstack.org/ironic/latest * Source: https://opendev.org/openstack/ironic * Bugs: https://storyboard.openstack.org/#!/project/943 * Wiki: https://wiki.openstack.org/wiki/Ironic * APIs: https://docs.openstack.org/api-ref/baremetal/index.html * Release Notes: https://docs.openstack.org/releasenotes/ironic/ * Design Specifications: https://specs.openstack.org/openstack/ironic-specs/ Project status, bugs, and requests for feature enhancements (RFEs) are tracked in StoryBoard: https://storyboard.openstack.org/#!/project/943 For information on how to contribute to ironic, see https://docs.openstack.org/ironic/latest/contributor ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8626666 ironic-20.1.0/api-ref/0000775000175000017500000000000000000000000014423 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/regenerate-samples.sh0000775000175000017500000003021200000000000020543 0ustar00zuulzuul00000000000000#!/bin/bash set -e -x if [ ! -x /usr/bin/jq ]; then echo "This script relies on 'jq' to process JSON output." echo "Please install it before continuing." 
exit 1 fi OS_AUTH_TOKEN=$(openstack token issue | grep ' id ' | awk '{print $4}') IRONIC_URL="http://127.0.0.1:6385" IRONIC_API_VERSION="1.55" export OS_AUTH_TOKEN IRONIC_URL DOC_BIOS_UUID="dff29d23-1ded-43b4-8ae1-5eebb3e30de1" DOC_CHASSIS_UUID="dff29d23-1ded-43b4-8ae1-5eebb3e30de1" DOC_NODE_UUID="6d85703a-565d-469a-96ce-30b6de53079d" DOC_DYNAMIC_NODE_UUID="2b045129-a906-46af-bc1a-092b294b3428" DOC_PORT_UUID="d2b30520-907d-46c8-bfee-c5586e6fb3a1" DOC_PORTGROUP_UUID="e43c722c-248e-4c6e-8ce8-0d8ff129387a" DOC_VOL_CONNECTOR_UUID="9bf93e01-d728-47a3-ad4b-5e66a835037c" DOC_VOL_TARGET_UUID="bd4d008c-7d31-463d-abf9-6c23d9d55f7f" DOC_PROVISION_UPDATED_AT="2016-08-18T22:28:49.946416+00:00" DOC_CREATED_AT="2016-08-18T22:28:48.643434+11:11" DOC_UPDATED_AT="2016-08-18T22:28:49.653974+00:00" DOC_IRONIC_CONDUCTOR_HOSTNAME="897ab1dad809" DOC_ALLOCATION_UUID="3bf138ba-6d71-44e7-b6a1-ca9cac17103e" DOC_DEPLOY_TEMPLATE_UUID="bbb45f41-d4bc-4307-8d1d-32f95ce1e920" function GET { # GET $RESOURCE curl -s -H "X-Auth-Token: $OS_AUTH_TOKEN" \ -H "X-OpenStack-Ironic-API-Version: $IRONIC_API_VERSION" \ ${IRONIC_URL}/$1 | jq -S '.' } function POST { # POST $RESOURCE $FILENAME curl -s -H "X-Auth-Token: $OS_AUTH_TOKEN" \ -H "X-OpenStack-Ironic-API-Version: $IRONIC_API_VERSION" \ -H "Content-Type: application/json" \ -X POST --data @$2 \ ${IRONIC_URL}/$1 | jq -S '.' } function PATCH { # POST $RESOURCE $FILENAME curl -s -H "X-Auth-Token: $OS_AUTH_TOKEN" \ -H "X-OpenStack-Ironic-API-Version: $IRONIC_API_VERSION" \ -H "Content-Type: application/json" \ -X PATCH --data @$2 \ ${IRONIC_URL}/$1 | jq -S '.' 
} function PUT { # PUT $RESOURCE $FILENAME curl -s -H "X-Auth-Token: $OS_AUTH_TOKEN" \ -H "X-OpenStack-Ironic-API-Version: $IRONIC_API_VERSION" \ -H "Content-Type: application/json" \ -X PUT --data @$2 \ ${IRONIC_URL}/$1 } function wait_for_node_state { local node="$1" local field="$2" local target_state="$3" local attempt=10 while [[ $attempt -gt 0 ]]; do res=$(openstack baremetal node show "$node" -f value -c "$field") if [[ "$res" == "$target_state" ]]; then break fi sleep 1 attempt=$((attempt - 1)) echo "Failed to get node $field == $target_state in $attempt attempts." done if [[ $attempt == 0 ]]; then exit 1 fi } pushd source/samples ########### # ROOT APIs GET '' > api-root-response.json GET 'v1' > api-v1-root-response.json ########### # DRIVER APIs GET v1/drivers > drivers-list-response.json GET v1/drivers?detail=true > drivers-list-detail-response.json GET v1/drivers/ipmi > driver-get-response.json GET v1/drivers/agent_ipmitool/properties > driver-property-response.json GET v1/drivers/agent_ipmitool/raid/logical_disk_properties > driver-logical-disk-properties-response.json ######### # CHASSIS POST v1/chassis chassis-create-request.json > chassis-show-response.json CID=$(cat chassis-show-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$CID" == "" ]; then exit 1 else echo "Chassis created. UUID: $CID" fi GET v1/chassis > chassis-list-response.json GET v1/chassis/detail > chassis-list-details-response.json PATCH v1/chassis/$CID chassis-update-request.json > chassis-update-response.json # skip GET /v1/chassis/$UUID because the response is same as POST ####### # NODES # Create a node with a real driver, but missing ipmi_address, # then do basic commands with it POST v1/nodes node-create-request-classic.json > node-create-response.json NID=$(cat node-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$NID" == "" ]; then exit 1 else echo "Node created. 
UUID: $NID" fi # Also create a node with a dynamic driver for viewing in the node list # endpoint DNID=$(POST v1/nodes node-create-request-dynamic.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$DNID" == "" ]; then exit 1 else echo "Node created. UUID: $DNID" fi # get the list of passthru methods from agent* driver GET v1/nodes/$NID/vendor_passthru/methods > node-vendor-passthru-response.json # Change to the fake driver and then move the node into the AVAILABLE # state without saving any output. # NOTE that these three JSON files are not included in the docs PATCH v1/nodes/$NID node-update-driver.json PUT v1/nodes/$NID/states/provision node-set-manage-state.json PUT v1/nodes/$NID/states/provision node-set-available-state.json # Wait node to become available wait_for_node_state $NID provision_state available GET v1/nodes/$NID/validate > node-validate-response.json PUT v1/nodes/$NID/states/power node-set-power-off.json # Wait node to reach power off state wait_for_node_state $NID power_state "power off" GET v1/nodes/$NID/states > node-get-state-response.json GET v1/nodes > nodes-list-response.json GET v1/nodes/detail > nodes-list-details-response.json GET v1/nodes/$NID > node-show-response.json # Node traits PUT v1/nodes/$NID/traits node-set-traits-request.json GET v1/nodes/$NID/traits > node-traits-list-response.json ############ # ALLOCATIONS POST v1/allocations allocation-create-request.json > allocation-create-response.json AID=$(cat allocation-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$AID" == "" ]; then exit 1 else echo "Allocation created. 
UUID: $AID" fi # Create a failed allocation for listing POST v1/allocations allocation-create-request-2.json # Poor man's wait_for_allocation sleep 1 GET v1/allocations > allocations-list-response.json GET v1/allocations/$AID > allocation-show-response.json GET v1/nodes/$NID/allocation > node-allocation-show-response.json ############ # NODES - MAINTENANCE # Do this after allocation API to be able to create successful allocations PUT v1/nodes/$NID/maintenance node-maintenance-request.json ############ # PORTGROUPS # Before we can create a portgroup, we must # write NODE ID into the create request document body sed -i "s/.*node_uuid.*/ \"node_uuid\": \"$NID\",/" portgroup-create-request.json POST v1/portgroups portgroup-create-request.json > portgroup-create-response.json PGID=$(cat portgroup-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$PGID" == "" ]; then exit 1 else echo "Portgroup created. UUID: $PGID" fi GET v1/portgroups > portgroup-list-response.json GET v1/portgroups/detail > portgroup-list-detail-response.json PATCH v1/portgroups/$PGID portgroup-update-request.json > portgroup-update-response.json # skip GET $PGID because same result as POST # skip DELETE ########### # PORTS # Before we can create a port, we must # write NODE ID and PORTGROUP ID into the create request document body sed -i "s/.*node_uuid.*/ \"node_uuid\": \"$NID\",/" port-create-request.json sed -i "s/.*portgroup_uuid.*/ \"portgroup_uuid\": \"$PGID\",/" port-create-request.json POST v1/ports port-create-request.json > port-create-response.json PID=$(cat port-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$PID" == "" ]; then exit 1 else echo "Port created. 
UUID: $PID" fi GET v1/ports > port-list-response.json GET v1/ports/detail > port-list-detail-response.json PATCH v1/ports/$PID port-update-request.json > port-update-response.json # skip GET $PID because same result as POST # skip DELETE ################ # NODE PORT APIs GET v1/nodes/$NID/ports > node-port-list-response.json GET v1/nodes/$NID/ports/detail > node-port-detail-response.json ##################### # NODE PORTGROUP APIs GET v1/nodes/$NID/portgroups > node-portgroup-list-response.json GET v1/nodes/$NID/portgroups/detail > node-portgroup-detail-response.json ##################### # PORTGROUPS PORT APIs GET v1/portgroups/$PGID/ports > portgroup-port-list-response.json GET v1/portgroups/$PGID/ports/detail > portgroup-port-detail-response.json ############ # LOOKUP API GET v1/lookup?node_uuid=$NID > lookup-node-response.json ##################### # NODES MANAGEMENT API # These need to be done while the node is in maintenance mode, # and the node's driver is "fake", to avoid potential races # with internal processes that lock the Node # this corrects an intentional ommission in some of the samples PATCH v1/nodes/$NID node-update-driver-info-request.json > node-update-driver-info-response.json GET v1/nodes/$NID/management/boot_device/supported > node-get-supported-boot-devices-response.json PUT v1/nodes/$NID/management/boot_device node-set-boot-device.json GET v1/nodes/$NID/management/boot_device > node-get-boot-device-response.json PUT v1/nodes/$NID/management/inject_nmi node-inject-nmi.json ############################# # NODES VIF ATTACH/DETACH API POST v1/nodes/$NID/vifs node-vif-attach-request.json GET v1/nodes/$NID/vifs > node-vif-list-response.json ############# # VOLUME APIs GET v1/volume/ > volume-list-response.json sed -i "s/.*node_uuid.*/ \"node_uuid\": \"$NID\",/" volume-connector-create-request.json POST v1/volume/connectors volume-connector-create-request.json > volume-connector-create-response.json VCID=$(cat volume-connector-create-response.json 
| grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$VCID" == "" ]; then exit 1 else echo "Volume connector created. UUID: $VCID" fi GET v1/volume/connectors > volume-connector-list-response.json GET v1/volume/connectors?detail=True > volume-connector-list-detail-response.json PATCH v1/volume/connectors/$VCID volume-connector-update-request.json > volume-connector-update-response.json sed -i "s/.*node_uuid.*/ \"node_uuid\": \"$NID\",/" volume-target-create-request.json POST v1/volume/targets volume-target-create-request.json > volume-target-create-response.json VTID=$(cat volume-target-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$VTID" == "" ]; then exit 1 else echo "Volume target created. UUID: $VCID" fi GET v1/volume/targets > volume-target-list-response.json GET v1/volume/targets?detail=True > volume-target-list-detail-response.json PATCH v1/volume/targets/$VTID volume-target-update-request.json > volume-target-update-response.json ################## # NODE VOLUME APIs GET v1/nodes/$NID/volume > node-volume-list-response.json GET v1/nodes/$NID/volume/connectors > node-volume-connector-list-response.json GET v1/nodes/$NID/volume/connectors?detail=True > node-volume-connector-detail-response.json GET v1/nodes/$NID/volume/targets > node-volume-target-list-response.json GET v1/nodes/$NID/volume/targets?detail=True > node-volume-target-detail-response.json ################## # DEPLOY TEMPLATES POST v1/deploy_templates deploy-template-create-request.json > deploy-template-create-response.json DTID=$(cat deploy-template-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$DTID" == "" ]; then exit 1 else echo "Deploy template created. 
UUID: $DTID" fi GET v1/deploy_templates > deploy-template-list-response.json GET v1/deploy_templates?detail=True > deploy-template-detail-response.json GET v1/deploy_templates/$DTID > deploy-template-show-response.json PATCH v1/deploy_templates/$DTID deploy-template-update-request.json > deploy-template-update-response.json ##################### # Replace automatically generated UUIDs by already used in documentation sed -i "s/$BID/$DOC_BIOS_UUID/" *.json sed -i "s/$CID/$DOC_CHASSIS_UUID/" *.json sed -i "s/$NID/$DOC_NODE_UUID/" *.json sed -i "s/$DNID/$DOC_DYNAMIC_NODE_UUID/" *.json sed -i "s/$PID/$DOC_PORT_UUID/" *.json sed -i "s/$PGID/$DOC_PORTGROUP_UUID/" *.json sed -i "s/$VCID/$DOC_VOL_CONNECTOR_UUID/" *.json sed -i "s/$VTID/$DOC_VOL_TARGET_UUID/" *.json sed -i "s/$AID/$DOC_ALLOCATION_UUID/" *.json sed -i "s/$DTID/$DOC_DEPLOY_TEMPLATE_UUID/" *.json sed -i "s/$(hostname)/$DOC_IRONIC_CONDUCTOR_HOSTNAME/" *.json sed -i "s/created_at\": \".*\"/created_at\": \"$DOC_CREATED_AT\"/" *.json sed -i "s/updated_at\": \".*\"/updated_at\": \"$DOC_UPDATED_AT\"/" *.json sed -i "s/provision_updated_at\": \".*\"/provision_updated_at\": \"$DOC_PROVISION_UPDATED_AT\"/" *.json ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8666666 ironic-20.1.0/api-ref/source/0000775000175000017500000000000000000000000015723 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-allocation.inc0000664000175000017500000001502200000000000023570 0ustar00zuulzuul00000000000000.. -*- rst -*- ========================= Allocations (allocations) ========================= The Allocation resource represents a request to find and allocate a Node for deployment. .. versionadded:: 1.52 Allocation API was introduced. Create Allocation ================= .. rest_method:: POST /v1/allocations Creates an allocation. 
A Node can be requested by its resource class and traits. Additionally, Nodes can be pre-filtered on the client side, and the resulting list of UUIDs and/or names can be submitted as ``candidate_nodes``. Otherwise all nodes are considered. A Node is suitable for an Allocation if all of the following holds: * ``provision_state`` is ``available`` * ``power_state`` is not ``null`` * ``maintenance`` is ``false`` * ``instance_uuid`` is ``null`` * ``resource_class`` matches requested one * ``traits`` list contains all of the requested ones The allocation process is asynchronous. The new Allocation is returned in the ``allocating`` state, and the process continues in the background. If it succeeds, the ``node_uuid`` field is populated with the Node's UUID, and the Node's ``instance_uuid`` field is set to the Allocation's UUID. If you want to backfill an allocation for an already deployed node, you can pass the UUID or name of this node to ``node``. In this case the allocation is created immediately, bypassing the normal allocation process. Other parameters must be missing or match the provided node. .. versionadded:: 1.52 Allocation API was introduced. .. versionadded:: 1.58 Added support for backfilling allocations. .. versionadded:: 1.60 Introduced the ``owner`` field. Normal response codes: 201 Error response codes: 400, 401, 403, 409, 503 Request ------- .. rest_parameters:: parameters.yaml - resource_class: req_allocation_resource_class - candidate_nodes: req_candidate_nodes - name: req_allocation_name - traits: req_allocation_traits - uuid: req_uuid - extra: req_extra - node: req_allocation_node - owner: owner Request Example --------------- .. literalinclude:: samples/allocation-create-request.json :language: javascript Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - candidate_nodes: candidate_nodes - last_error: allocation_last_error - name: allocation_name - node_uuid: allocation_node - resource_class: allocation_resource_class - state: allocation_state - traits: allocation_traits - owner: owner - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/allocation-create-response.json :language: javascript List Allocations ================ .. rest_method:: GET /v1/allocations Lists all Allocations. .. versionadded:: 1.52 Allocation API was introduced. .. versionadded:: 1.60 Introduced the ``owner`` field. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: parameters.yaml - node: r_allocation_node - resource_class: r_resource_class - state: r_allocation_state - owner: owner - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - candidate_nodes: candidate_nodes - last_error: allocation_last_error - name: allocation_name - node_uuid: allocation_node - resource_class: allocation_resource_class - state: allocation_state - traits: allocation_traits - owner: owner - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/allocations-list-response.json :language: javascript Show Allocation Details ======================= .. rest_method:: GET /v1/allocations/{allocation_id} Shows details for an Allocation. .. versionadded:: 1.52 Allocation API was introduced. .. versionadded:: 1.60 Introduced the ``owner`` field. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: parameters.yaml - fields: fields - allocation_id: allocation_ident Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - candidate_nodes: candidate_nodes - last_error: allocation_last_error - name: allocation_name - node_uuid: allocation_node - resource_class: allocation_resource_class - state: allocation_state - traits: allocation_traits - owner: owner - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/allocation-show-response.json :language: javascript Update Allocation ================= .. rest_method:: PATCH /v1/allocations/{allocation_id} Updates an allocation. Allows updating only name and extra fields. .. versionadded:: 1.57 Allocation update API was introduced. Normal response codes: 200 Error response codes: 400, 401, 403, 404, 409, 503 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 `_. .. rest_parameters:: parameters.yaml - allocation_id: allocation_ident - name: req_allocation_name - extra: req_extra Request Example --------------- .. literalinclude:: samples/allocation-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - candidate_nodes: candidate_nodes - last_error: allocation_last_error - name: allocation_name - node_uuid: allocation_node - resource_class: allocation_resource_class - state: allocation_state - traits: allocation_traits - owner: owner - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/allocation-update-response.json :language: javascript Delete Allocation ================= .. rest_method:: DELETE /v1/allocations/{allocation_id} Deletes an Allocation. If the Allocation has a Node associated, the Node's ``instance_uuid`` is reset. The deletion will fail if the Allocation has a Node assigned and the Node is ``active`` and not in the maintenance mode. .. 
versionadded:: 1.52 Allocation API was introduced. Normal response codes: 204 Error response codes: 400, 401, 403, 404, 409, 503 Request ------- .. rest_parameters:: parameters.yaml - allocation_id: allocation_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-chassis.inc0000664000175000017500000001036400000000000023104 0ustar00zuulzuul00000000000000.. -*- rst -*- ================= Chassis (chassis) ================= The Chassis resource type was originally conceived as a means to group Node resources. Support for this continues to exist in the REST API, however, it is very minimal. The Chassis object does not provide any functionality today aside from a means to list a group of Nodes. Use of this resource is discouraged, and may be deprecated and removed in a future release. List chassis with details ========================= .. rest_method:: GET /v1/chassis/detail Lists all chassis with details. Normal response codes: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - chassis: chassis - description: description - extra: extra Response Example ---------------- .. literalinclude:: samples/chassis-list-details-response.json :language: javascript Show chassis details ==================== .. rest_method:: GET /v1/chassis/{chassis_id} Shows details for a chassis. Normal response codes: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - fields: fields - chassis_id: chassis_ident Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - chassis: chassis - description: description - extra: extra Response Example ---------------- .. 
literalinclude:: samples/chassis-show-response.json :language: javascript Update chassis ============== .. rest_method:: PATCH /v1/chassis/{chassis_id} Updates a chassis. Normal response codes: 200 .. TODO: add error codes Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 <https://tools.ietf.org/html/rfc6902>`_. .. rest_parameters:: parameters.yaml - chassis_id: chassis_ident - description: req_description - extra: req_extra Request Example --------------- .. literalinclude:: samples/chassis-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: description - links: links - extra: extra - created_at: created_at - updated_at: updated_at - chassis: chassis - nodes: nodes - uuid: uuid Response Example ---------------- .. literalinclude:: samples/chassis-update-response.json :language: javascript Delete chassis ============== .. rest_method:: DELETE /v1/chassis/{chassis_id} Deletes a chassis. .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - chassis_id: chassis_ident Create chassis ============== .. rest_method:: POST /v1/chassis Creates a chassis. Normal response codes: 201 Error response codes: 400, 401, 403, 404, 405, 409, 413, 415, 503 Request ------- .. rest_parameters:: parameters.yaml - uuid: req_uuid - description: req_description - extra: req_extra Request Example --------------- .. literalinclude:: samples/chassis-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: description - links: links - extra: extra - created_at: created_at - updated_at: updated_at - nodes: nodes - uuid: uuid Response Example ---------------- .. literalinclude:: samples/chassis-show-response.json :language: javascript List chassis ============ .. rest_method:: GET /v1/chassis Lists all chassis. .. versionadded:: 1.43 Added the ``detail`` boolean request parameter. 
When specified ``True`` this causes the response to include complete details about each chassis. Normal response codes: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key - fields: fields - detail: detail Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: description - links: links - extra: extra - created_at: created_at - updated_at: updated_at - nodes: nodes - uuid: uuid Response Example ---------------- .. literalinclude:: samples/chassis-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-conductors.inc0000664000175000017500000000401200000000000023623 0ustar00zuulzuul00000000000000.. -*- rst -*- ======================= Conductors (conductors) ======================= .. versionadded:: 1.49 Listing Conductor resources is done through the ``conductors`` resource. Conductor resources are read-only, they can not be created, updated, or removed. List Conductors =============== .. rest_method:: GET /v1/conductors Return a list of conductors known by the Bare Metal service. By default, this query will return the hostname, conductor group, and alive status for each Conductor. When ``detail`` is set to True in the query string, will return the full representation of the resource. Normal response code: 200 Request ------- .. rest_parameters:: parameters.yaml - fields: fields_for_conductor - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key - detail: detail Response -------- .. rest_parameters:: parameters.yaml - hostname: hostname - conductor_group: conductor_group - alive: alive - drivers: drivers - links: links **Example Conductor list response:** .. 
literalinclude:: samples/conductor-list-response.json :language: javascript **Example detailed Conductor list response:** .. literalinclude:: samples/conductor-list-details-response.json :language: javascript Show Conductor Details ====================== .. rest_method:: GET /v1/conductors/{hostname} Shows details for a conductor. By default, this will return the full representation of the resource; an optional ``fields`` parameter can be supplied to return only the specified set. Normal response codes: 200 Error codes: 400,403,404,406 Request ------- .. rest_parameters:: parameters.yaml - hostname: hostname_ident - fields: fields_for_conductor Response -------- .. rest_parameters:: parameters.yaml - hostname: hostname - conductor_group: conductor_group - alive: alive - drivers: drivers - links: links **Example JSON representation of a Conductor:** .. literalinclude:: samples/conductor-show-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-deploy-templates.inc0000664000175000017500000001116300000000000024735 0ustar00zuulzuul00000000000000.. -*- rst -*- =================================== Deploy Templates (deploy_templates) =================================== The Deploy Template resource represents a collection of Deploy Steps that may be executed during deployment of a node. A deploy template is matched for a node if at the time of deployment, the template's name matches a trait in the node's ``instance_info.traits``. .. versionadded:: 1.55 Deploy Template API was introduced. Create Deploy Template ====================== .. rest_method:: POST /v1/deploy_templates Creates a deploy template. .. versionadded:: 1.55 Deploy Template API was introduced. Normal response codes: 201 Error response codes: 400, 401, 403, 409 Request ------- .. 
rest_parameters:: parameters.yaml - name: deploy_template_name - steps: deploy_template_steps - uuid: req_uuid - extra: req_extra Request Step ------------ .. rest_parameters:: parameters.yaml - interface: deploy_template_step_interface - step: deploy_template_step_step - args: deploy_template_step_args - priority: deploy_template_step_priority Request Example --------------- .. literalinclude:: samples/deploy-template-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: deploy_template_name - steps: deploy_template_steps - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/deploy-template-create-response.json :language: javascript List Deploy Templates ===================== .. rest_method:: GET /v1/deploy_templates Lists all deploy templates. .. versionadded:: 1.55 Deploy Template API was introduced. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: parameters.yaml - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key - detail: detail Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: deploy_template_name - steps: deploy_template_steps - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- **Example deploy template list response:** .. literalinclude:: samples/deploy-template-list-response.json :language: javascript **Example detailed deploy template list response:** .. literalinclude:: samples/deploy-template-detail-response.json :language: javascript Show Deploy Template Details ============================ .. rest_method:: GET /v1/deploy_templates/{deploy_template_id} Shows details for a deploy template. .. versionadded:: 1.55 Deploy Template API was introduced. 
Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: parameters.yaml - fields: fields - deploy_template_id: deploy_template_ident Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: deploy_template_name - steps: deploy_template_steps - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/deploy-template-show-response.json :language: javascript Update a Deploy Template ======================== .. rest_method:: PATCH /v1/deploy_templates/{deploy_template_id} Update a deploy template. .. versionadded:: 1.55 Deploy Template API was introduced. Normal response code: 200 Error response codes: 400, 401, 403, 404, 409 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 <https://tools.ietf.org/html/rfc6902>`_. Request ------- .. rest_parameters:: parameters.yaml - deploy_template_id: deploy_template_ident .. literalinclude:: samples/deploy-template-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: deploy_template_name - steps: deploy_template_steps - extra: extra - created_at: created_at - updated_at: updated_at - links: links .. literalinclude:: samples/deploy-template-update-response.json :language: javascript Delete Deploy Template ====================== .. rest_method:: DELETE /v1/deploy_templates/{deploy_template_id} Deletes a deploy template. .. versionadded:: 1.55 Deploy Template API was introduced. Normal response codes: 204 Error response codes: 400, 401, 403, 404 Request ------- .. 
rest_parameters:: parameters.yaml - deploy_template_id: deploy_template_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-driver-passthru.inc0000664000175000017500000000534400000000000024613 0ustar00zuulzuul00000000000000.. -*- rst -*- ================================ Driver Vendor Passthru (drivers) ================================ Each driver MAY support vendor-specific extensions, called "passthru" methods. Internally, Ironic's driver API supports flexibly exposing functions via the common HTTP methods GET, PUT, POST, and DELETE. To call a passthru method, the query string must contain the name of the method. For example, if the method name was ``my_passthru_method``, the request would look like ``/vendor_passthru?method=my_passthru_method``. The contents of the HTTP request are forwarded to the driver and validated there. Ironic's REST API provides a means to discover these methods, but does not provide support, testing, or documentation for these endpoints. The Ironic development team does not guarantee any compatibility within these methods between releases, though we encourage driver authors to provide documentation and support for them. Besides the endpoints documented here, all other resources and endpoints under the heading ``vendor_passthru`` should be considered unsupported APIs, and could be changed without warning by the driver authors. List Methods ============ .. rest_method:: GET /v1/drivers/{driver_name}/vendor_passthru/methods Retrieve a list of the available vendor passthru methods for the given Driver. The response will indicate which HTTP method(s) each vendor passthru method allows, whether the method call will be synchronous or asynchronous, and whether the response will include any attachment. Normal response code: 200 Request ------- .. 
rest_parameters:: parameters.yaml - driver_name: driver_ident Response -------- The response BODY is a dictionary whose keys are the method names. The value of each item is itself a dictionary describing how to interact with that method. .. rest_parameters:: parameters.yaml - async: passthru_async - attach: passthru_attach - description: passthru_description - http_methods: passthru_http_methods Call a Method ============= .. rest_method:: METHOD /v1/drivers/{driver_name}/vendor_passthru?method={method_name} The HTTP METHOD may be one of GET, POST, PUT, DELETE, depending on the driver and method. This endpoint passes the request directly to the hardware driver. The HTTP BODY must be parseable JSON, which will be converted to parameters passed to that function. Unparseable JSON, missing parameters, or excess parameters will cause the request to be rejected with an HTTP 400 error. Normal response code: 200 202 Error codes: 400 Request ------- .. rest_parameters:: parameters.yaml - driver_name: driver_ident - method_name: method_name All other parameters should be passed in the BODY. Parameter list varies by method_name. Response -------- Varies. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-drivers.inc0000664000175000017500000002031500000000000023122 0ustar00zuulzuul00000000000000.. -*- rst -*- ================= Drivers (drivers) ================= .. versionchanged:: 1.30 The REST API now also exposes information about *dynamic* drivers. Ironic has two types of drivers: *classic* drivers and *dynamic* drivers. A *classic* driver is a Python object containing all the logic to manage the bare metal nodes enrolled within Ironic. A driver may be loaded within one or more ``ironic-conductor`` services. Each driver contains a pre-determined set of instantiated interfaces. Each type of interface (eg, ``power`` or ``boot``) performs a specific hardware function. 
*Dynamic* drivers are supported via hardware types, which are Python classes enabled via entry points. Unlike *classic* drivers, which have pre-determined interfaces, a hardware type may support multiple types of interfaces. For example, the ``ipmi`` hardware type may support multiple methods for enabling node console. Which interface a node of a particular hardware type uses is determined at runtime. This collection of interfaces is called a *dynamic* driver. For more information about this, see the node API documentation. The REST API exposes the list of drivers and which ``ironic-conductor`` processes have loaded that driver via the Driver resource (``/v1/drivers`` endpoint). This can be useful for operators to validate their configuration in a heterogeneous hardware environment. Each ``ironic-conductor`` process may load one or more drivers, and does not necessarily need to load the same *classic* drivers as another ``ironic-conductor``. Each ``ironic-conductor`` with the same hardware types must have the same hardware interfaces enabled. The REST API also exposes details about each driver, such as what properties must be supplied to a node's ``driver_info`` for that driver to manage hardware. Lastly, some drivers may expose methods through a ``driver_vendor_passthru`` endpoint, allowing one to interact with the driver directly (i.e., without knowing a specific node identifier). For example, this is used by the ironic python agent ramdisk to get the UUID of the node being deployed/cleaned by using MAC addresses of the node's network interfaces the agent has discovered. List drivers ============ .. rest_method:: GET /v1/drivers Lists all drivers. .. versionadded:: 1.77 Added ``fields`` selector to query for particular fields. Normal response codes: 200 Request ------- .. 
rest_parameters:: parameters.yaml - type: driver_type - detail: driver_detail - fields: fields Response Parameters ------------------- The response BODY contains a single key, "drivers", whose value is a list of drivers supported by this Ironic service. .. rest_parameters:: parameters.yaml - drivers: drivers - name: driver_name - hosts: hosts - type: response_driver_type - links: links - properties: driver_property_links .. versionchanged:: 1.30 If the request has the "detail" URL parameter set to true, each driver will also include the following fields. .. rest_parameters:: parameters.yaml - default_bios_interface: default_bios_interface - default_boot_interface: default_boot_interface - default_console_interface: default_console_interface - default_deploy_interface: default_deploy_interface - default_inspect_interface: default_inspect_interface - default_management_interface: default_management_interface - default_network_interface: default_network_interface - default_power_interface: default_power_interface - default_raid_interface: default_raid_interface - default_rescue_interface: default_rescue_interface - default_storage_interface: default_storage_interface - default_vendor_interface: default_vendor_interface - enabled_bios_interfaces: enabled_bios_interfaces - enabled_boot_interfaces: enabled_boot_interfaces - enabled_console_interfaces: enabled_console_interfaces - enabled_deploy_interfaces: enabled_deploy_interfaces - enabled_inspect_interfaces: enabled_inspect_interfaces - enabled_management_interfaces: enabled_management_interfaces - enabled_network_interfaces: enabled_network_interfaces - enabled_power_interfaces: enabled_power_interfaces - enabled_rescue_interfaces: enabled_rescue_interfaces - enabled_raid_interfaces: enabled_raid_interfaces - enabled_storage_interfaces: enabled_storage_interfaces - enabled_vendor_interfaces: enabled_vendor_interfaces Response Example ---------------- Example for a request with detail=false (the default): .. 
literalinclude:: samples/drivers-list-response.json :language: javascript Example for a request with detail=true: .. literalinclude:: samples/drivers-list-detail-response.json :language: javascript Show driver details =================== .. rest_method:: GET /v1/drivers/{driver_name} Shows details for a driver. .. versionadded:: 1.77 Added ``fields`` selector to query for particular fields. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - driver_name: driver_ident - fields: fields Response Parameters ------------------- .. rest_parameters:: parameters.yaml - name: driver_name - hosts: hosts - type: response_driver_type - default_bios_interface: default_bios_interface - default_boot_interface: default_boot_interface - default_console_interface: default_console_interface - default_deploy_interface: default_deploy_interface - default_inspect_interface: default_inspect_interface - default_management_interface: default_management_interface - default_network_interface: default_network_interface - default_power_interface: default_power_interface - default_raid_interface: default_raid_interface - default_rescue_interface: default_rescue_interface - default_storage_interface: default_storage_interface - default_vendor_interface: default_vendor_interface - enabled_bios_interfaces: enabled_bios_interfaces - enabled_boot_interfaces: enabled_boot_interfaces - enabled_console_interfaces: enabled_console_interfaces - enabled_deploy_interfaces: enabled_deploy_interfaces - enabled_inspect_interfaces: enabled_inspect_interfaces - enabled_management_interfaces: enabled_management_interfaces - enabled_network_interfaces: enabled_network_interfaces - enabled_power_interfaces: enabled_power_interfaces - enabled_raid_interfaces: enabled_raid_interfaces - enabled_rescue_interfaces: enabled_rescue_interfaces - enabled_storage_interfaces: enabled_storage_interfaces - enabled_vendor_interfaces: enabled_vendor_interfaces - links: links - properties: 
driver_property_links Response Example ---------------- .. literalinclude:: samples/driver-get-response.json :language: javascript Show driver properties ====================== .. rest_method:: GET /v1/drivers/{driver_name}/properties Shows the required and optional parameters that ``driver_name`` expects to be supplied in the ``driver_info`` field for every Node it manages. To check if all required parameters have been supplied to a Node, you should query the ``/v1/nodes/{node_ident}/validate`` endpoint. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - driver_name: driver_ident Response Example ---------------- The response BODY is a dictionary, but the keys are unique to each driver. The structure of the response is ``property`` : ``description``. The following example is returned from the ``agent_ipmitool`` driver. .. literalinclude:: samples/driver-property-response.json :language: javascript Show driver logical disk properties =================================== .. versionadded:: 1.12 .. rest_method:: GET /v1/drivers/{driver_name}/raid/logical_disk_properties Show the required and optional parameters that ``driver_name`` expects to be supplied in the node's ``raid_config`` field, if a RAID configuration change is requested. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - driver_name: driver_ident Response Example ---------------- The response BODY is a dictionary, but the keys are unique to each driver. The structure of the response is ``property`` : ``description``. The following example is returned from the ``agent_ipmitool`` driver. .. literalinclude:: samples/driver-logical-disk-properties-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-indicators.inc0000664000175000017500000000674600000000000023617 0ustar00zuulzuul00000000000000.. 
-*- rst -*- ===================== Indicators Management ===================== The Indicators management is an extension of the node ReST API endpoint that allows reading and toggling the indicators (e.g. LEDs) on the hardware units. List Indicators =============== .. rest_method:: GET /v1/nodes/{node_ident}/management/indicators .. versionadded:: 1.63 List all available indicator names for each of the hardware components. The components that the ``redfish`` driver may have are: ``system``, ``chassis`` and ``drive``. The actual list depends on the support by the underlying hardware. Normal response code: 200 Error response codes: 404 (if node not found) Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response Parameters ------------------- .. rest_parameters:: parameters.yaml - components: n_components - name: component_name - links: links **Example list of indicators for the node:** .. literalinclude:: samples/node-indicators-list-response.json :language: javascript List Indicators for hardware component ====================================== .. rest_method:: GET /v1/nodes/{node_ident}/management/indicators/{component} .. versionadded:: 1.63 Retrieves indicators for a given hardware component along with their attributes. The components that the ``redfish`` driver may have are: ``system``, ``chassis`` and ``drive``. The actual list depends on the support by the underlying hardware. Normal response code: 200 Error response codes: 404 (if node or component is not found) Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - component: component Response Parameters ------------------- .. rest_parameters:: parameters.yaml - indicators: n_indicators - name: indicator_name - readonly: indicator_readonly - states: indicator_states - links: links **Example list of indicators for a given component of the node:** .. 
literalinclude:: samples/node-indicators-component-list-response.json :language: javascript Get Indicator State =================== .. rest_method:: GET /v1/nodes/{node_ident}/management/indicators/{component}/{ind_ident} .. versionadded:: 1.63 Retrieves the state of a chosen indicator for a given component of the node. The value of the field in the response object represents its state. The values can be one of ``OFF``, ``ON``, ``BLINKING`` or ``UNKNOWN``. Normal response code: 200 Error response codes: 404 (if node, component or indicator is not found) Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - component: component - ind_ident: ind_ident Response Parameters ------------------- .. rest_parameters:: parameters.yaml - state: n_ind_state **Example list of indicators for a given component of the node:** .. literalinclude:: samples/node-indicators-get-state-response.json :language: javascript Set Indicator State =================== .. rest_method:: PUT /v1/nodes/{node_ident}/management/indicators/{component}/{ind_ident} .. versionadded:: 1.63 Set the state of the desired indicators of the component. Normal response code: 204 (No content) Error codes: - 400 (if state is not an accepted value) - 404 (if node, component or indicator is not found) Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - component: component - ind_ident: ind_ident - state: n_ind_state **Set the State of an Indicator** .. literalinclude:: samples/node-indicators-set-state.json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-misc.inc0000664000175000017500000000650200000000000022401 0ustar00zuulzuul00000000000000.. -*- rst -*- ======= Utility ======= This section describes two API endpoints used by the ``ironic-python-agent`` ramdisk as it communicates with the Bare Metal service. 
These were previously exposed as vendor passthrough methods, however, as ironic-python-agent has become the standard ramdisk agent, these methods have been made a part of the official REST API. .. note:: **Operators are reminded not to expose the Bare Metal Service's API to unsecured networks.** Both API endpoints listed below are available to *unauthenticated* clients because the default method for booting the ``ironic-python-agent`` ramdisk does not provide the agent with keystone credentials. .. note:: It is possible to include keys in your ramdisk, or pass keys in via the boot method, if your driver supports it; if that is done, you may configure these endpoints to require authentication by changing the policy rules ``baremetal:driver:ipa_lookup`` and ``baremetal:node:ipa_heartbeat``. In light of that, operators are recommended to ensure that this endpoint is only available on the ``provisioning`` and ``cleaning`` networks. Agent Lookup ============ .. rest_method:: GET /v1/lookup .. versionadded:: 1.22 A ``/lookup`` method is exposed at the root of the REST API. This should only be used by the ``ironic-python-agent`` ramdisk to retrieve required configuration data from the Bare Metal service. By default, ``/v1/lookup`` will only match Nodes that are expected to be running the ``ironic-python-agent`` ramdisk (for instance, because the Bare Metal service has just initiated a deployment). It can not be used as a generic search mechanism, though this behaviour may be changed by setting the ``[api] restrict_lookup = false`` configuration option for the ironic-api service. The query string should include either or both a ``node_uuid`` or an ``addresses`` query parameter. If a matching Node is found, information about that Node shall be returned. Normal response codes: 200 Error response codes: 400 404 Request ------- .. 
rest_parameters:: parameters.yaml - node_uuid: r_node_uuid - addresses: r_addresses Response -------- Returns only the information about the corresponding Node that the ``ironic-python-agent`` process requires. .. rest_parameters:: parameters.yaml - node: agent_node - config: agent_config Response Example ---------------- .. literalinclude:: samples/lookup-node-response.json :language: javascript Agent Heartbeat =============== .. rest_method:: POST /v1/heartbeat/{node_ident} .. versionadded:: 1.22 A ``/heartbeat`` method is exposed at the root of the REST API. This is used as a callback from within the ``ironic-python-agent`` ramdisk, so that an active ramdisk may periodically contact the Bare Metal service and provide the current URL at which to contact the agent. Normal response codes: 202 Error response codes: 400 404 .. versionadded:: 1.36 ``agent_version`` parameter for passing the version of the Ironic Python Agent to Ironic during heartbeat .. versionadded:: 1.62 ``agent_token`` parameter for passing the token of the Ironic Python Agent to Ironic during heartbeat Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - callback_url: callback_url - agent_version: agent_version - agent_token: agent_token ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-node-allocation.inc0000664000175000017500000000327600000000000024523 0ustar00zuulzuul00000000000000.. -*- rst -*- ==================================== Node Allocation (allocations, nodes) ==================================== Given a Node identifier (``uuid`` or ``name``), the API allows to get and delete the associated allocation. .. versionadded:: 1.52 Allocation API was introduced. Show Allocation by Node ======================= .. rest_method:: GET /v1/nodes/{node_ident}/allocation Shows details for an allocation. .. versionadded:: 1.52 Allocation API was introduced. 
Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - candidate_nodes: candidate_nodes - last_error: allocation_last_error - name: allocation_name - node_uuid: allocation_node - resource_class: allocation_resource_class - state: allocation_state - traits: allocation_traits - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/allocation-show-response.json :language: javascript Delete Allocation by Node ========================= .. rest_method:: DELETE /v1/nodes/{node_ident}/allocation Deletes the allocation of this node and resets its ``instance_uuid``. The deletion will fail if the allocation the node is ``active`` and not in the ``maintenance`` mode. .. versionadded:: 1.52 Allocation API was introduced. Normal response codes: 204 Error response codes: 400, 401, 403, 404, 409, 503 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-node-management.inc0000664000175000017500000003327200000000000024511 0ustar00zuulzuul00000000000000.. -*- rst -*- ======================= Node Management (nodes) ======================= Nodes can be managed through several sub-resources. Maintenance mode can be set by the operator, with an optional "reason" stored by Ironic. The supplied ``driver_info`` can be validated to ensure that the selected ``driver`` has all the information it requires to manage the Node. A Node can be rebooted, turned on, or turned off by requesting a change to its power state. This is handled asynchronously and tracked in the ``target_power_state`` field after the request is received. 
A Node's boot device can be changed, and the set of supported boot devices can be queried. A request to change a Node's provision state is also tracked asynchronously; the ``target_provision_state`` represents the requested state. A Node may transition through several discrete ``provision_state`` steps before arriving at the requested state. This can vary between drivers and based on configuration. For example, a Node in the ``available`` state can have an instance deployed to it by requesting the provision state of ``active``. During this transition, the Node's ``provision_state`` will temporarily be set to ``deploying``, and depending on the driver, it may also be ``wait call-back``. When the transitions are complete, ``target_provision_state`` will be set to ``None`` and ``provision_state`` will be set to ``active``. To destroy the instance, request the provision state of ``delete``. During this transition, the Node may or may not go through a ``cleaning`` state, depending on the service configuration. Validate Node =============== .. rest_method:: GET /v1/nodes/{node_ident}/validate Request that Ironic validate whether the Node's ``driver`` has enough information to manage the Node. This polls each ``interface`` on the driver, and returns the status of that ``interface`` as an element in the response. Note that each ``driver`` may require different information to be supplied, and not all drivers support all interfaces. Normal response codes: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- Each element in the response will contain a "result" variable, which will have a value of "true" or "false", indicating that the interface either has or does not have sufficient information to function. A value of ``null`` indicates that the Node's driver does not support that interface. .. 
rest_parameters:: parameters.yaml - bios: v_bios - boot: v_boot - console: v_console - deploy: v_deploy - inspect: v_inspect - management: v_management - network: v_network - power: v_power - raid: v_raid - rescue: v_rescue - storage: v_storage **Example node validation response:** .. literalinclude:: samples/node-validate-response.json :language: javascript Set Maintenance Flag ============================= .. rest_method:: PUT /v1/nodes/{node_ident}/maintenance Request that Ironic set the maintenance flag on the Node. This will disable certain automatic actions that the Node's driver may take, and remove the Node from Nova's available resource pool. Normal response code: 202 .. TODO: Add link to user / operator documentation on the Maintenance flag Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - reason: reason **Example request: mark a node for maintenance:** .. literalinclude:: samples/node-maintenance-request.json Clear Maintenance Flag ============================== .. rest_method:: DELETE /v1/nodes/{node_ident}/maintenance The maintenance flag is unset by sending a DELETE request to this endpoint. If the request is accepted, Ironic will also clear the ``maintenance_reason`` field. Normal response code: 202 .. TODO: Add link to user / operator documentation on the Maintenance flag Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Set Boot Device =============== .. rest_method:: PUT /v1/nodes/{node_ident}/management/boot_device Set the boot device for the given Node, and set it persistently or for one-time boot. The exact behaviour of this depends on the hardware driver. .. note:: In some drivers, eg. the ``*_ipmitool`` family, this method initiates a synchronous call to the hardware management device (BMC). It should be used with caution! This is `a known bug `_. .. note:: Some drivers do not support one-time boot, and always set the boot device persistently. Normal response code: 204 .. 
TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - boot_device: req_boot_device - persistent: req_persistent **Example JSON request body to set boot device:** .. literalinclude:: samples/node-set-boot-device.json Get Boot Device =============== .. rest_method:: GET /v1/nodes/{node_ident}/management/boot_device Get the current boot device for the given Node. .. note:: In some drivers, eg. the ``*_ipmitool`` family, this method initiates a synchronous call to the hardware management device (BMC). It should be used with caution! This is `a known bug `_. Normal response code: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- .. rest_parameters:: parameters.yaml - boot_device: boot_device - persistent: persistent **Example JSON response to get boot device:** .. literalinclude:: samples/node-get-boot-device-response.json Get Supported Boot Devices =========================== .. rest_method:: GET /v1/nodes/{node_ident}/management/boot_device/supported Retrieve the acceptable set of supported boot devices for a specific Node. Normal response code: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- .. rest_parameters:: parameters.yaml - supported_boot_devices: supported_boot_devices **Example response listing supported boot devices:** .. literalinclude:: samples/node-get-supported-boot-devices-response.json Inject NMI (Non-Masking Interrupts) =================================== .. rest_method:: PUT /v1/nodes/{node_ident}/management/inject_nmi .. versionadded:: 1.29 Inject NMI (Non-Masking Interrupts) for the given Node. This feature can be used for hardware diagnostics, and actual support depends on a driver. 
Normal response code: 204 (No content) Error codes: - 400 (Invalid) - 403 (Forbidden) - 404 (NotFound) - 406 (NotAcceptable) - 409 (NodeLocked, ClientError) Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident **Request to inject NMI to a node has to be empty dictionary:** .. literalinclude:: samples/node-inject-nmi.json Node State Summary ================== .. rest_method:: GET /v1/nodes/{node_ident}/states Get a summary of the Node's current power, provision, boot mode, raid, and console status. .. versionadded:: 1.75 Introduced ``boot_mode`` and ``secure_boot`` fields. Normal response code: 200 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- .. rest_parameters:: parameters.yaml - power_state: power_state - target_power_state: target_power_state - provision_state: provision_state - target_provision_state: target_provision_state - provision_updated_at: provision_updated_at - last_error: last_error - console_enabled: console_enabled - raid_config: raid_config - target_raid_config: target_raid_config - boot_mode: boot_mode - secure_boot: secure_boot **Example node state:** .. literalinclude:: samples/node-get-state-response.json Change Node Boot Mode ===================== .. rest_method:: PUT /v1/nodes/{node_ident}/states/boot_mode Request a change to the Node's boot mode. .. versionadded:: 1.76 A change in node's boot mode can be requested. Normal response code: 202 (Accepted) Error codes: - 400 (Invalid, InvalidStateRequested, InvalidParameterValue) - 404 (NotFound) - 409 (Conflict, NodeLocked, ClientError) - 503 (NoFreeConductorWorkers) Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - target: req_target_boot_mode **Example request for UEFI boot:** .. literalinclude:: samples/node-set-boot-mode-uefi.json **Example request for Legacy BIOS boot:** .. literalinclude:: samples/node-set-boot-mode-bios.json Change Node Secure Boot ======================= .. 
rest_method:: PUT /v1/nodes/{node_ident}/states/secure_boot Request a change to the Node's secure boot state. .. versionadded:: 1.76 A change in node's secure boot state can be requested. Normal response code: 202 (Accepted) Error codes: - 400 (Invalid, InvalidStateRequested, InvalidParameterValue) - 404 (NotFound) - 409 (Conflict, NodeLocked, ClientError) - 503 (NoFreeConductorWorkers) Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - target: req_target_secure_boot **Example request to turn off secure boot:** .. literalinclude:: samples/node-set-secure-boot-off.json **Example request to turn on secure boot:** .. literalinclude:: samples/node-set-secure-boot-on.json Change Node Power State ======================= .. rest_method:: PUT /v1/nodes/{node_ident}/states/power Request a change to the Node's power state. Normal response code: 202 (Accepted) .. versionadded:: 1.27 In the request, the ``target`` value can also be one of ``soft power off`` or ``soft rebooting``. .. versionadded:: 1.27 In the request, a ``timeout`` can be specified. Error codes: - 409 (NodeLocked, ClientError) - 400 (Invalid, InvalidStateRequested, InvalidParameterValue) - 406 (NotAcceptable) - 503 (NoFreeConductorWorkers) Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - target: req_target_power_state - timeout: power_timeout **Example request to power off a Node:** .. literalinclude:: samples/node-set-power-off.json **Example request to soft power off a Node with timeout:** .. literalinclude:: samples/node-set-soft-power-off.json Change Node Provision State =========================== .. rest_method:: PUT /v1/nodes/{node_ident}/states/provision Request a change to the Node's provision state. Acceptable target states depend on the Node's current provision state. More detailed documentation of the Ironic State Machine is available `in the developer docs `_. .. 
versionadded:: 1.35 A ``configdrive`` can be provided when setting the node's provision target state to ``rebuild``. .. versionadded:: 1.38 A node can be rescued or unrescued by setting the node's provision target state to ``rescue`` or ``unrescue`` respectively. .. versionadded:: 1.56 A ``configdrive`` can be a JSON object with ``meta_data``, ``network_data`` and ``user_data``. .. versionadded:: 1.59 A ``configdrive`` now accepts ``vendor_data``. .. versionadded:: 1.69 ``deploy_steps`` can be provided when settings the node's provision target state to ``active`` or ``rebuild``. .. versionadded:: 1.70 ``disable_ramdisk`` can be provided to avoid booting the ramdisk during manual cleaning. Normal response code: 202 Error codes: - 409 (NodeLocked, ClientError) - 400 (InvalidState, NodeInMaintenance) - 406 (NotAcceptable) - 503 (NoFreeConductorWorkers) Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - target: req_provision_state - configdrive: configdrive - clean_steps: clean_steps - deploy_steps: deploy_steps - rescue_password: rescue_password - disable_ramdisk: disable_ramdisk **Example request to deploy a Node, using a configdrive served via local webserver:** .. literalinclude:: samples/node-set-active-state.json **Example request to deploy a Node with custom deploy step:** .. literalinclude:: samples/node-set-active-state-deploy-steps.json **Example request to clean a Node, with custom clean step:** .. literalinclude:: samples/node-set-clean-state.json Set RAID Config =============== .. rest_method:: PUT /v1/nodes/{node_ident}/states/raid .. versionadded:: 1.12 Store the supplied configuration on the Node's ``target_raid_config`` property. This property must be structured JSON, and will be validated by the driver upon receipt. The request schema is defined in the `documentation for the RAID feature `_ .. 
note:: Calling this API only stores the requested configuration; it will be applied the next time that the Node transitions through the ``cleaning`` phase. Normal response code: 204 .. TODO: add more description, response code, sample response Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - target_raid_config: req_target_raid_config **Example requested RAID config:** .. literalinclude:: samples/node-set-raid-request.json .. TODO: add more description, response code, sample response Get Console =========== .. rest_method:: GET /v1/nodes/{node_ident}/states/console Get connection information about the console. .. TODO: add more description, response code, sample response Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident .. TODO: add more description, response code, sample response Start/Stop Console =================== .. rest_method:: PUT /v1/nodes/{node_ident}/states/console Start or stop the serial console. .. TODO: add more description, response code, sample response Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - enabled: req_console_enabled ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-node-passthru.inc0000664000175000017500000000466500000000000024252 0ustar00zuulzuul00000000000000.. -*- rst -*- ============================ Node Vendor Passthru (nodes) ============================ Each driver MAY support vendor-specific extensions, called "passthru" methods. Internally, Ironic's driver API supports flexibly exposing functions via the common HTTP methods GET, PUT, POST, and DELETE. To call a passthru method, the query string must contain the name of the method, eg. ``/vendor_passthru?method=reset_bmc``. The contents of the HTTP request are forwarded to the Node's driver and validated there. 
Ironic's REST API provides a means to discover these methods, but does not provide support, testing, or documentation for these endpoints. The Ironic development team does not guarantee any compatibility within these methods between releases, though we encourage driver authors to provide documentation and support for them. Besides the endpoints documented here, all other resources and endpoints under the heading ``vendor_passthru`` should be considered unsupported APIs, and could be changed without warning by the driver authors. List Methods ============ .. rest_method:: GET /v1/nodes/{node_ident}/vendor_passthru/methods Retrieve a list of the available vendor passthru methods for the given Node. The response will indicate which HTTP method(s) each vendor passthru method allows, whether the method call will be synchronous or asynchronous, and whether the response will include any attachment. Normal response code: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- **Example passthru methods listing:** .. literalinclude:: samples/node-vendor-passthru-response.json Call a Method ============= .. rest_method:: METHOD /v1/nodes/{node_ident}/vendor_passthru?method={method_name} The HTTP METHOD may be one of GET, POST, PUT, DELETE, depending on the driver and method. This endpoint passes the request directly to the Node's hardware driver. The HTTP BODY must be parseable JSON, which will be converted to parameters passed to that function. Unparseable JSON, missing parameters, or excess parameters will cause the request to be rejected with an HTTP 400 error. Normal response code: 200 202 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - method_name: method_name All other parameters should be passed in the BODY. Parameter list varies by method_name. 
Response -------- Varies.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-nodes-bios.inc0000664000175000017500000000703100000000000023506 0ustar00zuulzuul00000000000000.. -*- rst -*- ================= Node Bios (nodes) ================= .. versionadded:: 1.40 Given a Node identifier (``uuid`` or ``name``), the API exposes the list of all Bios settings associated with that Node. These endpoints do not allow modification of the Bios settings; that should be done by using ``clean steps``. List all Bios settings by Node ============================== .. rest_method:: GET /v1/nodes/{node_ident}/bios Return a list of Bios settings associated with ``node_ident``. .. versionadded:: 1.74 Added additional fields from bios registry which can be retrieved using ``?detail=True`` (see detailed response below). Added ``fields`` selector to query for particular fields. Normal response code: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields - detail: detail Response -------- .. rest_parameters:: parameters.yaml - bios: bios_settings - created_at: created_at - updated_at: updated_at - links: links - name: bios_setting_name - value: bios_setting_value **Example list of a Node's Bios settings:** .. literalinclude:: samples/node-bios-list-response.json List detailed Bios settings by Node =================================== .. rest_method:: GET /v1/nodes/{node_ident}/bios/?detail=True Return a list of detailed Bios settings associated with ``node_ident``. The detailed list includes the BIOS Attribute Registry information retrieved via Redfish. .. versionadded:: 1.74 Introduced Normal response code: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- .. 
rest_parameters:: parameters.yaml - bios: bios_settings - created_at: created_at - updated_at: updated_at - links: links - name: bios_setting_name - value: bios_setting_value - attribute_type: bios_setting_attribute_type - allowable_values: bios_setting_allowable_values - lower_bound: bios_setting_lower_bound - max_length: bios_setting_max_length - min_length: bios_setting_min_length - read_only: bios_setting_read_only - reset_required: bios_setting_reset_required - unique: bios_setting_unique - upper_bound: bios_setting_upper_bound **Example list of a Node's Bios settings:** .. literalinclude:: samples/node-bios-list-details-response.json Show single Bios setting of a Node ================================== .. rest_method:: GET /v1/nodes/{node_ident}/bios/{bios_setting} Return the content of the specific bios ``bios_setting`` associated with ``node_ident``. .. versionadded:: 1.74 Introduced fields from the BIOS registry. Normal response code: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - bios_setting: bios_setting Response -------- .. rest_parameters:: parameters.yaml - : d_bios_setting - created_at: created_at - updated_at: updated_at - links: links - name: bios_setting_name - value: bios_setting_value - attribute_type: bios_setting_attribute_type - allowable_values: bios_setting_allowable_values - lower_bound: bios_setting_lower_bound - max_length: bios_setting_max_length - min_length: bios_setting_min_length - read_only: bios_setting_read_only - reset_required: bios_setting_reset_required - unique: bios_setting_unique - upper_bound: bios_setting_upper_bound **Example details of a Node's Bios setting details:** .. 
literalinclude:: samples/node-bios-detail-response.json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-nodes-history.inc0000664000175000017500000000320600000000000024253 0ustar00zuulzuul00000000000000.. -*- rst -*- ================ History of nodes ================ .. versionadded:: 1.78 Identifying history of events from nodes is available via API version 1.78 via the ``v1/nodes/{node_ident}/history`` endpoint. In default policy configuration, only "System" scoped users as well as owners who are listed owners of associated nodes can list and retrieve nodes. List history entries for a node =============================== .. rest_method:: GET /v1/nodes/{node_ident}/history Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - detail: detail - marker: marker - limit: limit Response -------- .. rest_parameters:: parameters.yaml - history: n_history **Example list of history events from a node:** .. literalinclude:: samples/node-history-list-response.json :language: javascript Retrieve a specific history entry ================================= .. rest_method:: GET /v1/nodes/{node_ident}/history/{history_event_uuid} Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - history_event_uuid: history_event_ident Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - created_at: created_at - user: history_user_ident - severity: history_severity - event: history_event - event_type: history_event_type - conductor: hostname Deleting history entries for a node =================================== Due to the nature of an immutable history record, records cannot be deleted via the REST API. Records and ultimately expired history records are managed by the conductor. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-nodes-portgroups.inc0000664000175000017500000000426400000000000025003 0ustar00zuulzuul00000000000000.. -*- rst -*- ============================================== Listing Portgroups by Node (nodes, portgroups) ============================================== .. versionadded:: 1.24 Given a Node identifier (``uuid`` or ``name``), the API exposes the list of, and details of, all Portgroups associated with that Node. These endpoints do not allow modification of the Portgroups; that should be done by accessing the Portgroup resources under the ``/v1/portgroups`` endpoint. List Portgroups by Node ======================= .. rest_method:: GET /v1/nodes/{node_ident}/portgroups Return a list of bare metal Portgroups associated with ``node_ident``. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - portgroups: portgroups - uuid: uuid - address: portgroup_address - name: portgroup_name - links: links **Example list of a Node's Portgroups:** .. literalinclude:: samples/node-portgroup-list-response.json List detailed Portgroups by Node ================================ .. rest_method:: GET /v1/nodes/{node_ident}/portgroups/detail Return a detailed list of bare metal Portgroups associated with ``node_ident``. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. 
rest_parameters:: parameters.yaml - portgroups: portgroups - uuid: uuid - address: portgroup_address - name: portgroup_name - node_uuid: node_uuid - standalone_ports_supported: standalone_ports_supported - internal_info: portgroup_internal_info - extra: extra - mode: portgroup_mode - properties: portgroup_properties - ports: pg_ports - created_at: created_at - updated_at: updated_at - links: links **Example details of a Node's Portgroups:** .. literalinclude:: samples/node-portgroup-detail-response.json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-nodes-ports.inc0000664000175000017500000000531600000000000023725 0ustar00zuulzuul00000000000000.. -*- rst -*- ==================================== Listing Ports by Node (nodes, ports) ==================================== Given a Node identifier (``uuid`` or ``name``), the API exposes the list of, and details of, all Ports associated with that Node. These endpoints do not allow modification of the Ports; that should be done by accessing the Port resources under the ``/v1/ports`` endpoint. List Ports by Node =================== .. rest_method:: GET /v1/nodes/{node_ident}/ports Return a list of bare metal Ports associated with ``node_ident``. .. versionadded:: 1.8 Added the ``fields`` request parameter. When specified, this causes the content of the response to include only the specified fields, rather than the default set. .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` fields. .. versionadded:: 1.24 Added the ``portgroup_uuid`` field. .. versionadded:: 1.34 Added the ``physical_network`` field. .. versionadded:: 1.53 Added the ``is_smartnic`` response fields. Normal response code: 200 Error codes: TBD Request ------- .. 
rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - ports: ports - uuid: uuid - address: port_address - links: links **Example list of a Node's Ports:** .. literalinclude:: samples/node-port-list-response.json List detailed Ports by Node =========================== .. rest_method:: GET /v1/nodes/{node_ident}/ports/detail Return a detailed list of bare metal Ports associated with ``node_ident``. .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` fields. .. versionadded:: 1.24 Added the ``portgroup_uuid`` field. .. versionadded:: 1.34 Added the ``physical_network`` field. .. versionadded:: 1.53 Added the ``is_smartnic`` response fields. Normal response code: 200 Error codes: TBD Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - ports: ports - uuid: uuid - address: port_address - node_uuid: node_uuid - local_link_connection: local_link_connection - pxe_enabled: pxe_enabled - physical_network: physical_network - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at - links: links - is_smartnic: is_smartnic **Example details of a Node's Ports:** .. literalinclude:: samples/node-port-detail-response.json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-nodes-traits.inc0000664000175000017500000000507200000000000024063 0ustar00zuulzuul00000000000000.. -*- rst -*- =================== Node Traits (nodes) =================== .. 
versionadded:: 1.37 Node traits are used for scheduling in the Compute service, using qualitative attributes to influence the placement of instances to bare metal compute nodes. Traits specified for a node in the Bare Metal service will be registered on the corresponding resource provider in the Compute service's placement API. Traits can be either standard or custom. Standard traits are listed in the `os_traits library `_. Custom traits must meet the following requirements: * prefixed with ``CUSTOM_`` * contain only upper case characters A to Z, digits 0 to 9, or underscores * no longer than 255 characters in length A bare metal node can have a maximum of 50 traits. List Traits of a Node ===================== .. rest_method:: GET /v1/nodes/{node_ident}/traits Return a list of traits for the node. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- .. rest_parameters:: parameters.yaml - traits: n_traits **Example list of traits for the node:** .. literalinclude:: samples/node-traits-list-response.json :language: javascript Set all traits of a node ======================== .. rest_method:: PUT /v1/nodes/{node_ident}/traits Set all traits of a node, replacing any existing traits. Normal response code: 204 Error codes: 400,401,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - traits: n_traits **Example request to set all traits of a Node:** .. literalinclude:: samples/node-set-traits-request.json Add a trait to a node ===================== .. rest_method:: PUT /v1/nodes/{node_ident}/traits/{trait} Add a single trait to a node. Normal response code: 204 Error codes: 400,401,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - trait: trait Remove all traits from a node ============================= .. 
rest_method:: DELETE /v1/nodes/{node_ident}/traits Normal response code: 204 Error codes: 400,401,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Remove a trait from a node ========================== Remove a single trait from a node. .. rest_method:: DELETE /v1/nodes/{node_ident}/traits/{trait} Normal response code: 204 Error codes: 400,401,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - trait: trait ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-nodes-vifs.inc0000664000175000017500000000332500000000000023523 0ustar00zuulzuul00000000000000.. -*- rst -*- ================================== VIFs (Virtual Interfaces) of nodes ================================== .. versionadded:: 1.28 Attaching and detaching VIFs (Virtual Interfaces) to or from a node are done via the ``v1/nodes/{node_ident}/vifs`` endpoint. Attaching a VIF to a node means that a VIF will be mapped to a free port or port group of the specified node. List attached VIFs of a Node ============================ .. rest_method:: GET /v1/nodes/{node_ident}/vifs Return a list of VIFs that are attached to the node. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- .. rest_parameters:: parameters.yaml - vifs: n_vifs - id: node_vif_ident **Example list of VIFs that are attached to the node:** .. literalinclude:: samples/node-vif-list-response.json :language: javascript Attach a VIF to a node ====================== .. rest_method:: POST /v1/nodes/{node_ident}/vifs Attach a VIF to a node. Normal response code: 204 Error codes: 400,401,403,404,409 Request ------- .. 
rest_parameters:: parameters.yaml - id: req_node_vif_ident - port_uuid: req_node_vif_port_uuid - portgroup_uuid: req_node_vif_portgroup_uuid - node_ident: node_ident **Example request to attach a VIF to a Node:** .. literalinclude:: samples/node-vif-attach-request.json Detach VIF from a node ====================== .. rest_method:: DELETE /v1/nodes/{node_ident}/vifs/{node_vif_ident} Detach VIF from a Node. Normal response code: 204 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - node_vif_ident: req_node_vif_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-nodes-volume.inc0000664000175000017500000000613000000000000024060 0ustar00zuulzuul00000000000000.. -*- rst -*- ================================================ Listing Volume resources by Node (nodes, volume) ================================================ .. versionadded:: 1.32 Given a Node identifier (``uuid`` or ``name``), the API exposes the list of, and details of, all Volume resources associated with that Node. These endpoints do not allow modification of the Volume connectors and Volume targets; that should be done by accessing the Volume resources under the ``/v1/volume/connectors`` and ``/v1/volume/targets`` endpoint. List Links of Volume Resources by Node ====================================== .. rest_method:: GET /v1/nodes/{node_ident}/volume Return a list of links to all volume resources associated with ``node_ident``. Normal response code: 200 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- .. rest_parameters:: parameters.yaml - connectors: volume_connectors_link - targets: volume_targets_link - links: links **Example Volume list response:** .. literalinclude:: samples/node-volume-list-response.json :language: javascript List Volume connectors by Node ============================== .. 
rest_method:: GET /v1/nodes/{node_ident}/volume/connectors Return a list of bare metal Volume connectors associated with ``node_ident``. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - connectors: volume_connectors - uuid: uuid - type: volume_connector_type - connector_id: volume_connector_connector_id - node_uuid: node_uuid - extra: extra - links: links - next: next **Example list of Node's Volume connectors:** .. literalinclude:: samples/node-volume-connector-list-response.json **Example detailed list of Node's Volume connectors:** .. literalinclude:: samples/node-volume-connector-detail-response.json List Volume targets by Node =========================== .. rest_method:: GET /v1/nodes/{node_ident}/volume/targets Return a list of bare metal Volume targets associated with ``node_ident``. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - targets: volume_targets - uuid: uuid - volume_type: volume_target_volume_type - properties: volume_target_properties - boot_index: volume_target_boot_index - volume_id: volume_target_volume_id - extra: extra - node_uuid: node_uuid - links: links - next: next **Example list of Node's Volume targets:** .. literalinclude:: samples/node-volume-target-list-response.json **Example detailed list of Node's Volume targets:** .. 
literalinclude:: samples/node-volume-target-detail-response.json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-nodes.inc0000664000175000017500000004731600000000000022566 0ustar00zuulzuul00000000000000.. -*- rst -*- ============= Nodes (nodes) ============= List, Searching, Creating, Updating, and Deleting of bare metal Node resources are done through the ``/v1/nodes`` resource. There are also several sub-resources, which allow further actions to be performed on a bare metal Node. A Node is the canonical representation of a discretely allocatable server, capable of running an Operating System. Each Node must be associated with a ``driver``; this informs Ironic what protocol to use when managing the Node. .. versionchanged:: 1.6 A Node may be referenced both by its UUID and by a unique human-readable "name" in any request. Throughout this documentation, this is referred to as the ``node_ident``. Responses clearly indicate whether a given field is a ``uuid`` or a ``name``. Depending on the Roles assigned to the authenticated OpenStack User, and upon the configuration of the Bare Metal service, API responses may change. For example, the default value of the "show_password" settings cause all API responses to mask passwords within ``driver_info`` with the literal string "\*\*\*\*\*\*". Create Node =========== .. rest_method:: POST /v1/nodes Creates a new Node resource. This method requires that a ``driver`` be supplied in the request body. Most subresources of a Node (eg, ``properties``, ``driver_info``, etc) may be supplied when the Node is created, or the resource may be updated later. .. versionadded:: 1.2 Added ``available`` state name, which replaced ``None`` as the status of an unprovisioned Node. All clients should be updated to use the new ``available`` state name. 
Nodes in the ``available`` state may have workloads provisioned on them; they are "available" for use. .. versionadded:: 1.5 Introduced the ``name`` field. .. versionadded:: 1.7 Introduced the ``clean_step`` field. .. versionchanged:: 1.11 The default initial state of newly-created Nodes changed from ``available`` to ``enroll``. This provides users a workflow to verify the manageability of a Node and perform necessary operational functions (eg, building a RAID array) before making the Node available for provisioning. .. versionadded:: 1.12 Introduced support for the ``raid_config`` and ``target_raid_config`` fields. .. versionadded:: 1.20 Introduced the ``network_interface`` field. If this field is not supplied when creating the Node, the default value will be used. .. versionadded:: 1.21 Introduced the ``resource_class`` field, which may be used to store a resource designation for the proposed OpenStack Placement Engine. This field has no effect within Ironic. .. versionadded:: 1.31 Introduced the ``boot_interface``, ``deploy_interface``, ``management_interface``, ``power_interface``, ``inspect_interface``, ``console_interface``, ``vendor_interface`` and ``raid_interface`` fields. If any of these fields are not supplied when creating the Node, their default value will be used. .. versionchanged:: 1.31 If the specified driver is a dynamic driver, then all the interfaces (boot_interface, deploy_interface, etc.) will be set to the default interface for that driver unless another enabled interface is specified in the creation request. .. versionadded:: 1.33 Introduced the ``storage_interface`` field. If this field is not supplied when creating the Node, the default value will be used. .. versionadded:: 1.38 Introduced the ``rescue_interface`` field. If this field is not supplied when creating the Node, the default value will be used. .. versionadded:: 1.44 Introduced the ``deploy_step`` field. .. versionadded:: 1.46 Introduced the ``conductor_group`` field. ..
versionadded:: 1.50 Introduced the ``owner`` field. .. versionadded:: 1.51 Introduced the ``description`` field. .. versionadded:: 1.52 Introduced the ``allocation_uuid`` field. .. versionadded:: 1.65 Introduced the ``lessee`` field. Normal response codes: 201 Error codes: 400,403,406 Request ------- .. rest_parameters:: parameters.yaml - boot_interface: req_boot_interface - conductor_group: req_conductor_group - console_interface: req_console_interface - deploy_interface: req_deploy_interface - driver_info: req_driver_info - driver: req_driver_name - extra: req_extra - inspect_interface: req_inspect_interface - management_interface: req_management_interface - name: node_name - network_interface: req_network_interface - power_interface: req_power_interface - properties: req_properties - raid_interface: req_raid_interface - rescue_interface: req_rescue_interface - resource_class: req_resource_class_create - storage_interface: req_storage_interface - uuid: req_uuid - vendor_interface: req_vendor_interface - owner: owner - description: req_n_description - lessee: lessee - automated_clean: req_automated_clean - bios_interface: req_bios_interface - chassis_uuid: req_chassis_uuid - instance_info: req_instance_info - instance_uuid: req_instance_uuid - maintenance: req_maintenance - maintenance_reason: maintenance_reason - network_data: network_data - protected: protected - protected_reason: protected_reason - retired: retired - retired_reason: retired_reason **Example Node creation request with a dynamic driver:** .. literalinclude:: samples/node-create-request-dynamic.json :language: javascript Response -------- The response will contain the complete Node record, with the supplied data, and any defaults added for non-specified fields. Most fields default to "null" or "". The list and example below are representative of the response as of API microversion 1.48. .. 
rest_parameters:: parameters.yaml - uuid: uuid - name: node_name - power_state: power_state - target_power_state: target_power_state - provision_state: provision_state - target_provision_state: target_provision_state - maintenance: maintenance - maintenance_reason: maintenance_reason - fault: fault - last_error: last_error - reservation: reservation - driver: driver_name - driver_info: driver_info - driver_internal_info: driver_internal_info - properties: n_properties - instance_info: instance_info - instance_uuid: instance_uuid - chassis_uuid: chassis_uuid - extra: extra - console_enabled: console_enabled - raid_config: raid_config - target_raid_config: target_raid_config - clean_step: clean_step - deploy_step: deploy_step - links: links - ports: n_ports - portgroups: n_portgroups - states: n_states - resource_class: resource_class - boot_interface: boot_interface - console_interface: console_interface - deploy_interface: deploy_interface - inspect_interface: inspect_interface - management_interface: management_interface - network_interface: network_interface - power_interface: power_interface - raid_interface: raid_interface - rescue_interface: rescue_interface - storage_interface: storage_interface - traits: n_traits - vendor_interface: vendor_interface - volume: n_volume - conductor_group: conductor_group - protected: protected - protected_reason: protected_reason - conductor: conductor - owner: owner - lessee: lessee - description: n_description - allocation_uuid: allocation_uuid - automated_clean: automated_clean - bios_interface: bios_interface - network_data: network_data - retired: retired - retired_reason: retired_reason **Example JSON representation of a Node:** .. literalinclude:: samples/node-create-response.json :language: javascript List Nodes ========== .. rest_method:: GET /v1/nodes Return a list of bare metal Nodes, with some useful information about each Node. Some filtering is possible by passing in flags with the request. 
By default, this query will return the name, uuid, instance uuid, power state, provision state, and maintenance setting for each Node. .. versionadded:: 1.8 Added the ``fields`` Request parameter. When specified, this causes the content of the Response to include only the specified fields, rather than the default set. .. versionadded:: 1.9 Added the ``provision_state`` Request parameter, allowing the list of returned Nodes to be filtered by their current state. .. versionadded:: 1.16 Added the ``driver`` Request parameter, allowing the list of returned Nodes to be filtered by their driver name. .. versionadded:: 1.21 Added the ``resource_class`` Request parameter, allowing the list of returned Nodes to be filtered by this field. .. versionadded:: 1.42 Introduced the ``fault`` field. .. versionadded:: 1.43 Added the ``detail`` boolean request parameter. When specified ``True`` this causes the response to include complete details about each node, as shown in the "List Nodes Detailed" section below. .. versionadded:: 1.46 Introduced the ``conductor_group`` request parameter, to allow filtering the list of returned nodes by conductor group. .. versionadded:: 1.49 Introduced the ``conductor`` request parameter, to allow filtering the list of returned nodes by conductor. .. versionadded:: 1.50 Introduced the ``owner`` field. .. versionadded:: 1.51 Introduced the ``description`` field. .. versionadded:: 1.65 Introduced the ``lessee`` field. Normal response codes: 200 Error codes: 400,403,406 Request ------- .. 
rest_parameters:: parameters.yaml - instance_uuid: r_instance_uuid - maintenance: r_maintenance - associated: r_associated - provision_state: r_provision_state - driver: r_driver - resource_class: r_resource_class - conductor_group: r_conductor_group - conductor: r_conductor - fault: r_fault - owner: owner - lessee: lessee - description_contains: r_description_contains - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key - detail: detail Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: node_name - instance_uuid: instance_uuid - power_state: power_state - provision_state: provision_state - maintenance: maintenance - links: links **Example list of Nodes:** .. literalinclude:: samples/nodes-list-response.json :language: javascript List Nodes Detailed =================== .. rest_method:: GET /v1/nodes/detail .. deprecated:: Use ?detail=True query string instead. Return a list of bare metal Nodes with complete details. Some filtering is possible by passing in flags with the request. This method is particularly useful to locate the Node associated to a given Nova instance, eg. with a request to ``v1/nodes/detail?instance_uuid={NOVA INSTANCE UUID}`` .. versionadded:: 1.37 Introduced the ``traits`` field. .. versionadded:: 1.38 Introduced the ``rescue_interface`` field. .. versionadded:: 1.42 Introduced the ``fault`` field. .. versionadded:: 1.46 Introduced the ``conductor_group`` field. .. versionadded:: 1.48 Introduced the ``protected`` and ``protected_reason`` fields. .. versionadded:: 1.49 Introduced the ``conductor`` request parameter and ``conductor`` field. .. versionadded:: 1.50 Introduced the ``owner`` field. .. versionadded:: 1.51 Introduced the ``description`` field. .. versionadded:: 1.52 Introduced the ``allocation_uuid`` field. .. versionadded:: 1.65 Introduced the ``lessee`` field. Normal response codes: 200 Error codes: 400,403,406 Request ------- .. 
rest_parameters:: parameters.yaml - instance_uuid: r_instance_uuid - maintenance: r_maintenance - fault: r_fault - associated: r_associated - provision_state: r_provision_state - driver: r_driver - resource_class: r_resource_class - conductor_group: r_conductor_group - conductor: r_conductor - owner: owner - lessee: lessee - description_contains: r_description_contains - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: node_name - power_state: power_state - target_power_state: target_power_state - provision_state: provision_state - target_provision_state: target_provision_state - maintenance: maintenance - maintenance_reason: maintenance_reason - fault: fault - last_error: last_error - reservation: reservation - driver: driver_name - driver_info: driver_info - driver_internal_info: driver_internal_info - properties: n_properties - instance_info: instance_info - instance_uuid: instance_uuid - chassis_uuid: chassis_uuid - extra: extra - console_enabled: console_enabled - raid_config: raid_config - target_raid_config: target_raid_config - clean_step: clean_step - deploy_step: deploy_step - links: links - ports: n_ports - portgroups: n_portgroups - states: n_states - resource_class: resource_class - bios_interface: bios_interface - boot_interface: boot_interface - console_interface: console_interface - deploy_interface: deploy_interface - inspect_interface: inspect_interface - management_interface: management_interface - network_interface: network_interface - power_interface: power_interface - raid_interface: raid_interface - rescue_interface: rescue_interface - storage_interface: storage_interface - traits: n_traits - vendor_interface: vendor_interface - volume: n_volume - conductor_group: conductor_group - protected: protected - protected_reason: protected_reason - owner: owner - lessee: lessee - description: n_description - conductor: conductor - allocation_uuid: 
allocation_uuid - retired: retired - retired_reason: retired_reason - network_data: network_data **Example detailed list of Nodes:** .. literalinclude:: samples/nodes-list-details-response.json :language: javascript Show Node Details ================= .. rest_method:: GET /v1/nodes/{node_ident} Shows details for a node. By default, this will return the full representation of the resource; an optional ``fields`` parameter can be supplied to return only the specified set. .. versionadded:: 1.37 Introduced the ``traits`` field. .. versionadded:: 1.38 Introduced the ``rescue_interface`` field. .. versionadded:: 1.42 Introduced the ``fault`` field. .. versionadded:: 1.46 Introduced the ``conductor_group`` field. .. versionadded:: 1.48 Introduced the ``protected`` and ``protected_reason`` fields. .. versionadded:: 1.49 Introduced the ``conductor`` field .. versionadded:: 1.50 Introduced the ``owner`` field. .. versionadded:: 1.51 Introduced the ``description`` field. .. versionadded:: 1.52 Introduced the ``allocation_uuid`` field. .. versionadded:: 1.61 Introduced the ``retired`` and ``retired_reason`` fields. .. versionadded:: 1.65 Introduced the ``lessee`` field. .. versionadded:: 1.66 Introduced the ``network_data`` field. Normal response codes: 200 Error codes: 400,403,404,406 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields Response -------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - name: node_name - power_state: power_state - target_power_state: target_power_state - provision_state: provision_state - target_provision_state: target_provision_state - maintenance: maintenance - maintenance_reason: maintenance_reason - fault: fault - last_error: last_error - reservation: reservation - driver: driver_name - driver_info: driver_info - driver_internal_info: driver_internal_info - properties: n_properties - instance_info: instance_info - instance_uuid: instance_uuid - chassis_uuid: chassis_uuid - extra: extra - console_enabled: console_enabled - raid_config: raid_config - target_raid_config: target_raid_config - clean_step: clean_step - deploy_step: deploy_step - links: links - ports: n_ports - portgroups: n_portgroups - states: n_states - resource_class: resource_class - bios_interface: bios_interface - boot_interface: boot_interface - console_interface: console_interface - deploy_interface: deploy_interface - inspect_interface: inspect_interface - management_interface: management_interface - network_interface: network_interface - power_interface: power_interface - raid_interface: raid_interface - rescue_interface: rescue_interface - storage_interface: storage_interface - traits: n_traits - vendor_interface: vendor_interface - volume: n_volume - conductor_group: conductor_group - protected: protected - protected_reason: protected_reason - owner: owner - lessee: lessee - description: n_description - conductor: conductor - allocation_uuid: allocation_uuid - network_data: network_data **Example JSON representation of a Node:** .. literalinclude:: samples/node-show-response.json :language: javascript Update Node =========== .. rest_method:: PATCH /v1/nodes/{node_ident} Updates the information stored about a Node. Note that this endpoint can not be used to request state changes, which are managed through sub-resources. .. versionadded:: 1.25 Introduced the ability to unset a node's chassis UUID. .. 
versionadded:: 1.51 Introduced the ability to set/unset a node's description. Normal response codes: 200 Error codes: 400,403,404,406,409 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 <https://tools.ietf.org/html/rfc6902>`_. .. rest_parameters:: parameters.yaml - node_ident: node_ident **Example PATCH document updating Node driver_info:** .. literalinclude:: samples/node-update-driver-info-request.json Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: node_name - power_state: power_state - target_power_state: target_power_state - provision_state: provision_state - target_provision_state: target_provision_state - maintenance: maintenance - maintenance_reason: maintenance_reason - fault: fault - last_error: last_error - reservation: reservation - driver: driver_name - driver_info: driver_info - driver_internal_info: driver_internal_info - properties: n_properties - instance_info: instance_info - instance_uuid: instance_uuid - chassis_uuid: chassis_uuid - extra: extra - console_enabled: console_enabled - raid_config: raid_config - target_raid_config: target_raid_config - clean_step: clean_step - deploy_step: deploy_step - links: links - ports: n_ports - portgroups: n_portgroups - states: n_states - resource_class: resource_class - boot_interface: boot_interface - console_interface: console_interface - deploy_interface: deploy_interface - inspect_interface: inspect_interface - management_interface: management_interface - network_interface: network_interface - power_interface: power_interface - raid_interface: raid_interface - rescue_interface: rescue_interface - storage_interface: storage_interface - traits: n_traits - vendor_interface: vendor_interface - volume: n_volume - conductor_group: conductor_group - protected: protected - protected_reason: protected_reason - owner: owner - lessee: lessee - description: n_description - conductor: conductor - allocation_uuid: allocation_uuid - network_data: network_data **Example JSON
representation of a Node:** .. literalinclude:: samples/node-update-driver-info-response.json :language: javascript Delete Node =========== .. rest_method:: DELETE /v1/nodes/{node_ident} Deletes a node. Normal response codes: 204 Error codes: 400,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-portgroups-ports.inc0000664000175000017500000000506300000000000025040 0ustar00zuulzuul00000000000000.. -*- rst -*- ============================================= Listing Ports by Portgroup (portgroup, ports) ============================================= .. versionadded:: 1.24 Given a Portgroup identifier (``uuid`` or ``name``), the API exposes the list of, and details of, all Ports associated with that Portgroup. These endpoints do not allow modification of the Ports; that should be done by accessing the Port resources under the ``/v1/ports`` endpoint. List Ports by Portgroup ======================= .. rest_method:: GET /v1/portgroups/{portgroup_ident}/ports Return a list of bare metal Ports associated with ``portgroup_ident``. When specified, the ``fields`` request parameter causes the content of the Response to include only the specified fields, rather than the default set. .. versionadded:: 1.34 Added the ``physical_network`` field. .. versionadded:: 1.53 Added the ``is_smartnic`` response fields. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - portgroup_ident: portgroup_ident - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - ports: ports - uuid: uuid - address: port_address - links: links **Example list of a Portgroup's Ports:** .. 
literalinclude:: samples/portgroup-port-list-response.json List detailed Ports by Portgroup ================================ .. rest_method:: GET /v1/portgroups/{portgroup_ident}/ports/detail Return a detailed list of bare metal Ports associated with ``portgroup_ident``. .. versionadded:: 1.34 Added the ``physical_network`` field. .. versionadded:: 1.53 Added the ``is_smartnic`` response fields. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - portgroup_ident: portgroup_ident - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - ports: ports - uuid: uuid - address: port_address - node_uuid: node_uuid - local_link_connection: local_link_connection - pxe_enabled: pxe_enabled - physical_network: physical_network - internal_info: internal_info - extra: extra - portgroup_uuid: portgroup_uuid - created_at: created_at - updated_at: updated_at - links: links - is_smartnic: is_smartnic **Example details of a Portgroup's Ports:** .. literalinclude:: samples/portgroup-port-detail-response.json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-portgroups.inc0000664000175000017500000001450200000000000023671 0ustar00zuulzuul00000000000000.. -*- rst -*- ======================= Portgroups (portgroups) ======================= .. versionadded:: 1.23 Ports can be combined into portgroups to support static link aggregation group (LAG) or multi-chassis link aggregation group (MLAG) configurations. Listing, Searching, Creating, Updating, and Deleting of bare metal Portgroup resources are done through the ``v1/portgroups`` resource. All Portgroups must be associated with a Node when created. 
This association can be changed, though the request may be rejected if either the current or destination Node are in a transitive state (for example, in the process of deploying) or are in a state that would be non-deterministically affected by such a change (for example, there is an active user instance on the Node). List Portgroups =============== .. rest_method:: GET /v1/portgroups Return a list of bare metal Portgroups. Some filtering is possible by passing in some parameters with the request. By default, this query will return the UUID, name and address for each Portgroup. .. versionadded:: 1.43 Added the ``detail`` boolean request parameter. When specified ``True`` this causes the response to include complete details about each portgroup. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node: r_portgroup_node_ident - address: r_portgroup_address - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key - detail: detail Response -------- .. rest_parameters:: parameters.yaml - portgroups: portgroups - uuid: uuid - address: portgroup_address - name: portgroup_name - links: links **Example Portgroup list response:** .. literalinclude:: samples/portgroup-list-response.json :language: javascript Create Portgroup ================ .. rest_method:: POST /v1/portgroups Creates a new Portgroup resource. This method requires a Node UUID and the physical hardware address for the Portgroup (MAC address in most cases). Normal response code: 201 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_uuid: req_node_uuid - address: req_portgroup_address - name: req_portgroup_name - mode: req_portgroup_mode - standalone_ports_supported: req_standalone_ports_supported - properties: req_portgroup_properties - extra: req_extra - uuid: req_uuid **Example Portgroup creation request:** .. 
literalinclude:: samples/portgroup-create-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: portgroup_name - address: portgroup_address - node_uuid: node_uuid - standalone_ports_supported: standalone_ports_supported - internal_info: portgroup_internal_info - extra: extra - mode: portgroup_mode - properties: portgroup_properties - created_at: created_at - updated_at: updated_at - links: links - ports: pg_ports **Example Portgroup creation response:** .. literalinclude:: samples/portgroup-create-response.json :language: javascript List Detailed Portgroups ======================== .. rest_method:: GET /v1/portgroups/detail Return a list of bare metal Portgroups, with detailed information. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node: r_portgroup_node_ident - address: r_portgroup_address - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - portgroups: portgroups - name: portgroup_name - uuid: uuid - address: portgroup_address - node_uuid: node_uuid - standalone_ports_supported: standalone_ports_supported - internal_info: portgroup_internal_info - extra: extra - mode: portgroup_mode - properties: portgroup_properties - created_at: created_at - updated_at: updated_at - links: links - ports: pg_ports **Example detailed Portgroup list response:** .. literalinclude:: samples/portgroup-list-detail-response.json :language: javascript Show Portgroup Details ====================== .. rest_method:: GET /v1/portgroups/{portgroup_ident} Show details for the given Portgroup. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - portgroup_ident: portgroup_ident - fields: fields Response -------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - name: portgroup_name - address: portgroup_address - node_uuid: node_uuid - standalone_ports_supported: standalone_ports_supported - internal_info: portgroup_internal_info - extra: extra - mode: portgroup_mode - properties: portgroup_properties - created_at: created_at - updated_at: updated_at - links: links - ports: pg_ports **Example Portgroup details:** .. literalinclude:: samples/portgroup-create-response.json :language: javascript Update a Portgroup ================== .. rest_method:: PATCH /v1/portgroups/{portgroup_ident} Update a Portgroup. Normal response code: 200 Error codes: 400,401,403,404 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 `_. .. rest_parameters:: parameters.yaml - portgroup_ident: portgroup_ident **Example Portgroup update request:** .. literalinclude:: samples/portgroup-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: portgroup_name - address: portgroup_address - node_uuid: node_uuid - standalone_ports_supported: standalone_ports_supported - internal_info: portgroup_internal_info - extra: extra - mode: portgroup_mode - properties: portgroup_properties - created_at: created_at - updated_at: updated_at - links: links - ports: pg_ports **Example Portgroup update response:** .. literalinclude:: samples/portgroup-update-response.json :language: javascript Delete Portgroup ================ .. rest_method:: DELETE /v1/portgroups/{portgroup_ident} Delete a Portgroup. Normal response code: 204 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - portgroup_ident: portgroup_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-ports.inc0000664000175000017500000002000300000000000022605 0ustar00zuulzuul00000000000000.. 
-*- rst -*- ============= Ports (ports) ============= Listing, Searching, Creating, Updating, and Deleting of bare metal Port resources are done through the ``ports`` resource. All Ports must be associated to a Node when created. This association can be changed, though the request may be rejected if either the current or destination Node are in a transitive state (e.g., in the process of deploying) or are in a state that would be non-deterministically affected by such a change (e.g., there is an active user instance on the Node). List Ports ========== .. rest_method:: GET /v1/ports Return a list of bare metal Ports. Some filtering is possible by passing in some parameters with the request. By default, this query will return the uuid and address for each Port. .. versionadded:: 1.6 Added the ``node`` query parameter. If both ``node_uuid`` and ``node`` are specified in the request, ``node_uuid`` will be used to filter results. .. versionadded:: 1.8 Added the ``fields`` request parameter. When specified, this causes the content of the response to include only the specified fields, rather than the default set. .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` fields. .. versionadded:: 1.24 Added the ``portgroup_uuid`` field. .. versionadded:: 1.34 Added the ``physical_network`` field. .. versionadded:: 1.43 Added the ``detail`` boolean request parameter. When specified ``True`` this causes the response to include complete details about each port. .. versionadded:: 1.53 Added the ``is_smartnic`` field. Normal response code: 200 Request ------- .. rest_parameters:: parameters.yaml - node: r_port_node_ident - node_uuid: r_port_node_uuid - portgroup: r_port_portgroup_ident - address: r_port_address - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key - detail: detail Response -------- .. 
rest_parameters:: parameters.yaml - ports: ports - uuid: uuid - address: port_address - links: links **Example Port list response:** .. literalinclude:: samples/port-list-response.json :language: javascript Create Port =========== .. rest_method:: POST /v1/ports Creates a new Port resource. This method requires a Node UUID and the physical hardware address for the Port (MAC address in most cases). .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` request and response fields. .. versionadded:: 1.24 Added the ``portgroup_uuid`` request and response fields. .. versionadded:: 1.34 Added the ``physical_network`` request and response fields. .. versionadded:: 1.53 Added the ``is_smartnic`` request and response fields. Normal response code: 201 Request ------- .. rest_parameters:: parameters.yaml - node_uuid: req_node_uuid - address: req_port_address - portgroup_uuid: req_portgroup_uuid - local_link_connection: req_local_link_connection - pxe_enabled: req_pxe_enabled - physical_network: req_physical_network - extra: req_extra - is_smartnic: req_is_smartnic - uuid: req_uuid **Example Port creation request:** .. literalinclude:: samples/port-create-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - address: port_address - node_uuid: node_uuid - portgroup_uuid: portgroup_uuid - local_link_connection: local_link_connection - pxe_enabled: pxe_enabled - physical_network: physical_network - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at - links: links - is_smartnic: is_smartnic **Example Port creation response:** .. literalinclude:: samples/port-create-response.json :language: javascript List Detailed Ports =================== .. rest_method:: GET /v1/ports/detail Return a list of bare metal Ports, with detailed information. .. versionadded:: 1.6 Added the ``node`` query parameter. 
If both ``node_uuid`` and ``node`` are specified in the request, ``node_uuid`` will be used to filter results. .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` response fields. .. versionadded:: 1.24 Added the ``portgroup`` query parameter and ``portgroup_uuid`` response field. .. versionadded:: 1.34 Added the ``physical_network`` response field. .. versionadded:: 1.53 Added the ``is_smartnic`` response fields. Normal response code: 200 Request ------- .. rest_parameters:: parameters.yaml - node: r_port_node_ident - node_uuid: r_port_node_uuid - portgroup: r_port_portgroup_ident - address: r_port_address - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - ports: ports - uuid: uuid - address: port_address - node_uuid: node_uuid - portgroup_uuid: portgroup_uuid - local_link_connection: local_link_connection - pxe_enabled: pxe_enabled - physical_network: physical_network - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at - links: links - is_smartnic: is_smartnic **Example detailed Port list response:** .. literalinclude:: samples/port-list-detail-response.json :language: javascript Show Port Details ================= .. rest_method:: GET /v1/ports/{port_id} Show details for the given Port. .. versionadded:: 1.8 Added the ``fields`` request parameter. When specified, this causes the content of the response to include only the specified fields, rather than the default set. .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` response fields. .. versionadded:: 1.24 Added the ``portgroup_uuid`` response field. .. versionadded:: 1.34 Added the ``physical_network`` response field. .. versionadded:: 1.53 Added the ``is_smartnic`` response fields. Normal response code: 200 Request ------- .. rest_parameters:: parameters.yaml - port_id: port_ident - fields: fields Response -------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - address: port_address - node_uuid: node_uuid - portgroup_uuid: portgroup_uuid - local_link_connection: local_link_connection - pxe_enabled: pxe_enabled - physical_network: physical_network - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at - links: links - is_smartnic: is_smartnic **Example Port details:** .. literalinclude:: samples/port-create-response.json :language: javascript Update a Port ============= .. rest_method:: PATCH /v1/ports/{port_id} Update a Port. .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` fields. .. versionadded:: 1.24 Added the ``portgroup_uuid`` field. .. versionadded:: 1.34 Added the ``physical_network`` field. .. versionadded:: 1.53 Added the ``is_smartnic`` fields. Normal response code: 200 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 `_. .. rest_parameters:: parameters.yaml - port_id: port_ident **Example Port update request:** .. literalinclude:: samples/port-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - address: port_address - node_uuid: node_uuid - portgroup_uuid: portgroup_uuid - local_link_connection: local_link_connection - pxe_enabled: pxe_enabled - physical_network: physical_network - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at - links: links - is_smartnic: is_smartnic **Example Port update response:** .. literalinclude:: samples/port-update-response.json :language: javascript Delete Port =========== .. rest_method:: DELETE /v1/ports/{port_id} Delete a Port. Normal response code: 204 Request ------- .. 
rest_parameters:: parameters.yaml - port_id: port_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-v1-volume.inc0000664000175000017500000002323500000000000022757 0ustar00zuulzuul00000000000000.. -*- rst -*- =============== Volume (volume) =============== .. versionadded:: 1.32 Information for connecting remote volumes to a node can be associated with a Node. There are two types of resources, Volume connectors and Volume targets. Volume connectors contain initiator information of Nodes. Volume targets contain target information of remote volumes. Listing, Searching, Creating, Updating, and Deleting of Volume connector resources are done through the ``v1/volume/connectors`` resource. The same operations for Volume targets are done through the ``v1/volume/targets`` resources. List Links of Volume Resources ============================== .. rest_method:: GET /v1/volume Return a list of links to all volume resources. Normal response code: 200 Request ------- Response -------- .. rest_parameters:: parameters.yaml - connectors: volume_connectors_link - targets: volume_targets_link - links: links **Example Volume list response:** .. literalinclude:: samples/volume-list-response.json :language: javascript List Volume Connectors ====================== .. rest_method:: GET /v1/volume/connectors Return a list of Volume connectors for all nodes. By default, this query will return the UUID, node UUID, type, and connector ID for each Volume connector. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node: r_volume_connector_node_ident - fields: fields - detail: detail - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. 
rest_parameters:: parameters.yaml - connectors: volume_connectors - uuid: uuid - type: volume_connector_type - connector_id: volume_connector_connector_id - node_uuid: node_uuid - extra: extra - links: links - next: next **Example Volume connector list response:** .. literalinclude:: samples/volume-connector-list-response.json :language: javascript **Example detailed Volume connector list response:** .. literalinclude:: samples/volume-connector-list-detail-response.json :language: javascript Create Volume Connector ======================= .. rest_method:: POST /v1/volume/connectors Creates a new Volume connector resource. This method requires a Node UUID, a connector type and a connector ID. Normal response code: 201 Error codes: 400,401,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - node_uuid: req_node_uuid - type: volume_connector_type - connector_id: volume_connector_connector_id - extra: req_extra - uuid: req_uuid **Example Volume connector creation request:** .. literalinclude:: samples/volume-connector-create-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - type: volume_connector_type - connector_id: volume_connector_connector_id - node_uuid: node_uuid - extra: extra - links: links **Example Volume connector creation response:** .. literalinclude:: samples/volume-connector-create-response.json :language: javascript Show Volume Connector Details ============================= .. rest_method:: GET /v1/volume/connectors/{volume_connector_id} Show details for the given Volume connector. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - volume_connector_id: volume_connector_id - fields: fields Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - type: volume_connector_type - connector_id: volume_connector_connector_id - node_uuid: node_uuid - extra: extra - links: links **Example Volume connector details:** .. 
literalinclude:: samples/volume-connector-create-response.json :language: javascript Update a Volume Connector ========================= .. rest_method:: PATCH /v1/volume/connectors/{volume_connector_id} Update a Volume connector. A Volume connector can be updated only while a node associated with the Volume connector is powered off. Normal response code: 200 Error codes: 400,401,403,404,409 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 `_. .. rest_parameters:: parameters.yaml - volume_connector_id: volume_connector_id **Example Volume connector update request:** .. literalinclude:: samples/volume-connector-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - type: volume_connector_type - connector_id: volume_connector_connector_id - node_uuid: node_uuid - extra: extra - links: links **Example Volume connector update response:** .. literalinclude:: samples/volume-connector-update-response.json :language: javascript Delete Volume Connector ======================= .. rest_method:: DELETE /v1/volume/connector/{volume_connector_id} Delete a Volume connector. A Volume connector can be deleted only while a node associated with the Volume connector is powered off. Normal response code: 204 Error codes: 400,401,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - volume_connector_id: volume_connector_id List Volume Targets =================== .. rest_method:: GET /v1/volume/targets Return a list of Volume targets for all nodes. By default, this query will return the UUID, node UUID, volume type, boot index, and volume ID for each Volume target. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node: r_volume_target_node_ident - fields: fields - detail: detail - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. 
rest_parameters:: parameters.yaml - targets: volume_targets - uuid: uuid - volume_type: volume_target_volume_type - properties: volume_target_properties - boot_index: volume_target_boot_index - volume_id: volume_target_volume_id - extra: extra - node_uuid: node_uuid - links: links - next: next **Example Volume target list response:** .. literalinclude:: samples/volume-target-list-response.json :language: javascript **Example detailed Volume target list response:** .. literalinclude:: samples/volume-target-list-detail-response.json :language: javascript Create Volume Target ==================== .. rest_method:: POST /v1/volume/targets Creates a new Volume target resource. This method requires a Node UUID, volume type, volume ID, and boot index.. Normal response code: 201 Error codes: 400,401,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - node_uuid: req_node_uuid - volume_type: volume_target_volume_type - properties: req_volume_target_properties - boot_index: volume_target_boot_index - volume_id: volume_target_volume_id - extra: req_extra - uuid: req_uuid **Example Volume target creation request:** .. literalinclude:: samples/volume-target-create-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - volume_type: volume_target_volume_type - properties: volume_target_properties - boot_index: volume_target_boot_index - volume_id: volume_target_volume_id - extra: extra - node_uuid: node_uuid - links: links **Example Volume target creation response:** .. literalinclude:: samples/volume-target-create-response.json :language: javascript Show Volume Target Details ========================== .. rest_method:: GET /v1/volume/targets/{volume_target_id} Show details for the given Volume target. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - volume_target_id: volume_target_id - fields: fields Response -------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - volume_type: volume_target_volume_type - properties: volume_target_properties - boot_index: volume_target_boot_index - volume_id: volume_target_volume_id - extra: extra - node_uuid: node_uuid - links: links **Example Volume target details:** .. literalinclude:: samples/volume-target-create-response.json :language: javascript Update a Volume Target ====================== .. rest_method:: PATCH /v1/volume/targets/{volume_target_id} Update a Volume target. A Volume target can be updated only while a node associated with the Volume target is powered off. Normal response code: 200 Error codes: 400,401,403,404,409 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 `_. .. rest_parameters:: parameters.yaml - volume_target_id: volume_target_id **Example Volume target update request:** .. literalinclude:: samples/volume-target-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - volume_type: volume_target_volume_type - properties: volume_target_properties - boot_index: volume_target_boot_index - volume_id: volume_target_volume_id - extra: extra - node_uuid: node_uuid - links: links **Example Volume target update response:** .. literalinclude:: samples/volume-target-update-response.json :language: javascript Delete Volume Target ==================== .. rest_method:: DELETE /v1/volume/target/{volume_target_id} Delete a Volume target. A Volume target can be deleted only while a node associated with the Volume target is powered off. Normal response code: 204 Error codes: 400,401,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - volume_target_id: volume_target_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/baremetal-api-versions.inc0000664000175000017500000000471400000000000022775 0ustar00zuulzuul00000000000000.. 
-*- rst -*- ============ API versions ============ Concepts ======== In order to bring new features to users over time, the Ironic API supports versioning. There are two kinds of versions in Ironic. - ''major versions'', which have dedicated urls. - ''microversions'', which can be requested through the use of the ``X-OpenStack-Ironic-API-Version`` header. The Version APIs work differently from other APIs as they *do not* require authentication. Beginning with the Kilo release, all API requests support the ``X-OpenStack-Ironic-API-Version`` header. This header SHOULD be supplied with every request; in the absence of this header, each request is treated as though coming from an older pre-Kilo client. This was done to preserve backwards compatibility as we introduced new features in the server. If you try to use a feature with an API version older than when that feature was introduced the ironic service will respond as would before that feature existed. For example if a new API URL was added, and you try to make a request with an older API version, then you will get a ``Not Found (404)`` error, or if a new field was added to an existing API and you request an older API version then you will get an ``Invalid Parameter`` response. List API versions ================= .. rest_method:: GET / This fetches all the information about all known major API versions in the deployment. Links to more specific information will be provided for each major API version, as well as information about supported min and max microversions. Normal response codes: 200 Request ------- Response Example ---------------- .. rest_parameters:: parameters.yaml - description: description - versions: versions - version: version - id: id - links: links - min_version: x-openstack-ironic-api-min-version .. literalinclude:: samples/api-root-response.json :language: javascript Show v1 API =========== .. rest_method:: GET /v1/ Show all the resources within the Ironic v1 API. 
Normal response codes: 200 Request ------- Response Example ---------------- .. rest_parameters:: parameters.yaml - id: id - links: links - openstack-request-id: openstack-request-id - x-openstack-ironic-api-version: header_version - x-openstack-ironic-api-min-version: x-openstack-ironic-api-min-version - x-openstack-ironic-api-max-version: x-openstack-ironic-api-max-version .. literalinclude:: samples/api-v1-root-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/conf.py0000664000175000017500000001522100000000000017223 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # ironic documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. 
# -- ironic API reference Sphinx configuration -------------------------------
#
# Sphinx executes this file; every module-level name below becomes a
# Sphinx configuration value.

import os
import sys

# Make the repository root (and this directory) importable for extensions.
for _path in ('../../', '../', './'):
    sys.path.insert(0, os.path.abspath(_path))

# -- Theme and extensions ----------------------------------------------------

html_theme = 'openstackdocs'
html_theme_options = {'sidebar_mode': 'toc'}

extensions = ['os_api_ref', 'openstackdocstheme']

# openstackdocstheme settings.
openstackdocs_repo_name = 'openstack/ironic'
openstackdocs_use_storyboard = True
openstackdocs_auto_name = False

# -- General configuration ---------------------------------------------------

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Ironic API Reference'
copyright = u'OpenStack Foundation'

# html_context allows us to pass arbitrary values into the html template
# (used here for the "report a bug" links).
html_context = {'bug_tag': 'api-ref', 'bug_project': 'ironic'}

# Do not prepend module names to object titles.
add_module_names = False

# Hide sectionauthor and moduleauthor directives in the output.
show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'

# -- Options for HTML help output --------------------------------------------

# Output file base name for the HTML help builder.
htmlhelp_basename = 'ironicdoc'

# -- Options for LaTeX output ------------------------------------------------

# Grouping of the document tree into LaTeX files. List of tuples:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'Ironic.tex', u'OpenStack Bare Metal API Documentation',
     u'OpenStack Foundation', 'manual'),
]
# latex_use_modindex = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/index.rst0000664000175000017500000000245000000000000017565 0ustar00zuulzuul00000000000000:tocdepth: 2 ================ Bare Metal API ================ .. rest_expand_all:: .. include:: baremetal-api-versions.inc .. include:: baremetal-api-v1-nodes.inc .. include:: baremetal-api-v1-node-management.inc .. include:: baremetal-api-v1-node-passthru.inc .. include:: baremetal-api-v1-nodes-traits.inc .. include:: baremetal-api-v1-nodes-vifs.inc .. include:: baremetal-api-v1-indicators.inc .. include:: baremetal-api-v1-portgroups.inc .. include:: baremetal-api-v1-nodes-portgroups.inc .. include:: baremetal-api-v1-ports.inc .. include:: baremetal-api-v1-nodes-ports.inc .. include:: baremetal-api-v1-portgroups-ports.inc .. include:: baremetal-api-v1-volume.inc .. include:: baremetal-api-v1-nodes-volume.inc .. include:: baremetal-api-v1-drivers.inc .. include:: baremetal-api-v1-driver-passthru.inc .. include:: baremetal-api-v1-nodes-bios.inc .. include:: baremetal-api-v1-conductors.inc .. include:: baremetal-api-v1-allocation.inc .. include:: baremetal-api-v1-node-allocation.inc .. include:: baremetal-api-v1-deploy-templates.inc .. include:: baremetal-api-v1-nodes-history.inc .. NOTE(dtantsur): keep chassis close to the end since it's semi-deprecated .. include:: baremetal-api-v1-chassis.inc .. NOTE(dtantsur): keep misc last, since it covers internal API .. include:: baremetal-api-v1-misc.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/parameters.yaml0000664000175000017500000015736300000000000020771 0ustar00zuulzuul00000000000000# variables in header header_version: description: | Specific API microversion used to generate this response. 
in: header required: true type: string openstack-request-id: description: > A unique ID for tracking the request. The request ID associated with the request appears in the log lines for that request. By default, the middleware configuration ensures that the request ID appears in the log files. in: header required: false type: string x-openstack-ironic-api-max-version: description: | Maximum API microversion supported by this endpoint, eg. "1.22" in: header required: true type: string x-openstack-ironic-api-min-version: description: | Minimum API microversion supported by this endpoint, eg. "1.1" in: header required: true type: string x-openstack-ironic-api-version: description: > A request SHOULD include this header to indicate to the Ironic API service what version the client supports. The server will transform the response object into compliance with the requested version, if it is supported, or return a 406 Not Supported error. If this header is not supplied, the server will default to ``min_version`` in all responses. in: header required: true type: string # variables in path allocation_ident: description: | The UUID or name of the allocation. in: path required: true type: string bios_setting: description: | The name of the Bios setting. in: path required: true type: string chassis_ident: description: | The UUID of the chassis. in: path required: true type: string component: description: | The Bare Metal node component. in: path required: true type: string deploy_template_ident: description: | The UUID or name of the deploy template. in: path required: true type: string driver_ident: description: | The name of the driver. in: path required: true type: string history_event_ident: description: | The UUID of a history event. in: path required: true type: string hostname_ident: description: | The hostname of the conductor. in: path required: true type: string ind_ident: description: | The indicator of a Bare Metal component. 
in: path required: true type: string node_id: description: | The UUID of the node. in: path required: false type: string node_ident: description: | The UUID or Name of the node. in: path required: true type: string port_ident: description: | The UUID of the port. in: path required: true type: string portgroup_ident: description: | The UUID or Name of the portgroup. in: path required: true type: string trait: description: | A single trait for this node. in: path required: true type: string volume_connector_id: description: | The UUID of the Volume connector. in: path required: true type: string volume_target_id: description: | The UUID of the Volume target. in: path required: true type: string agent_token: description: | The token of the ironic-python-agent ramdisk, sent to the Bare Metal service for authentication purposes. in: query required: true type: string agent_version: description: | The version of the ironic-python-agent ramdisk, sent back to the Bare Metal service and stored during provisioning. in: query required: true type: string callback_url: description: | The URL of an active ironic-python-agent ramdisk, sent back to the Bare Metal service and stored temporarily during a provisioning action. in: query required: true type: string detail: description: | Whether to show detailed information about the resource. This cannot be set to True if ``fields`` parameter is specified. in: query required: false type: boolean # variables in driver query string driver_detail: description: | Whether to show detailed information about the drivers (e.g. the "boot_interface" field). in: query required: false type: boolean driver_type: description: | Only list drivers of this type. Options are "classic" or "dynamic". in: query required: false type: string # variables common to all query strings fields: description: | One or more fields to be returned in the response. 
For example, the following request returns only the ``uuid`` and ``name`` fields for each node: :: GET /v1/nodes?fields=uuid,name in: query required: false type: array fields_for_conductor: description: | One or more fields to be returned in the response. For example, the following request returns only the ``hostname`` and ``alive`` fields for each conductor: :: GET /v1/conductors?fields=hostname,alive in: query required: false type: array limit: description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. This value cannot be larger than the ``max_limit`` option in the ``[api]`` section of the configuration. If it is higher than ``max_limit``, only ``max-limit`` resources will be returned. in: query required: false type: integer marker: description: | The ID of the last-seen item. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string # variables in the vendor_passthru query string method_name: description: | Driver specific method name. in: query required: true type: string # variable in the lookup query string r_addresses: description: | Optional list of one or more Port addresses. in: query required: false type: array # variables in the query string r_allocation_node: description: | Filter the list of allocations by the node UUID or name. in: query required: false type: string r_allocation_state: description: | Filter the list of allocations by the allocation state, one of ``active``, ``allocating`` or ``error``. 
in: query required: false type: string r_associated: description: | Filter the list of returned nodes and only return those which are, or are not, associated with an ``instance_uuid``. in: query required: false type: boolean r_conductor: description: | Filter the list of returned nodes, and only return those with the specified ``conductor``. in: query required: false type: string r_conductor_group: description: | Filter the list of returned nodes, and only return those with the specified ``conductor_group``. Case-insensitive string up to 255 characters, containing ``a-z``, ``0-9``, ``_``, ``-``, and ``.``. in: query required: false type: string r_description_contains: description: | Filter the list of returned nodes, and only return those containing substring specified by ``description_contains``. in: query required: false type: string r_driver: description: | Filter the list of returned nodes, and only return those with the specified ``driver``. in: query required: false type: string r_fault: description: | Filter the list of returned nodes, and only return those with the specified ``fault``. Possible values are determined by faults supported by ironic, e.g., ``power failure``, ``clean failure`` or ``rescue abort failure``. in: query required: false type: string r_instance_uuid: description: | Filter the list of returned nodes, and only return the node with this specific instance UUID, or an empty set if not found. in: query required: false type: string r_maintenance: description: | Filter the list of returned nodes and only return those with ``maintenance`` set to ``True`` or ``False``. in: query required: false type: boolean # variable in the lookup query string r_node_uuid: description: | Optional Node UUID. in: query required: false type: string r_owner: description: | Filter the list of returned allocations, and only return those with the specified owner. 
in: query required: false type: string r_port_address: description: | Filter the list of returned Ports, and only return the ones with the specified physical hardware address, typically MAC, or an empty set if not found. in: query required: false type: string r_port_node_ident: description: | Filter the list of returned Ports, and only return the ones associated with this specific node (name or UUID), or an empty set if not found. in: query required: false type: string r_port_node_uuid: description: | Filter the list of returned Ports, and only return the ones associated with this specific node UUID, or an empty set if not found. in: query required: false type: string r_port_portgroup_ident: description: | Filter the list of returned Ports, and only return the ones associated with this specific Portgroup (name or UUID), or an empty set if not found. in: query required: false type: string r_portgroup_address: description: | Filter the list of returned Portgroups, and only return the ones with the specified physical hardware address, typically MAC, or an empty set if not found. in: query required: false type: string r_portgroup_node_ident: description: | Filter the list of returned Portgroups, and only return the ones associated with this specific node (name or UUID), or an empty set if not found. in: query required: false type: string r_provision_state: description: | Filter the list of returned nodes, and only return those with the specified ``provision_state``. in: query required: false type: string r_resource_class: description: | Filter the list of returned nodes, and only return the ones with the specified resource class. in: query required: false type: string r_volume_connector_node_ident: description: | Filter the list of returned Volume connectors, and only return the ones associated with this specific node (name or UUID), or an empty set if not found. 
in: query required: false type: string r_volume_target_node_ident: description: | Filter the list of returned Volume targets, and only return the ones associated with this specific node (name or UUID), or an empty set if not found. in: query required: false type: string sort_dir: description: | Sorts the response by the requested sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. in: query required: false type: string sort_key: description: | Sorts the response by this attribute value. Default is ``id``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. in: query required: false type: string # variable returned from /lookup agent_config: description: | JSON document of configuration data for the ironic-python-agent process. in: body required: true type: JSON agent_node: description: | JSON document containing the Node fields "uuid", "properties", "instance_info", and "driver_internal_info"; used by the ironic-python-agent process as it operates on the Node. in: body required: true type: JSON # variables in the API body alive: description: | The conductor status indicates whether a conductor is considered alive or not. in: body required: true type: boolean allocation_last_error: description: | The error message for the allocation if it is in the ``error`` state, ``null`` otherwise. in: body required: true type: string allocation_name: description: | The unique name of the allocation. in: body required: true type: string allocation_node: description: | The UUID of the node assigned to the allocation. 
Will be ``null`` if a node is not yet assigned. in: body required: true type: string allocation_patch: description: | A JSON patch document to apply to the allocation. in: body required: true type: JSON allocation_resource_class: description: | The resource class requested for the allocation. Can be ``null`` if the allocation was created via backfilling and the target node did not have the resource class set. in: body required: true type: string allocation_state: description: | The current state of the allocation. One of: * ``allocating`` - allocation is in progress. * ``active`` - allocation is finished and ``node_uuid`` is assigned. * ``error`` - allocation has failed, see ``last_error`` for details. in: body required: true type: string allocation_traits: description: | The list of the traits requested for the allocation. in: body required: true type: array allocation_uuid: description: | The UUID of the allocation associated with the node. If not ``null``, will be the same as ``instance_uuid`` (the opposite is not always true). Unlike ``instance_uuid``, this field is read-only. Please use the Allocation API to remove allocations. in: body required: true type: string automated_clean: description: | Indicates whether the node will perform automated clean or not. in: body required: true type: boolean bios_interface: description: | The bios interface to be used for this node. in: body required: true type: string bios_setting_allowable_values: description: | A list of allowable values, otherwise ``null``. in: body required: true type: array bios_setting_attribute_type: description: | A string describing the type of the Bios setting. May be ``null``. in: body required: true type: string bios_setting_lower_bound: description: | The lowest allowed integer value. May be ``null``. in: body required: true type: integer bios_setting_max_length: description: | The maximum string length of the value. May be ``null``. 
in: body required: true type: integer bios_setting_min_length: description: | The minimum string length of the value. May be ``null``. in: body required: true type: integer bios_setting_name: description: | The name of a Bios setting for a Node, eg. "virtualization". in: body required: true type: string bios_setting_read_only: description: | This Bios setting is read only and can't be changed. May be None. in: body required: true type: boolean bios_setting_reset_required: description: | After setting this Bios setting a node reboot is required. May be None. in: body required: true type: boolean bios_setting_unique: description: | This Bios setting is unique to this node. May be ``null``. in: body required: true type: boolean bios_setting_upper_bound: description: | The highest allowed integer value. May be ``null``. in: body required: true type: integer bios_setting_value: description: | The value of a Bios setting for a Node, eg. "on". in: body required: true type: string bios_settings: description: | Optional list of one or more Bios settings. It includes following fields "created_at", "updated_at", "links", "name", "value", "attribute_type", "allowable_values", "lower_bound", "max_length", "min_length", "read_only", "reset_required", "unique", "upper_bound" in: body required: true type: array boot_device: description: | The boot device for a Node, eg. "pxe" or "disk". in: body required: true type: string boot_interface: description: | The boot interface for a Node, e.g. "pxe". in: body required: true type: string boot_mode: description: | The current boot mode state (uefi/bios) in: body type: string candidate_nodes: description: | A list of UUIDs of the nodes that are candidates for this allocation. in: body required: true type: array chassis: description: | A ``chassis`` object. in: body required: true type: array chassis_uuid: description: | UUID of the chassis associated with this Node. May be empty or None. 
in: body required: true type: string clean_step: description: | The current clean step. Introduced with the cleaning feature. in: body required: false type: string clean_steps: description: | An ordered list of cleaning steps that will be performed on the node. A cleaning step is a dictionary with required keys 'interface' and 'step', and optional key 'args'. If specified, the value for 'args' is a keyword variable argument dictionary that is passed to the cleaning step method. in: body required: false type: array component_name: description: | The name of the component available for the node. in: body required: true type: string conductor: description: | The conductor currently servicing a node. This field is read-only. in: body required: false type: string conductor_group: description: | The conductor group for a node. Case-insensitive string up to 255 characters, containing ``a-z``, ``0-9``, ``_``, ``-``, and ``.``. in: body required: true type: string configdrive: description: | A config drive to be written to a partition on the Node's boot disk. Can be a full gzip'ed and base-64 encoded image or a JSON object with the keys: * ``meta_data`` (optional) - JSON object with the standard meta data. Ironic will provide the defaults for the ``uuid`` and ``name`` fields. * ``network_data`` (optional) - JSON object with networking configuration. * ``user_data`` (optional) - user data. May be a string (which will be UTF-8 encoded); a JSON object, or a JSON array. * ``vendor_data`` (optional) - JSON object with extra vendor data. This parameter is only accepted when setting the state to "active" or "rebuild". in: body required: false type: string or object console_enabled: description: | Indicates whether console access is enabled or disabled on this node. in: body required: true type: boolean console_interface: description: | The console interface for a node, e.g. "no-console". 
in: body required: true type: string created_at: description: | The UTC date and time when the resource was created, `ISO 8601 `_ format. in: body required: true type: string d_bios_setting: description: | Dictionary containing the definition of a Bios setting. It includes the following fields "created_at", "updated_at", "links", "name", "value". in: body required: true type: dictionary default_bios_interface: description: | The default bios interface used for a node with a dynamic driver, if no bios interface is specified for the node. in: body required: true type: string default_boot_interface: description: | The default boot interface used for a node with a dynamic driver, if no boot interface is specified for the node. in: body required: true type: string default_console_interface: description: | The default console interface used for a node with a dynamic driver, if no console interface is specified for the node. in: body required: true type: string default_deploy_interface: description: | The default deploy interface used for a node with a dynamic driver, if no deploy interface is specified for the node. in: body required: true type: string default_inspect_interface: description: | The default inspection interface used for a node with a dynamic driver, if no inspection interface is specified for the node. in: body required: true type: string default_management_interface: description: | The default management interface used for a node with a dynamic driver, if no management interface is specified for the node. in: body required: true type: string default_network_interface: description: | The default network interface used for a node with a dynamic driver, if no network interface is specified for the node. in: body required: true type: string default_power_interface: description: | The default power interface used for a node with a dynamic driver, if no power interface is specified for the node. 
in: body required: true type: string default_raid_interface: description: | The default RAID interface used for a node with a dynamic driver, if no RAID interface is specified for the node. in: body required: true type: string default_rescue_interface: description: | The default rescue interface used for a node with a dynamic driver, if no rescue interface is specified for the node. in: body required: true type: string default_storage_interface: description: | The default storage interface used for a node with a dynamic driver, if no storage interface is specified for the node. in: body required: true type: string default_vendor_interface: description: | The default vendor interface used for a node with a dynamic driver, if no vendor interface is specified for the node. in: body required: true type: string deploy_interface: description: | The deploy interface for a node, e.g. "iscsi". in: body required: true type: string deploy_step: description: | The current deploy step. in: body required: false type: string deploy_steps: description: | A list of deploy steps that will be performed on the node. A deploy step is a dictionary with required keys 'interface', 'step', 'priority' and optional key 'args'. If specified, the value for 'args' is a keyword variable argument dictionary that is passed to the deploy step method. in: body required: False type: array deploy_template_name: description: | The unique name of the deploy template. in: body required: true type: string deploy_template_step_args: description: | A dictionary of arguments that are passed to the deploy step method. in: body required: true type: object deploy_template_step_interface: description: | The name of the driver interface. in: body required: true type: string deploy_template_step_priority: description: | A non-negative integer priority for the step. A value of ``0`` will disable that step. 
in: body required: true type: integer deploy_template_step_step: description: | The name of the deploy step method on the driver interface. in: body required: true type: string deploy_template_steps: description: | The deploy steps of the deploy template. Must be a list of dictionaries containing at least one deploy step. See `Request Step`_ for step parameters. in: body required: true type: array description: description: | Descriptive text about the Ironic service. in: body required: true type: string disable_ramdisk: description: | If set to ``true``, the ironic-python-agent ramdisk will not be booted for cleaning. Only clean steps explicitly marked as not requiring ramdisk can be executed in this mode. Only allowed for manual cleaning. in: body required: false type: boolean driver_info: description: | All the metadata required by the driver to manage this Node. List of fields varies between drivers, and can be retrieved from the ``/v1/drivers//properties`` resource. in: body required: true type: JSON driver_internal_info: description: | Internal metadata set and stored by the Node's driver. This field is read-only. in: body required: false type: JSON driver_name: description: | The name of the driver. in: body required: true type: string driver_property_links: description: | A list of links to driver properties. in: body required: true type: array drivers: description: | A list of driver objects. in: body required: true type: array enabled_bios_interfaces: description: | The enabled bios interfaces for this driver. in: body required: true type: list enabled_boot_interfaces: description: | The enabled boot interfaces for this driver. in: body required: true type: list enabled_console_interfaces: description: | The enabled console interfaces for this driver. in: body required: true type: list enabled_deploy_interfaces: description: | The enabled deploy interfaces for this driver. 
in: body required: true type: list enabled_inspect_interfaces: description: | The enabled inspection interfaces for this driver. in: body required: true type: list enabled_management_interfaces: description: | The enabled management interfaces for this driver. in: body required: true type: list enabled_network_interfaces: description: | The enabled network interfaces for this driver. in: body required: true type: list enabled_power_interfaces: description: | The enabled power interfaces for this driver. in: body required: true type: list enabled_raid_interfaces: description: | The enabled RAID interfaces for this driver. in: body required: true type: list enabled_rescue_interfaces: description: | The enabled rescue interfaces for this driver. in: body required: true type: list enabled_storage_interfaces: description: | The enabled storage interfaces for this driver. in: body required: true type: list enabled_vendor_interfaces: description: | The enabled vendor interfaces for this driver. in: body required: true type: list extra: description: | A set of one or more arbitrary metadata key and value pairs. in: body required: true type: object fault: description: | The fault indicates the active fault detected by ironic, typically the Node is in "maintenance mode". None means no fault has been detected by ironic. "power failure" indicates ironic failed to retrieve power state from this node. There are other possible types, e.g., "clean failure" and "rescue abort failure". in: body required: false type: string history_event: description: | The event message body which has been logged related to the node for this error. in: body required: true type: string history_event_type: description: | Short descriptive string to indicate where the error occurred at to enable API users/System Operators to be able to identify repeated issues in a particular area of operation, such as 'deployment', 'console', 'cleaning', 'monitoring'. 
in: body required: true type: string history_severity: description: | Severity indicator for the event being returned. Typically this will indicate if this was an Error or Informational entry. in: body required: true type: string history_user_ident: description: | The UUID value representing the user who appears to have caused the recorded event. in: body required: true type: string hostname: description: | The hostname of this conductor. in: body required: true type: string hosts: description: | A list of active hosts that support this driver. in: body required: true type: array id: description: | Major API version, eg, "v1" in: body required: true type: string indicator_name: description: | The name of the indicator. in: body required: true type: string indicator_readonly: description: | Indicates whether the indicator is readonly. in: body required: true type: boolean indicator_states: description: | The possible states for a given indicator, the only values that can be returned are: ``ON``, ``OFF``, ``BLINKING`` and ``UNKNOWN`` in: body required: true type: string inspect_interface: description: | The interface used for node inspection, e.g. "no-inspect". in: body required: true type: string inspection_finished_at: description: | The UTC date and time when the last hardware inspection finished successfully, `ISO 8601 `_ format. May be "null". in: body required: true type: string inspection_started_at: description: | The UTC date and time when the hardware inspection was started, `ISO 8601 `_ format. May be "null". in: body required: true type: string instance_info: description: | Information used to customize the deployed image. May include root partition size, a base 64 encoded config drive, and other metadata. Note that this field is erased automatically when the instance is deleted (this is done by requesting the Node provision state be changed to DELETED). 
in: body required: true type: JSON instance_uuid: description: | UUID of the Nova instance associated with this Node. in: body required: true type: string internal_info: description: | Internal metadata set and stored by the Port. This field is read-only. in: body required: true type: JSON is_smartnic: description: | Indicates whether the Port is a Smart NIC port. in: body required: false type: boolean last_error: description: | Any error from the most recent (last) transaction that started but failed to finish. in: body required: true type: string lessee: description: | A string or UUID of the tenant who is leasing the object. in: body required: false type: string links: description: | A list of relative links. Includes the self and bookmark links. in: body required: true type: array local_link_connection: description: | The Port binding profile. If specified, must contain ``switch_id`` (only a MAC address or an OpenFlow based datapath_id of the switch are accepted in this field) and ``port_id`` (identifier of the physical port on the switch to which node's port is connected to) fields. ``switch_info`` is an optional string field to be used to store any vendor-specific information. in: body required: true type: JSON maintenance: description: | Whether or not this Node is currently in "maintenance mode". Setting a Node into maintenance mode removes it from the available resource pool and halts some internal automation. This can happen manually (eg, via an API request) or automatically when Ironic detects a hardware fault that prevents communication with the machine. in: body required: true type: boolean maintenance_reason: description: | User-settable description of the reason why this Node was placed into maintenance mode in: body required: false type: string management_interface: description: | Interface for out-of-band node management, e.g. "ipmitool". 
in: body required: true type: string n_components: description: | List all available indicators names for each of the hardware components for this node. in: body required: true type: array n_description: description: | Informational text about this node. in: body required: true type: string n_history: description: | History events attached to this node. in: body required: true type: array n_ind_state: description: | The state of an indicator of the component of the node. Possible values are: ``OFF``, ``ON``, ``BLINKING`` or ``UNKNOWN``. in: body required: true type: string n_indicators: description: | Retrieves all indicators for a given hardware component along with their attributes for this node. in: body required: true type: array n_portgroups: description: | Links to the collection of portgroups on this node. in: body required: true type: array n_ports: description: | Links to the collection of ports on this node in: body required: true type: array n_properties: description: | Physical characteristics of this Node. Populated by ironic-inspector during inspection. May be edited via the REST API at any time. in: body required: true type: JSON n_states: description: | Links to the collection of states. Note that this resource is also used to request state transitions. in: body required: true type: array n_traits: description: | List of traits for this node. in: body required: true type: array n_vifs: description: | VIFs attached to this node. in: body required: true type: array n_volume: description: | Links to the volume resources. in: body required: true type: array name: description: | The name of the driver. in: body required: true type: string network_data: description: | Static network configuration in the OpenStack network data format to use during deployment and cleaning. Requires a specially crafted ramdisk, see `DHCP-less documentation `_. 
in: body required: false type: JSON network_interface: description: | Which Network Interface provider to use when plumbing the network connections for this Node. in: body required: true type: string next: description: | A URL to request a next collection of the resource. This parameter is returned when ``limit`` is specified in a request and there remain items. in: body required: false type: string node_name: description: | Human-readable identifier for the Node resource. May be undefined. Certain words are reserved. in: body required: false type: string node_uuid: description: | UUID of the Node this resource belongs to. in: body required: true type: string node_vif_ident: description: | The UUID or name of the VIF. in: body required: true type: string nodes: description: | Links to the collection of nodes contained in this chassis. in: body required: true type: array owner: description: | A string or UUID of the tenant who owns the object. in: body required: false type: string passthru_async: description: | If True the passthru function is invoked asynchronously; if False, synchronously. in: body required: true type: boolean passthru_attach: description: | True if the return value will be attached to the response object, and False if the return value will be returned in the response body. in: body required: true type: boolean passthru_description: description: | A description of what the method does, including any method parameters. in: body required: true type: string passthru_http_methods: description: | A list of HTTP methods supported by the vendor function. in: body required: true type: array persistent: description: | Whether the boot device should be set only for the next reboot, or persistently. in: body required: true type: boolean pg_ports: description: | Links to the collection of ports belonging to this portgroup. in: body required: true type: array physical_network: description: | The name of the physical network to which a port is connected. 
May be empty. in: body required: true type: string port_address: description: | Physical hardware address of this network Port, typically the hardware MAC address. in: body required: true type: string portgroup_address: description: | Physical hardware address of this Portgroup, typically the hardware MAC address. in: body required: false type: string portgroup_internal_info: description: | Internal metadata set and stored by the Portgroup. This field is read-only. in: body required: true type: JSON portgroup_mode: description: | Mode of the port group. For possible values, refer to https://www.kernel.org/doc/Documentation/networking/bonding.txt. If not specified in a request to create a port group, it will be set to the value of the ``[DEFAULT]default_portgroup_mode`` configuration option. When set, can not be removed from the port group. in: body required: true type: string portgroup_name: description: | Human-readable identifier for the Portgroup resource. May be undefined. in: body required: true type: string portgroup_properties: description: | Key/value properties related to the port group's configuration. in: body required: true type: JSON portgroup_uuid: description: | UUID of the Portgroup this resource belongs to. in: body required: true type: string portgroups: description: | A collection of Portgroup resources. in: body required: true type: array ports: description: | A collection of Port resources. in: body required: true type: array power_interface: description: | Interface used for performing power actions on the node, e.g. "ipmitool". in: body required: true type: string power_state: description: | The current power state of this Node. Usually, "power on" or "power off", but may be "None" if Ironic is unable to determine the power state (eg, due to hardware failure). in: body required: true type: string power_timeout: description: | Timeout (in seconds) for a power state transition. 
in: body required: false type: integer properties: description: | A list of links to driver properties. in: body required: true type: array protected: description: | Whether the node is protected from undeploying, rebuilding and deletion. in: body required: false type: boolean protected_reason: description: | The reason the node is marked as protected. in: body required: false type: string provision_state: description: | The current provisioning state of this Node. in: body required: true type: string provision_updated_at: description: | The UTC date and time when the provision state was last updated, `ISO 8601 `_ format. ``null`` if the node is not being provisioned. in: body required: true type: string pxe_enabled: description: | Indicates whether PXE is enabled or disabled on the Port. in: body required: true type: boolean raid_config: description: | Represents the current RAID configuration of the node. Introduced with the cleaning feature. in: body required: false type: JSON raid_interface: description: | Interface used for configuring RAID on this node, e.g. "no-raid". in: body required: true type: string reason: description: | Specify the reason for setting the Node into maintenance mode. in: body required: false type: string req_allocation_name: description: | The unique name of the Allocation. in: body required: false type: string req_allocation_node: description: | The node UUID or name to create the allocation against, bypassing the normal allocation process. .. warning:: This field must not be used to request a normal allocation with one candidate node, use ``candidate_nodes`` instead. in: body required: false type: string req_allocation_resource_class: description: | The requested resource class for the allocation. Can only be missing when backfilling an allocation (will be set to the node's ``resource_class`` in such case). in: body required: true type: string req_allocation_traits: description: | The list of requested traits for the allocation. 
in: body required: false type: array req_automated_clean: description: | Indicates whether the node will perform automated clean or not. in: body required: false type: boolean req_bios_interface: description: | The bios interface to be used for this node. in: body required: false type: string req_boot_device: description: | The boot device for a Node, eg. "pxe" or "disk". in: body required: true type: string req_boot_interface: description: | The boot interface for a Node, e.g. "pxe". in: body required: false type: string req_candidate_nodes: description: | The list of nodes (names or UUIDs) that should be considered for this allocation. If not provided, all available nodes will be considered. in: body required: false type: array req_chassis: description: | A ``chassis`` object. in: body required: true type: array req_chassis_uuid: description: | UUID of the chassis associated with this Node. May be empty or None. in: body required: false type: string req_conductor_group: description: | The conductor group for a node. Case-insensitive string up to 255 characters, containing ``a-z``, ``0-9``, ``_``, ``-``, and ``.``. in: body required: false type: string req_console_enabled: description: | Indicates whether console access is enabled or disabled on this node. in: body required: true type: boolean req_console_interface: description: | The console interface for a node, e.g. "no-console". in: body required: false type: string req_deploy_interface: description: | The deploy interface for a node, e.g. "iscsi". in: body required: false type: string req_description: description: | Descriptive text about the Ironic service. in: body required: false type: string req_driver_info: description: | All the metadata required by the driver to manage this Node. List of fields varies between drivers, and can be retrieved from the ``/v1/drivers//properties`` resource. in: body required: false type: JSON req_driver_name: description: | The name of the driver used to manage this Node. 
in: body required: true type: string req_extra: description: | A set of one or more arbitrary metadata key and value pairs. in: body required: false type: object req_inspect_interface: description: | The interface used for node inspection, e.g. "no-inspect". in: body required: false type: string req_instance_info: description: | Information used to customize the deployed image. May include root partition size, a base 64 encoded config drive, and other metadata. Note that this field is erased automatically when the instance is deleted (this is done by requesting the Node provision state be changed to DELETED). in: body required: false type: JSON req_instance_uuid: description: | UUID of the Nova instance associated with this Node. in: body required: false type: string req_is_smartnic: description: | Indicates whether the Port is a Smart NIC port. in: body required: false type: boolean req_local_link_connection: description: | The Port binding profile. If specified, must contain ``switch_id`` (only a MAC address or an OpenFlow based datapath_id of the switch are accepted in this field) and ``port_id`` (identifier of the physical port on the switch to which node's port is connected to) fields. ``switch_info`` is an optional string field to be used to store any vendor-specific information. in: body required: false type: JSON req_maintenance: description: | Whether or not this Node is currently in "maintenance mode". Setting a Node into maintenance mode removes it from the available resource pool and halts some internal automation. This can happen manually (eg, via an API request) or automatically when Ironic detects a hardware fault that prevents communication with the machine. in: body required: false type: boolean req_management_interface: description: | Interface for out-of-band node management, e.g. "ipmitool". in: body required: false type: string req_n_description: description: | Informational text about this node. 
in: body required: false type: string req_network_interface: description: | Which Network Interface provider to use when plumbing the network connections for this Node. in: body required: false type: string req_node_uuid: description: | UUID of the Node this resource belongs to. in: body required: true type: string req_node_vif_ident: description: | The UUID or name of the VIF. in: body required: true type: string req_node_vif_port_uuid: description: | The UUID of a port to attach the VIF to. Cannot be specified with ``portgroup_uuid``. in: body required: false type: string req_node_vif_portgroup_uuid: description: | The UUID of a portgroup to attach the VIF to. Cannot be specified with ``port_uuid``. in: body required: false type: string req_persistent: description: | Whether the boot device should be set only for the next reboot, or persistently. in: body required: false type: boolean req_physical_network: description: | The name of the physical network to which a port is connected. May be empty. in: body required: false type: string req_port_address: description: | Physical hardware address of this network Port, typically the hardware MAC address. in: body required: true type: string req_portgroup_address: description: | Physical hardware address of this Portgroup, typically the hardware MAC address. in: body required: false type: string req_portgroup_mode: description: | Mode of the port group. For possible values, refer to https://www.kernel.org/doc/Documentation/networking/bonding.txt. If not specified in a request to create a port group, it will be set to the value of the ``[DEFAULT]default_portgroup_mode`` configuration option. When set, can not be removed from the port group. in: body required: false type: string req_portgroup_name: description: | Human-readable identifier for the Portgroup resource. May be undefined. 
in: body required: false type: string req_portgroup_properties: description: | Key/value properties related to the port group's configuration. in: body required: false type: JSON req_portgroup_uuid: description: | UUID of the Portgroup this resource belongs to. in: body required: false type: string req_power_interface: description: | Interface used for performing power actions on the node, e.g. "ipmitool". in: body required: false type: string req_properties: description: | Physical characteristics of this Node. Populated during inspection, if performed. Can be edited via the REST API at any time. in: body required: false type: JSON req_provision_state: description: | The requested provisioning state of this Node. in: body required: true type: string req_pxe_enabled: description: | Indicates whether PXE is enabled or disabled on the Port. in: body required: false type: boolean req_raid_interface: description: | Interface used for configuring RAID on this node, e.g. "no-raid". in: body required: false type: string req_rescue_interface: description: | The interface used for node rescue, e.g. "no-rescue". in: body required: false type: string req_resource_class_create: description: | A string which can be used by external schedulers to identify this Node as a unit of a specific type of resource. in: body required: false type: string req_standalone_ports_supported: description: | Indicates whether ports that are members of this portgroup can be used as stand-alone ports. in: body required: false type: boolean req_storage_interface: description: | Interface used for attaching and detaching volumes on this node, e.g. "cinder". in: body required: false type: string req_target_boot_mode: description: | If a boot mode change has been requested, this field represents the requested (ie, "target") state, either "uefi" or "bios". 
in: body required: true type: string req_target_power_state: description: | If a power state transition has been requested, this field represents the requested (ie, "target") state either "power on", "power off", "rebooting", "soft power off" or "soft rebooting". in: body required: true type: string req_target_raid_config: description: | Represents the requested RAID configuration of the node, which will be applied when the Node next transitions through the CLEANING state. Introduced with the cleaning feature. in: body required: true type: JSON req_target_secure_boot: description: | If a secure boot change has been requested, this field represents the requested (ie, "target") state, either ``true`` or ``false``. in: body required: true type: boolean req_uuid: description: | The UUID for the resource. in: body required: false type: string req_vendor_interface: description: | Interface for vendor-specific functionality on this node, e.g. "no-vendor". in: body required: false type: string req_volume_target_properties: description: | A set of physical information of the volume such as the identifier (eg. IQN) and LUN number of the volume. This information is used to connect the node to the volume by the storage interface. The contents depend on the volume type. in: body required: false type: object requested_provision_state: description: | One of the provisioning verbs: manage, provide, inspect, clean, active, rebuild, delete (deleted), abort, adopt, rescue, unrescue. in: body required: true type: string rescue_interface: description: | The interface used for node rescue, e.g. "no-rescue". in: body required: true type: string rescue_password: description: | Non-empty password used to configure rescue ramdisk during node rescue operation. in: body required: false type: string reservation: description: | The ``name`` of an Ironic Conductor host which is holding a lock on this node, if a lock is held. Usually "null", but this field can be useful for debugging. 
in: body required: true type: string resource_class: description: | A string which can be used by external schedulers to identify this Node as a unit of a specific type of resource. For more details, see: https://docs.openstack.org/ironic/latest/install/configure-nova-flavors.html in: body required: true type: string response_driver_type: description: | Type of this driver ("classic" or "dynamic"). in: body required: true type: string retired: description: | Whether the node is retired and can hence no longer be provided, i.e. move from ``manageable`` to ``available``, and will end up in ``manageable`` after cleaning (rather than ``available``). in: body required: false type: boolean retired_reason: description: | The reason the node is marked as retired. in: body required: false type: string secure_boot: description: | Indicates whether node is currently booted with secure_boot turned on. in: body type: boolean standalone_ports_supported: description: | Indicates whether ports that are members of this portgroup can be used as stand-alone ports. in: body required: true type: boolean storage_interface: description: | Interface used for attaching and detaching volumes on this node, e.g. "cinder". in: body required: true type: string supported_boot_devices: description: | List of boot devices which this Node's driver supports. in: body required: true type: array target_power_state: description: | If a power state transition has been requested, this field represents the requested (ie, "target") state, either "power on" or "power off". in: body required: true type: string target_provision_state: description: | If a provisioning action has been requested, this field represents the requested (ie, "target") state. Note that a Node may go through several states during its transition to this target state. 
For instance, when requesting an instance be deployed to an AVAILABLE Node, the Node may go through the following state change progression: AVAILABLE -> DEPLOYING -> DEPLOYWAIT -> DEPLOYING -> ACTIVE in: body required: true type: string target_raid_config: description: | Represents the requested RAID configuration of the node, which will be applied when the Node next transitions through the CLEANING state. Introduced with the cleaning feature. in: body required: true type: JSON updated_at: description: | The UTC date and time when the resource was updated, `ISO 8601 `_ format. May be "null". in: body required: true type: string uuid: description: | The UUID for the resource. in: body required: true type: string # variables returned from node-validate v_bios: description: | Status of the "bios" interface in: body required: true type: object v_boot: description: | Status of the "boot" interface in: body required: true type: object v_console: description: | Status of the "console" interface in: body required: true type: object v_deploy: description: | Status of the "deploy" interface in: body required: true type: object v_inspect: description: | Status of the "inspect" interface in: body required: true type: object v_management: description: | Status of the "management" interface in: body required: true type: object v_network: description: | Status of the "network" interface in: body required: true type: object v_power: description: | Status of the "power" interface in: body required: true type: object v_raid: description: | Status of the "raid" interface in: body required: true type: object v_rescue: description: | Status of the "rescue" interface in: body required: true type: object v_storage: description: | Status of the "storage" interface in: body required: true type: object vendor_interface: description: | Interface for vendor-specific functionality on this node, e.g. "no-vendor". 
in: body required: true type: string version: description: | Versioning of this API response, eg. "1.22". in: body required: true type: string versions: description: | Array of information about currently supported versions. in: body required: true type: array # variables returned from volume-connector volume_connector_connector_id: description: | The identifier of Volume connector. The identifier format depends on the ``type`` of the Volume connector, eg "iqn.2017-05.org.openstack:01:d9a51732c3f" if the ``type`` is "iqn", "192.168.1.2" if the ``type`` is "ip". in: body required: true type: string volume_connector_type: description: | The type of Volume connector such as "iqn", "ip", "wwnn" and "wwpn". in: body required: true type: string volume_connectors: description: | A collection of Volume connector resources. in: body required: true type: array volume_connectors_link: description: | Links to a collection of Volume connector resources. in: body required: true type: array # variables returned from volume-target volume_target_boot_index: description: | The boot index of the Volume target. "0" indicates that this volume is used as a boot volume. in: body required: true type: string volume_target_properties: description: | A set of physical information of the volume such as the identifier (eg. IQN) and LUN number of the volume. This information is used to connect the node to the volume by the storage interface. The contents depend on the volume type. in: body required: true type: object volume_target_volume_id: description: | The identifier of the volume. This ID is used by storage interface to distinguish volumes. in: body required: true type: string volume_target_volume_type: description: | The type of Volume target such as 'iscsi' and 'fibre_channel'. in: body required: true type: string volume_targets: description: | A collection of Volume target resources. 
in: body required: true type: array volume_targets_link: description: | Links to a collection of Volume target resources. in: body required: true type: array ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8786666 ironic-20.1.0/api-ref/source/samples/0000775000175000017500000000000000000000000017367 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/allocation-create-request-2.json0000664000175000017500000000014100000000000025471 0ustar00zuulzuul00000000000000{ "name": "allocation-2", "resource_class": "bm-large", "traits": ["CUSTOM_GOLD"] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/allocation-create-request.json0000664000175000017500000000010100000000000025326 0ustar00zuulzuul00000000000000{ "name": "allocation-1", "resource_class": "bm-large" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/allocation-create-response.json0000664000175000017500000000111100000000000025476 0ustar00zuulzuul00000000000000{ "candidate_nodes": [], "created_at": "2019-02-20T09:43:58+00:00", "extra": {}, "last_error": null, "links": [ { "href": "http://127.0.0.1:6385/v1/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", "rel": "self" }, { "href": "http://127.0.0.1:6385/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", "rel": "bookmark" } ], "name": "allocation-1", "node_uuid": null, "owner": null, "resource_class": "bm-large", "state": "allocating", "traits": [], "updated_at": null, "uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 
ironic-20.1.0/api-ref/source/samples/allocation-show-response.json0000664000175000017500000000117600000000000025226 0ustar00zuulzuul00000000000000{ "candidate_nodes": [], "created_at": "2019-02-20T09:43:58+00:00", "extra": {}, "last_error": null, "links": [ { "href": "http://127.0.0.1:6385/v1/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", "rel": "self" }, { "href": "http://127.0.0.1:6385/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", "rel": "bookmark" } ], "name": "allocation-1", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "owner": null, "resource_class": "bm-large", "state": "active", "traits": [], "updated_at": "2019-02-20T09:43:58+00:00", "uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/allocation-update-request.json0000664000175000017500000000013200000000000025351 0ustar00zuulzuul00000000000000[ { "op": "add", "path": "/extra/foo", "value": "bar" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/allocation-update-response.json0000664000175000017500000000120200000000000025516 0ustar00zuulzuul00000000000000{ "node_uuid": null, "uuid": "241db410-7b04-4b1c-87ae-4e336435db08", "links": [ { "href": "http://10.66.169.122/v1/allocations/241db410-7b04-4b1c-87ae-4e336435db08", "rel": "self" }, { "href": "http://10.66.169.122/allocations/241db410-7b04-4b1c-87ae-4e336435db08", "rel": "bookmark" } ], "extra": { "foo": "bar" }, "last_error": null, "created_at": "2019-06-04T07:46:25+00:00", "owner": null, "resource_class": "CUSTOM_GOLD", "updated_at": "2019-06-06T03:28:19.496960+00:00", "traits": [], "state": "error", "candidate_nodes": [], "name": "test_allocation" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 
ironic-20.1.0/api-ref/source/samples/allocations-list-response.json0000664000175000017500000000311000000000000025372 0ustar00zuulzuul00000000000000{ "allocations": [ { "candidate_nodes": [], "created_at": "2019-02-20T09:43:58+00:00", "extra": {}, "last_error": null, "links": [ { "href": "http://127.0.0.1:6385/v1/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", "rel": "self" }, { "href": "http://127.0.0.1:6385/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", "rel": "bookmark" } ], "name": "allocation-1", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "owner": null, "resource_class": "bm-large", "state": "active", "traits": [], "updated_at": "2019-02-20T09:43:58+00:00", "uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88" }, { "candidate_nodes": [], "created_at": "2019-02-20T09:43:58+00:00", "extra": {}, "last_error": "Failed to process allocation eff80f47-75f0-4d41-b1aa-cf07c201adac: no available nodes match the resource class bm-large.", "links": [ { "href": "http://127.0.0.1:6385/v1/allocations/eff80f47-75f0-4d41-b1aa-cf07c201adac", "rel": "self" }, { "href": "http://127.0.0.1:6385/allocations/eff80f47-75f0-4d41-b1aa-cf07c201adac", "rel": "bookmark" } ], "name": "allocation-2", "node_uuid": null, "owner": null, "resource_class": "bm-large", "state": "error", "traits": [ "CUSTOM_GOLD" ], "updated_at": "2019-02-20T09:43:58+00:00", "uuid": "eff80f47-75f0-4d41-b1aa-cf07c201adac" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/api-root-response.json0000664000175000017500000000115700000000000023654 0ustar00zuulzuul00000000000000{ "default_version": { "id": "v1", "links": [ { "href": "http://127.0.0.1:6385/v1/", "rel": "self" } ], "min_version": "1.1", "status": "CURRENT", "version": "1.37" }, "description": "Ironic is an OpenStack project which enables the provision and management of baremetal machines.", "name": "OpenStack Ironic API", "versions": [ { "id": "v1", 
"links": [ { "href": "http://127.0.0.1:6385/v1/", "rel": "self" } ], "min_version": "1.1", "status": "CURRENT", "version": "1.37" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/api-v1-root-response.json0000664000175000017500000000353700000000000024204 0ustar00zuulzuul00000000000000{ "chassis": [ { "href": "http://127.0.0.1:6385/v1/chassis/", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/", "rel": "bookmark" } ], "drivers": [ { "href": "http://127.0.0.1:6385/v1/drivers/", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/", "rel": "bookmark" } ], "heartbeat": [ { "href": "http://127.0.0.1:6385/v1/heartbeat/", "rel": "self" }, { "href": "http://127.0.0.1:6385/heartbeat/", "rel": "bookmark" } ], "id": "v1", "links": [ { "href": "http://127.0.0.1:6385/v1/", "rel": "self" }, { "href": "https://docs.openstack.org/ironic/latest/contributor/webapi.html", "rel": "describedby", "type": "text/html" } ], "lookup": [ { "href": "http://127.0.0.1:6385/v1/lookup/", "rel": "self" }, { "href": "http://127.0.0.1:6385/lookup/", "rel": "bookmark" } ], "media_types": [ { "base": "application/json", "type": "application/vnd.openstack.ironic.v1+json" } ], "nodes": [ { "href": "http://127.0.0.1:6385/v1/nodes/", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/", "rel": "bookmark" } ], "portgroups": [ { "href": "http://127.0.0.1:6385/v1/portgroups/", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/", "rel": "bookmark" } ], "ports": [ { "href": "http://127.0.0.1:6385/v1/ports/", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/", "rel": "bookmark" } ], "volume": [ { "href": "http://127.0.0.1:6385/v1/volume/", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 
ironic-20.1.0/api-ref/source/samples/chassis-create-request.json0000664000175000017500000000005000000000000024641 0ustar00zuulzuul00000000000000{ "description": "Sample chassis" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/chassis-list-details-response.json0000664000175000017500000000150400000000000026147 0ustar00zuulzuul00000000000000{ "chassis": [ { "created_at": "2016-08-18T22:28:48.643434+11:11", "description": "Sample chassis", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "bookmark" } ], "nodes": [ { "href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1/nodes", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1/nodes", "rel": "bookmark" } ], "updated_at": null, "uuid": "dff29d23-1ded-43b4-8ae1-5eebb3e30de1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/chassis-list-response.json0000664000175000017500000000065100000000000024526 0ustar00zuulzuul00000000000000{ "chassis": [ { "description": "Sample chassis", "links": [ { "href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "bookmark" } ], "uuid": "dff29d23-1ded-43b4-8ae1-5eebb3e30de1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/chassis-show-response.json0000664000175000017500000000130100000000000024524 0ustar00zuulzuul00000000000000{ "created_at": "2016-08-18T22:28:48.643434+11:11", "description": "Sample chassis", "extra": {}, "links": [ { 
"href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "bookmark" } ], "nodes": [ { "href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1/nodes", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1/nodes", "rel": "bookmark" } ], "updated_at": null, "uuid": "dff29d23-1ded-43b4-8ae1-5eebb3e30de1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/chassis-update-request.json0000664000175000017500000000015400000000000024665 0ustar00zuulzuul00000000000000[ { "op": "replace", "path": "/description", "value": "Updated Chassis" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/chassis-update-response.json0000664000175000017500000000134000000000000025031 0ustar00zuulzuul00000000000000{ "created_at": "2016-08-18T22:28:48.643434+11:11", "description": "Updated Chassis", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "bookmark" } ], "nodes": [ { "href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1/nodes", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1/nodes", "rel": "bookmark" } ], "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "dff29d23-1ded-43b4-8ae1-5eebb3e30de1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/conductor-list-details-response.json0000664000175000017500000000204400000000000026512 0ustar00zuulzuul00000000000000{ "conductors": [ { 
"links": [ { "href": "http://127.0.0.1:6385/v1/conductors/compute1.localdomain", "rel": "self" }, { "href": "http://127.0.0.1:6385/conductors/compute1.localdomain", "rel": "bookmark" } ], "created_at": "2018-08-07T08:39:21+00:00", "hostname": "compute1.localdomain", "conductor_group": "", "updated_at": "2018-11-30T07:07:23+00:00", "alive": false, "drivers": [ "ipmi" ] }, { "links": [ { "href": "http://127.0.0.1:6385/v1/conductors/compute2.localdomain", "rel": "self" }, { "href": "http://127.0.0.1:6385/conductors/compute2.localdomain", "rel": "bookmark" } ], "created_at": "2018-12-05T07:03:19+00:00", "hostname": "compute2.localdomain", "conductor_group": "", "updated_at": "2018-12-05T07:03:21+00:00", "alive": true, "drivers": [ "ipmi" ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/conductor-list-response.json0000664000175000017500000000141100000000000025064 0ustar00zuulzuul00000000000000{ "conductors": [ { "hostname": "compute1.localdomain", "conductor_group": "", "links": [ { "href": "http://127.0.0.1:6385/v1/conductors/compute1.localdomain", "rel": "self" }, { "href": "http://127.0.0.1:6385/conductors/compute1.localdomain", "rel": "bookmark" } ], "alive": false }, { "hostname": "compute2.localdomain", "conductor_group": "", "links": [ { "href": "http://127.0.0.1:6385/v1/conductors/compute2.localdomain", "rel": "self" }, { "href": "http://127.0.0.1:6385/conductors/compute2.localdomain", "rel": "bookmark" } ], "alive": true } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/conductor-show-response.json0000664000175000017500000000066400000000000025102 0ustar00zuulzuul00000000000000{ "links": [ { "href": "http://127.0.0.1:6385/v1/conductors/compute2.localdomain", "rel": "self" }, { "href": "http://127.0.0.1:6385/conductors/compute2.localdomain", "rel": "bookmark" } 
], "created_at": "2018-12-05T07:03:19+00:00", "hostname": "compute2.localdomain", "conductor_group": "", "updated_at": "2018-12-05T07:03:21+00:00", "alive": true, "drivers": [ "ipmi" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/deploy-template-create-request.json0000664000175000017500000000065100000000000026320 0ustar00zuulzuul00000000000000{ "extra": {}, "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "priority": 150 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/deploy-template-create-response.json0000664000175000017500000000132700000000000026467 0ustar00zuulzuul00000000000000{ "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "self" }, { "href": "http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "bookmark" } ], "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "interface": "bios", "priority": 150, "step": "apply_configuration" } ], "updated_at": null, "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/deploy-template-detail-response.json0000664000175000017500000000156700000000000026474 0ustar00zuulzuul00000000000000{ "deploy_templates": [ { "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "self" }, { "href": 
"http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "bookmark" } ], "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "interface": "bios", "priority": 150, "step": "apply_configuration" } ], "updated_at": null, "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/deploy-template-list-response.json0000664000175000017500000000071700000000000026201 0ustar00zuulzuul00000000000000{ "deploy_templates": [ { "links": [ { "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "self" }, { "href": "http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "bookmark" } ], "name": "CUSTOM_HYPERTHREADING_ON", "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/deploy-template-show-response.json0000664000175000017500000000132700000000000026204 0ustar00zuulzuul00000000000000{ "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "self" }, { "href": "http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "bookmark" } ], "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "interface": "bios", "priority": 150, "step": "apply_configuration" } ], "updated_at": null, "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 
ironic-20.1.0/api-ref/source/samples/deploy-template-update-request.json0000664000175000017500000000013500000000000026334 0ustar00zuulzuul00000000000000[ { "path" : "/name", "value" : "CUSTOM_HT_ON", "op" : "replace" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/deploy-template-update-response.json0000664000175000017500000000135100000000000026503 0ustar00zuulzuul00000000000000{ "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "self" }, { "href": "http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "bookmark" } ], "name": "CUSTOM_HT_ON", "steps": [ { "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "interface": "bios", "priority": 150, "step": "apply_configuration" } ], "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/driver-get-response.json0000664000175000017500000000316600000000000024174 0ustar00zuulzuul00000000000000{ "default_bios_interface": "no-bios", "default_boot_interface": "pxe", "default_console_interface": "no-console", "default_deploy_interface": "direct", "default_inspect_interface": "no-inspect", "default_management_interface": "ipmitool", "default_network_interface": "flat", "default_power_interface": "ipmitool", "default_raid_interface": "no-raid", "default_rescue_interface": "no-rescue", "default_storage_interface": "noop", "default_vendor_interface": "no-vendor", "enabled_bios_interfaces": [ "no-bios" ], "enabled_boot_interfaces": [ "pxe" ], "enabled_console_interfaces": [ "no-console" ], "enabled_deploy_interfaces": [ "ansible", "direct" ], 
"enabled_inspect_interfaces": [ "no-inspect" ], "enabled_management_interfaces": [ "ipmitool" ], "enabled_network_interfaces": [ "flat", "noop" ], "enabled_power_interfaces": [ "ipmitool" ], "enabled_raid_interfaces": [ "no-raid", "agent" ], "enabled_rescue_interfaces": [ "no-rescue" ], "enabled_storage_interfaces": [ "noop" ], "enabled_vendor_interfaces": [ "no-vendor" ], "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/ipmi", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/ipmi", "rel": "bookmark" } ], "name": "ipmi", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/ipmi/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/ipmi/properties", "rel": "bookmark" } ], "type": "dynamic" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/driver-logical-disk-properties-response.json0000664000175000017500000000307100000000000030144 0ustar00zuulzuul00000000000000{ "controller": "Controller to use for this logical disk. If not specified, the driver will choose a suitable RAID controller on the bare metal node. Optional.", "disk_type": "The type of disk preferred. Valid values are 'hdd' and 'ssd'. If this is not specified, disk type will not be a selection criterion for choosing backing physical disks. Optional.", "interface_type": "The interface type of disk. Valid values are 'sata', 'scsi' and 'sas'. If this is not specified, interface type will not be a selection criterion for choosing backing physical disks. Optional.", "is_root_volume": "Specifies whether this disk is a root volume. By default, this is False. Optional.", "number_of_physical_disks": "Number of physical disks to use for this logical disk. By default, the driver uses the minimum number of disks required for that RAID level. Optional.", "physical_disks": "The physical disks to use for this logical disk. 
If not specified, the driver will choose suitable physical disks to use. Optional.", "raid_level": "RAID level for the logical disk. Valid values are 'JBOD', '0', '1', '2', '5', '6', '1+0', '5+0' and '6+0'. Required.", "share_physical_disks": "Specifies whether other logical disks can share physical disks with this logical disk. By default, this is False. Optional.", "size_gb": "Size in GiB (Integer) for the logical disk. Use 'MAX' as size_gb if this logical disk is supposed to use the rest of the space available. Required.", "volume_name": "Name of the volume to be created. If this is not specified, it will be auto-generated. Optional." } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/driver-property-response.json0000664000175000017500000000513200000000000025274 0ustar00zuulzuul00000000000000{ "deploy_forces_oob_reboot": "Whether Ironic should force a reboot of the Node via the out-of-band channel after deployment is complete. Provides compatibility with older deploy ramdisks. Defaults to False. Optional.", "deploy_kernel": "UUID (from Glance) of the deployment kernel. Required.", "deploy_ramdisk": "UUID (from Glance) of the ramdisk that is mounted at boot time. Required.", "image_http_proxy": "URL of a proxy server for HTTP connections. Optional.", "image_https_proxy": "URL of a proxy server for HTTPS connections. Optional.", "image_no_proxy": "A comma-separated list of host names, IP addresses and domain names (with optional :port) that will be excluded from proxying. To denote a domain name, use a dot to prefix the domain name. This value will be ignored if ``image_http_proxy`` and ``image_https_proxy`` are not specified. Optional.", "ipmi_address": "IP address or hostname of the node. Required.", "ipmi_bridging": "bridging_type; default is \"no\". One of \"single\", \"dual\", \"no\". 
Optional.", "ipmi_disable_boot_timeout": "By default ironic will send a raw IPMI command to disable the 60 second timeout for booting. Setting this option to False will NOT send that command; default value is True. Optional.", "ipmi_force_boot_device": "Whether Ironic should specify the boot device to the BMC each time the server is turned on, eg. because the BMC is not capable of remembering the selected boot device across power cycles; default value is False. Optional.", "ipmi_local_address": "local IPMB address for bridged requests. Used only if ipmi_bridging is set to \"single\" or \"dual\". Optional.", "ipmi_password": "password. Optional.", "ipmi_port": "remote IPMI RMCP port. Optional.", "ipmi_priv_level": "privilege level; default is ADMINISTRATOR. One of ADMINISTRATOR, CALLBACK, OPERATOR, USER. Optional.", "ipmi_protocol_version": "the version of the IPMI protocol; default is \"2.0\". One of \"1.5\", \"2.0\". Optional.", "ipmi_target_address": "destination address for bridged request. Required only if ipmi_bridging is set to \"single\" or \"dual\".", "ipmi_target_channel": "destination channel for bridged request. Required only if ipmi_bridging is set to \"single\" or \"dual\".", "ipmi_terminal_port": "node's UDP port to connect to. Only required for console access.", "ipmi_transit_address": "transit address for bridged request. Required only if ipmi_bridging is set to \"dual\".", "ipmi_transit_channel": "transit channel for bridged request. Required only if ipmi_bridging is set to \"dual\".", "ipmi_username": "username; default is NULL user. Optional." 
} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/drivers-list-detail-response.json0000664000175000017500000001211100000000000026001 0ustar00zuulzuul00000000000000{ "drivers": [ { "default_bios_interface": null, "default_boot_interface": null, "default_console_interface": null, "default_deploy_interface": null, "default_inspect_interface": null, "default_management_interface": null, "default_network_interface": null, "default_power_interface": null, "default_raid_interface": null, "default_rescue_interface": null, "default_storage_interface": null, "default_vendor_interface": null, "enabled_bios_interfaces": null, "enabled_boot_interfaces": null, "enabled_console_interfaces": null, "enabled_deploy_interfaces": null, "enabled_inspect_interfaces": null, "enabled_management_interfaces": null, "enabled_network_interfaces": null, "enabled_power_interfaces": null, "enabled_raid_interfaces": null, "enabled_rescue_interfaces": null, "enabled_storage_interfaces": null, "enabled_vendor_interfaces": null, "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/agent_ipmitool", "rel": "bookmark" } ], "name": "agent_ipmitool", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/agent_ipmitool/properties", "rel": "bookmark" } ], "type": "classic" }, { "default_bios_interface": null, "default_boot_interface": null, "default_console_interface": null, "default_deploy_interface": null, "default_inspect_interface": null, "default_management_interface": null, "default_network_interface": null, "default_power_interface": null, "default_raid_interface": null, "default_rescue_interface": null, "default_storage_interface": null, "default_vendor_interface": null, "enabled_bios_interfaces": null, 
"enabled_boot_interfaces": null, "enabled_console_interfaces": null, "enabled_deploy_interfaces": null, "enabled_inspect_interfaces": null, "enabled_management_interfaces": null, "enabled_network_interfaces": null, "enabled_power_interfaces": null, "enabled_raid_interfaces": null, "enabled_rescue_interfaces": null, "enabled_storage_interfaces": null, "enabled_vendor_interfaces": null, "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/fake", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/fake", "rel": "bookmark" } ], "name": "fake", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/fake/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/fake/properties", "rel": "bookmark" } ], "type": "classic" }, { "default_bios_interface": "no-bios", "default_boot_interface": "pxe", "default_console_interface": "no-console", "default_deploy_interface": "direct", "default_inspect_interface": "no-inspect", "default_management_interface": "ipmitool", "default_network_interface": "flat", "default_power_interface": "ipmitool", "default_raid_interface": "no-raid", "default_rescue_interface": "no-rescue", "default_storage_interface": "noop", "default_vendor_interface": "no-vendor", "enabled_bios_interfaces": [ "no-bios" ], "enabled_boot_interfaces": [ "pxe" ], "enabled_console_interfaces": [ "no-console" ], "enabled_deploy_interfaces": [ "ansible", "direct" ], "enabled_inspect_interfaces": [ "no-inspect" ], "enabled_management_interfaces": [ "ipmitool" ], "enabled_network_interfaces": [ "flat", "noop" ], "enabled_power_interfaces": [ "ipmitool" ], "enabled_raid_interfaces": [ "no-raid", "agent" ], "enabled_rescue_interfaces": [ "no-rescue" ], "enabled_storage_interfaces": [ "noop" ], "enabled_vendor_interfaces": [ "no-vendor" ], "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/ipmi", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/ipmi", "rel": "bookmark" } ], "name": 
"ipmi", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/ipmi/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/ipmi/properties", "rel": "bookmark" } ], "type": "dynamic" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/drivers-list-response.json0000664000175000017500000000353100000000000024547 0ustar00zuulzuul00000000000000{ "drivers": [ { "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/agent_ipmitool", "rel": "bookmark" } ], "name": "agent_ipmitool", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/agent_ipmitool/properties", "rel": "bookmark" } ], "type": "classic" }, { "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/fake", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/fake", "rel": "bookmark" } ], "name": "fake", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/fake/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/fake/properties", "rel": "bookmark" } ], "type": "classic" }, { "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/ipmi", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/ipmi", "rel": "bookmark" } ], "name": "ipmi", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/ipmi/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/ipmi/properties", "rel": "bookmark" } ], "type": "dynamic" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/lookup-node-response.json0000664000175000017500000000137500000000000024360 0ustar00zuulzuul00000000000000{ "config": { "heartbeat_timeout": 
300, "metrics": { "backend": "noop", "global_prefix": null, "prepend_host": false, "prepend_host_reverse": true, "prepend_uuid": false }, "metrics_statsd": { "statsd_host": "localhost", "statsd_port": 8125 } }, "node": { "driver_internal_info": { "clean_steps": null }, "instance_info": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "bookmark" } ], "properties": {}, "uuid": "6d85703a-565d-469a-96ce-30b6de53079d" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-bios-detail-response.json0000664000175000017500000000137400000000000025242 0ustar00zuulzuul00000000000000{ "virtualization": { "created_at": "2016-08-18T22:28:49.653974+00:00", "updated_at": "2016-08-18T22:28:49.653974+00:00", "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/bios/virtualization", "rel": "self" }, { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/bios/virtualization", "rel": "bookmark" } ], "name": "Virtualization", "value": "Enabled", "attribute_type": "Enumeration", "allowable_values": ["Enabled", "Disabled"], "lower_bound": null, "max_length": null, "min_length": null, "read_only": false, "reset_required": null, "unique": null, "upper_bound": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-bios-list-details-response.json0000664000175000017500000000145500000000000026376 0ustar00zuulzuul00000000000000{ "bios": [ { "created_at": "2016-08-18T22:28:49.653974+00:00", "updated_at": "2016-08-18T22:28:49.653974+00:00", "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/bios/virtualization", "rel": "self" }, { "href": 
"http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/bios/virtualization", "rel": "bookmark" } ], "name": "Virtualization", "value": "Enabled", "attribute_type": "Enumeration", "allowable_values": ["Enabled", "Disabled"], "lower_bound": None, "max_length": None, "min_length": None, "read_only": false, "reset_required": None, "unique": None, "upper_bound": None } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-bios-list-response.json0000664000175000017500000000103200000000000024742 0ustar00zuulzuul00000000000000{ "bios": [ { "created_at": "2016-08-18T22:28:49.653974+00:00", "updated_at": "2016-08-18T22:28:49.653974+00:00", "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/bios/virtualization", "rel": "self" }, { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/bios/virtualization", "rel": "bookmark" } ], "name": "Virtualization", "value": "Enabled" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-create-request-classic.json0000664000175000017500000000031000000000000025547 0ustar00zuulzuul00000000000000{ "name": "test_node_classic", "driver": "agent_ipmitool", "driver_info": { "ipmi_username": "ADMIN", "ipmi_password": "password" }, "resource_class": "bm-large" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-create-request-dynamic.json0000664000175000017500000000034100000000000025556 0ustar00zuulzuul00000000000000{ "name": "test_node_dynamic", "driver": "ipmi", "driver_info": { "ipmi_username": "ADMIN", "ipmi_password": "password" }, "power_interface": "ipmitool", "resource_class": "bm-large" } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-create-response.json0000664000175000017500000000533400000000000024311 0ustar00zuulzuul00000000000000{ "allocation_uuid": null, "boot_interface": null, "chassis_uuid": null, "clean_step": {}, "conductor_group": "group-1", "console_enabled": false, "console_interface": null, "created_at": "2016-08-18T22:28:48.643434+11:11", "deploy_interface": null, "deploy_step": {}, "description": null, "driver": "agent_ipmitool", "driver_info": { "ipmi_password": "******", "ipmi_username": "ADMIN" }, "driver_internal_info": {}, "extra": {}, "inspect_interface": null, "inspection_finished_at": null, "inspection_started_at": null, "instance_info": {}, "instance_uuid": null, "last_error": null, "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "bookmark" } ], "maintenance": false, "maintenance_reason": null, "management_interface": null, "name": "test_node_classic", "network_data": {}, "network_interface": "flat", "owner": null, "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "bookmark" } ], "ports": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "bookmark" } ], "power_interface": null, "power_state": null, "properties": {}, "protected": false, "protected_reason": null, "provision_state": "enroll", "provision_updated_at": null, "raid_config": {}, "raid_interface": null, "rescue_interface": null, "reservation": null, "resource_class": "bm-large", "retired": false, "retired_reason": null, "states": [ { "href": 
"http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "bookmark" } ], "storage_interface": "noop", "target_power_state": null, "target_provision_state": null, "target_raid_config": {}, "traits": [], "updated_at": null, "uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "vendor_interface": null, "volume": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-get-boot-device-response.json0000664000175000017500000000006200000000000026014 0ustar00zuulzuul00000000000000{ "boot_device": "pxe", "persistent": false } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-get-state-response.json0000664000175000017500000000052200000000000024735 0ustar00zuulzuul00000000000000{ "boot_mode": "uefi", "console_enabled": false, "last_error": null, "power_state": "power off", "provision_state": "available", "provision_updated_at": "2016-08-18T22:28:49.946416+00:00", "raid_config": {}, "secure_boot": true, "target_power_state": null, "target_provision_state": null, "target_raid_config": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-get-supported-boot-devices-response.json0000664000175000017500000000006000000000000030220 0ustar00zuulzuul00000000000000{ "supported_boot_devices": [ "pxe" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 
ironic-20.1.0/api-ref/source/samples/node-history-list-response.json0000664000175000017500000000062700000000000025510 0ustar00zuulzuul00000000000000{ "history": [ { "uuid": "e5840e39-b4ba-4a93-8071-cff9aa2c9633", "created_at": "2021-09-15T17:45:04.686541+00:00", "severity": "ERROR", "event": "Something is wrong", "links": [ { "href": "http://localhost/v1/nodes/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/history/e5840e39-b4ba-4a93-8071-cff9aa2c9633", "rel": "self" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-indicators-component-list-response.json0000664000175000017500000000235000000000000030151 0ustar00zuulzuul00000000000000{ "indicators": [ { "name": "power", "readonly": true, "states": [ "OFF", "ON" ], "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/Compute0/ management/indicators/system/power", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/Compute0/ management/indicators/system/power", "rel": "bookmark" } ] }, { "name": "alert", "readonly": false, "states": [ "OFF", "BLINKING", "UNKNOWN" ], "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/Compute0/ management/indicators/system/alert", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/Compute0/ management/indicators/system/alert", "rel": "bookmark" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-indicators-get-state-response.json0000664000175000017500000000002400000000000027067 0ustar00zuulzuul00000000000000{ "state": "ON" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-indicators-list-response.json0000664000175000017500000000172000000000000026151 0ustar00zuulzuul00000000000000{ "components": [ { "name": "system", "links": [ { "href": 
"http://127.0.0.1:6385/v1/nodes/Compute0/ management/indicators/system", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/Compute0/ management/indicators/system", "rel": "bookmark" } ] }, { "name": "chassis", "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/Compute0/ management/indicators/chassis", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/Compute0/ management/indicators/chassis", "rel": "bookmark" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-indicators-set-state.json0000664000175000017500000000003200000000000025246 0ustar00zuulzuul00000000000000{ "state": "BLINKING" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-inject-nmi.json0000664000175000017500000000000300000000000023233 0ustar00zuulzuul00000000000000{} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-maintenance-request.json0000664000175000017500000000005400000000000025154 0ustar00zuulzuul00000000000000{ "reason": "Replacing the hard drive" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-port-detail-response.json0000664000175000017500000000165600000000000025275 0ustar00zuulzuul00000000000000{ "ports": [ { "address": "22:22:22:22:22:22", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "local_link_connection": { "port_id": "Ethernet3/1", "switch_id": "0a:1b:2c:3d:4e:5f", "switch_info": "switch1" }, 
"node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "physical_network": "physnet1", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "pxe_enabled": true, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-port-list-response.json0000664000175000017500000000064200000000000025000 0ustar00zuulzuul00000000000000{ "ports": [ { "address": "22:22:22:22:22:22", "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-portgroup-detail-response.json0000664000175000017500000000210700000000000026342 0ustar00zuulzuul00000000000000{ "portgroups": [ { "address": "22:22:22:22:22:22", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "bookmark" } ], "mode": "active-backup", "name": "test_portgroup", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "ports": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "bookmark" } ], "properties": {}, "standalone_ports_supported": true, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a" } ] } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-portgroup-list-response.json0000664000175000017500000000072100000000000026053 0ustar00zuulzuul00000000000000{ "portgroups": [ { "address": "22:22:22:22:22:22", "links": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "bookmark" } ], "name": "test_portgroup", "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-active-state-deploy-steps.json0000664000175000017500000000037000000000000027015 0ustar00zuulzuul00000000000000{ "target": "active", "deploy_steps": [ { "interface": "deploy", "step": "upgrade_firmware", "args": { "force": "True" }, "priority": 95 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-active-state.json0000664000175000017500000000014600000000000024370 0ustar00zuulzuul00000000000000{ "target": "active", "configdrive": "http://127.0.0.1/images/test-node-config-drive.iso.gz" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-available-state.json0000664000175000017500000000003400000000000025031 0ustar00zuulzuul00000000000000{ "target": "provide" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-boot-device.json0000664000175000017500000000006600000000000024200 0ustar00zuulzuul00000000000000{ "boot_device": "pxe", "persistent": false } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-boot-mode-bios.json0000664000175000017500000000003100000000000024607 0ustar00zuulzuul00000000000000{ "target": "bios" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-boot-mode-uefi.json0000664000175000017500000000003100000000000024603 0ustar00zuulzuul00000000000000{ "target": "uefi" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-clean-state.json0000664000175000017500000000033100000000000024173 0ustar00zuulzuul00000000000000{ "target": "clean", "clean_steps": [ { "interface": "deploy", "step": "upgrade_firmware", "args": { "force": "True" } } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-manage-state.json0000664000175000017500000000003300000000000024340 0ustar00zuulzuul00000000000000{ "target": "manage" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-power-off.json0000664000175000017500000000003500000000000023700 0ustar00zuulzuul00000000000000{ "target": "power off" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-raid-request.json0000664000175000017500000000021000000000000024374 0ustar00zuulzuul00000000000000{ "logical_disks" : [ { "size_gb" : 100, "is_root_volume" : true, "raid_level" : "1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-secure-boot-off.json0000664000175000017500000000003000000000000024766 
0ustar00zuulzuul00000000000000{ "target": false } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-secure-boot-on.json0000664000175000017500000000002700000000000024636 0ustar00zuulzuul00000000000000{ "target": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-soft-power-off.json0000664000175000017500000000006700000000000024656 0ustar00zuulzuul00000000000000{ "target": "soft power off", "timeout": 300 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-set-traits-request.json0000664000175000017500000000010000000000000024761 0ustar00zuulzuul00000000000000{ "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-show-response.json0000664000175000017500000000553200000000000024026 0ustar00zuulzuul00000000000000{ "allocation_uuid": null, "boot_interface": null, "chassis_uuid": null, "clean_step": {}, "conductor": "compute1.localdomain", "conductor_group": "group-1", "console_enabled": false, "console_interface": null, "created_at": "2016-08-18T22:28:48.643434+11:11", "deploy_interface": null, "deploy_step": {}, "description": null, "driver": "fake", "driver_info": { "ipmi_password": "******", "ipmi_username": "ADMIN" }, "driver_internal_info": { "clean_steps": null }, "extra": {}, "inspect_interface": null, "inspection_finished_at": null, "inspection_started_at": null, "instance_info": {}, "instance_uuid": null, "last_error": null, "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "self" }, { "href": 
"http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "bookmark" } ], "maintenance": false, "maintenance_reason": null, "management_interface": null, "name": "test_node_classic", "network_data": {}, "network_interface": "flat", "owner": null, "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "bookmark" } ], "ports": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "bookmark" } ], "power_interface": null, "power_state": "power off", "properties": {}, "protected": false, "protected_reason": null, "provision_state": "available", "provision_updated_at": "2016-08-18T22:28:49.946416+00:00", "raid_config": {}, "raid_interface": null, "rescue_interface": null, "reservation": null, "resource_class": "bm-large", "retired": false, "retired_reason": null, "states": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "bookmark" } ], "storage_interface": "noop", "target_power_state": null, "target_provision_state": null, "target_raid_config": {}, "traits": [], "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "vendor_interface": null, "volume": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 
ironic-20.1.0/api-ref/source/samples/node-traits-list-response.json0000664000175000017500000000010000000000000025307 0ustar00zuulzuul00000000000000{ "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-update-driver-info-request.json0000664000175000017500000000054400000000000026402 0ustar00zuulzuul00000000000000[ { "op": "replace", "path": "/driver_info/ipmi_username", "value": "OPERATOR" }, { "op": "add", "path": "/driver_info/deploy_kernel", "value": "http://127.0.0.1/images/kernel" }, { "op": "add", "path": "/driver_info/deploy_ramdisk", "value": "http://127.0.0.1/images/ramdisk" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-update-driver-info-response.json0000664000175000017500000000576200000000000026557 0ustar00zuulzuul00000000000000{ "allocation_uuid": null, "boot_interface": null, "chassis_uuid": null, "clean_step": {}, "conductor": "compute1.localdomain", "conductor_group": "group-1", "console_enabled": false, "console_interface": null, "created_at": "2016-08-18T22:28:48.643434+11:11", "deploy_interface": null, "deploy_step": {}, "driver": "fake", "driver_info": { "deploy_kernel": "http://127.0.0.1/images/kernel", "deploy_ramdisk": "http://127.0.0.1/images/ramdisk", "ipmi_password": "******", "ipmi_username": "OPERATOR" }, "driver_internal_info": { "clean_steps": null }, "extra": {}, "inspect_interface": null, "inspection_finished_at": null, "inspection_started_at": null, "instance_info": {}, "instance_uuid": null, "last_error": null, "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "bookmark" } ], "maintenance": true, "maintenance_reason": 
"Replacing the hard drive", "management_interface": null, "name": "test_node_classic", "network_data": {}, "network_interface": "flat", "owner": null, "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "bookmark" } ], "ports": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "bookmark" } ], "power_interface": null, "power_state": "power off", "properties": {}, "protected": false, "protected_reason": null, "provision_state": "available", "provision_updated_at": "2016-08-18T22:28:49.946416+00:00", "raid_config": {}, "raid_interface": null, "rescue_interface": null, "reservation": null, "resource_class": null, "retired": false, "retired_reason": null, "states": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "bookmark" } ], "storage_interface": "noop", "target_power_state": null, "target_provision_state": null, "target_raid_config": {}, "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX" ], "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "vendor_interface": null, "volume": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-update-driver.json0000664000175000017500000000012700000000000023760 0ustar00zuulzuul00000000000000[ { "op" : "replace", "path" : 
"/driver", "value" : "fake" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-validate-response.json0000664000175000017500000000064100000000000024633 0ustar00zuulzuul00000000000000{ "boot": { "result": true }, "console": { "result": true }, "deploy": { "result": true }, "inspect": { "result": true }, "management": { "result": true }, "network": { "result": true }, "power": { "result": true }, "raid": { "result": true }, "rescue": { "reason": "not supported", "result": null }, "storage": { "result": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-vendor-passthru-response.json0000664000175000017500000000050600000000000026206 0ustar00zuulzuul00000000000000{ "bmc_reset": { "async": true, "attach": false, "description": "", "http_methods": [ "POST" ], "require_exclusive_lock": true }, "send_raw": { "async": true, "attach": false, "description": "", "http_methods": [ "POST" ], "require_exclusive_lock": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-vif-attach-request.json0000664000175000017500000000006500000000000024722 0ustar00zuulzuul00000000000000{ "id": "1974dcfa-836f-41b2-b541-686c100900e5" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-vif-list-response.json0000664000175000017500000000012300000000000024572 0ustar00zuulzuul00000000000000{ "vifs": [ { "id": "1974dcfa-836f-41b2-b541-686c100900e5" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-volume-connector-detail-response.json0000664000175000017500000000125600000000000027604 
0ustar00zuulzuul00000000000000{ "connectors": [ { "connector_id": "iqn.2017-07.org.openstack:02:10190a4153e", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-volume-connector-list-response.json0000664000175000017500000000105300000000000027310 0ustar00zuulzuul00000000000000{ "connectors": [ { "connector_id": "iqn.2017-07.org.openstack:02:10190a4153e", "links": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-volume-list-response.json0000664000175000017500000000152600000000000025325 0ustar00zuulzuul00000000000000{ "connectors": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume/connectors", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume/connectors", "rel": "bookmark" } ], "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume/", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume/", "rel": 
"bookmark" } ], "targets": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume/targets", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume/targets", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-volume-target-detail-response.json0000664000175000017500000000132600000000000027076 0ustar00zuulzuul00000000000000{ "targets": [ { "boot_index": 0, "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "properties": {}, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "volume_id": "7211f7d3-3f32-4efc-b64e-9b8e92e64a8e", "volume_type": "iscsi" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/node-volume-target-list-response.json0000664000175000017500000000107300000000000026606 0ustar00zuulzuul00000000000000{ "targets": [ { "boot_index": 0, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "volume_id": "7211f7d3-3f32-4efc-b64e-9b8e92e64a8e", "volume_type": "iscsi" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 
ironic-20.1.0/api-ref/source/samples/nodes-list-details-response.json0000664000175000017500000001507300000000000025630 0ustar00zuulzuul00000000000000{ "nodes": [ { "allocation_uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88", "boot_interface": null, "chassis_uuid": null, "clean_step": {}, "conductor": "compute1.localdomain", "conductor_group": "group-1", "console_enabled": false, "console_interface": null, "created_at": "2016-08-18T22:28:48.643434+11:11", "deploy_interface": null, "deploy_step": {}, "description": null, "driver": "fake", "driver_info": { "ipmi_password": "******", "ipmi_username": "ADMIN" }, "driver_internal_info": { "clean_steps": null }, "extra": {}, "inspect_interface": null, "inspection_finished_at": null, "inspection_started_at": null, "instance_info": {}, "instance_uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88", "last_error": null, "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "bookmark" } ], "maintenance": false, "maintenance_reason": null, "management_interface": null, "name": "test_node_classic", "network_data": {}, "network_interface": "flat", "owner": "john doe", "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "bookmark" } ], "ports": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "bookmark" } ], "power_interface": null, "power_state": "power off", "properties": {}, "protected": false, "protected_reason": null, "provision_state": "available", "provision_updated_at": "2016-08-18T22:28:49.946416+00:00", "raid_config": {}, "raid_interface": null, "rescue_interface": 
null, "reservation": null, "resource_class": null, "retired": false, "retired_reason": null, "states": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "bookmark" } ], "storage_interface": "noop", "target_power_state": null, "target_provision_state": null, "target_raid_config": {}, "traits": [], "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "vendor_interface": null, "volume": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "bookmark" } ] }, { "allocation_uuid": null, "boot_interface": "pxe", "chassis_uuid": null, "clean_step": {}, "conductor": "compute1.localdomain", "conductor_group": "", "console_enabled": false, "console_interface": "no-console", "created_at": "2016-08-18T22:28:48.643434+11:11", "deploy_interface": "direct", "deploy_step": {}, "driver": "ipmi", "driver_info": { "ipmi_password": "******", "ipmi_username": "ADMIN" }, "driver_internal_info": {}, "extra": {}, "inspect_interface": "no-inspect", "inspection_finished_at": null, "inspection_started_at": null, "instance_info": {}, "instance_uuid": null, "last_error": null, "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/2b045129-a906-46af-bc1a-092b294b3428", "rel": "bookmark" } ], "maintenance": false, "maintenance_reason": null, "management_interface": "ipmitool", "name": "test_node_dynamic", "network_data": {}, "network_interface": "flat", "owner": "43e61ec9-8e42-4dcb-bc45-30d66aa93e5b", "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428/portgroups", "rel": "self" }, { "href": 
"http://127.0.0.1:6385/nodes/2b045129-a906-46af-bc1a-092b294b3428/portgroups", "rel": "bookmark" } ], "ports": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/2b045129-a906-46af-bc1a-092b294b3428/ports", "rel": "bookmark" } ], "power_interface": "ipmitool", "power_state": null, "properties": {}, "protected": false, "protected_reason": null, "provision_state": "enroll", "provision_updated_at": null, "raid_config": {}, "raid_interface": "no-raid", "rescue_interface": "no-rescue", "reservation": null, "resource_class": null, "retired": false, "retired_reason": null, "states": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428/states", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/2b045129-a906-46af-bc1a-092b294b3428/states", "rel": "bookmark" } ], "storage_interface": "noop", "target_power_state": null, "target_provision_state": null, "target_raid_config": {}, "traits": [], "updated_at": null, "uuid": "2b045129-a906-46af-bc1a-092b294b3428", "vendor_interface": "no-vendor", "volume": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428/volume", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/2b045129-a906-46af-bc1a-092b294b3428/volume", "rel": "bookmark" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/nodes-list-response.json0000664000175000017500000000204200000000000024175 0ustar00zuulzuul00000000000000{ "nodes": [ { "instance_uuid": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "bookmark" } ], "maintenance": false, "name": "test_node_classic", "power_state": "power off", "provision_state": "available", "uuid": 
"6d85703a-565d-469a-96ce-30b6de53079d" }, { "instance_uuid": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/2b045129-a906-46af-bc1a-092b294b3428", "rel": "bookmark" } ], "maintenance": false, "name": "test_node_dynamic", "power_state": null, "provision_state": "enroll", "uuid": "2b045129-a906-46af-bc1a-092b294b3428" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/port-create-request.json0000664000175000017500000000055600000000000024203 0ustar00zuulzuul00000000000000{ "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "address": "11:11:11:11:11:11", "is_smartnic": true, "local_link_connection": { "switch_id": "0a:1b:2c:3d:4e:5f", "port_id": "Ethernet3/1", "switch_info": "switch1" }, "physical_network": "physnet1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/port-create-response.json0000664000175000017500000000141300000000000024342 0ustar00zuulzuul00000000000000{ "address": "11:11:11:11:11:11", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "local_link_connection": { "port_id": "Ethernet3/1", "switch_id": "0a:1b:2c:3d:4e:5f", "switch_info": "switch1" }, "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "physical_network": "physnet1", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "pxe_enabled": true, "updated_at": null, "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/port-list-detail-response.json0000664000175000017500000000162000000000000025312 0ustar00zuulzuul00000000000000{ "ports": [ { "address": "11:11:11:11:11:11", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "local_link_connection": { "port_id": "Ethernet3/1", "switch_id": "0a:1b:2c:3d:4e:5f", "switch_info": "switch1" }, "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "physical_network": "physnet1", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "pxe_enabled": true, "updated_at": null, "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/port-list-response.json0000664000175000017500000000064200000000000024055 0ustar00zuulzuul00000000000000{ "ports": [ { "address": "11:11:11:11:11:11", "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/port-update-request.json0000664000175000017500000000014500000000000024214 0ustar00zuulzuul00000000000000[ { "path" : "/address", "value" : "22:22:22:22:22:22", "op" : "replace" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 
ironic-20.1.0/api-ref/source/samples/port-update-response.json0000664000175000017500000000145100000000000024363 0ustar00zuulzuul00000000000000{ "address": "22:22:22:22:22:22", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "local_link_connection": { "port_id": "Ethernet3/1", "switch_id": "0a:1b:2c:3d:4e:5f", "switch_info": "switch1" }, "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "physical_network": "physnet1", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "pxe_enabled": true, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/portgroup-create-request.json0000664000175000017500000000017600000000000025256 0ustar00zuulzuul00000000000000{ "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "address": "11:11:11:11:11:11", "name": "test_portgroup" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/portgroup-create-response.json0000664000175000017500000000161300000000000025421 0ustar00zuulzuul00000000000000{ "address": "11:11:11:11:11:11", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "bookmark" } ], "mode": "active-backup", "name": "test_portgroup", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "ports": [ { "href": 
"http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "bookmark" } ], "properties": {}, "standalone_ports_supported": true, "updated_at": null, "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/portgroup-list-detail-response.json0000664000175000017500000000205100000000000026366 0ustar00zuulzuul00000000000000{ "portgroups": [ { "address": "11:11:11:11:11:11", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "bookmark" } ], "mode": "active-backup", "name": "test_portgroup", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "ports": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "bookmark" } ], "properties": {}, "standalone_ports_supported": true, "updated_at": null, "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/portgroup-list-response.json0000664000175000017500000000072100000000000025130 0ustar00zuulzuul00000000000000{ "portgroups": [ { "address": "11:11:11:11:11:11", "links": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "bookmark" } ], "name": "test_portgroup", "uuid": 
"e43c722c-248e-4c6e-8ce8-0d8ff129387a" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/portgroup-port-detail-response.json0000664000175000017500000000165600000000000026411 0ustar00zuulzuul00000000000000{ "ports": [ { "address": "22:22:22:22:22:22", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "local_link_connection": { "port_id": "Ethernet3/1", "switch_id": "0a:1b:2c:3d:4e:5f", "switch_info": "switch1" }, "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "physical_network": "physnet1", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "pxe_enabled": true, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/portgroup-port-list-response.json0000664000175000017500000000064200000000000026114 0ustar00zuulzuul00000000000000{ "ports": [ { "address": "22:22:22:22:22:22", "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/portgroup-update-request.json0000664000175000017500000000014500000000000025271 0ustar00zuulzuul00000000000000[ { "path" : "/address", "value" : "22:22:22:22:22:22", "op" : "replace" } ] 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/portgroup-update-response.json0000664000175000017500000000165100000000000025442 0ustar00zuulzuul00000000000000{ "address": "22:22:22:22:22:22", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "bookmark" } ], "mode": "active-backup", "name": "test_portgroup", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "ports": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "bookmark" } ], "properties": {}, "standalone_ports_supported": true, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/volume-connector-create-request.json0000664000175000017500000000022000000000000026502 0ustar00zuulzuul00000000000000{ "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "connector_id": "iqn.2017-07.org.openstack:01:d9a51732c3f" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/volume-connector-create-response.json0000664000175000017500000000105200000000000026654 0ustar00zuulzuul00000000000000{ "connector_id": "iqn.2017-07.org.openstack:01:d9a51732c3f", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "self" }, { "href": 
"http://127.0.0.1:6385/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "updated_at": null, "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/volume-connector-list-detail-response.json0000664000175000017500000000122000000000000027621 0ustar00zuulzuul00000000000000{ "connectors": [ { "connector_id": "iqn.2017-07.org.openstack:01:d9a51732c3f", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "updated_at": null, "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/volume-connector-list-response.json0000664000175000017500000000105300000000000026365 0ustar00zuulzuul00000000000000{ "connectors": [ { "connector_id": "iqn.2017-07.org.openstack:01:d9a51732c3f", "links": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/volume-connector-update-request.json0000664000175000017500000000020100000000000026520 0ustar00zuulzuul00000000000000[ { "path" : 
"/connector_id", "value" : "iqn.2017-07.org.openstack:02:10190a4153e", "op" : "replace" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/volume-connector-update-response.json0000664000175000017500000000111000000000000026666 0ustar00zuulzuul00000000000000{ "connector_id": "iqn.2017-07.org.openstack:02:10190a4153e", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/volume-list-response.json0000664000175000017500000000112400000000000024374 0ustar00zuulzuul00000000000000{ "connectors": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/connectors", "rel": "bookmark" } ], "links": [ { "href": "http://127.0.0.1:6385/v1/volume/", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/", "rel": "bookmark" } ], "targets": [ { "href": "http://127.0.0.1:6385/v1/volume/targets", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/targets", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/volume-target-create-request.json0000664000175000017500000000024700000000000026007 0ustar00zuulzuul00000000000000{ "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "volume_type": "iscsi", "boot_index": 0, "volume_id": "04452bed-5367-4202-8bf5-de4335ac56d2" 
} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/volume-target-create-response.json0000664000175000017500000000111500000000000026150 0ustar00zuulzuul00000000000000{ "boot_index": 0, "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "properties": {}, "updated_at": null, "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "volume_id": "04452bed-5367-4202-8bf5-de4335ac56d2", "volume_type": "iscsi" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/volume-target-list-detail-response.json0000664000175000017500000000127000000000000027122 0ustar00zuulzuul00000000000000{ "targets": [ { "boot_index": 0, "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "properties": {}, "updated_at": null, "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "volume_id": "04452bed-5367-4202-8bf5-de4335ac56d2", "volume_type": "iscsi" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/volume-target-list-response.json0000664000175000017500000000107300000000000025663 0ustar00zuulzuul00000000000000{ "targets": [ { "boot_index": 0, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": 
"self" }, { "href": "http://127.0.0.1:6385/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "volume_id": "04452bed-5367-4202-8bf5-de4335ac56d2", "volume_type": "iscsi" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/volume-target-update-request.json0000664000175000017500000000017200000000000026023 0ustar00zuulzuul00000000000000[ { "path" : "/volume_id", "value" : "7211f7d3-3f32-4efc-b64e-9b8e92e64a8e", "op" : "replace" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/api-ref/source/samples/volume-target-update-response.json0000664000175000017500000000115300000000000026171 0ustar00zuulzuul00000000000000{ "boot_index": 0, "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "properties": {}, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "volume_id": "7211f7d3-3f32-4efc-b64e-9b8e92e64a8e", "volume_type": "iscsi" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/bindep.txt0000664000175000017500000000747500000000000015117 0ustar00zuulzuul00000000000000# these are needed to run ironic with default ipmitool and (i)PXE boot drivers ipmitool [default] ipxe [platform:dpkg default] ipxe-bootimgs [platform:rpm default] socat [default] xinetd [default] tftpd-hpa [platform:dpkg default] tftp-server [platform:rpm default] # Starting with Debian Jessie (and thus in 
Ubuntu Xenial too), # pxelinux package provides the pxelinux.0 boot loader, # but such package is absent from Debian Wheezy / Ubuntu Trusty. # Also, in Debian Wheezy / Ubuntu Trusty 'syslinux' depends on syslinux-common, # but only recommends it in Jessie/Xenial. # Make sure syslinux-common is installed for those distros as it provides # *.c32 modules for syslinux # TODO remove distro pinning when Wheezy / Trusty are EOLed (May 2019) # or DevStack stops supporting those. # In the mean time, new Debian-based release codenames will have to be added # as distros can not be pinned with 'if-later-than' specified. pxelinux [platform:ubuntu-xenial platform:debian-jessie default] syslinux [platform:rpm platform:ubuntu-trusty platform:debian-wheezy default] syslinux-common [platform:ubuntu-xenial platform:debian-jessie default] socat [default] # Grub2 files for boot loadingusing PXE/GRUB2 shim-signed [platform:dpkg default] shim [platform:rpm default] grub-efi-amd64-signed [platform:dpkg default] grub2-efi [platform:rpm default] # these are needed to create and access VMs when testing with virtual hardware libvirt-bin [platform:dpkg devstack] libvirt [platform:rpm devstack] libvirt-dev [platform:dpkg devstack] libvirt-devel [platform:rpm devstack] qemu [platform:dpkg devstack build-image-dib] qemu-kvm [platform:dpkg devstack] qemu-utils [platform:dpkg devstack build-image-dib] qemu-system-data [platform:dpkg devstack] sgabios [platform:rpm devstack] ipxe-qemu [platform:dpkg devstack] edk2-ovmf [platform:rpm devstack] ovmf [platform:dpkg devstack] ipxe-roms-qemu [platform:rpm devstack] openvswitch [platform:rpm devstack] iptables [devstack] net-tools [platform:rpm devstack] # these are needed to compile Python dependencies from sources python-dev [platform:dpkg test] python3-all-dev [platform:dpkg !platform:ubuntu-precise test] python3-devel [platform:rpm test] build-essential [platform:dpkg test] libssl-dev [platform:dpkg test] libffi-dev [platform:dpkg test] libffi-devel 
[platform:rpm test] # these are needed by infra for python-* jobs libpq-dev [platform:dpkg test] libpq-devel [platform:rpm test] postgresql postgresql-client [platform:dpkg] # postgresql-devel [platform:rpm] postgresql-server [platform:rpm] mariadb [platform:rpm] mariadb-server [platform:rpm] # mariadb-devel [platform:rpm] dev-db/mariadb [platform:gentoo] mysql-client [platform:dpkg] mysql-server [platform:dpkg] # libmysqlclient-dev [platform:dpkg] # gettext and graphviz are needed by doc builds only. For transition, # have them in both doc and test. # TODO(jaegerandi): Remove test once infra scripts are updated. # this is needed for compiling translations gettext [test doc] # this is needed to build the FSM diagram graphviz [!platform:gentoo test doc] # libsrvg2 is needed for sphinxcontrib-svg2pdfconverter in docs builds. librsvg2-tools [doc platform:rpm] librsvg2-bin [doc platform:dpkg] # these are needed to build images # NOTE apparmor is an undeclared dependency for docker on ubuntu, # see https://github.com/docker/docker/issues/9745 apparmor [platform:dpkg imagebuild] gnupg [imagebuild] squashfs-tools [platform:dpkg platform:redhat imagebuild] squashfs [platform:suse imagebuild] # For custom partition images kpartx [devstack] libguestfs0 [platform:dpkg imagebuild] libguestfs [platform:rpm imagebuild devstack] libguestfs-tools [platform:dpkg devstack] python-guestfs [platform:dpkg imagebuild] qemu-img [platform:rpm devstack] # for TinyIPA build wget [imagebuild] python-pip [imagebuild] unzip [imagebuild] sudo [imagebuild] gawk [imagebuild] mtools [imagebuild] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8786666 ironic-20.1.0/devstack/0000775000175000017500000000000000000000000014704 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 
ironic-20.1.0/devstack/common_settings0000664000175000017500000000474400000000000020050 0ustar00zuulzuul00000000000000#!/bin/bash if [[ -f $TOP_DIR/../../old/devstack/.localrc.auto ]]; then source <(cat $TOP_DIR/../../old/devstack/.localrc.auto | grep -v 'enable_plugin') fi # Whether configure the nodes to boot in Legacy BIOS or UEFI mode. Accepted # values are: "bios" or "uefi", defaults to "uefi". IRONIC_BOOT_MODE=${IRONIC_BOOT_MODE:-uefi} CIRROS_VERSION_DEVSTACK=$(set +o xtrace && source $TOP_DIR/stackrc && echo $CIRROS_VERSION) CIRROS_VERSION=${CIRROS_VERSION:-$CIRROS_VERSION_DEVSTACK} IRONIC_DEFAULT_IMAGE_NAME=cirros-${CIRROS_VERSION}-x86_64-uec IRONIC_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-$IRONIC_DEFAULT_IMAGE_NAME} # Add link to download queue, ignore if already exist. # TODO(vsaienko) Move to devstack https://review.opendev.org/420656 function add_image_link { local i_link="$1" if ! [[ "$IMAGE_URLS" =~ "$i_link" ]]; then if [[ -z "$IMAGE_URLS" || "${IMAGE_URLS: -1}" == "," ]]; then IMAGE_URLS+="$i_link" else IMAGE_URLS+=",$i_link" fi fi } # NOTE (vsaienko) We are going to test mixed drivers/partitions in single setup. # Do not restrict downloading image only for specific case. Download both disk and uec images. # NOTE (vdrok): Here the images are actually pre-cached by devstack, in # the files folder, so they won't be downloaded again. 
add_image_link http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz add_image_link http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img export IRONIC_WHOLEDISK_IMAGE_NAME=${IRONIC_WHOLEDISK_IMAGE_NAME:-${IRONIC_IMAGE_NAME/-uec/-disk}} export IRONIC_PARTITIONED_IMAGE_NAME=${IRONIC_PARTITIONED_IMAGE_NAME:-${IRONIC_IMAGE_NAME/-disk/-uec}} # These parameters describe which image will be used to provision a node in # tempest tests if [[ -z "$IRONIC_TEMPEST_WHOLE_DISK_IMAGE" && "$IRONIC_VM_EPHEMERAL_DISK" == 0 ]]; then IRONIC_TEMPEST_WHOLE_DISK_IMAGE=True fi IRONIC_TEMPEST_WHOLE_DISK_IMAGE=$(trueorfalse False IRONIC_TEMPEST_WHOLE_DISK_IMAGE) if [[ "$IRONIC_TEMPEST_WHOLE_DISK_IMAGE" == "True" ]]; then export IRONIC_IMAGE_NAME=$IRONIC_WHOLEDISK_IMAGE_NAME else export IRONIC_IMAGE_NAME=$IRONIC_PARTITIONED_IMAGE_NAME fi # NOTE(vsaienko) set DEFAULT_IMAGE_NAME here, as it is still used by grenade # https://github.com/openstack-dev/grenade/blob/90c4ead2f2a7ed48c873c51cef415b83d655752e/projects/60_nova/resources.sh#L31 export DEFAULT_IMAGE_NAME=$IRONIC_IMAGE_NAME ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8786666 ironic-20.1.0/devstack/files/0000775000175000017500000000000000000000000016006 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/files/apache-ipxe-ironic.template0000664000175000017500000000106500000000000023212 0ustar00zuulzuul00000000000000Listen %PUBLICPORT% DocumentRoot "%HTTPROOT%" Options Indexes FollowSymLinks AllowOverride None Order allow,deny Allow from all Require all granted ErrorLog %APACHELOGDIR%/ipxe_error.log ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i" LogLevel info CustomLog %APACHELOGDIR%/ipxe_access.log "%{%Y-%m-%d}t 
%{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/files/apache-ironic-api-redirect.template0000664000175000017500000000144100000000000024613 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 1.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is an example Apache2 configuration file for using the # Ironic API through mod_wsgi. This version assumes you are # running devstack to configure the software. Redirect 307 /baremetal %IRONIC_SERVICE_PROTOCOL%://%IRONIC_SERVICE_HOST%/baremetal ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/files/bindep.txt0000664000175000017500000000654000000000000020015 0ustar00zuulzuul00000000000000# NOTE(TheJulia): This is a special bindep file which is independent of the # project bindep file which is for general usage. This binde pfile is # intended for execution from Devstack. # The *primary* purpose being, devstack manages sql dependency mangement # and credential setup, so they can't be included here or it is installed # prematurely. 
# these are needed to run ironic with default ipmitool and (i)PXE boot drivers ipmitool [default] ipxe [platform:dpkg default] ipxe-bootimgs [platform:rpm default] socat [default] xinetd [default] tftpd-hpa [platform:dpkg] tftp-server [platform:rpm] # Starting with Debian Jessie (and thus in Ubuntu Xenial too), # pxelinux package provides the pxelinux.0 boot loader, # but such package is absent from Debian Wheezy / Ubuntu Trusty. # Make sure syslinux-common is installed for those distros as it provides # *.c32 modules for syslinux pxelinux [platform:dpkg] syslinux syslinux-common [platform:dpkg] isolinux [platform:dpkg] socat [default] # Grub2 files for boot loadingusing PXE/GRUB2 shim-signed [platform:dpkg] grub-efi-amd64-signed [platform:dpkg] libvirt-daemon [platform:dpkg] libvirt-clients [platform:dpkg] libvirt [platform:rpm] libvirt-dev [platform:dpkg] libvirt-devel [platform:rpm] qemu [platform:dpkg] qemu-kvm [platform:dpkg platform:rpm] qemu-utils [platform:dpkg] qemu-system-data [platform:dpkg] sgabios [platform:rpm] ipxe-qemu [platform:dpkg] edk2-ovmf [platform:rpm] ovmf [platform:dpkg] ipxe-roms-qemu [platform:rpm] openvswitch [platform:rpm] iptables [default] net-tools [platform:rpm] # these are needed to compile Python dependencies from sources python-dev [platform:dpkg test] python3-all-dev [platform:dpkg !platform:ubuntu-precise test] python3-devel [platform:rpm test] build-essential [platform:dpkg test] libssl-dev [platform:dpkg test] libffi-dev [platform:dpkg test] libffi-devel [platform:rpm test] # these are needed by infra for python-* jobs libpq-dev [platform:dpkg test] libpq-devel [platform:rpm test] postgresql postgresql-client [platform:dpkg] # postgresql-devel [platform:rpm] postgresql-server [platform:rpm] mariadb [platform:rpm] mariadb-server [platform:rpm] # mariadb-devel [platform:rpm] dev-db/mariadb [platform:gentoo] # gettext and graphviz are needed by doc builds only. For transition, # have them in both doc and test. 
# TODO(jaegerandi): Remove test once infra scripts are updated. # this is needed for compiling translations gettext [test doc] # this is needed to build the FSM diagram graphviz [!platform:gentoo test doc] # libsrvg2 is needed for sphinxcontrib-svg2pdfconverter in docs builds. librsvg2-tools [doc platform:rpm] librsvg2-bin [doc platform:dpkg] # For parsing of API response json jq dosfstools [platform:dpkg] # Metalsmith jobs gdisk [platform:dpkg] # these are needed to build a deploy ramdisk # NOTE apparmor is an undeclared dependency for docker on ubuntu, # see https://github.com/docker/docker/issues/9745 apparmor [platform:dpkg imagebuild] gnupg [imagebuild] squashfs-tools [platform:dpkg platform:redhat imagebuild] squashfs [platform:suse imagebuild] # For custom partition images kpartx libguestfs0 [platform:dpkg imagebuild] libguestfs [platform:rpm imagebuild] libguestfs-tools [platform:dpkg] python-guestfs [platform:dpkg imagebuild] qemu-img [platform:rpm] # for TinyIPA build wget [imagebuild] python-pip [imagebuild] unzip [imagebuild] sudo [imagebuild] gawk [imagebuild] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8786666 ironic-20.1.0/devstack/files/hooks/0000775000175000017500000000000000000000000017131 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/files/hooks/qemu.py0000775000175000017500000000642700000000000020466 0ustar00zuulzuul00000000000000#!/usr/bin/python3 # Copyright (c) 2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import os import re import subprocess import sys # This script is run as a libvirt hook. # More information here: https://libvirt.org/hooks.html # The devstack/lib/ironic script in function setup_qemu_log_hook() will replace # LOG_DIR with the correct location. And will place the script into the correct # directory. VM_LOG_DIR = os.path.abspath("%LOG_DIR%") # Regular expression to find ANSI escape sequences at the beginning of a string ANSI_ESCAPE_RE = re.compile(r""" ^\x1b\[ # ANSI escape codes are ESC (0x1b) [ ?([\d;]*)(\w)""", re.VERBOSE) NOW = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") def main(): if len(sys.argv) < 3: return guest_name = sys.argv[1] action = sys.argv[2] if action == "started": interfaces = subprocess.check_output( ['ip', 'link', 'show', 'type', 'macvtap'] ).decode().split("\n") for iface_line in interfaces: if 'macvtap' in iface_line: iface_string = iface_line.split('@') ifaces = iface_string[0].split(' ') subprocess.call(['ip', 'link', 'set', 'dev', ifaces[1], 'multicast', 'on', 'allmulticast', 'on']) if action != "release": return if not console_log_exists(guest_name): return new_path = move_console_log(guest_name) if not new_path: return no_ansi_filename = "{}_no_ansi_{}.log".format(guest_name, NOW) no_ansi_path = os.path.join(VM_LOG_DIR, no_ansi_filename) create_no_ansi_file(new_path, no_ansi_path) def create_no_ansi_file(source_filename, dest_filename): with open(source_filename) as in_file: data = in_file.read() data = remove_ansi_codes(data) with open(dest_filename, 'w') as out_file: out_file.write(data) def 
get_console_log_path(guest_name): logfile_name = "{}_console.log".format(guest_name) return os.path.join(VM_LOG_DIR, logfile_name) def console_log_exists(guest_name): return os.path.isfile(get_console_log_path(guest_name)) def move_console_log(guest_name): new_logfile_name = "{}_console_{}.log".format(guest_name, NOW) new_path = os.path.join(VM_LOG_DIR, new_logfile_name) if os.path.exists(new_path): return False os.rename(get_console_log_path(guest_name), new_path) return new_path def remove_ansi_codes(data): """Remove any ansi codes from the provided string""" output = '' while data: result = ANSI_ESCAPE_RE.match(data) if not result: output += data[0] data = data[1:] else: data = data[result.end():] return output if '__main__' == __name__: sys.exit(main()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8786666 ironic-20.1.0/devstack/lib/0000775000175000017500000000000000000000000015452 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/lib/ironic0000664000175000017500000041303700000000000016670 0ustar00zuulzuul00000000000000#!/bin/bash # # lib/ironic # Functions to control the configuration and operation of the **Ironic** service # Dependencies: # # - ``functions`` file # - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # - ``SERVICE_HOST`` # - ``KEYSTONE_TOKEN_FORMAT`` must be defined # ``stack.sh`` calls the entry points in this order: # # - install_ironic # - install_ironicclient # - init_ironic # - start_ironic # - stop_ironic # - cleanup_ironic # ensure we don't re-source this in the same environment [[ -z "$_IRONIC_DEVSTACK_LIB" ]] || return 0 declare -r -g _IRONIC_DEVSTACK_LIB=1 # Save xtrace and pipefail settings _XTRACE_IRONIC=$(set +o | grep xtrace) _PIPEFAIL_IRONIC=$(set +o | grep pipefail) set -o xtrace set +o pipefail # 
Defaults # -------- # Set up default directories GITDIR["python-ironicclient"]=$DEST/python-ironicclient GITDIR["ironic-lib"]=$DEST/ironic-lib GITREPO["pyghmi"]=${PYGHMI_REPO:-${GIT_BASE}/x/pyghmi} GITBRANCH["pyghmi"]=${PYGHMI_BRANCH:-master} GITDIR["pyghmi"]=$DEST/pyghmi GITREPO["virtualbmc"]=${VIRTUALBMC_REPO:-${GIT_BASE}/openstack/virtualbmc.git} GITBRANCH["virtualbmc"]=${VIRTUALBMC_BRANCH:-master} GITDIR["virtualbmc"]=$DEST/virtualbmc GITREPO["virtualpdu"]=${VIRTUALPDU_REPO:-${GIT_BASE}/openstack/virtualpdu.git} GITBRANCH["virtualpdu"]=${VIRTUALPDU_BRANCH:-master} GITDIR["virtualpdu"]=$DEST/virtualpdu GITREPO["sushy"]=${SUSHY_REPO:-${GIT_BASE}/openstack/sushy.git} GITBRANCH["sushy"]=${SUSHY_BRANCH:-master} GITDIR["sushy"]=$DEST/sushy GITREPO["sushy-tools"]=${SUSHY_TOOLS_REPO:-${GIT_BASE}/openstack/sushy-tools.git} GITBRANCH["sushy-tools"]=${SUSHY_TOOLS_BRANCH:-master} GITDIR["sushy-tools"]=$DEST/sushy-tools IRONIC_DIR=$DEST/ironic IRONIC_DEVSTACK_DIR=$IRONIC_DIR/devstack IRONIC_DEVSTACK_FILES_DIR=$IRONIC_DEVSTACK_DIR/files # TODO(dtantsur): delete these three when we migrate image building to # ironic-python-agent-builder completely IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironic-python-agent.git} IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH} IRONIC_PYTHON_AGENT_DIR=$DEST/ironic-python-agent IRONIC_PYTHON_AGENT_BUILDER_REPO=${IRONIC_PYTHON_AGENT_BUILDER_REPO:-${GIT_BASE}/openstack/ironic-python-agent-builder.git} IRONIC_PYTHON_AGENT_BUILDER_BRANCH=${IRONIC_PYTHON_AGENT_BUILDER_BRANCH:-$BRANCHLESS_TARGET_BRANCH} IRONIC_PYTHON_AGENT_BUILDER_DIR=$DEST/ironic-python-agent-builder IRONIC_DIB_BINDEP_FILE=https://opendev.org/openstack/diskimage-builder/raw/branch/master/bindep.txt IRONIC_DATA_DIR=$DATA_DIR/ironic IRONIC_STATE_PATH=/var/lib/ironic IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic} IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic} IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf 
IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf # Deploy Ironic API under uwsgi (NOT mod_wsgi) server. # Devstack aims to remove mod_wsgi support, so ironic shouldn't use it too. # If set to False that will fall back to use the eventlet server that # can happen on grenade runs. # The (confusing) name IRONIC_USE_MOD_WSGI is left for backward compatibility, # for example during grenade runs # TODO(pas-ha) remove IRONIC_USE_MOD_WSGI var after oldest supported # stable branch is stable/rocky IRONIC_USE_MOD_WSGI=$(trueorfalse $ENABLE_HTTPD_MOD_WSGI_SERVICES IRONIC_USE_MOD_WSGI) # If True, will deploy Ironic API under WSGI server, currently supported one # is uwsgi. # Defaults to the (now confusingly named) IRONIC_USE_MOD_WSGI for backward compat IRONIC_USE_WSGI=$(trueorfalse $IRONIC_USE_MOD_WSGI IRONIC_USE_WSGI) # Whether DevStack will be setup for bare metal or VMs IRONIC_IS_HARDWARE=$(trueorfalse False IRONIC_IS_HARDWARE) # Deploy callback timeout can be changed from its default (1800), if required. IRONIC_CALLBACK_TIMEOUT=${IRONIC_CALLBACK_TIMEOUT:-} # Timeout before retrying PXE boot. Set low to help the CI. 
if [[ "$IRONIC_IS_HARDWARE" == False ]]; then IRONIC_PXE_BOOT_RETRY_TIMEOUT=${IRONIC_PXE_BOOT_RETRY_TIMEOUT:-900} else IRONIC_PXE_BOOT_RETRY_TIMEOUT=${IRONIC_PXE_BOOT_RETRY_TIMEOUT:-} fi # Ping timeout after the node becomes active IRONIC_PING_TIMEOUT=${IRONIC_PING_TIMEOUT:-} # Deploy to hardware platform IRONIC_HW_NODE_CPU=${IRONIC_HW_NODE_CPU:-1} IRONIC_HW_NODE_RAM=${IRONIC_HW_NODE_RAM:-512} IRONIC_HW_NODE_DISK=${IRONIC_HW_NODE_DISK:-10} IRONIC_HW_EPHEMERAL_DISK=${IRONIC_HW_EPHEMERAL_DISK:-0} IRONIC_HW_ARCH=${IRONIC_HW_ARCH:-x86_64} # The file is composed of multiple lines, each line includes fields # separated by white space, in the format: # # [] # # For example: # # 192.168.110.107 00:1e:67:57:50:4c root otc123 # # Supported IRONIC_DEPLOY_DRIVERs: # ipmi: # # # idrac: # # # irmc: # # IRONIC_HWINFO_FILE=${IRONIC_HWINFO_FILE:-$IRONIC_DATA_DIR/hardware_info} # Set up defaults for functional / integration testing IRONIC_NODE_UUID=${IRONIC_NODE_UUID:-`uuidgen`} IRONIC_SCRIPTS_DIR=${IRONIC_SCRIPTS_DIR:-$IRONIC_DEVSTACK_DIR/tools/ironic/scripts} IRONIC_TEMPLATES_DIR=${IRONIC_TEMPLATES_DIR:-$IRONIC_DEVSTACK_DIR/tools/ironic/templates} IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False IRONIC_BAREMETAL_BASIC_OPS) IRONIC_TFTPBOOT_DIR=${IRONIC_TFTPBOOT_DIR:-$IRONIC_DATA_DIR/tftpboot} IRONIC_TFTPSERVER_IP=${IRONIC_TFTPSERVER_IP:-$HOST_IP} IRONIC_TFTP_BLOCKSIZE=${IRONIC_TFTP_BLOCKSIZE:-$((PUBLIC_BRIDGE_MTU-50))} IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1} IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1} IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-3072} IRONIC_VM_SPECS_CPU_ARCH=${IRONIC_VM_SPECS_CPU_ARCH:-'x86_64'} IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10} IRONIC_VM_SPECS_DISK_FORMAT=${IRONIC_VM_SPECS_DISK_FORMAT:-qcow2} IRONIC_VM_EPHEMERAL_DISK=${IRONIC_VM_EPHEMERAL_DISK:-0} IRONIC_VM_EMULATOR=${IRONIC_VM_EMULATOR:-'/usr/bin/qemu-system-x86_64'} IRONIC_VM_ENGINE=${IRONIC_VM_ENGINE:-qemu} IRONIC_VM_NETWORK_BRIDGE=${IRONIC_VM_NETWORK_BRIDGE:-brbm} 
IRONIC_VM_INTERFACE_COUNT=${IRONIC_VM_INTERFACE_COUNT:-2} IRONIC_VM_VOLUME_COUNT=${IRONIC_VM_VOLUME_COUNT:-1} IRONIC_VM_MACS_CSV_FILE=${IRONIC_VM_MACS_CSV_FILE:-$IRONIC_DATA_DIR/ironic_macs.csv} IRONIC_CLEAN_NET_NAME=${IRONIC_CLEAN_NET_NAME:-${IRONIC_PROVISION_NETWORK_NAME:-${PRIVATE_NETWORK_NAME}}} IRONIC_RESCUE_NET_NAME=${IRONIC_RESCUE_NET_NAME:-${IRONIC_CLEAN_NET_NAME}} IRONIC_EXTRA_PXE_PARAMS=${IRONIC_EXTRA_PXE_PARAMS:-} IRONIC_TTY_DEV=${IRONIC_TTY_DEV:-ttyS0,115200} IRONIC_TEMPEST_BUILD_TIMEOUT=${IRONIC_TEMPEST_BUILD_TIMEOUT:-${BUILD_TIMEOUT:-}} if [[ -n "$BUILD_TIMEOUT" ]]; then echo "WARNING: BUILD_TIMEOUT variable is renamed to IRONIC_TEMPEST_BUILD_TIMEOUT and will be deprecated in Pike." fi hostdomain=$(hostname) if [[ "$hostdomain" =~ "rax" ]]; then echo "WARNING: Auto-increasing the requested build timeout by 1.5 as the detected hostname suggests a cloud host where VMs are software emulated." # NOTE(TheJulia): Rax hosts are entirely qemu emulated, not CPU enabled # virtualization. As such, the ramdisk decompression is known to take an # eceptional amount of time and we need to afford a little more time to # these hosts for jobs to complete without issues. new_timeout=$(echo "$IRONIC_TEMPEST_BUILD_TIMEOUT * 1.5 / 1" | bc) IRONIC_TEMPEST_BUILD_TIMEOUT=$new_timeout if [ -n "$IRONIC_PXE_BOOT_RETRY_TIMEOUT" ]; then new_timeout=$(echo "$IRONIC_PXE_BOOT_RETRY_TIMEOUT * 1.5 / 1" | bc) IRONIC_PXE_BOOT_RETRY_TIMEOUT=$new_timeout fi # TODO(TheJulia): If we have to do magically extend timeouts again, # we should make a helper method... fi # Oslo Policy, as of Wallaby defaults to not enforcing request scope # against requestors. This is anticipated to change in Xena or after # the Xena release of OpenStack. 
IRONIC_ENFORCE_SCOPE=$(trueorfalse False IRONIC_ENFORCE_SCOPE) if [[ "$IRONIC_ENFORCE_SCOPE" == "True" ]]; then IRONIC_OS_CLOUD=devstack-system-admin else IRONIC_OS_CLOUD=devstack-admin fi # NOTE(TheJulia): Project scoped admin as of Yoga cycle # the core devstack code doesn't assert any environment variables # upfront except for OS_CLOUD, which is defaulted to devstack-admin # in the last keystone step which executes after ironic's base setup # occurs. We navigate this just fine for yoga with fixes, however # we need this variable for Xena and possibly Wallaby grenade. OS_CLOUD=${OS_CLOUD:-devstack-admin} # Versions and command line for API client IRONIC_DEFAULT_API_VERSION=${IRONIC_DEFAULT_API_VERSION:-} IRONIC_CMD="openstack --os-cloud $IRONIC_OS_CLOUD baremetal" if [[ -n "$IRONIC_DEFAULT_API_VERSION" ]]; then IRONIC_CMD="$IRONIC_CMD --os-baremetal-api-version $IRONIC_DEFAULT_API_VERSION" fi # Settings! IRONIC_ENABLED_HARDWARE_TYPES=${IRONIC_ENABLED_HARDWARE_TYPES:-"ipmi,fake-hardware"} # list of all available driver interfaces types IRONIC_DRIVER_INTERFACE_TYPES="bios boot power management deploy console inspect raid rescue storage network vendor" IRONIC_ENABLED_BIOS_INTERFACES=${IRONIC_ENABLED_BIOS_INTERFACES:-"fake,no-bios"} IRONIC_ENABLED_BOOT_INTERFACES=${IRONIC_ENABLED_BOOT_INTERFACES:-"fake,ipxe"} IRONIC_ENABLED_CONSOLE_INTERFACES=${IRONIC_ENABLED_CONSOLE_INTERFACES:-"fake,no-console"} IRONIC_ENABLED_DEPLOY_INTERFACES=${IRONIC_ENABLED_DEPLOY_INTERFACES:-"fake,direct,ramdisk"} IRONIC_ENABLED_INSPECT_INTERFACES=${IRONIC_ENABLED_INSPECT_INTERFACES:-"fake,no-inspect"} IRONIC_ENABLED_MANAGEMENT_INTERFACES=${IRONIC_ENABLED_MANAGEMENT_INTERFACES:-""} IRONIC_ENABLED_NETWORK_INTERFACES=${IRONIC_ENABLED_NETWORK_INTERFACES:-"flat,noop"} IRONIC_ENABLED_POWER_INTERFACES=${IRONIC_ENABLED_POWER_INTERFACES:-""} IRONIC_ENABLED_RAID_INTERFACES=${IRONIC_ENABLED_RAID_INTERFACES:-"fake,agent,no-raid"} 
IRONIC_ENABLED_RESCUE_INTERFACES=${IRONIC_ENABLED_RESCUE_INTERFACES:-"fake,no-rescue"} IRONIC_ENABLED_STORAGE_INTERFACES=${IRONIC_ENABLED_STORAGE_INTERFACES:-"fake,cinder,noop"} IRONIC_ENABLED_VENDOR_INTERFACES=${IRONIC_ENABLED_VENDOR_INTERFACES:-"fake,ipmitool,no-vendor"} # for usage with hardware types IRONIC_DEFAULT_BIOS_INTERFACE=${IRONIC_DEFAULT_BIOS_INTERFACE:-} IRONIC_DEFAULT_BOOT_INTERFACE=${IRONIC_DEFAULT_BOOT_INTERFACE:-} IRONIC_DEFAULT_CONSOLE_INTERFACE=${IRONIC_DEFAULT_CONSOLE_INTERFACE:-} IRONIC_DEFAULT_DEPLOY_INTERFACE=${IRONIC_DEFAULT_DEPLOY_INTERFACE:-} IRONIC_DEFAULT_INSPECT_INTERFACE=${IRONIC_DEFAULT_INSPECT_INTERFACE:-} IRONIC_DEFAULT_MANAGEMENT_INTERFACE=${IRONIC_DEFAULT_MANAGEMENT_INTERFACE:-} IRONIC_DEFAULT_NETWORK_INTERFACE=${IRONIC_DEFAULT_NETWORK_INTERFACE:-} IRONIC_DEFAULT_POWER_INTERFACE=${IRONIC_DEFAULT_POWER_INTERFACE:-} IRONIC_DEFAULT_RAID_INTERFACE=${IRONIC_DEFAULT_RAID_INTERFACE:-} IRONIC_DEFAULT_RESCUE_INTERFACE=${IRONIC_DEFAULT_RESCUE_INTERFACE:-} IRONIC_DEFAULT_STORAGE_INTERFACE=${IRONIC_DEFAULT_STORAGE_INTERFACE:-} IRONIC_DEFAULT_VENDOR_INTERFACE=${IRONIC_DEFAULT_VENDOR_INTERFACE:-} # If IRONIC_VM_ENGINE is explicitly set to "auto" or "kvm", # devstack will attempt to use hardware virtualization # (aka nested kvm). We do not enable it in the infra gates # because it is not consistently supported/working across # all gate infrastructure providers. if [[ "$IRONIC_VM_ENGINE" == "auto" ]]; then sudo modprobe kvm || true if [ ! -e /dev/kvm ]; then echo "WARNING: Switching to QEMU" IRONIC_VM_ENGINE=qemu if [[ -z "$IRONIC_VM_EMULATOR" ]]; then IRONIC_VM_EMULATOR='/usr/bin/qemu-system-x86_64' fi else IRONIC_VM_ENGINE=kvm fi fi if [[ "$IRONIC_VM_ENGINE" == "kvm" ]]; then # Set this to empty, so configure-vm.py can autodetect location # of KVM binary IRONIC_VM_EMULATOR="" fi # By default, baremetal VMs will console output to file. 
IRONIC_VM_LOG_CONSOLE=$(trueorfalse True IRONIC_VM_LOG_CONSOLE) IRONIC_VM_LOG_DIR=${IRONIC_VM_LOG_DIR:-$IRONIC_DATA_DIR/logs/} IRONIC_VM_LOG_ROTATE=$(trueorfalse True IRONIC_VM_LOG_ROTATE) # Set resource_classes for nodes to use Nova's placement engine IRONIC_DEFAULT_RESOURCE_CLASS=${IRONIC_DEFAULT_RESOURCE_CLASS:-baremetal} # Set traits for nodes. Traits should be separated by whitespace. IRONIC_DEFAULT_TRAITS=${IRONIC_DEFAULT_TRAITS-CUSTOM_GOLD} # Whether to build the ramdisk or download a prebuilt one. IRONIC_BUILD_DEPLOY_RAMDISK=$(trueorfalse True IRONIC_BUILD_DEPLOY_RAMDISK) # Ironic IPA ramdisk type, supported types are: IRONIC_SUPPORTED_RAMDISK_TYPES_RE="^(tinyipa|dib)$" IRONIC_RAMDISK_TYPE=${IRONIC_RAMDISK_TYPE:-dib} # Confirm we have a supported ramdisk type or fail early. if [[ ! "$IRONIC_RAMDISK_TYPE" =~ $IRONIC_SUPPORTED_RAMDISK_TYPES_RE ]]; then die $LINENO "Unrecognized IRONIC_RAMDISK_TYPE: $IRONIC_RAMDISK_TYPE. Expected 'tinyipa' or 'dib'" fi # Prevent a case that will likely result in a failure. if [[ "$hostdomain" =~ "rax" ]]; then if [[ "$IRONIC_RAMDISK_TYPE" == "dib" ]]; then echo "** WARNING ** - DIB based IPA images have been defined, however we are running devstack on RAX VM. Due to virtualization constraints, we are automatically falling back to TinyIPA to ensure CI job passage." IRONIC_RAMDISK_TYPE="tinyipa" fi fi # Which deploy driver to use - valid choices right now # are ``ipmi``, ``snmp`` and ``redfish``. # # Additional valid choices if IRONIC_IS_HARDWARE == true are: # ``idrac`` and ``irmc``. IRONIC_DEPLOY_DRIVER=${IRONIC_DEPLOY_DRIVER:-ipmi} # If present, these files are used as deploy ramdisk/kernel. 
# (The value must be an absolute path) IRONIC_DEPLOY_RAMDISK=${IRONIC_DEPLOY_RAMDISK:-$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.initramfs} IRONIC_DEPLOY_KERNEL=${IRONIC_DEPLOY_KERNEL:-$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.kernel} IRONIC_DEPLOY_ISO=${IRONIC_DEPLOY_ISO:-$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.iso} # If present, this file is used to deploy/boot nodes over virtual media # (The value must be an absolute path) IRONIC_EFIBOOT=${IRONIC_EFIBOOT:-$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.efiboot} # NOTE(jroll) this needs to be updated when stable branches are cut IPA_DOWNLOAD_BRANCH=${IPA_DOWNLOAD_BRANCH:-master} IPA_DOWNLOAD_BRANCH=$(echo $IPA_DOWNLOAD_BRANCH | tr / -) # OS for using with DIB images IRONIC_DIB_RAMDISK_OS=${IRONIC_DIB_RAMDISK_OS:-centos8} IRONIC_DIB_RAMDISK_RELEASE=${IRONIC_DIB_RAMDISK_RELEASE:-} # Configure URLs required to download ramdisk if we're not building it, and # IRONIC_DEPLOY_RAMDISK/KERNEL or the RAMDISK/KERNEL_URLs have not been # preconfigured. if [[ "$IRONIC_BUILD_DEPLOY_RAMDISK" == "False" && \ ! (-e "$IRONIC_DEPLOY_RAMDISK" && -e "$IRONIC_DEPLOY_KERNEL") && \ (-z "$IRONIC_AGENT_KERNEL_URL" || -z "$IRONIC_AGENT_RAMDISK_URL") ]]; then case $IRONIC_RAMDISK_TYPE in tinyipa) IRONIC_AGENT_KERNEL_FILE=tinyipa-${IPA_DOWNLOAD_BRANCH}.vmlinuz IRONIC_AGENT_RAMDISK_FILE=tinyipa-${IPA_DOWNLOAD_BRANCH}.gz ;; dib) IRONIC_AGENT_KERNEL_FILE=ipa-${IRONIC_DIB_RAMDISK_OS}-${IPA_DOWNLOAD_BRANCH}.kernel IRONIC_AGENT_RAMDISK_FILE=ipa-${IRONIC_DIB_RAMDISK_OS}-${IPA_DOWNLOAD_BRANCH}.initramfs ;; esac IRONIC_AGENT_KERNEL_URL=https://tarballs.openstack.org/ironic-python-agent/${IRONIC_RAMDISK_TYPE}/files/${IRONIC_AGENT_KERNEL_FILE} IRONIC_AGENT_RAMDISK_URL=https://tarballs.openstack.org/ironic-python-agent/${IRONIC_RAMDISK_TYPE}/files/${IRONIC_AGENT_RAMDISK_FILE} fi # This refers the options for disk-image-create and the platform on which # to build the dib based ironic-python-agent ramdisk. 
IRONIC_DIB_RAMDISK_OPTIONS=${IRONIC_DIB_RAMDISK_OPTIONS:-}
if [[ -z "$IRONIC_DIB_RAMDISK_OPTIONS" ]]; then
    if [[ "$IRONIC_DIB_RAMDISK_OS" == "centos8" ]]; then
        # Adapt for DIB naming change
        IRONIC_DIB_RAMDISK_OS=centos
        IRONIC_DIB_RAMDISK_RELEASE=8-stream
    fi
    IRONIC_DIB_RAMDISK_OPTIONS="$IRONIC_DIB_RAMDISK_OS"
fi

# DHCP timeout for the dhcp-all-interfaces element.
IRONIC_DIB_DHCP_TIMEOUT=${IRONIC_DIB_DHCP_TIMEOUT:-60}

# Some drivers in Ironic require deploy ramdisk in bootable ISO format.
# Set this variable to "true" to build an ISO for deploy ramdisk and
# upload to Glance.
IRONIC_DEPLOY_ISO_REQUIRED=$(trueorfalse False IRONIC_DEPLOY_ISO_REQUIRED)
if [[ "$IRONIC_DEPLOY_ISO_REQUIRED" = "True" \
    && "$IRONIC_BUILD_DEPLOY_RAMDISK" = "False" \
    && ! -e "$IRONIC_DEPLOY_ISO" ]]; then
    die "Prebuilt ISOs are not available, provide an ISO via IRONIC_DEPLOY_ISO \
or set IRONIC_BUILD_DEPLOY_RAMDISK=True to use ISOs"
fi

# If the requested driver is not yet enable, enable it, if it is not it will fail anyway
if [[ -z "$(echo ${IRONIC_ENABLED_HARDWARE_TYPES} | grep -w ${IRONIC_DEPLOY_DRIVER})" ]]; then
    die "The deploy driver $IRONIC_DEPLOY_DRIVER is not in the list of enabled \
hardware types $IRONIC_ENABLED_HARDWARE_TYPES"
fi

# Support entry points installation of console scripts
IRONIC_BIN_DIR=$(get_python_exec_prefix)
IRONIC_UWSGI_CONF=$IRONIC_CONF_DIR/ironic-uwsgi.ini
IRONIC_UWSGI=$IRONIC_BIN_DIR/ironic-api-wsgi

# Lets support IPv6 testing!
IRONIC_IP_VERSION=${IRONIC_IP_VERSION:-${IP_VERSION:-4}}

# Ironic connection info. Note the port must be specified.
if is_service_enabled tls-proxy; then
    IRONIC_SERVICE_PROTOCOL=https
fi
IRONIC_SERVICE_PROTOCOL=${IRONIC_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
IRONIC_SERVICE_PORT=${IRONIC_SERVICE_PORT:-6385}
IRONIC_SERVICE_PORT_INT=${IRONIC_SERVICE_PORT_INT:-16385}

# If ironic api running under apache or UWSGI we use the path rather than port
if [[ "$IRONIC_USE_WSGI" == "True" ]]; then
    IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST/baremetal}
else
    IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:$IRONIC_SERVICE_PORT}
fi

# Enable iPXE
IRONIC_IPXE_ENABLED=$(trueorfalse True IRONIC_IPXE_ENABLED)

# Options below are only applied when IRONIC_IPXE_ENABLED is True
IRONIC_IPXE_USE_SWIFT=$(trueorfalse False IRONIC_IPXE_USE_SWIFT)
IRONIC_HTTP_DIR=${IRONIC_HTTP_DIR:-$IRONIC_DATA_DIR/httpboot}
IRONIC_HTTP_PORT=${IRONIC_HTTP_PORT:-3928}

# Allow using JSON RPC instead of oslo.messaging
IRONIC_RPC_TRANSPORT=${IRONIC_RPC_TRANSPORT:-oslo}
IRONIC_JSON_RPC_PORT=${IRONIC_JSON_RPC_PORT:-8089}

# The authentication strategy used by json-rpc. Valid values are:
# keystone, http_basic, noauth, or no value to inherit from ironic-api
# auth strategy.
IRONIC_JSON_RPC_AUTH_STRATEGY=${IRONIC_JSON_RPC_AUTH_STRATEGY:-}

# The first port in the range to bind the Virtual BMCs. The number of
# ports that will be used depends on $IRONIC_VM_COUNT variable, e.g if
# $IRONIC_VM_COUNT=3 the ports 6230, 6231 and 6232 will be used for the
# Virtual BMCs, one for each VM.
IRONIC_VBMC_PORT_RANGE_START=${IRONIC_VBMC_PORT_RANGE_START:-6230}
IRONIC_VBMC_CONFIG_FILE=${IRONIC_VBMC_CONFIG_FILE:-$IRONIC_CONF_DIR/virtualbmc/virtualbmc.conf}
IRONIC_VBMC_LOGFILE=${IRONIC_VBMC_LOGFILE:-$IRONIC_VM_LOG_DIR/virtualbmc.log}
IRONIC_VBMC_SYSTEMD_SERVICE=devstack@virtualbmc.service

# Virtual PDU configs
IRONIC_VPDU_CONFIG_FILE=${IRONIC_VPDU_CONFIG_FILE:-$IRONIC_CONF_DIR/virtualpdu/virtualpdu.conf}
IRONIC_VPDU_PORT_RANGE_START=${IRONIC_VPDU_PORT_RANGE_START:-1}
IRONIC_VPDU_LISTEN_PORT=${IRONIC_VPDU_LISTEN_PORT:-1161}
IRONIC_VPDU_COMMUNITY=${IRONIC_VPDU_COMMUNITY:-private}
IRONIC_VPDU_SNMPDRIVER=${IRONIC_VPDU_SNMPDRIVER:-apc_rackpdu}
IRONIC_VPDU_SYSTEMD_SERVICE=devstack@virtualpdu.service

# Redfish configs
IRONIC_REDFISH_EMULATOR_PORT=${IRONIC_REDFISH_EMULATOR_PORT:-9132}
IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE="devstack@redfish-emulator.service"
IRONIC_REDFISH_EMULATOR_CONFIG=${IRONIC_REDFISH_EMULATOR_CONFIG:-$IRONIC_CONF_DIR/redfish/emulator.conf}

# To explicitly enable configuration of Glance with Swift
# (which is required by some vendor drivers), set this
# variable to true.
IRONIC_CONFIGURE_GLANCE_WITH_SWIFT=$(trueorfalse False IRONIC_CONFIGURE_GLANCE_WITH_SWIFT)

# The path to the libvirt hooks directory, used if IRONIC_VM_LOG_ROTATE is True
IRONIC_LIBVIRT_HOOKS_PATH=${IRONIC_LIBVIRT_HOOKS_PATH:-/etc/libvirt/hooks/}

LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"}
LIBVIRT_STORAGE_POOL_PATH=${LIBVIRT_STORAGE_POOL_PATH:-/var/lib/libvirt/images}

# The authentication strategy used by ironic-api. Valid values are:
# keystone, http_basic, noauth.
IRONIC_AUTH_STRATEGY=${IRONIC_AUTH_STRATEGY:-keystone}

# By default, terminal SSL certificate is disabled.
IRONIC_TERMINAL_SSL=$(trueorfalse False IRONIC_TERMINAL_SSL)
IRONIC_TERMINAL_CERT_DIR=${IRONIC_TERMINAL_CERT_DIR:-$IRONIC_DATA_DIR/terminal_cert/}

# This flag is used to allow adding Link-Local-Connection info
# to ironic port-create command. LLC info is obtained from
# IRONIC_{VM,HW}_NODES_FILE
IRONIC_USE_LINK_LOCAL=$(trueorfalse False IRONIC_USE_LINK_LOCAL)

# Allow selecting dhcp provider
IRONIC_DHCP_PROVIDER=${IRONIC_DHCP_PROVIDER:-neutron}

# This is the network interface to use for a node
IRONIC_NETWORK_INTERFACE=${IRONIC_NETWORK_INTERFACE:-}

# Ironic provision network name, if this value is set it means we are using
# multi-tenant networking. If not set, then we are not using multi-tenant
# networking and are therefore using a 'flat' network.
IRONIC_PROVISION_NETWORK_NAME=${IRONIC_PROVISION_NETWORK_NAME:-}

# Provision network provider type. Can be flat or vlan.
# This is only used if IRONIC_PROVISION_NETWORK_NAME has been set.
IRONIC_PROVISION_PROVIDER_NETWORK_TYPE=${IRONIC_PROVISION_PROVIDER_NETWORK_TYPE:-'vlan'}

# If IRONIC_PROVISION_PROVIDER_NETWORK_TYPE is vlan. VLAN_ID may be specified. If it is not set,
# vlan will be allocated dynamically.
# This is only used if IRONIC_PROVISION_NETWORK_NAME has been set.
IRONIC_PROVISION_SEGMENTATION_ID=${IRONIC_PROVISION_SEGMENTATION_ID:-}

if [[ "$IRONIC_IP_VERSION" != '6' ]]; then
    # NOTE(TheJulia): Lets not try and support mixed mode since the conductor
    # can't support mixed mode operation. We are either IPv4 OR IPv6.
    IRONIC_IP_VERSION='4'

    # Allocation network pool for provision network
    # Example: IRONIC_PROVISION_ALLOCATION_POOL=start=10.0.5.10,end=10.0.5.100
    # This is only used if IRONIC_PROVISION_NETWORK_NAME has been set.
    IRONIC_PROVISION_ALLOCATION_POOL=${IRONIC_PROVISION_ALLOCATION_POOL:-'start=10.0.5.10,end=10.0.5.100'}

    # With multinode case all ironic-conductors should have IP from provisioning network.
    # IRONIC_PROVISION_SUBNET_GATEWAY - is configured on primary node.
    # Ironic provision subnet gateway.
    IRONIC_PROVISION_SUBNET_GATEWAY=${IRONIC_PROVISION_SUBNET_GATEWAY:-'10.0.5.1'}
    IRONIC_PROVISION_SUBNET_SUBNODE_IP=${IRONIC_PROVISION_SUBNET_SUBNODE_IP:-'10.0.5.2'}

    # Ironic provision subnet prefix
    # Example: IRONIC_PROVISION_SUBNET_PREFIX=10.0.5.0/24
    IRONIC_PROVISION_SUBNET_PREFIX=${IRONIC_PROVISION_SUBNET_PREFIX:-'10.0.5.0/24'}
else
    IRONIC_IP_VERSION='6'
    # NOTE(TheJulia): The IPv6 address devstack has identified is the
    # local loopback. This does not really serve our purposes very
    # well, so we need to setup something that will work.
    if [[ "$HOST_IPV6" == '::1' ]] || [[ ! $HOST_IPV6 =~ "::" ]]; then
        # We setup an address elsewhere because the service address of
        # loopback cannot be used for v6 testing.
        IRONIC_HOST_IPV6='fc00::1'
    else
        IRONIC_HOST_IPV6=$SERVICE_HOST
    fi
    IRONIC_PROVISION_SUBNET_GATEWAY=${IRONIC_PROVISION_SUBNET_GATEWAY:-'fc01::1'}
    IRONIC_PROVISION_SUBNET_SUBNODE_IP=${IRONIC_PROVISION_SUBNET_SUBNODE_IP:-'fc01::2'}
    IRONIC_PROVISION_SUBNET_PREFIX=${IRONIC_PROVISION_SUBNET_PREFIX:-'fc01::/64'}
    IRONIC_TFTPSERVER_IP=$IRONIC_HOST_IPV6
fi

IRONIC_ROUTER_NAME=${Q_ROUTER_NAME:-router1}

# Ironic provision subnet name.
# This is only used if IRONIC_PROVISION_NETWORK_NAME has been set.
IRONIC_PROVISION_PROVIDER_SUBNET_NAME=${IRONIC_PROVISION_PROVIDER_SUBNET_NAME:-${IRONIC_PROVISION_NETWORK_NAME}-subnet}

# When enabled this will set the physical_network attribute for ironic ports
# and subnet-to-segment association on provisioning network will be configured.
# NOTE: The neutron segments service_plugin must be loaded for this.
IRONIC_USE_NEUTRON_SEGMENTS=$(trueorfalse False IRONIC_USE_NEUTRON_SEGMENTS)

# This is the storage interface to use for a node
# Only 'cinder' can be set for testing boot from volume
IRONIC_STORAGE_INTERFACE=${IRONIC_STORAGE_INTERFACE:-}

# With multinode case all ironic-conductors should have IP from provisioning network.
# IRONIC_PROVISION_SUBNET_GATEWAY - is configured on primary node.
# Ironic provision subnet gateway.
IRONIC_PROVISION_SUBNET_GATEWAY=${IRONIC_PROVISION_SUBNET_GATEWAY:-'10.0.5.1'}
IRONIC_PROVISION_SUBNET_SUBNODE_IP=${IRONIC_PROVISION_SUBNET_SUBNODE_IP:-'10.0.5.2'}

# Ironic provision subnet prefix
# Example: IRONIC_PROVISION_SUBNET_PREFIX=10.0.5.0/24
IRONIC_PROVISION_SUBNET_PREFIX=${IRONIC_PROVISION_SUBNET_PREFIX:-'10.0.5.0/24'}

if [[ "$HOST_TOPOLOGY_ROLE" == "primary" ]]; then
    # Some CI jobs get triggered without a HOST_TOPOLOGY_ROLE
    # If so, none of this logic is, or needs to be executed.
    IRONIC_TFTPSERVER_IP=$IRONIC_PROVISION_SUBNET_GATEWAY
    IRONIC_HTTP_SERVER=$IRONIC_PROVISION_SUBNET_GATEWAY
fi
if [[ "$HOST_TOPOLOGY_ROLE" == "subnode" ]]; then
    IRONIC_TFTPSERVER_IP=$IRONIC_PROVISION_SUBNET_SUBNODE_IP
    IRONIC_HTTP_SERVER=$IRONIC_PROVISION_SUBNET_SUBNODE_IP
fi

# NOTE(TheJulia): Last catch for this being set or not.
# should only work for v4.
IRONIC_HTTP_SERVER=${IRONIC_HTTP_SERVER:-$IRONIC_TFTPSERVER_IP}

# Port that must be permitted for iSCSI connections to be
# established from the tenant network.
ISCSI_SERVICE_PORT=${ISCSI_SERVICE_PORT:-3260}

# Retrieving logs from the deploy ramdisk
#
# IRONIC_DEPLOY_LOGS_COLLECT possible values are:
# * always: Collect the ramdisk logs from the deployment on success or
#           failure (Default in DevStack for debugging purpose).
# * on_failure: Collect the ramdisk logs upon a deployment failure
#               (Default in Ironic).
# * never: Never collect the ramdisk logs.
IRONIC_DEPLOY_LOGS_COLLECT=${IRONIC_DEPLOY_LOGS_COLLECT:-always}

# IRONIC_DEPLOY_LOGS_STORAGE_BACKEND possible values are:
# * local: To store the logs in the local filesystem (Default in Ironic and DevStack).
# * swift: To store the logs in Swift.
IRONIC_DEPLOY_LOGS_STORAGE_BACKEND=${IRONIC_DEPLOY_LOGS_STORAGE_BACKEND:-local}

# The path to the directory where Ironic should put the logs when IRONIC_DEPLOY_LOGS_STORAGE_BACKEND is set to "local"
IRONIC_DEPLOY_LOGS_LOCAL_PATH=${IRONIC_DEPLOY_LOGS_LOCAL_PATH:-$IRONIC_VM_LOG_DIR/deploy_logs}

# Fast track option
IRONIC_DEPLOY_FAST_TRACK=${IRONIC_DEPLOY_FAST_TRACK:-False}

# Define baremetal min_microversion in tempest config. Default value None is picked from tempest.
TEMPEST_BAREMETAL_MIN_MICROVERSION=${TEMPEST_BAREMETAL_MIN_MICROVERSION:-}

# Define baremetal max_microversion in tempest config. No default value means that it is picked from tempest.
TEMPEST_BAREMETAL_MAX_MICROVERSION=${TEMPEST_BAREMETAL_MAX_MICROVERSION:-}

# TODO(TheJulia): This PHYSICAL_NETWORK needs to be refactored in
# our devstack plugin. It is used by the neutron-legacy integration,
# however they want to name the new variable for the current neutron
# plugin NEUTRON_PHYSICAL_NETWORK. For now we'll do some magic and
# change it later once we migrate our jobs.
PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-${PHYSICAL_NETWORK:-}}

# Ramdisk ISO image for Ramdisk Virtual Media/iPXE testing
IRONIC_RAMDISK_IMAGE=${IRONIC_RAMDISK_IMAGE:-http://tinycorelinux.net/10.x/x86/archive/10.0/Core-10.0.iso}

IRONIC_LOADER_PATHS=${IRONIC_LOADER_PATHS:-}

# update_loader_copy_paths() - Appends to the loader paths for automatic
# file copy in by Ironic upon startup.
function update_loader_copy_paths {
    # Append $1 (a "dest:source" pair) to the comma-separated
    # IRONIC_LOADER_PATHS list, starting the list if it is empty.
    if [[ -n $IRONIC_LOADER_PATHS ]]; then
        IRONIC_LOADER_PATHS="$IRONIC_LOADER_PATHS,$1"
    else
        IRONIC_LOADER_PATHS=$1
    fi
}

# get_pxe_boot_file() - Get the PXE boot file path
function get_pxe_boot_file {
    local pxe_boot_file
    # Standard PXE
    if is_ubuntu; then
        # Ubuntu Xenial (16.04) places the file under /usr/lib/PXELINUX
        pxe_paths="/usr/lib/syslinux/pxelinux.0 /usr/lib/PXELINUX/pxelinux.0"
        # Last existing path wins.
        for p in $pxe_paths; do
            if [[ -f $p ]]; then
                pxe_boot_file=$p
            fi
        done
    elif is_fedora || is_suse; then
        pxe_boot_file=/usr/share/syslinux/pxelinux.0
    fi
    # May echo an empty string if no candidate file exists.
    echo $pxe_boot_file
}

# PXE boot image - Deprecated
IRONIC_PXE_BOOT_IMAGE=${IRONIC_PXE_BOOT_IMAGE:-$(get_pxe_boot_file)}
IRONIC_AUTOMATED_CLEAN_ENABLED=$(trueorfalse True IRONIC_AUTOMATED_CLEAN_ENABLED)
IRONIC_SECURE_BOOT=${IRONIC_SECURE_BOOT:-False}
IRONIC_UEFI_BOOT_LOADER=${IRONIC_UEFI_BOOT_LOADER:-grub2}
IRONIC_GRUB2_SHIM_FILE=${IRONIC_GRUB2_SHIM_FILE:-}
IRONIC_GRUB2_FILE=${IRONIC_GRUB2_FILE:-}
IRONIC_GRUB2_NETWORK_FILE=${IRONIC_GRUB2_NETWORK_FILE:-}
IRONIC_UEFI_FILES_DIR=${IRONIC_UEFI_FILES_DIR:-/var/lib/libvirt/images}
UEFI_LOADER_PATH=$IRONIC_UEFI_FILES_DIR/OVMF_CODE.fd
UEFI_NVRAM_PATH=$IRONIC_UEFI_FILES_DIR/OVMF_VARS.fd

# Handle architecture specific package installs
if [[ $IRONIC_HW_ARCH == "x86_64" ]]; then
    install_package shim
    if is_ubuntu; then
        install_package grub-efi-amd64-signed shim-signed
    elif is_fedora; then
        install_package grub2 grub2-efi
    fi
fi

# Sanity checks
if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then
    if [[ "$IRONIC_IPXE_ENABLED" == "False" ]] && [[ "$IRONIC_UEFI_BOOT_LOADER" != "grub2" ]]; then
        die $LINENO "Boot mode UEFI is only supported with iPXE and grub2 bootloaders."
    fi
    if ! is_fedora && ! is_ubuntu; then
        die $LINENO "Boot mode UEFI only works in Ubuntu or Fedora for now."
    fi
    # Default the signed shim/grub paths per distribution when the
    # operator has not supplied IRONIC_GRUB2_FILE explicitly.
    if is_ubuntu && [[ -z $IRONIC_GRUB2_FILE ]]; then
        IRONIC_GRUB2_SHIM_FILE=/usr/lib/shim/shimx64.efi.signed
        IRONIC_GRUB2_FILE=/usr/lib/grub/x86_64-efi-signed/grubx64.efi.signed
        IRONIC_GRUB2_NETWORK_FILE=/usr/lib/grub/x86_64-efi-signed/grubnetx64.efi.signed
    elif is_fedora && [[ -z $IRONIC_GRUB2_FILE ]]; then
        IRONIC_GRUB2_SHIM_FILE=/boot/efi/EFI/fedora/shimx64.efi
        IRONIC_GRUB2_FILE=/boot/efi/EFI/fedora/grubx64.efi
        IRONIC_GRUB2_NETWORK_FILE=/boot/efi/EFI/fedora/grubx64.efi
    fi
    if [[ "$IRONIC_IPXE_ENABLED" == "False" ]]; then
        # NOTE(TheJulia): While we no longer directly copy the
        # IRONIC_GRUB2_FILE, we still check the existence as
        # without the bootloader package we would be unable to build
        # the netboot core image.
        if [[ -z $IRONIC_GRUB2_SHIM_FILE ]] || [[ -z $IRONIC_GRUB2_FILE ]] || [[ ! -f $IRONIC_GRUB2_SHIM_FILE ]] || [[ ! -f $IRONIC_GRUB2_FILE ]]; then
            die $LINENO "Grub2 Bootloader and Shim file missing."
        fi
    fi
fi

# TODO(dtantsur): change this when we change the default value.
IRONIC_DEFAULT_BOOT_OPTION=${IRONIC_DEFAULT_BOOT_OPTION:-local}
if [ $IRONIC_DEFAULT_BOOT_OPTION != "netboot" ] && [ $IRONIC_DEFAULT_BOOT_OPTION != "local" ]; then
    die $LINENO "Supported values for IRONIC_DEFAULT_BOOT_OPTION are 'netboot' and 'local' only."
fi

# TODO(pas-ha) find a way to (cross-)sign the custom CA bundle used by tls-proxy
# with default iPXE cert - for reference see http://ipxe.org/crypto
if is_service_enabled tls-proxy && [[ "$IRONIC_IPXE_USE_SWIFT" == "True" ]]; then
    die $LINENO "Ironic in DevStack does not yet support booting iPXE from HTTPS URLs"
fi

# Timeout for "manage" action. 2 minutes is more than enough.
IRONIC_MANAGE_TIMEOUT=${IRONIC_MANAGE_TIMEOUT:-120}

# Timeout for "provide" action. This involves cleaning.
if [[ -n "$IRONIC_PXE_BOOT_RETRY_TIMEOUT" ]]; then
    # Give cleaning twice the PXE retry budget when one is configured.
    IRONIC_DEFAULT_CLEANING_TIMEOUT=$(( $IRONIC_PXE_BOOT_RETRY_TIMEOUT * 2 ))
else
    IRONIC_DEFAULT_CLEANING_TIMEOUT=1800
fi
IRONIC_CLEANING_TIMEOUT=${IRONIC_CLEANING_TIMEOUT:-$IRONIC_DEFAULT_CLEANING_TIMEOUT}
IRONIC_CLEANING_DELAY=10
IRONIC_CLEANING_ATTEMPTS=$(( $IRONIC_CLEANING_TIMEOUT / $IRONIC_CLEANING_DELAY ))

# Timeout for ironic-neutron-agent to report state before providing nodes.
# The agent reports every 60 seconds, 2 minutes should do.
IRONIC_NEUTRON_AGENT_REPORT_STATE_DELAY=10
IRONIC_NEUTRON_AGENT_REPORT_STATE_TIMEOUT=${IRONIC_NEUTRON_AGENT_REPORT_STATE_TIMEOUT:-120}
IRONIC_NEUTRON_AGENT_REPORT_STATE_ATTEMPTS=$(( $IRONIC_NEUTRON_AGENT_REPORT_STATE_TIMEOUT / IRONIC_NEUTRON_AGENT_REPORT_STATE_DELAY ))

# Username to use by Ansible to access ramdisk,
# to be set as '[ansible]/default_username' option.
# If not set here (default), will be set to 'tc' for TinyIPA ramdisk,
# for other ramdisks it must be either provided here,
# or set manually per-node via ironic API
IRONIC_ANSIBLE_SSH_USER=${IRONIC_ANSIBLE_SSH_USER:-}

# Path to the private SSH key to use by ansible deploy interface
# that will be set as '[ansible]/default_key_file' option in config.
# The public key path is assumed to be ${IRONIC_ANSIBLE_SSH_KEY}.pub
# and will be used when rebuilding the image to include this public key
# in ~/.ssh/authorized_keys of a $IRONIC_ANSIBLE_SSH_USER in the ramdisk.
# Only the TinyIPA ramdisks are currently supported for such rebuild.
# For TinyIPA ramdisks, if the specified file doesn't exist, it will
# be created and will contain a new RSA passwordless key. We assume
# that the directories in the path to this file exist and are
# writable.
# For other ramdisk types, make sure the corresponding public key is baked into
# the ramdisk to be used by DevStack and provide the path to the private key here,
# or set it manually per node via ironic API.
# FIXME(pas-ha) auto-generated keys currently won't work for multi-node
# DevStack deployment, as we do not distribute this generated key to subnodes yet.
IRONIC_ANSIBLE_SSH_KEY=${IRONIC_ANSIBLE_SSH_KEY:-$IRONIC_DATA_DIR/ansible_ssh_key}

if is_service_enabled swift && [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then
    IRONIC_DEFAULT_DOWNLOAD_SOURCE=swift
else
    IRONIC_DEFAULT_DOWNLOAD_SOURCE=
fi
IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE=${IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE:-$IRONIC_DEFAULT_DOWNLOAD_SOURCE}

# Functions
# ---------

# UEFI related functions
# get_uefi_ipxe_boot_file - Deprecated
function get_uefi_ipxe_boot_file {
    if is_ubuntu; then
        # NOTE(TheJulia): This *should* be snponly.efi, however
        # ubuntu only started shipping it in hirsute.
        echo /usr/lib/ipxe/ipxe.efi
    elif is_fedora; then
        echo /usr/share/ipxe/ipxe-snponly-x86_64.efi
    fi
}

# get_uefi_loader - Path to the OVMF UEFI firmware code image.
function get_uefi_loader {
    if is_ubuntu; then
        echo /usr/share/OVMF/OVMF_CODE.fd
    elif is_fedora; then
        echo /usr/share/edk2/ovmf/OVMF_CODE.fd
    fi
}

# get_uefi_nvram - Path to the OVMF UEFI NVRAM variables image.
function get_uefi_nvram {
    if is_ubuntu; then
        echo /usr/share/OVMF/OVMF_VARS.fd
    elif is_fedora; then
        echo /usr/share/edk2/ovmf/OVMF_VARS.fd
    fi
}

# Misc
function restart_libvirt {
    local libvirt_service_name="libvirtd"

    # Older Ubuntu releases name the service libvirt-bin.
    if is_ubuntu && ! type libvirtd; then
        libvirt_service_name="libvirt-bin"
    fi

    restart_service $libvirt_service_name
}

# Test if any Ironic services are enabled
# is_ironic_enabled
function is_ironic_enabled {
    [[ ,${ENABLED_SERVICES} =~ ,"ir-" ]] && return 0
    return 1
}

# True for the "direct" deploy interface (the default when unset).
function is_deployed_by_agent {
    [[ "$IRONIC_DEFAULT_DEPLOY_INTERFACE" == "direct" || "$IRONIC_DEFAULT_DEPLOY_INTERFACE" == "" ]] && return 0
    return 1
}

function is_deployed_by_ipmi {
    [[ "$IRONIC_DEPLOY_DRIVER" == ipmi ]] && return 0
    return 1
}

function is_deployed_by_ilo {
    [[ "${IRONIC_DEPLOY_DRIVER}" == ilo ]] && return 0
    return 1
}

function is_deployed_by_drac {
    [[ "${IRONIC_DEPLOY_DRIVER}" == idrac ]] && return 0
    return 1
}

function is_deployed_by_snmp {
    [[ "${IRONIC_DEPLOY_DRIVER}" == snmp ]] && return 0
    return 1
}

function is_deployed_by_redfish {
    [[ "$IRONIC_DEPLOY_DRIVER" == redfish ]] && return 0
    return 1
}

function is_deployed_by_irmc {
    [[ "$IRONIC_DEPLOY_DRIVER" == irmc ]] && return 0
    return 1
}

function is_deployed_by_xclarity {
    [[ "$IRONIC_DEPLOY_DRIVER" == xclarity ]] && return 0
    return 1
}

function is_deployed_by_ibmc {
    [[ "$IRONIC_DEPLOY_DRIVER" == ibmc ]] && return 0
    return 1
}

# The ${VAR%%*pattern*} expansions below are empty only when the
# pattern occurs somewhere in the enabled-types list.
function is_drac_enabled {
    [[ -z "${IRONIC_ENABLED_HARDWARE_TYPES%%*idrac*}" ]] && return 0
    return 1
}

function is_ibmc_enabled {
    [[ -z "${IRONIC_ENABLED_HARDWARE_TYPES%%*ibmc*}" ]] && return 0
    return 1
}

function is_irmc_enabled {
    [[ -z "${IRONIC_ENABLED_HARDWARE_TYPES%%*irmc*}" ]] && return 0
    return 1
}

function is_ansible_deploy_enabled {
    [[ -z "${IRONIC_ENABLED_DEPLOY_INTERFACES%%*ansible*}" ]] && return 0
    return 1
}

function is_redfish_enabled {
    [[ -z "${IRONIC_ENABLED_HARDWARE_TYPES%%*redfish*}" ]] && return 0
    return 1
}

function is_ansible_with_tinyipa {
    # NOTE(pas-ha) we support rebuilding the ramdisk to include (generated) SSH keys
    # as needed for ansible deploy interface only for TinyIPA ramdisks for now
    is_ansible_deploy_enabled && [[ "$IRONIC_RAMDISK_TYPE" == "tinyipa" ]] && return 0
    return 1
}

function is_http_server_required {
    [[ "$IRONIC_IPXE_ENABLED" == "True" ]] && return 0
    is_deployed_by_agent && [[ "$IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE" != "swift" ]] && return 0
    return 1
}

function is_glance_configuration_required {
    # Always configure if we're asked to
    [[ "$IRONIC_CONFIGURE_GLANCE_WITH_SWIFT" == "True" ]] && return 0
    # Do not require swift configuration if using image_download_source!=swift
    [[ "$IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE" == "swift" ]] || return 1
    # Otherwise require for direct and ansible deploy
    is_deployed_by_agent || is_ansible_deploy_enabled && return 0
    return 1
}

function is_deploy_iso_required {
    [[ "$IRONIC_IS_HARDWARE" == "True" && "$IRONIC_DEPLOY_ISO_REQUIRED" == "True" ]] && return 0
    return 1
}

# Assert that the redfish hardware type is enabled in case we are using
# the redfish driver
if is_deployed_by_redfish && [[ "$IRONIC_ENABLED_HARDWARE_TYPES" != *"redfish"* ]]; then
    die $LINENO "Please make sure that the redfish hardware" \
        "type is enabled. Take a look at the " \
        "IRONIC_ENABLED_HARDWARE_TYPES configuration option" \
        "for DevStack"
fi

# Assert that for non-TinyIPA ramdisks and Ansible, the private SSH key file to use exists.
if is_ansible_deploy_enabled && [[ "$IRONIC_RAMDISK_TYPE" != "tinyipa" ]]; then
    if [[ ! -f $IRONIC_ANSIBLE_SSH_KEY ]]; then
        die $LINENO "Using non-TinyIPA ramdisks with ansible deploy interface" \
            "requires setting IRONIC_ANSIBLE_SSH_KEY to existing"\
            "private SSH key file to be used by Ansible."
    fi
fi

# Syslinux >= 5.00 pxelinux.0 binary is not "stand-alone" anymore,
# it depends on some c32 modules to work correctly.
# More info: http://www.syslinux.org/wiki/index.php/Library_modules
function setup_syslinux_modules {
    # Ignore it for iPXE, it doesn't depend on syslinux modules
    [[ "$IRONIC_IPXE_ENABLED" == "True" ]] && return 0

    # Ubuntu Xenial doesn't ship pxelinux.0 as part of syslinux anymore
    if is_ubuntu && [[ -d /usr/lib/PXELINUX/ ]]; then
        # NOTE(TheJulia): Few distributions package and ship syslinux.efi,
        # so this is basically only for bios booting.
        cp -aR /usr/lib/syslinux/modules/bios/*.c32 $IRONIC_TFTPBOOT_DIR
    else
        cp -aR $(dirname $IRONIC_PXE_BOOT_IMAGE)/*.c32 $IRONIC_TFTPBOOT_DIR
    fi
}

function start_virtualbmc {
    start_service $IRONIC_VBMC_SYSTEMD_SERVICE
}

function stop_virtualbmc {
    stop_service $IRONIC_VBMC_SYSTEMD_SERVICE
}

# Stop the service and remove its systemd unit file.
function cleanup_virtualbmc {
    stop_virtualbmc

    disable_service $IRONIC_VBMC_SYSTEMD_SERVICE

    local unitfile="$SYSTEMD_DIR/$IRONIC_VBMC_SYSTEMD_SERVICE"
    sudo rm -f $unitfile

    $SYSTEMCTL daemon-reload
}

# Install VirtualBMC and register it as a systemd user unit.
function install_virtualbmc {
    # Install pyghmi from source, if requested, otherwise it will be
    # downloaded as part of the virtualbmc installation
    if use_library_from_git "pyghmi"; then
        git_clone_by_name "pyghmi"
        setup_dev_lib "pyghmi"
    fi
    if use_library_from_git "virtualbmc"; then
        git_clone_by_name "virtualbmc"
        setup_dev_lib "virtualbmc"
    else
        pip_install_gr "virtualbmc"
    fi
    local cmd
    cmd=$(which vbmcd)
    cmd+=" --foreground"
    write_user_unit_file $IRONIC_VBMC_SYSTEMD_SERVICE "$cmd" "" "$STACK_USER"
    local unitfile="$SYSTEMD_DIR/$IRONIC_VBMC_SYSTEMD_SERVICE"
    iniset -sudo $unitfile "Service" "Environment" "VIRTUALBMC_CONFIG=$IRONIC_VBMC_CONFIG_FILE"
    enable_service $IRONIC_VBMC_SYSTEMD_SERVICE
}

# Write the VirtualBMC configuration file (debug logging enabled).
function configure_virtualbmc {
    if [[ ! -d $(dirname $IRONIC_VBMC_CONFIG_FILE) ]]; then
        mkdir -p $(dirname $IRONIC_VBMC_CONFIG_FILE)
    fi
    iniset -sudo $IRONIC_VBMC_CONFIG_FILE log debug True
}

function start_virtualpdu {
    start_service $IRONIC_VPDU_SYSTEMD_SERVICE
}

function stop_virtualpdu {
    stop_service $IRONIC_VPDU_SYSTEMD_SERVICE
}

# Stop the service and remove its systemd unit file.
function cleanup_virtualpdu {
    stop_virtualpdu

    disable_service $IRONIC_VPDU_SYSTEMD_SERVICE

    local unitfile="$SYSTEMD_DIR/$IRONIC_VPDU_SYSTEMD_SERVICE"
    sudo rm -f $unitfile

    $SYSTEMCTL daemon-reload
}

# Install VirtualPDU and register it as a systemd user unit.
function install_virtualpdu {
    if use_library_from_git "virtualpdu"; then
        git_clone_by_name "virtualpdu"
        setup_dev_lib "virtualpdu"
    else
        pip_install "virtualpdu"
    fi
    local cmd
    cmd=$(which virtualpdu)
    cmd+=" $IRONIC_VPDU_CONFIG_FILE"
    write_user_unit_file $IRONIC_VPDU_SYSTEMD_SERVICE "$cmd" "" "$STACK_USER"
    enable_service $IRONIC_VPDU_SYSTEMD_SERVICE
}

# Write the VirtualPDU configuration (SNMP listener + one outlet per VM).
function configure_virtualpdu {
    mkdir -p $(dirname $IRONIC_VPDU_CONFIG_FILE)

    iniset -sudo $IRONIC_VPDU_CONFIG_FILE global debug True
    iniset -sudo $IRONIC_VPDU_CONFIG_FILE global libvirt_uri "qemu:///system"

    iniset -sudo $IRONIC_VPDU_CONFIG_FILE PDU listen_address ${HOST_IP}
    iniset -sudo $IRONIC_VPDU_CONFIG_FILE PDU listen_port ${IRONIC_VPDU_LISTEN_PORT}
    iniset -sudo $IRONIC_VPDU_CONFIG_FILE PDU community ${IRONIC_VPDU_COMMUNITY}
    iniset -sudo $IRONIC_VPDU_CONFIG_FILE PDU ports $(_generate_pdu_ports)
    iniset -sudo $IRONIC_VPDU_CONFIG_FILE PDU outlet_default_state "OFF"
}

# _generate_pdu_ports() - Generates list of port:node_name.
# Build "port:vm_name" pairs starting at IRONIC_VPDU_PORT_RANGE_START,
# one per virtual bare-metal VM, joined with commas.
function _generate_pdu_ports {
    pdu_port_number=${IRONIC_VPDU_PORT_RANGE_START}
    port_config=()
    for vm_name in $(_ironic_bm_vm_names); do
        port_config+=("${pdu_port_number}:${vm_name}")
        pdu_port_number=$(( pdu_port_number + 1 ))
    done
    echo ${port_config[*]} | tr ' ' ','
}

function start_redfish {
    start_service $IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE
}

function stop_redfish {
    stop_service $IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE
}

# Stop the emulator and remove its config and systemd unit file.
function cleanup_redfish {
    stop_redfish

    rm -f $IRONIC_REDFISH_EMULATOR_CONFIG

    disable_service $IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE

    local unitfile="$SYSTEMD_DIR/$IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE"
    sudo rm -f $unitfile

    $SYSTEMCTL daemon-reload
}

# Install sushy-tools' Redfish emulator under gunicorn and register it
# as a systemd user unit.
function install_redfish {
    # TODO(lucasagomes): Use Apache WSGI instead of gunicorn
    if is_fedora; then
        install_package python3-gunicorn
    else
        pip_install_gr "gunicorn"
    fi

    if use_library_from_git "sushy-tools"; then
        git_clone_by_name "sushy-tools"
        setup_dev_lib "sushy-tools"
    else
        pip_install "sushy-tools"
    fi

    local cmd
    cmd=$(which gunicorn)
    cmd+=" sushy_tools.emulator.main:app"
    cmd+=" --bind ${HOST_IP}:${IRONIC_REDFISH_EMULATOR_PORT}"
    cmd+=" --env FLASK_DEBUG=1"
    cmd+=" --env SUSHY_EMULATOR_CONFIG=${IRONIC_REDFISH_EMULATOR_CONFIG}"
    # NOTE(dtantsur): handling virtual media ISO can take time, so increase
    # both concurrency and the worker timeout.
    cmd+=" --workers 2 --threads 2 --timeout 90"
    # NOTE(dtantsur): log all accesses to stdout
    cmd+=" --access-logfile=- --error-logfile=-"

    write_user_unit_file $IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE "$cmd" "" "$STACK_USER"

    enable_service $IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE
}

# Write the sushy-tools emulator configuration file.
function configure_redfish {
    if [[ ! -d $(dirname $IRONIC_REDFISH_EMULATOR_CONFIG) ]]; then
        mkdir -p $(dirname $IRONIC_REDFISH_EMULATOR_CONFIG)
    fi

    # FIX: this must be a here-document written TO the config file
    # ("<<EOF >"); a plain "<" redirection would instead read the config
    # file as stdin and execute the map below as shell code.
    cat - <<EOF > $IRONIC_REDFISH_EMULATOR_CONFIG
SUSHY_EMULATOR_BOOT_LOADER_MAP = {
    'UEFI': {
        'x86_64': '$UEFI_LOADER_PATH'
    },
    'Legacy': {
        'x86_64': None
    }
}
EOF
}

# Install sushy from git or pypi, honoring LIBS_FROM_GIT.
function setup_sushy {
    if use_library_from_git "sushy"; then
        git_clone_by_name "sushy"
        setup_dev_lib "sushy"
    else
        pip_install_gr "sushy"
    fi
}

# install_ironic() - Install the things!
function install_ironic {
    # NOTE(vsaienko) do not check required_services on subnode
    if [[ "$HOST_TOPOLOGY_ROLE" != "subnode" ]]; then
        # make sure all needed service were enabled
        local req_services="key"
        if is_service_enabled nova && [[ "$VIRT_DRIVER" == "ironic" ]]; then
            req_services+=" nova glance neutron"
        fi
        for srv in $req_services; do
            if ! is_service_enabled "$srv"; then
                die $LINENO "$srv should be enabled for Ironic."
            fi
        done
    fi

    if use_library_from_git "ironic-lib"; then
        git_clone_by_name "ironic-lib"
        setup_dev_lib "ironic-lib"
    fi

    setup_develop $IRONIC_DIR

    if [[ "$IRONIC_USE_WSGI" == "True" ]] || is_http_server_required; then
        install_apache_wsgi
    fi

    if is_redfish_enabled || is_deployed_by_redfish; then
        setup_sushy
    fi

    # Virtual BMC/PDU/Redfish emulators are only needed for virtual testing.
    if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then
        if is_deployed_by_ipmi; then
            install_virtualbmc
        fi
        if is_deployed_by_snmp; then
            install_virtualpdu
        fi
        if is_deployed_by_redfish; then
            install_redfish
        fi
    fi

    if is_drac_enabled; then
        pip_install python-dracclient
    fi

    if is_ibmc_enabled; then
        pip_install python-ibmcclient
    fi

    if is_irmc_enabled; then
        pip_install python-scciclient pysnmp
    fi

    if is_ansible_deploy_enabled; then
        # Install the pinned ansible version from driver-requirements.txt.
        pip_install "$(grep '^ansible' $IRONIC_DIR/driver-requirements.txt | awk '{print $1}')"
    fi
}

# install_ironicclient() - Collect sources and prepare
function install_ironicclient {
    if use_library_from_git "python-ironicclient"; then
        git_clone_by_name "python-ironicclient"
        setup_dev_lib "python-ironicclient"
    else
        # nothing actually "requires" ironicclient, so force install from pypi
        pip_install_gr python-ironicclient
    fi
}

# _cleanup_ironic_apache_additions() - Remove uwsgi files, disable and remove apache vhost file
function _cleanup_ironic_apache_additions {
    if is_http_server_required; then
        sudo rm -rf $IRONIC_HTTP_DIR
        # TODO(dtantsur): rename the site, it's also used for direct deploy
        disable_apache_site ipxe-ironic
        sudo rm -f $(apache_site_config_for ipxe-ironic)
    fi
    if [[ "$IRONIC_USE_WSGI" == "True" ]]; then
        remove_uwsgi_config "$IRONIC_UWSGI_CONF" "$IRONIC_UWSGI"
    fi
    restart_apache_server
}

# _config_ironic_apache_additions() - Configure ironic IPXE site
function _config_ironic_apache_additions {
    local ipxe_apache_conf
    ipxe_apache_conf=$(apache_site_config_for ipxe-ironic)
    sudo cp $IRONIC_DEVSTACK_FILES_DIR/apache-ipxe-ironic.template $ipxe_apache_conf
    sudo sed -e "
        s|%PUBLICPORT%|$IRONIC_HTTP_PORT|g;
        s|%HTTPROOT%|$IRONIC_HTTP_DIR|g;
        s|%APACHELOGDIR%|$APACHE_LOG_DIR|g;
    " -i $ipxe_apache_conf
    enable_apache_site ipxe-ironic
}

# cleanup_ironic_config_files() - Remove residual cache/config/log files,
# left over from previous runs that would need to clean up.
function cleanup_ironic_config_files {
    sudo rm -rf $IRONIC_AUTH_CACHE_DIR $IRONIC_CONF_DIR
    sudo rm -rf $IRONIC_VM_LOG_DIR/*
}

# cleanup_ironic() - Clean everything left from Ironic
function cleanup_ironic {
    cleanup_ironic_config_files

    # Cleanup additions made to Apache
    if [[ "$IRONIC_USE_WSGI" == "True" ]] || is_http_server_required; then
        _cleanup_ironic_apache_additions
    fi

    cleanup_virtualbmc
    cleanup_virtualpdu
    cleanup_redfish

    # Remove the hook to disable log rotate
    sudo rm -rf $IRONIC_LIBVIRT_HOOKS_PATH/qemu
}

# configure_ironic_dirs() - Create all directories required by Ironic and
# associated services.
function configure_ironic_dirs { sudo install -d -o $STACK_USER $IRONIC_CONF_DIR $STACK_USER $IRONIC_DATA_DIR \ $IRONIC_STATE_PATH $IRONIC_TFTPBOOT_DIR $IRONIC_TFTPBOOT_DIR/pxelinux.cfg sudo chown -R $STACK_USER:$STACK_USER $IRONIC_TFTPBOOT_DIR if is_http_server_required; then sudo install -d -o $STACK_USER -g $STACK_USER $IRONIC_HTTP_DIR fi # Deprecated - Remove at some point. if [ ! -f "$IRONIC_PXE_BOOT_IMAGE" ] && [[ ! -z $IRONIC_LOADER_PATHS ]]; then die $LINENO "PXE boot file $IRONIC_PXE_BOOT_IMAGE not found." fi # Copy PXE binary # NOTE(mjturek): The PXE binary is x86_64 specific. So it should only be copied when # deploying to an x86_64 node. if [[ $IRONIC_HW_ARCH == "x86_64" ]]; then cp $IRONIC_PXE_BOOT_IMAGE $IRONIC_TFTPBOOT_DIR setup_syslinux_modules fi if [[ -z $IRONIC_LOADER_PATHS ]]; then # This takes a slightly different model then the legacy # path of devstack. If file exists, add it to the list. # NOTE(TheJulia): All of these paths are for booting x86 # machines only, others arches can be used, just few distros # pre-package such loader files. if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then if is_ubuntu; then # NOTE(TheJulia): This is done separately here as this allows # the script to have hirtuse/bionic compatability. if [[ -f /usr/lib/ipxe/snponly.efi ]]; then update_loader_copy_paths snponly.efi:/usr/lib/ipxe/snponly.efi elif [[ -f /usr/lib/ipxe/ipxe.efi ]]; then update_loader_copy_paths snponly.efi:/usr/lib/ipxe/ipxe.efi fi fi if is_fedora; then if [ -f /usr/share/ipxe/ipxe-snponly-x86_64.efi ]; then # NOTE(TheJulia): I think this file got renamed at some # point during it's centos8 run, but this is current. 
update_loader_copy_paths snponly.efi:/usr/share/ipxe/ipxe-snponly-x86_64.efi fi fi if [ -f $IRONIC_GRUB2_SHIM_FILE ]; then update_loader_copy_paths "bootx64.efi:$IRONIC_GRUB2_SHIM_FILE" fi if [ -f $IRONIC_GRUB2_SHIM_FILE ]; then update_loader_copy_paths "grubx64.efi:$IRONIC_GRUB2_NETWORK_FILE" fi else if [[ -f /usr/lib/ipxe/undionly.kpxe ]]; then update_loader_copy_paths undionly.kpxe:/usr/lib/ipxe/undionly.kpxe elif [[ -f /usr/share/ipxe/undionly.kpxe ]]; then update_loader_copy_paths undionly.kpxe:/usr/share/ipxe/undionly.kpxe fi fi fi # end of IRONIC_LOADER_PATHS check if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then local uefi_loader local uefi_nvram # Copy the OVMF images to libvirt's path uefi_loader=$(get_uefi_loader) uefi_nvram=$(get_uefi_nvram) sudo cp $uefi_loader $UEFI_LOADER_PATH sudo cp $uefi_nvram $UEFI_NVRAM_PATH fi fi # Create the logs directory when saving the deploy logs to the filesystem if [[ "$IRONIC_DEPLOY_LOGS_STORAGE_BACKEND" == "local" && "$IRONIC_DEPLOY_LOGS_COLLECT" != "never" ]]; then install -d -o $STACK_USER $IRONIC_DEPLOY_LOGS_LOCAL_PATH fi } function configure_ironic_networks { if [[ -n "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then echo_summary "Configuring Ironic provisioning network" configure_ironic_provision_network fi echo_summary "Configuring Ironic cleaning network" configure_ironic_cleaning_network echo_summary "Configuring Ironic rescue network" configure_ironic_rescue_network } function configure_ironic_cleaning_network { iniset $IRONIC_CONF_FILE neutron cleaning_network $IRONIC_CLEAN_NET_NAME } function configure_ironic_rescue_network { iniset $IRONIC_CONF_FILE neutron rescuing_network $IRONIC_RESCUE_NET_NAME } function configure_ironic_provision_network { if [[ "$IP_VERSION" == "6" ]]; then # NOTE(TheJulia): Ideally we should let this happen # with our global address, but iPXE seems to have in # consistant behavior in this configuration with devstack. 
        # so we will setup a dummy interface and use that.
        sudo ip link add magicv6 type dummy
        sudo ip link set dev magicv6 up
        sudo ip -6 addr add $IRONIC_HOST_IPV6/64 dev magicv6
    fi
    if is_service_enabled neutron-api; then
        if [[ "$IRONIC_IP_VERSION" == "6" ]]; then
            sudo sysctl -w net.ipv6.conf.all.proxy_ndp=1
            configure_neutron_l3_lower_v6_ra
        fi
        # Neutron agent needs to be pre-configured before proceeding down the
        # path of configuring the provision network. This was done for us in
        # the legacy neutron code.
        neutron_plugin_configure_plugin_agent
        # This prior step updates configuration related to physnet mappings,
        # and we must restart neutron as a result
        stop_neutron
        sleep 15
        # By default, upon start, neutron tries to create the networks...
        NEUTRON_CREATE_INITIAL_NETWORKS=False start_neutron_api
        start_neutron
    fi
    # This is only called if IRONIC_PROVISION_NETWORK_NAME has been set and
    # means we are using multi-tenant networking.
    local net_id
    local ironic_provision_network_ip
    # NOTE(vsaienko) For multinode case there is no need to create a new provisioning
    # network on subnode, as it was created on primary node. Just get an existed network UUID.
    if [[ "$HOST_TOPOLOGY_ROLE" != "subnode" ]]; then
        die_if_not_set $LINENO IRONIC_PROVISION_SUBNET_PREFIX "You must specify the IRONIC_PROVISION_SUBNET_PREFIX"
        die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
        die_if_not_set $LINENO IRONIC_PROVISION_SUBNET_GATEWAY "You must specify the IRONIC_PROVISION_SUBNET_GATEWAY"
        net_id=$(openstack --os-cloud $OS_CLOUD network create --provider-network-type $IRONIC_PROVISION_PROVIDER_NETWORK_TYPE \
            --provider-physical-network "$PHYSICAL_NETWORK" \
            ${IRONIC_PROVISION_SEGMENTATION_ID:+--provider-segment $IRONIC_PROVISION_SEGMENTATION_ID} \
            ${IRONIC_PROVISION_NETWORK_NAME} -f value -c id)
        die_if_not_set $LINENO net_id "Failure creating net_id for $IRONIC_PROVISION_NETWORK_NAME"
        if [[ "${IRONIC_USE_NEUTRON_SEGMENTS}" == "True" ]]; then
            local net_segment_id
            net_segment_id=$(openstack --os-cloud $OS_CLOUD network segment list --network $net_id -f value -c ID)
            die_if_not_set $LINENO net_segment_id "Failure getting net_segment_id for $IRONIC_PROVISION_NETWORK_NAME"
        fi
        local subnet_id
        if [[ "$IRONIC_IP_VERSION" == '4' ]]; then
            subnet_id="$(openstack --os-cloud $OS_CLOUD subnet create --ip-version 4 \
                ${IRONIC_PROVISION_ALLOCATION_POOL:+--allocation-pool $IRONIC_PROVISION_ALLOCATION_POOL} \
                ${net_segment_id:+--network-segment $net_segment_id} \
                $IRONIC_PROVISION_PROVIDER_SUBNET_NAME \
                --gateway $IRONIC_PROVISION_SUBNET_GATEWAY --network $net_id \
                --subnet-range $IRONIC_PROVISION_SUBNET_PREFIX -f value -c id)"
        else
            subnet_id="$(openstack --os-cloud $OS_CLOUD subnet create --ip-version 6 \
                --ipv6-address-mode dhcpv6-stateful \
                --ipv6-ra-mode dhcpv6-stateful \
                --dns-nameserver 2001:4860:4860::8888 \
                ${net_segment_id:+--network-segment $net_segment_id} \
                $IRONIC_PROVISION_PROVIDER_SUBNET_NAME \
                --gateway $IRONIC_PROVISION_SUBNET_GATEWAY --network $net_id \
                --subnet-range $IRONIC_PROVISION_SUBNET_PREFIX -f value -c id)"
            # NOTE(TheJulia): router must be attached to the subnet for RAs.
            openstack --os-cloud $OS_CLOUD router add subnet $IRONIC_ROUTER_NAME $subnet_id
            # We're going to be using this router for public access to tenant networks
            PUBLIC_ROUTER_ID=$(openstack --os-cloud $OS_CLOUD router show -c id -f value $IRONIC_ROUTER_NAME)
        fi
        die_if_not_set $LINENO subnet_id "Failure creating SUBNET_ID for $IRONIC_PROVISION_NETWORK_NAME"
        ironic_provision_network_ip=$IRONIC_PROVISION_SUBNET_GATEWAY
    else
        net_id=$(openstack --os-cloud $OS_CLOUD network show $IRONIC_PROVISION_NETWORK_NAME -f value -c id)
        ironic_provision_network_ip=$IRONIC_PROVISION_SUBNET_SUBNODE_IP
    fi
    IRONIC_PROVISION_SEGMENTATION_ID=${IRONIC_PROVISION_SEGMENTATION_ID:-`openstack --os-cloud $OS_CLOUD network show ${net_id} -f value -c provider:segmentation_id`}
    provision_net_prefix=${IRONIC_PROVISION_SUBNET_PREFIX##*/}
    # Set provision network GW on physical interface
    # Add vlan on br interface in case of IRONIC_PROVISION_PROVIDER_NETWORK_TYPE==vlan
    # otherwise assign ip to br interface directly.
    sudo ip link set dev $OVS_PHYSICAL_BRIDGE up
    if [[ "$IRONIC_IP_VERSION" == "4" ]]; then
        if [[ "$IRONIC_PROVISION_PROVIDER_NETWORK_TYPE" == "vlan" ]]; then
            sudo ip link add link $OVS_PHYSICAL_BRIDGE name $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID type vlan id $IRONIC_PROVISION_SEGMENTATION_ID
            sudo ip link set dev $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID up
            sudo ip -$IRONIC_IP_VERSION addr add dev $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID $ironic_provision_network_ip/$provision_net_prefix
        else
            sudo ip -$IRONIC_IP_VERSION addr add dev $OVS_PHYSICAL_BRIDGE $ironic_provision_network_ip/$provision_net_prefix
        fi
    else
        # Turn on the external/integration bridges, for IPV6.
        sudo ip link set dev br-ex up
        sudo ip link set dev br-int up
        sudo ip6tables -I FORWARD -i brbm -j LOG || true
        sudo ip6tables -I FORWARD -i br-ex -j LOG || true
    fi
    iniset $IRONIC_CONF_FILE neutron provisioning_network $IRONIC_PROVISION_NETWORK_NAME
}

# cleanup_ironic_provision_network() - Tear down the VLAN subinterfaces that
# configure_ironic_provision_network created on the OVS physical bridge.
function cleanup_ironic_provision_network {
    # Cleanup OVS_PHYSICAL_BRIDGE subinterfaces
    local bridge_subint
    bridge_subint=$(cat /proc/net/dev | sed -n "s/^\(${OVS_PHYSICAL_BRIDGE}\.[0-9]*\).*/\1/p")
    for sub_int in $bridge_subint; do
        sudo ip link set dev $sub_int down
        sudo ip link del dev $sub_int
    done
}

# configure_neutron_l3_lower_v6_ra() - Lower the minimum RA interval so iPXE
# picks up v6 routing information quickly.
function configure_neutron_l3_lower_v6_ra {
    iniset $Q_L3_CONF_FILE DEFAULT min_rtr_adv_interval 5
}

# configure_ironic() - Set config files, create data dirs, etc
function configure_ironic {
    configure_ironic_dirs
    # (re)create ironic configuration file and configure common parameters.
    rm -f $IRONIC_CONF_FILE
    iniset $IRONIC_CONF_FILE DEFAULT debug True
    inicomment $IRONIC_CONF_FILE DEFAULT log_file
    iniset $IRONIC_CONF_FILE database connection `database_connection_url ironic`
    iniset $IRONIC_CONF_FILE DEFAULT state_path $IRONIC_STATE_PATH
    iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG
    # NOTE(vsaienko) with multinode each conductor should have its own host.
    iniset $IRONIC_CONF_FILE DEFAULT host $LOCAL_HOSTNAME
    # NOTE(TheJulia) Set a minimum amount of memory that is more in-line with
    # OpenStack CI and the images deployed.
    iniset $IRONIC_CONF_FILE DEFAULT minimum_required_memory 256
    # Retrieve deployment logs
    iniset $IRONIC_CONF_FILE agent deploy_logs_collect $IRONIC_DEPLOY_LOGS_COLLECT
    iniset $IRONIC_CONF_FILE agent deploy_logs_storage_backend $IRONIC_DEPLOY_LOGS_STORAGE_BACKEND
    iniset $IRONIC_CONF_FILE agent deploy_logs_local_path $IRONIC_DEPLOY_LOGS_LOCAL_PATH
    # Set image_download_source for direct interface
    if [[ -n "$IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE" ]]; then
        iniset $IRONIC_CONF_FILE agent image_download_source $IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE
    fi
    # Configure JSON RPC backend
    iniset $IRONIC_CONF_FILE DEFAULT rpc_transport $IRONIC_RPC_TRANSPORT
    iniset $IRONIC_CONF_FILE json_rpc port $IRONIC_JSON_RPC_PORT
    if [[ "$IRONIC_JSON_RPC_AUTH_STRATEGY" != "" ]]; then
        iniset $IRONIC_CONF_FILE json_rpc auth_strategy $IRONIC_JSON_RPC_AUTH_STRATEGY
    fi
    if [[ "$IRONIC_JSON_RPC_AUTH_STRATEGY" == "http_basic" ]]; then
        iniset $IRONIC_CONF_FILE json_rpc username myName
        iniset $IRONIC_CONF_FILE json_rpc password myPassword
        # json-rpc auth file with bcrypt hash of myPassword
        echo 'myName:$2y$05$lE3eGtyj41jZwrzS87KTqe6.JETVCWBkc32C63UP2aYrGoYOEpbJm' > /etc/ironic/htpasswd-json-rpc
    fi
    if [[ "$IRONIC_JSON_RPC_AUTH_STRATEGY" == "" ]] || [[ "$IRONIC_JSON_RPC_AUTH_STRATEGY" == "keystone" ]]; then
        configure_client_for json_rpc
    fi
    if [[ "$IRONIC_ENFORCE_SCOPE" == "True" ]]; then
        iniset $IRONIC_CONF_FILE oslo_policy enforce_scope true
        iniset $IRONIC_CONF_FILE oslo_policy enforce_new_defaults true
    fi
    # Set fast track options
    iniset $IRONIC_CONF_FILE deploy fast_track $IRONIC_DEPLOY_FAST_TRACK
    # FIXME(dtantsur): configdrive downloading code does not respect IPA TLS
    # configuration, not even ipa-insecure.
    if is_service_enabled swift && [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]] && ! is_service_enabled tls-proxy; then
        iniset $IRONIC_CONF_FILE deploy configdrive_use_object_store True
    fi
    # No need to check if RabbitMQ is enabled, this call does it in a smart way
    if [[ "$IRONIC_RPC_TRANSPORT" == "oslo" ]]; then
        iniset_rpc_backend ironic $IRONIC_CONF_FILE
    fi
    # Configure Ironic conductor, if it was enabled.
    if is_service_enabled ir-cond; then
        configure_ironic_conductor
    fi
    # Configure Ironic API, if it was enabled.
    if is_service_enabled ir-api; then
        configure_ironic_api
    fi
    # Format logging
    setup_logging $IRONIC_CONF_FILE
    # Adds ironic site for IPXE and direct deploy
    if is_http_server_required; then
        _config_ironic_apache_additions
    fi
    # Adds uWSGI for Ironic API
    if [[ "$IRONIC_USE_WSGI" == "True" ]]; then
        write_uwsgi_config "$IRONIC_UWSGI_CONF" "$IRONIC_UWSGI" "/baremetal"
    fi
    if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then
        # The groups change with newer libvirt. Older Ubuntu used
        # 'libvirtd', but now uses libvirt like Debian. Do a quick check
        # to see if libvirtd group already exists to handle grenade's case.
        LIBVIRT_GROUP=$(cut -d ':' -f 1 /etc/group | grep 'libvirtd$' || true)
        LIBVIRT_GROUP=${LIBVIRT_GROUP:-libvirt}
    else
        LIBVIRT_GROUP=libvirtd
    fi
    if ! getent group $LIBVIRT_GROUP >/dev/null; then
        sudo groupadd $LIBVIRT_GROUP
    fi
    # NOTE(vsaienko) Add stack to libvirt group when installing without nova.
    if ! is_service_enabled nova; then
        # Disable power state change callbacks to nova.
        iniset $IRONIC_CONF_FILE nova send_power_notifications false
        add_user_to_group $STACK_USER $LIBVIRT_GROUP
        # This is the basic set of devices allowed / required by all virtual machines.
        # Add /dev/net/tun to cgroup_device_acl, needed for type=ethernet interfaces
        if ! sudo grep -q '^cgroup_device_acl' /etc/libvirt/qemu.conf; then
            # NOTE(review): content appears to have been lost in this copy of
            # the file at this point — the qemu.conf heredoc body, the end of
            # configure_ironic and the header of the function that writes the
            # console-log README are spliced together below. Preserved as-is;
            # reconcile with the upstream source.
            cat <${IRONIC_VM_LOG_DIR}/README << EOF
This directory contains the serial console log files from the virtual Ironic
bare-metal nodes.
The *_console_* log files are the original log files and include ANSI control
codes which can make the output difficult to read. The *_no_ansi_* log files
have had ANSI control codes removed from the file and are easier to read.

On some occasions there won't be a corresponding *_no_ansi_* log file, for
example if the job failed due to a time-out.

You may see a log file without a date/time in the file name. In that case you
can display the logfile in your console by doing:

  $ curl URL_TO_LOGFILE

This will have your terminal process the ANSI escape codes.

Another option, if you have the 'pv' executable installed, is to simulate a
low-speed connection. In this example simulate a 300 Bytes/second connection.

  $ curl URL_TO_LOGFILE | pv -q -L 300

This can allow you to see some of the content before the screen is cleared by
an ANSI escape sequence.
EOF
}

# initialize_libvirt_storage_pool() - Define, autostart and start the libvirt
# directory-backed storage pool used for the fake bare-metal VM disks.
function initialize_libvirt_storage_pool {
    [ -d $LIBVIRT_STORAGE_POOL_PATH ] || sudo mkdir -p $LIBVIRT_STORAGE_POOL_PATH
    if ! sudo virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then
        sudo virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir \
            --target $LIBVIRT_STORAGE_POOL_PATH >&2
        sudo virsh pool-autostart $LIBVIRT_STORAGE_POOL >&2
        sudo virsh pool-start $LIBVIRT_STORAGE_POOL >&2
    fi
    # Make sure the pool is actually running, even if it already existed.
    pool_state=$(sudo virsh pool-info $LIBVIRT_STORAGE_POOL | grep State | awk '{ print $2 }')
    if [ "$pool_state" != "running" ] ; then
        sudo virsh pool-start $LIBVIRT_STORAGE_POOL >&2
    fi
}

# create_bridge_and_vms() - Create the VM network bridge, the fake bare-metal
# VMs (one per _ironic_bm_vm_names entry) and their virtual BMCs, and record
# each VM's MAC in $IRONIC_VM_MACS_CSV_FILE for later enrollment.
function create_bridge_and_vms {
    # Call libvirt setup scripts in a new shell to ensure any new group membership
    sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network.sh $IRONIC_VM_NETWORK_BRIDGE $PUBLIC_BRIDGE_MTU"
    if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
        local log_arg="-l $IRONIC_VM_LOG_DIR"
        if [[ "$IRONIC_VM_LOG_ROTATE" == "True" ]] ; then
            setup_qemu_log_hook
        fi
    else
        local log_arg=""
    fi
    local vbmc_port=$IRONIC_VBMC_PORT_RANGE_START
    local pdu_outlet=$IRONIC_VPDU_PORT_RANGE_START
    local vm_name
    local vm_opts=""
    if [[ -n "$IRONIC_VM_EMULATOR" ]]; then
        vm_opts+=" -e $IRONIC_VM_EMULATOR"
    fi
    vm_opts+=" -E $IRONIC_VM_ENGINE"
    if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then
        vm_opts+=" -L $UEFI_LOADER_PATH -N $UEFI_NVRAM_PATH"
    fi
    if [[ -n "$LIBVIRT_NIC_DRIVER" ]]; then
        vm_opts+=" -D $LIBVIRT_NIC_DRIVER"
    elif [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then
        # Note(derekh) UEFI for the moment doesn't work with the e1000 net driver
        vm_opts+=" -D virtio"
    fi
    initialize_libvirt_storage_pool
    local bridge_mac
    bridge_mac=$(ip link show dev $IRONIC_VM_NETWORK_BRIDGE | grep -Eo "ether [A-Za-z0-9:]+"|sed "s/ether\ //")
    for vm_name in $(_ironic_bm_vm_names); do
        # pick up the $LIBVIRT_GROUP we have possibly joint
        # NOTE(review): the body of this newgrp heredoc (the command that
        # actually creates the VM and appends its MAC to the CSV file)
        # appears to have been lost in this copy of the file — preserved
        # as-is; reconcile with the upstream source.
        newgrp $LIBVIRT_GROUP <> $IRONIC_VM_MACS_CSV_FILE
SUBSHELL
        if is_deployed_by_ipmi; then
            vbmc --no-daemon add $vm_name --port $vbmc_port
            vbmc --no-daemon start $vm_name
        fi
        echo " ${bridge_mac} $IRONIC_VM_NETWORK_BRIDGE" >> $IRONIC_VM_MACS_CSV_FILE
        vbmc_port=$((vbmc_port+1))
        pdu_outlet=$((pdu_outlet+1))
        # It is sometimes useful to dump out the VM configuration to validate it.
        sudo virsh dumpxml $vm_name
    done
    if [[ -z "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then
        local ironic_net_id
        ironic_net_id=$(openstack --os-cloud $OS_CLOUD network show "$PRIVATE_NETWORK_NAME" -c id -f value)
        create_ovs_taps $ironic_net_id
        # NOTE(vsaienko) Neutron no longer setup routing to private network.
        # https://github.com/openstack-dev/devstack/commit/1493bdeba24674f6634160d51b8081c571df4017
        # Add route here to have connection to VMs during provisioning.
        local pub_router_id
        local r_net_gateway
        local dns_server
        local replace_range
        if [[ "$IRONIC_IP_VERSION" == '4' ]]; then
            dns_server="8.8.8.8"
            if [[ -z "${SUBNETPOOL_V4_ID}" ]]; then
                replace_range=${FIXED_RANGE}
            else
                replace_range=${SUBNETPOOL_PREFIX_V4}
            fi
        else
            dns_server="2001:4860:4860::8888"
            if [[ -z "${SUBNETPOOL_V6_ID}" ]]; then
                replace_range=${FIXED_RANGE_V6}
            else
                replace_range=${SUBNETPOOL_PREFIX_V6}
            fi
        fi
        pub_router_id=$(openstack --os-cloud $OS_CLOUD router show $Q_ROUTER_NAME -f value -c id)
        # Select the text starting at "src ", and grabbing the following field.
        r_net_gateway=$(sudo ip netns exec qrouter-$pub_router_id ip -$IRONIC_IP_VERSION route get $dns_server |grep dev | sed s/^.*src\ // |awk '{ print $1 }')
        sudo ip route replace $replace_range via $r_net_gateway
    fi
    # Here is a good place to restart tcpdump to begin capturing packets.
    # See: https://docs.openstack.org/devstack/latest/debugging.html
    # stop_tcpdump
    # start_tcpdump
    if [[ "$IRONIC_IP_VERSION" == "6" ]]; then
        # route us back through the neutron router!
        sudo ip -6 route add $IRONIC_PROVISION_SUBNET_PREFIX via $IPV6_ROUTER_GW_IP
        sudo ip link set dev br-ex up || true
        # Route back to our test subnet. Static should be safe for a while.
        sudo ip -6 route add fd00::/8 via $IPV6_ROUTER_GW_IP
    fi
}

# wait_for_nova_resources() - Poll the Placement API until $1 bare-metal
# resource providers (with the expected resource class and traits) exist.
function wait_for_nova_resources {
    # After nodes have been enrolled, we need to wait for both ironic and
    # nova's periodic tasks to populate the resource tracker with available
    # nodes and resources. Wait up to 2 minutes for a given resource before
    # timing out.
    local expected_count=$1
    local resource_class=${IRONIC_DEFAULT_RESOURCE_CLASS^^}
    # TODO(dtantsur): switch to Placement OSC plugin, once it exists
    local token
    token=$(openstack --os-cloud $IRONIC_OS_CLOUD token issue -f value -c id)
    local endpoint
    endpoint=$(openstack --os-cloud $IRONIC_OS_CLOUD endpoint list --service placement --interface public -f value -c URL)
    die_if_not_set $LINENO endpoint "Cannot find Placement API endpoint"
    local i
    local count
    echo_summary "Waiting up to 3 minutes for placement to pick up $expected_count nodes"
    for i in $(seq 1 12); do
        # Fetch provider UUIDs from Placement
        local providers
        providers=$(curl --noproxy '*' -sH "X-Auth-Token: $token" $endpoint/resource_providers \
            | jq -r '.resource_providers[].uuid')
        local p
        # Total count of the resource class, has to be equal to nodes count
        count=0
        for p in $providers; do
            local amount
            # A resource class inventory record looks something like
            # {"max_unit": 1, "min_unit": 1, "step_size": 1, "reserved": 0, "total": 1, "allocation_ratio": 1}
            # Subtract reserved from total (defaulting both to 0)
            amount=$(curl --noproxy '*' -sH "X-Auth-Token: $token" $endpoint/resource_providers/$p/inventories \
                | jq ".inventories.CUSTOM_$resource_class as \$cls | (\$cls.total // 0) - (\$cls.reserved // 0)")
            # Check whether the resource provider has all expected traits
            # registered against it.
            rp_traits=$(curl --noproxy '*' -sH "X-Auth-Token: $token" \
                -H "OpenStack-API-Version: placement 1.6" \
                $endpoint/resource_providers/$p/traits)
            for trait in $IRONIC_DEFAULT_TRAITS; do
                if [[ $(echo "$rp_traits" | jq ".traits | contains([\"$trait\"])") == false ]]; then
                    amount=0
                fi
            done
            if [ $amount -gt 0 ]; then
                count=$(( count + $amount ))
            fi
        done
        if [ $count -ge $expected_count ]; then
            return 0
        fi
        if is_service_enabled n-api; then
            $TOP_DIR/tools/discover_hosts.sh
        fi
        sleep 15
    done
    die $LINENO "Timed out waiting for Nova to track $expected_count nodes"
}

# _clean_ncpu_failure() - Remove a stale n-cpu failure marker so a deliberate
# nova-compute restart does not fail the end-of-stack service check.
function _clean_ncpu_failure {
    SCREEN_NAME=${SCREEN_NAME:-stack}
    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
    n_cpu_failure="$SERVICE_DIR/$SCREEN_NAME/n-cpu.failure"
    if [ -f ${n_cpu_failure} ]; then
        mv ${n_cpu_failure} "${n_cpu_failure}.before-restart-by-ironic"
    fi
}

# provide_nodes() - Move the given nodes to "available" and wait (through
# cleaning) until all of them get there, or die.
function provide_nodes {
    local nodes=$@
    for node_id in $nodes; do
        $IRONIC_CMD node provide $node_id
    done
    local attempt
    for attempt in $(seq 1 $IRONIC_CLEANING_ATTEMPTS); do
        local available
        available=$(openstack --os-cloud $IRONIC_OS_CLOUD baremetal node list --provision-state available -f value -c UUID)
        local nodes_not_finished=
        for node_id in $nodes; do
            if ! echo $available | grep -q $node_id; then
                nodes_not_finished+=" $node_id"
            fi
        done
        nodes=$nodes_not_finished
        if [[ "$nodes" == "" ]]; then
            break
        fi
        echo "Waiting for nodes to become available: $nodes"
        echo "Currently available: $available"
        sleep $IRONIC_CLEANING_DELAY
    done
    if [[ "$nodes" != "" ]]; then
        die $LINENO "Some nodes did not finish cleaning: $nodes"
    fi
}

# wait_for_ironic_neutron_agent_report_state_for_all_nodes() - Poll the
# neutron agent list until every given node shows an ironic-neutron-agent.
function wait_for_ironic_neutron_agent_report_state_for_all_nodes {
    local nodes=$@
    echo "Waiting for ironic-neutron-agent to report state for nodes: $nodes"
    local attempt
    for attempt in $(seq 1 $IRONIC_NEUTRON_AGENT_REPORT_STATE_ATTEMPTS); do
        local reported
        reported=$(openstack --os-cloud $OS_CLOUD network agent list -f value -c Host -c Binary | grep ironic-neutron-agent | cut -d ' ' -f 1 | paste -s -d ' ')
        echo "Currently reported nodes: $reported"
        local can_break
        for node_id in $nodes; do
            if echo $reported | grep -q $node_id; then
                can_break="True"
            else
                can_break="False"
                break
            fi
        done
        if [[ $can_break == "True" ]]; then
            break
        fi
        sleep $IRONIC_NEUTRON_AGENT_REPORT_STATE_DELAY
    done
    if [[ "$can_break" == "False" ]]; then
        die $LINENO "ironic-neutron-agent did not report some nodes."
    fi
}

# enroll_nodes() - Enroll every node described in the hardware-info file
# (VM MAC CSV for fake hardware, IRONIC_HWINFO_FILE for real hardware) into
# ironic, create its ports, make it available, and create the nova
# "baremetal" flavor when nova is enabled.
function enroll_nodes {
    local chassis_id
    chassis_id=$($IRONIC_CMD chassis create --description "ironic test chassis" -f value -c uuid)
    die_if_not_set $LINENO chassis_id "Failed to create chassis"
    local node_prefix
    node_prefix=$(get_ironic_node_prefix)
    local interface_info
    if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then
        local ironic_node_cpu=$IRONIC_VM_SPECS_CPU
        local ironic_node_ram=$IRONIC_VM_SPECS_RAM
        local ironic_node_disk=$IRONIC_VM_SPECS_DISK
        local ironic_ephemeral_disk=$IRONIC_VM_EPHEMERAL_DISK
        local ironic_node_arch=x86_64
        local ironic_hwinfo_file=$IRONIC_VM_MACS_CSV_FILE
        if is_deployed_by_ipmi; then
            local node_options="\
                --driver-info ipmi_address=${HOST_IP} \
                --driver-info ipmi_username=admin \
                --driver-info ipmi_password=password"
        elif is_deployed_by_snmp; then
            local node_options="\
                --driver-info snmp_driver=${IRONIC_VPDU_SNMPDRIVER} \
                --driver-info snmp_address=${HOST_IP} \
                --driver-info snmp_port=${IRONIC_VPDU_LISTEN_PORT} \
                --driver-info snmp_protocol=2c \
                --driver-info snmp_community=${IRONIC_VPDU_COMMUNITY}"
        elif is_deployed_by_redfish; then
            local node_options="\
                --driver-info redfish_address=http://${HOST_IP}:${IRONIC_REDFISH_EMULATOR_PORT} \
                --driver-info redfish_username=admin \
                --driver-info redfish_password=password"
        fi
    else
        local ironic_node_cpu=$IRONIC_HW_NODE_CPU
        local ironic_node_ram=$IRONIC_HW_NODE_RAM
        local ironic_node_disk=$IRONIC_HW_NODE_DISK
        local ironic_ephemeral_disk=$IRONIC_HW_EPHEMERAL_DISK
        local ironic_node_arch=$IRONIC_HW_ARCH
        local ironic_hwinfo_file=$IRONIC_HWINFO_FILE
    fi
    local total_nodes=0
    local total_cpus=0
    local node_uuids=
    local node_id
    while read hardware_info; do
        local node_name
        node_name=$node_prefix-$total_nodes
        local node_capabilities=""
        if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then
            node_capabilities+=" --property capabilities=boot_mode:uefi"
        fi
        if [[ "$IRONIC_BOOT_MODE" == "bios" ]]; then
            node_capabilities+=" --property capabilities=boot_mode:bios"
        fi
        if [[ "$IRONIC_SECURE_BOOT" == "True" ]]; then
            # Append to an existing capabilities property or start a new one.
            if [[ -n "$node_capabilities" ]]; then
                node_capabilities+=",secure_boot:true"
            else
                node_capabilities+=" --property capabilities=secure_boot:true"
            fi
        fi
        if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then
            interface_info=$(echo $hardware_info | awk '{print $1}')
            if is_deployed_by_ipmi; then
                local vbmc_port
                vbmc_port=$(echo $hardware_info | awk '{print $2}')
                node_options+=" --driver-info ipmi_port=$vbmc_port"
            elif is_deployed_by_snmp; then
                local pdu_outlet
                pdu_outlet=$(echo $hardware_info | awk '{print $3}')
                node_options+=" --driver-info snmp_outlet=$pdu_outlet"
            elif is_deployed_by_redfish; then
                node_options+=" --driver-info redfish_system_id=/redfish/v1/Systems/$node_name"
            fi
            # Local-link-connection options
            local llc_opts=""
            if [[ "${IRONIC_USE_LINK_LOCAL}" == "True" ]]; then
                local switch_info
                local switch_id
                switch_id=$(echo $hardware_info |awk '{print $4}')
                switch_info=$(echo $hardware_info |awk '{print $5}')
                # NOTE(vsaienko) we will add port_id later in the code.
                llc_opts="--local-link-connection switch_id=${switch_id} \
                    --local-link-connection switch_info=${switch_info} "
            fi
            if [[ "${IRONIC_STORAGE_INTERFACE}" == "cinder" ]]; then
                local connector_iqn="iqn.2017-05.org.openstack.$node_prefix-$total_nodes"
                if [[ -n "$node_capabilities" ]]; then
                    node_capabilities+=",iscsi_boot:True"
                else
                    node_capabilities+=" --property capabilities=iscsi_boot:True"
                fi
            fi
        else
            # Currently we require all hardware platform have same CPU/RAM/DISK info
            # in future, this can be enhanced to support different type, and then
            # we create the bare metal flavor with minimum value
            local bmc_address
            bmc_address=$(echo $hardware_info |awk '{print $1}')
            local mac_address
            mac_address=$(echo $hardware_info |awk '{print $2}')
            local bmc_username
            bmc_username=$(echo $hardware_info |awk '{print $3}')
            local bmc_passwd
            bmc_passwd=$(echo $hardware_info |awk '{print $4}')
            local node_options=""
            if is_deployed_by_ipmi; then
                node_options+=" --driver-info ipmi_address=$bmc_address \
                    --driver-info ipmi_password=$bmc_passwd \
                    --driver-info ipmi_username=$bmc_username"
            elif is_deployed_by_ilo; then
                node_options+=" --driver-info ilo_address=$bmc_address \
                    --driver-info ilo_password=$bmc_passwd \
                    --driver-info ilo_username=$bmc_username"
                if [[ $IRONIC_ENABLED_BOOT_INTERFACES == *"ilo-virtual-media"* ]]; then
                    node_options+=" --driver-info deploy_iso=$IRONIC_DEPLOY_ISO_ID"
                fi
            elif is_deployed_by_drac; then
                node_options+=" --driver-info drac_address=$bmc_address \
                    --driver-info drac_password=$bmc_passwd \
                    --driver-info drac_username=$bmc_username"
            elif is_deployed_by_redfish; then
                local bmc_redfish_system_id
                bmc_redfish_system_id=$(echo $hardware_info |awk '{print $5}')
                node_options+=" --driver-info redfish_address=https://$bmc_address \
                    --driver-info redfish_system_id=$bmc_redfish_system_id \
                    --driver-info redfish_password=$bmc_passwd \
                    --driver-info redfish_username=$bmc_username \
                    --driver-info redfish_verify_ca=False"
            elif is_deployed_by_irmc; then
                node_options+=" --driver-info irmc_address=$bmc_address \
                    --driver-info irmc_password=$bmc_passwd \
                    --driver-info irmc_username=$bmc_username"
                if [[ -n "$IRONIC_DEPLOY_ISO_ID" ]]; then
                    node_options+=" --driver-info deploy_iso=$IRONIC_DEPLOY_ISO_ID"
                fi
            elif is_deployed_by_xclarity; then
                local xclarity_hardware_id
                xclarity_hardware_id=$(echo $hardware_info |awk '{print $5}')
                node_options+=" --driver-info xclarity_manager_ip=$bmc_address \
                    --driver-info xclarity_password=$bmc_passwd \
                    --driver-info xclarity_username=$bmc_username \
                    --driver-info xclarity_hardware_id=$xclarity_hardware_id"
            elif is_deployed_by_ibmc; then
                node_options+=" --driver-info ibmc_address=$bmc_address \
                    --driver-info ibmc_username=$bmc_username \
                    --driver-info ibmc_password=$bmc_passwd \
                    --driver-info ibmc_verify_ca=False"
            fi
            interface_info="${mac_address}"
        fi
        # First node created will be used for testing in ironic w/o glance
        # scenario, so we need to know its UUID.
        local standalone_node_uuid=""
        if [ $total_nodes -eq 0 ]; then
            standalone_node_uuid="--uuid $IRONIC_NODE_UUID"
        fi
        # TODO(dtantsur): it would be cool to test with different resource
        # classes, but for now just use the same.
        node_id=$($IRONIC_CMD node create $standalone_node_uuid \
            --chassis $chassis_id \
            --driver $IRONIC_DEPLOY_DRIVER \
            --name $node_name \
            --resource-class $IRONIC_DEFAULT_RESOURCE_CLASS \
            --property cpu_arch=$ironic_node_arch \
            $node_capabilities \
            $node_options \
            -f value -c uuid)
        die_if_not_set $LINENO node_id "Failed to create node"
        node_uuids+=" $node_id"
        if [[ -n $IRONIC_DEFAULT_TRAITS ]]; then
            $IRONIC_CMD node add trait $node_id $IRONIC_DEFAULT_TRAITS
        fi
        $IRONIC_CMD node manage $node_id --wait $IRONIC_MANAGE_TIMEOUT || \
            die $LINENO "Node did not reach manageable state in $IRONIC_MANAGE_TIMEOUT seconds"
        # NOTE(vsaienko) IPA didn't automatically recognize root devices less than 4Gb.
        # Setting root hint allows to install OS on such devices.
        # 0x1af4 is VirtIO vendor device ID.
        # NOTE(review): inside [[ ]] the bare word "is_deployed_by_agent" is
        # evaluated as a non-empty string (always true), not executed as a
        # command — the intended form was probably
        # `[[ "$ironic_node_disk" -lt "4" ]] && is_deployed_by_agent`; confirm
        # and fix upstream.
        if [[ "$ironic_node_disk" -lt "4" && is_deployed_by_agent ]]; then
            $IRONIC_CMD node set $node_id --property \
                root_device='{"vendor": "0x1af4"}'
        fi
        # In case we are using portgroups, we should use an API version that
        # supports them. Otherwise the API will return a 406 ERROR.
        # NOTE(vsaienko) interface_info is in the following format here:
        # mac1,tap-node0i1;mac2,tap-node0i2;...;macN,tap-node0iN
        for info in ${interface_info//;/ }; do
            local mac_address=""
            local port_id=""
            local llc_port_opt=""
            local physical_network=""
            mac_address=$(echo $info| awk -F ',' '{print $1}')
            port_id=$(echo $info| awk -F ',' '{print $2}')
            if [[ "${IRONIC_USE_LINK_LOCAL}" == "True" ]]; then
                llc_port_opt+=" --local-link-connection port_id=${port_id} "
            fi
            if [[ "${IRONIC_USE_NEUTRON_SEGMENTS}" == "True" ]]; then
                physical_network=" --physical-network ${PHYSICAL_NETWORK} "
            fi
            $IRONIC_CMD port create --node $node_id $llc_opts $llc_port_opt $mac_address $physical_network
        done
        # NOTE(vsaienko) use node-update instead of specifying network_interface
        # during node creation. If node is added with latest version of API it
        # will NOT go to available state automatically.
        if [[ -n "${IRONIC_NETWORK_INTERFACE}" ]]; then
            $IRONIC_CMD node set $node_id --network-interface $IRONIC_NETWORK_INTERFACE || \
                die $LINENO "Failed to update network interface for node"
        fi
        if [[ -n "${IRONIC_STORAGE_INTERFACE}" ]]; then
            $IRONIC_CMD node set $node_id --storage-interface $IRONIC_STORAGE_INTERFACE || \
                die $LINENO "Failed to update storage interface for node $node_id"
            if [[ -n "${connector_iqn}" ]]; then
                $IRONIC_CMD volume connector create --node $node_id --type iqn \
                    --connector-id $connector_iqn || \
                    die $LINENO "Failed to create volume connector for node $node_id"
            fi
        fi
        total_nodes=$((total_nodes+1))
    done < $ironic_hwinfo_file
    # NOTE(hjensas): ensure ironic-neutron-agent has done report_state for all
    # nodes we attempt cleaning.
    if [[ "${IRONIC_USE_NEUTRON_SEGMENTS}" == "True" ]]; then
        wait_for_ironic_neutron_agent_report_state_for_all_nodes $node_uuids
    fi
    # NOTE(dtantsur): doing it outside of the loop, because of cleaning
    provide_nodes $node_uuids
    if is_service_enabled nova && [[ "$VIRT_DRIVER" == "ironic" ]]; then
        if [[ "$HOST_TOPOLOGY_ROLE" != "subnode" ]]; then
            local adjusted_disk
            adjusted_disk=$(($ironic_node_disk - $ironic_ephemeral_disk))
            openstack --os-cloud $OS_CLOUD flavor create --ephemeral $ironic_ephemeral_disk --ram $ironic_node_ram --disk $adjusted_disk --vcpus $ironic_node_cpu baremetal
            local resource_class=${IRONIC_DEFAULT_RESOURCE_CLASS^^}
            openstack --os-cloud $OS_CLOUD flavor set baremetal --property "resources:CUSTOM_$resource_class"="1"
            openstack --os-cloud $OS_CLOUD flavor set baremetal --property "resources:DISK_GB"="0"
            openstack --os-cloud $OS_CLOUD flavor set baremetal --property "resources:MEMORY_MB"="0"
            openstack --os-cloud $OS_CLOUD flavor set baremetal --property "resources:VCPU"="0"
            openstack --os-cloud $OS_CLOUD flavor set baremetal --property "cpu_arch"="$ironic_node_arch"
            if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then
                openstack --os-cloud $OS_CLOUD flavor set baremetal --property "capabilities:boot_mode"="uefi"
            fi
            if [[ "$IRONIC_BOOT_MODE" == "bios" ]]; then
                openstack --os-cloud $OS_CLOUD flavor set baremetal --property "capabilities:boot_mode"="bios"
            fi
            for trait in $IRONIC_DEFAULT_TRAITS; do
                openstack --os-cloud $OS_CLOUD flavor set baremetal --property "trait:$trait"="required"
            done
            if [[ "$IRONIC_SECURE_BOOT" == "True" ]]; then
                openstack --os-cloud $OS_CLOUD flavor set baremetal --property "capabilities:secure_boot"="true"
            fi
            # NOTE(dtantsur): sometimes nova compute fails to start with ironic due
            # to keystone restarting and not being able to authenticate us.
            # Restart it just to be sure (and avoid gate problems like bug 1537076)
            stop_nova_compute || /bin/true
            # NOTE(pas-ha) if nova compute failed before restart, .failure file
            # that was created will fail the service_check in the end of the deployment
            _clean_ncpu_failure
            start_nova_compute
        else
            # NOTE(vsaienko) we enrolling IRONIC_VM_COUNT on each node. So on subnode
            # we expect to have 2 x total_cpus
            total_nodes=$(( total_nodes * 2 ))
        fi
        wait_for_nova_resources $total_nodes
    fi
}

# die_if_module_not_loaded() - Abort unless kernel module $1 is loaded.
function die_if_module_not_loaded {
    if ! grep -q $1 /proc/modules; then
        die $LINENO "$1 kernel module is not loaded"
    fi
}

# configure_iptables() - Open the firewall for DHCP/TFTP/API/HTTP/iSCSI
# traffic needed by the bare-metal nodes; definition continues past this
# chunk of the file.
function configure_iptables {
    # enable tftp natting for allowing connections to HOST_IP's tftp server
    if ! running_in_container; then
        sudo modprobe nf_conntrack_tftp
        sudo modprobe nf_nat_tftp
    else
        die_if_module_not_loaded nf_conntrack_tftp
        die_if_module_not_loaded nf_nat_tftp
    fi
    ################ NETWORK DHCP
    # explicitly allow DHCP - packets are occasionally being dropped here
    sudo iptables -I INPUT -p udp --dport 67:68 --sport 67:68 -j ACCEPT || true
    # nodes boot from TFTP and callback to the API server listening on $HOST_IP
    sudo iptables -I INPUT -d $IRONIC_TFTPSERVER_IP -p udp --dport 69 -j ACCEPT || true
    # dhcpv6 which is the only way to transmit boot options
    sudo ip6tables -I INPUT -d $IRONIC_HOST_IPV6 -p udp --dport 546:547 --sport 546:547 -j ACCEPT || true
    sudo ip6tables -I INPUT -d $IRONIC_HOST_IPV6 -p udp --dport 69 -j ACCEPT || true
    ################ Webserver/API
    # To use named /baremetal endpoint we should open default apache port
    if [[ "$IRONIC_USE_WSGI" == "False" ]]; then
        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true
        sudo ip6tables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true
        # open ironic API on baremetal network
        sudo iptables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true
        # allow IPA to connect to ironic API on subnode
        sudo iptables -I FORWARD -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true
    else
        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport 80 -j ACCEPT || true
        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport 443 -j ACCEPT || true
        # open ironic API on baremetal network
        sudo iptables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport 80 -j ACCEPT || true
        sudo ip6tables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport 80 -j ACCEPT || true
        sudo iptables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport 443 -j ACCEPT || true
        sudo ip6tables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport 443 -j ACCEPT || true
    fi
    if is_deployed_by_agent; then
        # agent ramdisk gets instance image from swift
        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport ${SWIFT_DEFAULT_BIND_PORT:-8080} -j ACCEPT || true
        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $GLANCE_SERVICE_PORT -j ACCEPT || true
    fi
    if is_http_server_required; then
        sudo iptables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport $IRONIC_HTTP_PORT -j ACCEPT || true
        sudo ip6tables -I INPUT -d $IRONIC_HOST_IPV6 -p tcp --dport $IRONIC_HTTP_PORT -j ACCEPT || true
    fi
    if [[ "${IRONIC_STORAGE_INTERFACE}" == "cinder" ]]; then
        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $ISCSI_SERVICE_PORT -s $FLOATING_RANGE -j ACCEPT || true
    fi
    # (rpittau) workaround to allow TFTP traffic on ubuntu bionic with conntrack helper disabled
    local qrouter
    qrouter=$(sudo ip netns list | grep qrouter | awk '{print $1;}')
    if [[ !
-z "$qrouter" ]]; then sudo ip netns exec $qrouter iptables -A PREROUTING -t raw -p udp --dport 69 -j CT --helper tftp sudo ip netns exec $qrouter ip6tables -A PREROUTING -t raw -p udp --dport 69 -j CT --helper tftp || true fi } function configure_tftpd { # stop tftpd and setup serving via xinetd stop_service tftpd-hpa || true [ -f /etc/init/tftpd-hpa.conf ] && echo "manual" | sudo tee /etc/init/tftpd-hpa.override sudo cp $IRONIC_TEMPLATES_DIR/tftpd-xinetd.template /etc/xinetd.d/tftp sudo sed -e "s|%TFTPBOOT_DIR%|$IRONIC_TFTPBOOT_DIR|g" -i /etc/xinetd.d/tftp sudo sed -e "s|%MAX_BLOCKSIZE%|$IRONIC_TFTP_BLOCKSIZE|g" -i /etc/xinetd.d/tftp if [[ "$IRONIC_IP_VERSION" == '6' ]]; then sudo sed -e "s|IPv4|IPv6|g" -i /etc/xinetd.d/tftp fi # setup tftp file mapping to satisfy requests at the root (booting) and # /tftpboot/ sub-dir (as per deploy-ironic elements) # this section is only for ubuntu and fedora if [[ "$IRONIC_IPXE_ENABLED" == "False" && \ ( "$IRONIC_BOOT_MODE" == "uefi" || "$IRONIC_SECURE_BOOT" == "True" ) && \ "$IRONIC_UEFI_BOOT_LOADER" == "grub2" ]]; then echo "re ^($IRONIC_TFTPBOOT_DIR/) $IRONIC_TFTPBOOT_DIR/\2" >$IRONIC_TFTPBOOT_DIR/map-file echo "re ^$IRONIC_TFTPBOOT_DIR/ $IRONIC_TFTPBOOT_DIR/" >>$IRONIC_TFTPBOOT_DIR/map-file echo "re ^(^/) $IRONIC_TFTPBOOT_DIR/\1" >>$IRONIC_TFTPBOOT_DIR/map-file echo "re ^([^/]) $IRONIC_TFTPBOOT_DIR/\1" >>$IRONIC_TFTPBOOT_DIR/map-file else echo "r ^([^/]) $IRONIC_TFTPBOOT_DIR/\1" >$IRONIC_TFTPBOOT_DIR/map-file echo "r ^(/tftpboot/) $IRONIC_TFTPBOOT_DIR/\2" >>$IRONIC_TFTPBOOT_DIR/map-file fi sudo chmod -R 0755 $IRONIC_TFTPBOOT_DIR restart_service xinetd } function build_ipa_ramdisk { local kernel_path=$1 local ramdisk_path=$2 local iso_path=$3 case $IRONIC_RAMDISK_TYPE in 'tinyipa') build_tinyipa_ramdisk $kernel_path $ramdisk_path $iso_path ;; 'dib') build_ipa_dib_ramdisk $kernel_path $ramdisk_path $iso_path ;; *) die $LINENO "Unrecognised IRONIC_RAMDISK_TYPE: $IRONIC_RAMDISK_TYPE. Expected either of 'dib' or 'tinyipa'." 
;; esac } function setup_ipa_builder { git_clone $IRONIC_PYTHON_AGENT_BUILDER_REPO $IRONIC_PYTHON_AGENT_BUILDER_DIR $IRONIC_PYTHON_AGENT_BUILDER_BRANCH } function build_tinyipa_ramdisk { echo "Building ironic-python-agent deploy ramdisk" local kernel_path=$1 local ramdisk_path=$2 local iso_path=$3 cd $IRONIC_PYTHON_AGENT_BUILDER_DIR/tinyipa export BUILD_AND_INSTALL_TINYIPA=true if is_ansible_deploy_enabled; then export AUTHORIZE_SSH=true export SSH_PUBLIC_KEY=$IRONIC_ANSIBLE_SSH_KEY.pub fi make cp tinyipa.gz $ramdisk_path cp tinyipa.vmlinuz $kernel_path if is_deploy_iso_required; then make iso cp tinyipa.iso $iso_path fi make clean cd - } function rebuild_tinyipa_for_ansible { local ansible_tinyipa_ramdisk_name pushd $IRONIC_PYTHON_AGENT_BUILDER_DIR/tinyipa export TINYIPA_RAMDISK_FILE=$IRONIC_DEPLOY_RAMDISK export SSH_PUBLIC_KEY=$IRONIC_ANSIBLE_SSH_KEY.pub make addssh ansible_tinyipa_ramdisk_name="ansible-$(basename $IRONIC_DEPLOY_RAMDISK)" mv $ansible_tinyipa_ramdisk_name $TOP_DIR/files make clean popd IRONIC_DEPLOY_RAMDISK=$TOP_DIR/files/$ansible_tinyipa_ramdisk_name } # install_diskimage_builder() - Collect source and prepare or install from pip function install_diskimage_builder { if use_library_from_git "diskimage-builder"; then git_clone_by_name "diskimage-builder" setup_dev_lib -bindep "diskimage-builder" else local bindep_file bindep_file=$(mktemp) curl -o "$bindep_file" "$IRONIC_DIB_BINDEP_FILE" install_bindep "$bindep_file" pip_install_gr "diskimage-builder" fi } function build_ipa_dib_ramdisk { local kernel_path=$1 local ramdisk_path=$2 local iso_path=$3 local tempdir tempdir=$(mktemp -d --tmpdir=${DEST}) # install diskimage-builder if not present if ! 
$(type -P disk-image-create > /dev/null); then install_diskimage_builder fi if -e $DEST/ironic-lib; then export IRONIC_LIB_FROM_SOURCE=true export DIB_REPOLOCATION_ironic_lib=$DEST/ironic-lib fi echo "Building IPA ramdisk with DIB options: $IRONIC_DIB_RAMDISK_OPTIONS" if is_deploy_iso_required; then IRONIC_DIB_RAMDISK_OPTIONS+=" iso" fi git_clone $IRONIC_PYTHON_AGENT_BUILDER_REPO $IRONIC_PYTHON_AGENT_BUILDER_DIR $IRONIC_PYTHON_AGENT_BUILDER_BRANCH ELEMENTS_PATH="$IRONIC_PYTHON_AGENT_BUILDER_DIR/dib" \ DIB_DHCP_TIMEOUT=$IRONIC_DIB_DHCP_TIMEOUT \ DIB_RELEASE=$IRONIC_DIB_RAMDISK_RELEASE \ DIB_REPOLOCATION_ironic_python_agent="$IRONIC_PYTHON_AGENT_DIR" \ DIB_REPOLOCATION_requirements="$DEST/requirements" \ disk-image-create "$IRONIC_DIB_RAMDISK_OPTIONS" \ -x -o "$tempdir/ironic-agent" \ ironic-python-agent-ramdisk chmod -R +r $tempdir mv "$tempdir/ironic-agent.kernel" "$kernel_path" mv "$tempdir/ironic-agent.initramfs" "$ramdisk_path" if is_deploy_iso_required; then mv "$tempdir/ironic-agent.iso" "$iso_path" fi rm -rf $tempdir } function upload_image_if_needed { if [[ "$IRONIC_PARTITIONED_IMAGE_NAME" =~ cirros ]] && is_service_enabled glance; then echo Building a Cirros image suitable for local boot local dest IRONIC_PARTITIONED_IMAGE_NAME=cirros-${CIRROS_VERSION}-x86_64-partition dest="$IRONIC_DATA_DIR/$IRONIC_PARTITIONED_IMAGE_NAME.img" # Export some variables that the script is using. CIRROS_ARCH=$CIRROS_ARCH CIRROS_VERSION=$CIRROS_VERSION \ IRONIC_TTY_DEV=$IRONIC_TTY_DEV VERBOSE=$VERBOSE \ $IRONIC_SCRIPTS_DIR/cirros-partition.sh "$dest" # TODO(dtantsur): stop uploading kernel/ramdisk when image_type support # lands. 
local kernel_id kernel_id=$(openstack image list -f value -c ID -c Name \ | awk '/cirros.*kernel/ { print $1; exit 0; }') die_if_not_set $LINENO kernel_id "Cannot find cirros kernel" local ramdisk_id ramdisk_id=$(openstack image list -f value -c ID -c Name \ | awk '/cirros.*ramdisk/ { print $1; exit 0; }') die_if_not_set $LINENO ramdisk_id "Cannot find cirros ramdisk" openstack image create $IRONIC_PARTITIONED_IMAGE_NAME \ --public --disk-format raw --container-format bare \ --property kernel_id=$kernel_id --property ramdisk_id=$ramdisk_id \ --file "$dest" # Change the default image only if the provided settings prevent the # default cirros image from working. if [[ "$IRONIC_TEMPEST_WHOLE_DISK_IMAGE" != True \ && "$IRONIC_DEFAULT_BOOT_OPTION" == local ]]; then IRONIC_IMAGE_NAME=$IRONIC_PARTITIONED_IMAGE_NAME DEFAULT_IMAGE_NAME=$IRONIC_IMAGE_NAME fi fi } # download EFI boot loader image and upload it to glance # this function sets ``IRONIC_EFIBOOT_ID`` function upload_baremetal_ironic_efiboot { declare -g IRONIC_EFIBOOT_ID local efiboot_name efiboot_name=$(basename $IRONIC_EFIBOOT) echo_summary "Building and uploading EFI boot image for ironic" if [ ! -e "$IRONIC_EFIBOOT" ]; then # NOTE(dtantsur): update doc/source/admin/drivers/redfish.rst when # changing this procedure. 
local efiboot_path efiboot_path=$(mktemp -d --tmpdir=${DEST})/$efiboot_name local efiboot_mount efiboot_mount=$(mktemp -d --tmpdir=${DEST}) dd if=/dev/zero \ of=$efiboot_path \ bs=4096 count=1024 mkfs.fat -s 4 -r 512 -S 4096 $efiboot_path sudo mount $efiboot_path $efiboot_mount sudo mkdir -p $efiboot_mount/efi/boot sudo cp "$IRONIC_GRUB2_SHIM_FILE" $efiboot_mount/efi/boot/bootx64.efi sudo cp "$IRONIC_GRUB2_FILE" $efiboot_mount/efi/boot/grubx64.efi sudo umount $efiboot_mount mv $efiboot_path $IRONIC_EFIBOOT fi # load efiboot into glance # NOTE(TheJulia): Glance requires a project ID be submitted with the # request *or* we just do it as the project scoped admin using the admin # project which in devstack's case is the demo project. # In other words, we can't use devstack-system-admin to upload the image # unless we set the project_id in the create reqeust. IRONIC_EFIBOOT_ID=$(openstack --os-cloud devstack-admin \ image create \ $efiboot_name \ --public --disk-format=raw \ --container-format=bare \ -f value -c id \ < $IRONIC_EFIBOOT) die_if_not_set $LINENO IRONIC_EFIBOOT_ID "Failed to load EFI bootloader image into glance" iniset $IRONIC_CONF_FILE conductor bootloader $IRONIC_EFIBOOT_ID local efi_grub_path if is_ubuntu; then efi_grub_path=EFI/ubuntu/grub.cfg elif is_fedora; then if grep -qi CentOS /etc/redhat-release; then efi_grub_path=EFI/centos/grub.cfg else efi_grub_path=EFI/fedora/grub.cfg fi else # NOTE(dtantsur): this is likely incorrect efi_grub_path=EFI/BOOT/grub.cfg fi iniset $IRONIC_CONF_FILE DEFAULT grub_config_path $efi_grub_path } # build deploy kernel+ramdisk, then upload them to glance # this function sets ``IRONIC_DEPLOY_KERNEL_ID``, ``IRONIC_DEPLOY_RAMDISK_ID`` function upload_baremetal_ironic_deploy { declare -g IRONIC_DEPLOY_KERNEL_ID IRONIC_DEPLOY_RAMDISK_ID local ironic_deploy_kernel_name local ironic_deploy_ramdisk_name ironic_deploy_kernel_name=$(basename $IRONIC_DEPLOY_KERNEL) ironic_deploy_ramdisk_name=$(basename $IRONIC_DEPLOY_RAMDISK) if [[ 
"$HOST_TOPOLOGY_ROLE" != "subnode" ]]; then echo_summary "Creating and uploading baremetal images for ironic" if [ ! -e "$IRONIC_DEPLOY_RAMDISK" ] || \ [ ! -e "$IRONIC_DEPLOY_KERNEL" ] || \ ( is_deploy_iso_required && [ ! -e "$IRONIC_DEPLOY_ISO" ] ); then # setup IRONIC_PYTHON_AGENT_BUILDER_DIR setup_ipa_builder # files don't exist, need to build them if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then # we can build them only if we're not offline if [ "$OFFLINE" != "True" ]; then build_ipa_ramdisk $IRONIC_DEPLOY_KERNEL $IRONIC_DEPLOY_RAMDISK $IRONIC_DEPLOY_ISO else die $LINENO "Deploy kernel+ramdisk or iso files don't exist and cannot be built in OFFLINE mode" fi else # Grab the agent image tarball, either from a local file or remote URL if [[ "$IRONIC_AGENT_KERNEL_URL" =~ "file://" ]]; then cp ${IRONIC_AGENT_KERNEL_URL:7} $IRONIC_DEPLOY_KERNEL else wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_DEPLOY_KERNEL fi if [[ "$IRONIC_AGENT_RAMDISK_URL" =~ "file://" ]]; then cp ${IRONIC_AGENT_RAMDISK_URL:7} $IRONIC_DEPLOY_RAMDISK else wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_DEPLOY_RAMDISK fi if is_ansible_with_tinyipa; then # NOTE(pas-ha) if using ansible-deploy and tinyipa, # this will rebuild ramdisk and override $IRONIC_DEPLOY_RAMDISK rebuild_tinyipa_for_ansible fi fi fi # load them into glance if ! 
is_deploy_iso_required; then IRONIC_DEPLOY_KERNEL_ID=$(openstack --os-cloud devstack-admin \ image create \ $ironic_deploy_kernel_name \ --public --disk-format=aki \ --container-format=aki \ < $IRONIC_DEPLOY_KERNEL | grep ' id ' | get_field 2) die_if_not_set $LINENO IRONIC_DEPLOY_KERNEL_ID "Failed to load kernel image into glance" IRONIC_DEPLOY_RAMDISK_ID=$(openstack --os-cloud devstack-admin \ image create \ $ironic_deploy_ramdisk_name \ --public --disk-format=ari \ --container-format=ari \ < $IRONIC_DEPLOY_RAMDISK | grep ' id ' | get_field 2) die_if_not_set $LINENO IRONIC_DEPLOY_RAMDISK_ID "Failed to load ramdisk image into glance" else IRONIC_DEPLOY_ISO_ID=$(openstack --os-cloud devstack-admin \ image create \ $(basename $IRONIC_DEPLOY_ISO) \ --public --disk-format=iso \ --container-format=bare \ < $IRONIC_DEPLOY_ISO -f value -c id) die_if_not_set $LINENO IRONIC_DEPLOY_ISO_ID "Failed to load deploy iso into glance" fi else if is_ansible_with_tinyipa; then ironic_deploy_ramdisk_name="ansible-$ironic_deploy_ramdisk_name" fi IRONIC_DEPLOY_KERNEL_ID=$(openstack --os-cloud $OS_CLOUD image show $ironic_deploy_kernel_name -f value -c id) IRONIC_DEPLOY_RAMDISK_ID=$(openstack --os-cloud $OS_CLOUD image show $ironic_deploy_ramdisk_name -f value -c id) fi iniset $IRONIC_CONF_FILE conductor deploy_kernel $IRONIC_DEPLOY_KERNEL_ID iniset $IRONIC_CONF_FILE conductor deploy_ramdisk $IRONIC_DEPLOY_RAMDISK_ID iniset $IRONIC_CONF_FILE conductor rescue_kernel $IRONIC_DEPLOY_KERNEL_ID iniset $IRONIC_CONF_FILE conductor rescue_ramdisk $IRONIC_DEPLOY_RAMDISK_ID } function prepare_baremetal_basic_ops { if [[ "$IRONIC_BAREMETAL_BASIC_OPS" != "True" ]]; then return 0 fi if ! 
is_service_enabled nova && is_http_server_required; then local image_file_path if [[ ${IRONIC_WHOLEDISK_IMAGE_NAME} =~ \.img$ ]]; then image_file_path=$FILES/${IRONIC_WHOLEDISK_IMAGE_NAME} else image_file_path=$FILES/${IRONIC_WHOLEDISK_IMAGE_NAME}.img fi sudo install -g $LIBVIRT_GROUP -o $STACK_USER -m 644 $image_file_path $IRONIC_HTTP_DIR fi upload_baremetal_ironic_deploy if [[ "$IRONIC_BOOT_MODE" == "uefi" && is_deployed_by_redfish ]]; then upload_baremetal_ironic_efiboot fi upload_image_if_needed configure_tftpd configure_iptables } function cleanup_baremetal_basic_ops { if [[ "$IRONIC_BAREMETAL_BASIC_OPS" != "True" ]]; then return 0 fi rm -f $IRONIC_VM_MACS_CSV_FILE sudo rm -rf $IRONIC_DATA_DIR $IRONIC_STATE_PATH local vm_name for vm_name in $(_ironic_bm_vm_names); do # Delete the Virtual BMCs if is_deployed_by_ipmi; then vbmc --no-daemon list | grep -a $vm_name && vbmc --no-daemon delete $vm_name || /bin/true fi # pick up the $LIBVIRT_GROUP we have possibly joint newgrp $LIBVIRT_GROUP < """ CONSOLE_PTY = """ """ def main(): parser = argparse.ArgumentParser( description="Configure a kvm virtual machine for the seed image.") parser.add_argument('--name', default='seed', help='the name to give the machine in libvirt.') parser.add_argument('--image', action='append', default=[], help='Use a custom image file (must be qcow2).') parser.add_argument('--engine', default='qemu', help='The virtualization engine to use') parser.add_argument('--arch', default='i686', help='The architecture to use') parser.add_argument('--memory', default='2097152', help="Maximum memory for the VM in KB.") parser.add_argument('--cpus', default='1', help="CPU count for the VM.") parser.add_argument('--bootdev', default='hd', help="What boot device to use (hd/network).") parser.add_argument('--libvirt-nic-driver', default='virtio', help='The libvirt network driver to use') parser.add_argument('--interface-count', default=1, type=int, help='The number of interfaces to add to VM.'), 
parser.add_argument('--mac', default=None, help='The mac for the first interface on the vm') parser.add_argument('--console-log', help='File to log console') parser.add_argument('--emulator', default=None, help='Path to emulator bin for vm template') parser.add_argument('--disk-format', default='qcow2', help='Disk format to use.') parser.add_argument('--uefi-loader', default='', help='The absolute path of the UEFI firmware blob.') parser.add_argument('--uefi-nvram', default='', help=('The absolute path of the non-volatile memory ' 'to store the UEFI variables. Should be used ' 'only when --uefi-loader is also specified.')) args = parser.parse_args() env = jinja2.Environment(loader=jinja2.FileSystemLoader(templatedir)) template = env.get_template('vm.xml') images = list(zip(args.image, string.ascii_lowercase)) if not images or len(images) > 6: # 6 is an artificial limitation because of the way we generate PCI IDs sys.exit("Up to 6 images are required") params = { 'name': args.name, 'images': images, 'engine': args.engine, 'arch': args.arch, 'memory': args.memory, 'cpus': args.cpus, 'bootdev': args.bootdev, 'interface_count': args.interface_count, 'mac': args.mac, 'nicdriver': args.libvirt_nic_driver, 'emulator': args.emulator, 'disk_format': args.disk_format, 'uefi_loader': args.uefi_loader, 'uefi_nvram': args.uefi_nvram, } if args.emulator: params['emulator'] = args.emulator else: qemu_kvm_locations = ['/usr/bin/kvm', '/usr/bin/qemu-kvm', '/usr/libexec/qemu-kvm'] for location in qemu_kvm_locations: if os.path.exists(location): params['emulator'] = location break else: raise RuntimeError("Unable to find location of kvm executable") if args.console_log: params['console'] = CONSOLE_LOG % {'console_log': args.console_log} else: params['console'] = CONSOLE_PTY libvirt_template = template.render(**params) conn = libvirt.open("qemu:///system") a = conn.defineXML(libvirt_template) print("Created machine %s with UUID %s" % (args.name, a.UUIDString())) if __name__ == 
'__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/tools/ironic/scripts/create-node.sh0000775000175000017500000001027100000000000023544 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # **create-nodes** # Creates baremetal poseur nodes for ironic testing purposes set -ex # Make tracing more educational export PS4='+ ${BASH_SOURCE:-}:${FUNCNAME[0]:-}:L${LINENO:-}: ' # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) while getopts "n:c:i:m:M:d:a:b:e:E:p:o:f:l:L:N:A:D:v:P:" arg; do case $arg in n) NAME=$OPTARG;; c) CPU=$OPTARG;; i) INTERFACE_COUNT=$OPTARG;; M) INTERFACE_MTU=$OPTARG;; m) MEM=$(( 1024 * OPTARG ));; # Extra G to allow fuzz for partition table : flavor size and registered # size need to be different to actual size. d) DISK=$(( OPTARG + 1 ));; a) ARCH=$OPTARG;; b) BRIDGE=$OPTARG;; e) EMULATOR=$OPTARG;; E) ENGINE=$OPTARG;; p) VBMC_PORT=$OPTARG;; o) PDU_OUTLET=$OPTARG;; f) DISK_FORMAT=$OPTARG;; l) LOGDIR=$OPTARG;; L) UEFI_LOADER=$OPTARG;; N) UEFI_NVRAM=$OPTARG;; A) MAC_ADDRESS=$OPTARG;; D) NIC_DRIVER=$OPTARG;; v) VOLUME_COUNT=$OPTARG;; P) STORAGE_POOL=$OPTARG;; esac done shift $(( $OPTIND - 1 )) if [ -z "$UEFI_LOADER" ] && [ ! -z "$UEFI_NVRAM" ]; then echo "Parameter -N (UEFI NVRAM) cannot be used without -L (UEFI Loader)" exit 1 fi LIBVIRT_NIC_DRIVER=${NIC_DRIVER:-"e1000"} LIBVIRT_STORAGE_POOL=${STORAGE_POOL:-"default"} LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"} export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI if [ -n "$LOGDIR" ] ; then mkdir -p "$LOGDIR" fi PREALLOC= if [ -f /etc/debian_version -a "$DISK_FORMAT" == "qcow2" ]; then PREALLOC="--prealloc-metadata" fi if [ -n "$LOGDIR" ] ; then VM_LOGGING="--console-log $LOGDIR/${NAME}_console.log" else VM_LOGGING="" fi UEFI_OPTS="" if [ ! -z "$UEFI_LOADER" ]; then UEFI_OPTS="--uefi-loader $UEFI_LOADER" if [ ! 
-z "$UEFI_NVRAM" ]; then UEFI_OPTS+=" --uefi-nvram $UEFI_NVRAM" fi fi # Create bridge and add VM interface to it. # Additional interface will be added to this bridge and # it will be plugged to OVS. # This is needed in order to have interface in OVS even # when VM is in shutdown state INTERFACE_COUNT=${INTERFACE_COUNT:-1} for int in $(seq 1 $INTERFACE_COUNT); do tapif=tap-${NAME}i${int} ovsif=ovs-${NAME}i${int} # NOTE(vsaienko) use veth pair here to ensure that interface # exists in OVS even when VM is powered off. sudo ip link add dev $tapif type veth peer name $ovsif for l in $tapif $ovsif; do sudo ip link set dev $l up sudo ip link set $l mtu $INTERFACE_MTU done sudo ovs-vsctl add-port $BRIDGE $ovsif done if [ -n "$MAC_ADDRESS" ] ; then MAC_ADDRESS="--mac $MAC_ADDRESS" fi VOLUME_COUNT=${VOLUME_COUNT:-1} if ! virsh list --all | grep -q $NAME; then vm_opts="" for int in $(seq 1 $VOLUME_COUNT); do if [[ "$int" == "1" ]]; then # Compatibility with old naming vol_name="$NAME.$DISK_FORMAT" else vol_name="$NAME-$int.$DISK_FORMAT" fi virsh vol-list --pool $LIBVIRT_STORAGE_POOL | grep -q $vol_name && virsh vol-delete $vol_name --pool $LIBVIRT_STORAGE_POOL >&2 virsh vol-create-as $LIBVIRT_STORAGE_POOL ${vol_name} ${DISK}G --allocation 0 --format $DISK_FORMAT $PREALLOC >&2 volume_path=$(virsh vol-path --pool $LIBVIRT_STORAGE_POOL $vol_name) # Pre-touch the VM to set +C, as it can only be set on empty files. 
sudo touch "$volume_path" sudo chattr +C "$volume_path" || true vm_opts+="--image $volume_path " done if [[ -n "$EMULATOR" ]]; then vm_opts+="--emulator $EMULATOR " fi $PYTHON $TOP_DIR/scripts/configure-vm.py \ --bootdev network --name $NAME \ --arch $ARCH --cpus $CPU --memory $MEM --libvirt-nic-driver $LIBVIRT_NIC_DRIVER \ --disk-format $DISK_FORMAT $VM_LOGGING --engine $ENGINE $UEFI_OPTS $vm_opts \ --interface-count $INTERFACE_COUNT $MAC_ADDRESS >&2 fi # echo mac in format mac1,ovs-node-0i1;mac2,ovs-node-0i2;...;macN,ovs-node0iN VM_MAC=$(echo -n $(virsh domiflist $NAME |awk '/tap-/{print $5","$3}')|tr ' ' ';' |sed s/tap-/ovs-/g) echo -n "$VM_MAC $VBMC_PORT $PDU_OUTLET" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/tools/ironic/scripts/setup-network.sh0000775000175000017500000000210200000000000024177 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # **setup-network** # Setups openvswitch libvirt network suitable for # running baremetal poseur nodes for ironic testing purposes set -exu # Make tracing more educational export PS4='+ ${BASH_SOURCE:-}:${FUNCNAME[0]:-}:L${LINENO:-}: ' LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"} # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) BRIDGE_NAME=${1:-brbm} PUBLIC_BRIDGE_MTU=${2:-1500} export VIRSH_DEFAULT_CONNECT_URI="$LIBVIRT_CONNECT_URI" # Only add bridge if missing. Bring it UP. (sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}) || sudo ovs-vsctl add-br ${BRIDGE_NAME} sudo ip link set dev ${BRIDGE_NAME} up # Remove bridge before replacing it. 
(virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME} (virsh net-list --inactive | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME} virsh net-define <(sed s/brbm/$BRIDGE_NAME/ $TOP_DIR/templates/brbm.xml) virsh net-autostart ${BRIDGE_NAME} virsh net-start ${BRIDGE_NAME} sudo ip link set dev ${BRIDGE_NAME} mtu $PUBLIC_BRIDGE_MTU ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8786666 ironic-20.1.0/devstack/tools/ironic/templates/0000775000175000017500000000000000000000000021325 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/tools/ironic/templates/brbm.xml0000664000175000017500000000020000000000000022761 0ustar00zuulzuul00000000000000 brbm ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/tools/ironic/templates/tftpd-xinetd.template0000664000175000017500000000070400000000000025475 0ustar00zuulzuul00000000000000service tftp { protocol = udp port = 69 socket_type = dgram wait = yes user = root server = /usr/sbin/in.tftpd server_args = -v -v -v -v -v --blocksize %MAX_BLOCKSIZE% --map-file %TFTPBOOT_DIR%/map-file %TFTPBOOT_DIR% disable = no # This is a workaround for Fedora, where TFTP will listen only on # IPv6 endpoint, if IPv4 flag is not used. 
flags = IPv4 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/tools/ironic/templates/vm.xml0000664000175000017500000000410700000000000022473 0ustar00zuulzuul00000000000000 {{ name }} {{ memory }} {{ cpus }} hvm {% if bootdev == 'network' and not uefi_loader %} {% endif %} {% if uefi_loader %} {{ uefi_loader }} {% if uefi_nvram %} {{ uefi_nvram }}-{{ name }} {% endif %} {% endif %} {% if engine == 'kvm' %} {% endif %} destroy destroy restart {{ emulator }} {% for (imagefile, letter) in images %}
{% endfor %}
{% for n in range(1, interface_count+1) %} {% if n == 1 and mac %} {% endif %}
{% if uefi_loader and bootdev == 'network' %} {% endif %} {% endfor %} {{ console }}
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8786666 ironic-20.1.0/devstack/upgrade/0000775000175000017500000000000000000000000016333 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8826666 ironic-20.1.0/devstack/upgrade/from-queens/0000775000175000017500000000000000000000000020574 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/upgrade/from-queens/upgrade-ironic0000664000175000017500000000035100000000000023426 0ustar00zuulzuul00000000000000function configure_ironic_upgrade { # Remove the classic drivers from the configuration (forced by devstack-gate) # TODO(dtantsur): remove when classic drivers are removed sed -i '/^enabled_drivers/d' $IRONIC_CONF_FILE } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/upgrade/resources.sh0000775000175000017500000001421000000000000020702 0ustar00zuulzuul00000000000000#!/bin/bash # # Copyright 2015 Hewlett-Packard Development Company, L.P. # Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions source $TOP_DIR/openrc admin admin IRONIC_DEVSTACK_DIR=$(cd $(dirname "$0")/.. 
&& pwd) source $IRONIC_DEVSTACK_DIR/lib/ironic RESOURCES_NETWORK_GATEWAY=${RESOURCES_NETWORK_GATEWAY:-10.2.0.1} RESOURCES_FIXED_RANGE=${RESOURCES_FIXED_RANGE:-10.2.0.0/20} NEUTRON_NET=ironic_grenade set -o xtrace # TODO(dtantsur): remove in Rocky, needed for parsing Placement API responses install_package jq function wait_for_ironic_resources { local i local nodes_count nodes_count=$(openstack baremetal node list -f value -c "Provisioning State" | wc -l) echo_summary "Waiting 5 minutes for Ironic resources become available again" for i in $(seq 1 30); do if openstack baremetal node list -f value -c "Provisioning State" | grep -qi failed; then die $LINENO "One of nodes is in failed state." fi if [[ $(openstack baremetal node list -f value -c "Provisioning State" | grep -ci available) == $nodes_count ]]; then return 0 fi sleep 10 done openstack baremetal node list die $LINENO "Timed out waiting for Ironic nodes are available again." } total_nodes=$IRONIC_VM_COUNT if [[ "${HOST_TOPOLOGY}" == "multinode" ]]; then total_nodes=$(( 2 * $total_nodes )) fi function early_create { # We need these steps only in case of flat-network if [[ -n "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then return fi # Ironic needs to have network access to the instance during deployment # from the control plane (ironic-conductor). This 'early_create' function # creates a new network with a unique CIDR, adds a route to this network # from ironic-conductor and creates taps between br-int and brbm. # ironic-conductor will be able to access the ironic nodes via this new # network. # TODO(vsaienko) use OSC when Neutron commands are supported in the stable # release. 
local net_id net_id=$(openstack network create --share $NEUTRON_NET -f value -c id) resource_save network net_id $net_id local subnet_params="" subnet_params+="--ip_version 4 " subnet_params+="--gateway $RESOURCES_NETWORK_GATEWAY " subnet_params+="--name $NEUTRON_NET " subnet_params+="$net_id $RESOURCES_FIXED_RANGE" local subnet_id subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2) resource_save network subnet_id $subnet_id local router_id router_id=$(openstack router create $NEUTRON_NET -f value -c id) resource_save network router_id $router_id neutron router-interface-add $NEUTRON_NET $subnet_id neutron router-gateway-set $NEUTRON_NET public # Add a route to the baremetal network via the Neutron public router. # ironic-conductor will be able to access the ironic nodes via this new # route. local r_net_gateway # Determine the IP address of the interface (ip -4 route get 8.8.8.8) that # will be used to access a public IP on the router we created ($router_id). # In this case we use the Google DNS server at 8.8.8.8 as the public IP # address. This does not actually attempt to contact 8.8.8.8, it just # determines the IP address of the interface that traffic to 8.8.8.8 would # use. We use the IP address of this interface to setup the route. 
test_with_retry "sudo ip netns exec qrouter-$router_id ip -4 route get 8.8.8.8 " "Route did not start" 60 r_net_gateway=$(sudo ip netns exec qrouter-$router_id ip -4 route get 8.8.8.8 |grep dev | awk '{print $7}') sudo ip route replace $RESOURCES_FIXED_RANGE via $r_net_gateway # NOTE(vsaienko) remove connection between br-int and brbm from old setup sudo ovs-vsctl -- --if-exists del-port ovs-1-tap1 sudo ovs-vsctl -- --if-exists del-port brbm-1-tap1 create_ovs_taps $net_id } function create { : } function verify { : } function verify_noapi { : } function destroy { # We need these steps only in case of flat-network if [[ -n "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then return fi # NOTE(vsaienko) move ironic VMs back to private network. local net_id net_id=$(openstack network show private -f value -c id) create_ovs_taps $net_id # NOTE(vsaienko) during early_create phase we update grenade resources neutron/subnet_id, # neutron/router_id, neutron/net_id. It was needed to instruct nova to boot instances # in ironic_grenade network instead of neutron_grenade during resources phase. As result # during neutron/resources.sh destroy phase ironic_grenade router|subnet|network were deleted. # Make sure that we removed neutron resources here. neutron router-gateway-clear neutron_grenade || /bin/true neutron router-interface-delete neutron_grenade neutron_grenade || /bin/true neutron router-delete neutron_grenade || /bin/true neutron net-delete neutron_grenade || /bin/true } # Dispatcher case $1 in "early_create") wait_for_ironic_resources wait_for_nova_resources $total_nodes early_create ;; "create") create ;; "verify_noapi") # NOTE(vdrok): our implementation of verify_noapi is a noop, but # grenade always passes the upgrade side (pre-upgrade or post-upgrade) # as an argument to it. Pass all the arguments grenade passes further. verify_noapi "${@:2}" ;; "verify") # NOTE(vdrok): pass all the arguments grenade passes further. 
verify "${@:2}" ;; "destroy") destroy ;; esac ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/upgrade/settings0000664000175000017500000000311000000000000020111 0ustar00zuulzuul00000000000000# Grenade needs to know that Ironic has a Grenade plugin. This is done in the # gate by setting GRENADE_PLUGINRC when using openstack-infra/devstack-gate. # That means that in the project openstack-infra/project-config we will need to # update the Ironic grenade job(s) in jenkins/jobs/devstack-gate.yaml with # this: # export GRENADE_PLUGINRC="enable_grenade_plugin ironic https://opendev.org/openstack/ironic" # If openstack-infra/project-config is not updated then the Grenade tests will # never get run for Ironic register_project_for_upgrade ironic register_db_to_save ironic # Duplicate some settings from devstack. Use old devstack as we install base # environment from it. In common_settings we also source the old localrc # variables, so we need to do this before checking the HOST_TOPOLOGY value IRONIC_BASE_DEVSTACK_DIR=$TOP_DIR/../../old/ironic/devstack source $IRONIC_BASE_DEVSTACK_DIR/common_settings if [[ "${HOST_TOPOLOGY}" != "multinode" ]]; then # Disable automated cleaning on single node grenade to save a time and resources. 
export IRONIC_AUTOMATED_CLEAN_ENABLED=False fi # NOTE(jlvillal): For multi-node grenade jobs we do not want to upgrade Nova if [[ "${HOST_TOPOLOGY}" == "multinode" ]]; then # Remove 'nova' from the list of projects to upgrade UPGRADE_PROJECTS=$(echo $UPGRADE_PROJECTS | sed -e 's/\s*nova//g' ) fi # NOTE(vdrok): Do not setup multicell during upgrade export CELLSV2_SETUP="singleconductor" # https://storyboard.openstack.org/#!/story/2003808 # pxe booting with virtio broken in xenial-updates/queens/main export LIBVIRT_NIC_DRIVER=e1000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/upgrade/shutdown.sh0000775000175000017500000000070600000000000020550 0ustar00zuulzuul00000000000000#!/bin/bash # # set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions # We need base DevStack functions for this source $BASE_DEVSTACK_DIR/functions source $BASE_DEVSTACK_DIR/stackrc # needed for status directory source $BASE_DEVSTACK_DIR/lib/tls source $BASE_DEVSTACK_DIR/lib/apache # Keep track of the DevStack directory IRONIC_DEVSTACK_DIR=$(dirname "$0")/.. 
source $IRONIC_DEVSTACK_DIR/lib/ironic set -o xtrace stop_ironic ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/devstack/upgrade/upgrade.sh0000775000175000017500000001300200000000000020315 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # ``upgrade-ironic`` echo "*********************************************************************" echo "Begin $0" echo "*********************************************************************" # Clean up any resources that may be in use cleanup() { set +o errexit echo "*********************************************************************" echo "ERROR: Abort $0" echo "*********************************************************************" # Kill ourselves to signal any calling process trap 2; kill -2 $$ } trap cleanup SIGHUP SIGINT SIGTERM # Keep track of the grenade directory RUN_DIR=$(cd $(dirname "$0") && pwd) # Source params source $GRENADE_DIR/grenaderc # Import common functions source $GRENADE_DIR/functions # This script exits on an error so that errors don't compound and you see # only the first error that occurred. set -o errexit # Upgrade Ironic # ============ # Duplicate some setup bits from target DevStack source $TARGET_DEVSTACK_DIR/stackrc source $TARGET_DEVSTACK_DIR/lib/tls source $TARGET_DEVSTACK_DIR/lib/nova source $TARGET_DEVSTACK_DIR/lib/neutron-legacy source $TARGET_DEVSTACK_DIR/lib/apache source $TARGET_DEVSTACK_DIR/lib/keystone source $TOP_DIR/openrc admin admin # Keep track of the DevStack directory IRONIC_DEVSTACK_DIR=$(dirname "$0")/.. source $IRONIC_DEVSTACK_DIR/lib/ironic # Print the commands being run so that we can see the command that triggers # an error. It is also useful for following allowing as the install occurs. set -o xtrace function wait_for_keystone { if ! 
wait_for_service $SERVICE_TIMEOUT ${KEYSTONE_AUTH_URI}/v$IDENTITY_API_VERSION/; then die $LINENO "keystone did not start" fi } # Save current config files for posterity if [[ -d $IRONIC_CONF_DIR ]] && [[ ! -d $SAVE_DIR/etc.ironic ]] ; then cp -pr $IRONIC_CONF_DIR $SAVE_DIR/etc.ironic fi stack_install_service ironic # calls upgrade-ironic for specific release upgrade_project ironic $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH # NOTE(rloo): make sure it is OK to do an upgrade. Except that we aren't # parsing/checking the output of this command because the output could change # based on the checks it makes. $IRONIC_BIN_DIR/ironic-status upgrade check && ret_val=$? || ret_val=$? if [ $ret_val -gt 1 ] ; then # NOTE(TheJulia): We need to evaluate the return code from the # upgrade status check as the framework defines # Warnings are permissible and returned as status code 1, errors are # returned as greater than 1 which means there is a major upgrade # stopping issue which needs to be addressed. echo "WARNING: Status check failed, we're going to attempt to apply the schema update and then re-evaluate." $IRONIC_BIN_DIR/ironic-dbsync --config-file=$IRONIC_CONF_FILE upgrade $IRONIC_BIN_DIR/ironic-status upgrade check && ret_val=$? || ret_val=$? if [ $ret_val -gt 1 ] ; then die $LINENO "Ironic DB Status check failed, returned: $ret_val" fi fi $IRONIC_BIN_DIR/ironic-dbsync --config-file=$IRONIC_CONF_FILE # NOTE(vsaienko) pin_release only on multinode job, for cold upgrade (single node) # run online data migration instead. if [[ "${HOST_TOPOLOGY}" == "multinode" ]]; then iniset $IRONIC_CONF_FILE DEFAULT pin_release_version ${BASE_DEVSTACK_BRANCH#*/} else ironic-dbsync online_data_migrations fi ensure_started='ironic-conductor nova-compute ' ensure_stopped='' # Multinode grenade is designed to upgrade services only on primary node. And there is no way to manipulate # subnode during grenade phases. 
With this after upgrade we can have upgraded (new) services on primary # node and not upgraded (old) services on subnode. # According to Ironic upgrade procedure, we shouldn't have upgraded (new) ironic-api and not upgraded (old) # ironic-conductor. By setting redirect of API requests from primary node to subnode during upgrade # allow to satisfy ironic upgrade requirements. if [[ "$HOST_TOPOLOGY_ROLE" == "primary" ]]; then disable_service ir-api ensure_stopped+='ironic-api' ironic_wsgi_conf=$(apache_site_config_for ironic-api-wsgi) sudo cp $IRONIC_DEVSTACK_FILES_DIR/apache-ironic-api-redirect.template $ironic_wsgi_conf sudo sed -e " s|%IRONIC_SERVICE_PROTOCOL%|$IRONIC_SERVICE_PROTOCOL|g; s|%IRONIC_SERVICE_HOST%|$IRONIC_PROVISION_SUBNET_SUBNODE_IP|g; " -i $ironic_wsgi_conf enable_apache_site ipxe-ironic restart_apache_server else ensure_started+='ironic-api ' fi start_ironic # NOTE(vsaienko) do not restart n-cpu on multinode as we didn't upgrade nova. if [[ "${HOST_TOPOLOGY}" != "multinode" ]]; then # NOTE(vsaienko) installing ironic service triggers apache restart, that # may cause nova-compute failure due to LP1537076 stop_nova_compute || true wait_for_keystone start_nova_compute fi if [[ -n "$ensure_stopped" ]]; then ensure_services_stopped $ensure_stopped fi ensure_services_started $ensure_started # We need these steps only in case of flat-network # NOTE(vsaienko) starting from Ocata when Neutron is restarted there is no guarantee that # internal tag, that was assigned to network will be the same. As result we need to update # tag on link between br-int and brbm to new value after restart. 
if [[ -z "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then net_id=$(openstack network show ironic_grenade -f value -c id) create_ovs_taps $net_id fi set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8826666 ironic-20.1.0/doc/0000775000175000017500000000000000000000000013645 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/requirements.txt0000664000175000017500000000035500000000000017134 0ustar00zuulzuul00000000000000openstackdocstheme>=2.2.0 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0 reno>=3.1.0 # Apache-2.0 sphinx>=2.0.0,!=2.1.0 # BSD sphinxcontrib-apidoc>=0.2.0 # BSD sphinxcontrib-seqdiag>=0.8.4 # BSD sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8826666 ironic-20.1.0/doc/source/0000775000175000017500000000000000000000000015145 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8826666 ironic-20.1.0/doc/source/_exts/0000775000175000017500000000000000000000000016267 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/_exts/automated_steps.py0000664000175000017500000001446700000000000022056 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import defaultdict import inspect import itertools import operator import os.path from docutils import nodes from docutils.parsers import rst from docutils.parsers.rst import directives from docutils.statemachine import ViewList from sphinx.util import logging from sphinx.util.nodes import nested_parse_with_titles import stevedore from ironic.common import driver_factory LOG = logging.getLogger(__name__) # Enable this locally if you need debugging output DEBUG = False def _list_table(add, headers, data, title='', columns=None): """Build a list-table directive. :param add: Function to add one row to output. :param headers: List of header values. :param data: Iterable of row data, yielding lists or tuples with rows. """ add('.. list-table:: %s' % title) add(' :header-rows: 1') if columns: add(' :widths: %s' % (','.join(str(c) for c in columns))) add('') add(' - * %s' % headers[0]) for h in headers[1:]: add(' * %s' % h) for row in data: add(' - * %s' % row[0]) for r in row[1:]: lines = str(r).splitlines() if not lines: # empty string add(' * ') else: # potentially multi-line string add(' * %s' % lines[0]) for l in lines[1:]: add(' %s' % l) add('') def _format_doc(doc): "Format one method docstring to be shown in the step table." paras = doc.split('\n\n') if paras[-1].startswith(':'): # Remove the field table that commonly appears at the end of a # docstring. paras = paras[:-1] return '\n\n'.join(paras) _clean_steps = {} def _init_steps_by_driver(): "Load step information from drivers." 
# NOTE(dhellmann): This reproduces some of the logic of # ironic.drivers.base.BaseInterface.__new__ and # ironic.common.driver_factory but does so without # instantiating the interface classes, which means that if # some of the preconditions aren't met we can still inspect # the methods of the class. for interface_name in sorted(driver_factory.driver_base.ALL_INTERFACES): if DEBUG: LOG.info('[{}] probing available plugins for interface {}'.format( __name__, interface_name)) loader = stevedore.ExtensionManager( 'ironic.hardware.interfaces.{}'.format(interface_name), invoke_on_load=False, ) for plugin in loader: if plugin.name == 'fake': continue steps = [] for method_name, method in inspect.getmembers(plugin.plugin): if not getattr(method, '_is_clean_step', False): continue step = { 'step': method.__name__, 'priority': method._clean_step_priority, 'abortable': method._clean_step_abortable, 'argsinfo': method._clean_step_argsinfo, 'interface': interface_name, 'doc': _format_doc(inspect.getdoc(method)), } if DEBUG: LOG.info('[{}] interface {!r} driver {!r} STEP {}'.format( __name__, interface_name, plugin.name, step)) steps.append(step) if steps: if interface_name not in _clean_steps: _clean_steps[interface_name] = {} _clean_steps[interface_name][plugin.name] = steps def _format_args(argsinfo): argsinfo = argsinfo or {} return '\n\n'.join( '``{}``{}{} {}'.format( argname, ' (*required*)' if argdetail.get('required') else '', ' --' if argdetail.get('description') else '', argdetail.get('description', ''), ) for argname, argdetail in sorted(argsinfo.items()) ) class AutomatedStepsDirective(rst.Directive): option_spec = { 'phase': directives.unchanged, } def run(self): series = self.options.get('series', 'cleaning') if series != 'cleaning': raise NotImplementedError('Showing deploy steps not implemented') source_name = '<{}>'.format(__name__) result = ViewList() for interface_name in ['power', 'management', 'deploy', 'bios', 'raid']: interface_info = 
_clean_steps.get(interface_name, {}) if not interface_info: continue title = '{} Interface'.format(interface_name.capitalize()) result.append(title, source_name) result.append('~' * len(title), source_name) for driver_name, steps in sorted(interface_info.items()): _list_table( title='{} cleaning steps'.format(driver_name), add=lambda x: result.append(x, source_name), headers=['Name', 'Details', 'Priority', 'Stoppable', 'Arguments'], columns=[20, 30, 10, 10, 30], data=( ('``{}``'.format(s['step']), s['doc'], s['priority'], 'yes' if s['abortable'] else 'no', _format_args(s['argsinfo']), ) for s in steps ), ) # NOTE(dhellmann): Useful for debugging. # print('\n'.join(result)) node = nodes.section() node.document = self.state.document nested_parse_with_titles(self.state, result, node) return node.children def setup(app): app.add_directive('show-steps', AutomatedStepsDirective) _init_steps_by_driver() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/_exts/web_api_docstring.py0000664000175000017500000002507500000000000022334 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import HTTPStatus import os import re # Stdlib from docutils import nodes from docutils.parsers.rst import Directive # 3rd Party from sphinx.util.docfields import GroupedField # 3rd Party import yaml # 3rd party from ironic.common import exception # Application def read_from_file(fpath): """Read the data in file given by fpath.""" with open(fpath, 'r') as stream: yaml_data = yaml.load(stream, Loader=yaml.SafeLoader) return yaml_data def split_str_to_field(input_str): """Split the input_str into 2 parts, the field name and field body. The split is based on this regex format: :field_name: field_body. """ regex_pattern = "((^:{1}.*:{1})(.*))" field_name = None field_body = None if input_str is None: return field_name, field_body regex_output = re.match(regex_pattern, input_str) if regex_output is None and len(input_str) > 0: field_body = input_str.lstrip(' ') if regex_output is not None: field = regex_output.groups() field_name = field[1].strip(':') field_body = field[2].strip() return field_name, field_body def parse_field_list(content): """Convert list of fields as strings, to a dictionary. This function takes a list of strings as input, each item being a :field_name: field_body combination, and converts it into a dictionary with the field names as keys, and field bodies as values. 
""" field_list = {} # dictionary to hold parsed input field list for c in content: if c is None: continue field_name, field_body = split_str_to_field(c) field_list[field_name] = field_body return field_list def create_bullet_list(input_dict, input_build_env): """Convert input_dict into a sphinx representaion of a bullet list.""" grp_field = GroupedField('grp_field', label='title') bullet_list = nodes.paragraph() for field_name in input_dict: fbody_txt_node = nodes.Text(data=input_dict[field_name]) tmp_field_node = grp_field.make_field(domain='py', types=nodes.field, items=[(field_name, fbody_txt_node)], env=input_build_env) for c in tmp_field_node.children: if c.tagname == 'field_body': for ch in c.children: bullet_list += ch return bullet_list def create_table(table_title, table_contents): """Construct a docutils-based table (single row and column).""" table = nodes.table() tgroup = nodes.tgroup(cols=1) colspec = nodes.colspec(colwidth=1) tgroup.append(colspec) table += tgroup thead = nodes.thead() tgroup += thead row = nodes.row() entry = nodes.entry() entry += nodes.paragraph(text=table_title) row += entry thead.append(row) rows = [] row = nodes.row() rows.append(row) entry = nodes.entry() entry += table_contents row += entry tbody = nodes.tbody() tbody.extend(rows) tgroup += tbody return table def split_list(input_list): """Split input_list into three sub-lists. This function splits the input_list into three, one list containing the inital non-empty items, one list containing items appearing after the string 'Success' in input_list; and the other list containing items appearing after the string 'Failure' in input_list. 
""" initial_flag = 1 success_flag = 0 failure_flag = 0 initial_list = [] success_list = [] failure_list = [] for c in input_list: if c == 'Success:': success_flag = 1 failure_flag = 0 elif c == 'Failure:': failure_flag = 1 success_flag = 0 elif c != '' and success_flag: success_list.append(c) elif c != '' and failure_flag: failure_list.append(c) elif c != '' and initial_flag: initial_list.append(c) return initial_list, success_list, failure_list def process_list(input_list): """Combine fields split over multiple list items into one. This function expects to receive a field list as input, with each item in the list representing a line read from the document, as-is. It combines the field bodies split over multiple lines into one list item, making each field (name and body) one list item. It also removes extra whitespace which was used for indentation in input. """ out_list = [] # Convert list to string str1 = "".join(input_list) # Replace multiple spaces with one space str2 = re.sub(r'\s+', ' ', str1) regex_pattern = r'(:\S*.:)' # Split the string, based on field names list3 = re.split(regex_pattern, str2) # Remove empty items from the list list4 = list(filter(None, list3)) # Append the field name and field body strings together for i in range(0, len(list4), 2): out_list.append(list4[i] + list4[i + 1]) return out_list def add_exception_info(failure_list): """Add exception information to fields. This function takes a list of fields (field name and field body) as an argument. If the field name is the name of an exception, it adds the exception code into the field name, and exception message into the field body. 
""" failure_dict = {} # Add the exception code and message string for f in failure_list: field_name, field_body = split_str_to_field(f) exc_code = "" exc_msg = "" if (field_name is not None) and hasattr(exception, field_name): # Get the exception code and message string exc_class = getattr(exception, field_name) try: exc_code = exc_class.code exc_msg = exc_class._msg_fmt except AttributeError: pass # Add the exception's HTTP code and HTTP phrase # to the field name if isinstance(exc_code, HTTPStatus): field_name = (field_name + " (HTTP " + str(exc_code.value) + " " + exc_code.phrase + ")") else: field_name = field_name + " (HTTP " + str(exc_code) + ")" # Add the exception's HTTP description to the field body field_body = exc_msg + " \n" + field_body # Add to dictionary if field name and field body exist if field_name is not None and field_body is not None: failure_dict[field_name] = field_body return failure_dict class Parameters(Directive): """This class implements the Parameters Directive.""" required_arguments = 1 has_content = True def run(self): # Parse the input field list from the docstring, as a dictionary input_dict = {} input_dict = parse_field_list(self.content) # Read from yaml file param_file = self.arguments[0] cur_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) param_file_path = cur_path + '/' + param_file yaml_data = read_from_file(param_file_path) # Substitute the parameter descriptions with the yaml file descriptions for field_name in input_dict: old_field_body = input_dict[field_name] if old_field_body in yaml_data.keys(): input_dict[field_name] = yaml_data[old_field_body]["description"] # Convert dictionary to bullet list format params_build_env = self.state.document.settings.env params_bullet_list = create_bullet_list(input_dict, params_build_env) # Create a table to display the final Parameters directive output params_table = create_table('Parameters', params_bullet_list) return [params_table] class Return(Directive): 
"""This class implements the Return Directive.""" has_content = True def run(self): initial_list, success_list, failure_list = split_list(self.content) # Concatenate the field bodies split over multiple lines proc_fail_list = process_list(failure_list) # Add the exception code(s) and corresponding message string(s) failure_dict = {} failure_dict = add_exception_info(proc_fail_list) ret_table_contents = nodes.paragraph() if len(initial_list) > 0: for i in initial_list: initial_cont = nodes.Text(data=i) ret_table_contents += initial_cont if len(success_list) > 0: # Add heading 'Success:' to output success_heading = nodes.strong() success_heading += nodes.Text(data='Success:') ret_table_contents += success_heading # Add Success details to output success_detail = nodes.paragraph() for s in success_list: success_detail += nodes.Text(data=s) ret_table_contents += success_detail if len(proc_fail_list) > 0: # Add heading 'Failure:' to output failure_heading = nodes.strong() failure_heading += nodes.Text(data='Failure:') ret_table_contents += failure_heading # Add failure details to output ret_build_env = self.state.document.settings.env failure_detail = create_bullet_list(failure_dict, ret_build_env) ret_table_contents += failure_detail if len(initial_list) > 0 or len(success_list) > 0 or len(proc_fail_list) > 0: # Create a table to display the final Returns directive output ret_table = create_table('Returns', ret_table_contents) return [ret_table] else: return None def setup(app): app.add_directive("parameters", Parameters) app.add_directive("return", Return) return { 'version': '0.1', 'parallel_read_safe': True, 'parallel_write_safe': True, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8866668 ironic-20.1.0/doc/source/admin/0000775000175000017500000000000000000000000016235 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 
ironic-20.1.0/doc/source/admin/adoption.rst0000664000175000017500000002305300000000000020607 0ustar00zuulzuul00000000000000.. _adoption: ============= Node adoption ============= Overview ======== As part of hardware inventory lifecycle management, it is not an unreasonable need to have the capability to be able to add hardware that should be considered "in-use" by the Bare Metal service, that may have been deployed by another Bare Metal service installation or deployed via other means. As such, the node adoption feature allows a user to define a node as ``active`` while skipping the ``available`` and ``deploying`` states, which will prevent the node from being seen by the Compute service as ready for use. This feature is leveraged as part of the state machine workflow, where a node in ``manageable`` can be moved to ``active`` state via the provision_state verb ``adopt``. To view the state transition capabilities, please see :ref:`states`. .. NOTE:: For deployments using Ironic in conjunction with Nova, Ironic's node adoption feature is not suitable. If you need to adopt production nodes into Ironic **and** Nova, you can find a high-level recipe in :ref:`adoption_with_nova`. How it works ============ A node initially enrolled begins in the ``enroll`` state. An operator must then move the node to ``manageable`` state, which causes the node's ``power`` interface to be validated. Once in ``manageable`` state, an operator can then explicitly choose to adopt a node. Adoption of a node results in the validation of its ``boot`` interface, and upon success the process leverages what is referred to as the "takeover" logic. The takeover process is intended for conductors to take over the management of nodes for a conductor that has failed. The takeover process involves the deploy interface's ``prepare`` and ``take_over`` methods being called. 
These steps take specific actions such as downloading and staging the deployment kernel and ramdisk, ISO image, any required boot image, or boot ISO image and then places any PXE or virtual media configuration necessary for the node should it be required. The adoption process makes no changes to the physical node, with the exception of operator supplied configurations where virtual media is used to boot the node under normal circumstances. An operator should ensure that any supplied configuration defining the node is sufficient for the continued operation of the node moving forward. Possible Risk ============= The main risk with this feature is that supplied configuration may ultimately be incorrect or invalid which could result in potential operational issues: * ``rebuild`` verb - Rebuild is intended to allow a user to re-deploy the node to a fresh state. The risk with adoption is that the image defined when an operator adopts the node may not be the valid image for the pre-existing configuration. If this feature is utilized for a migration from one deployment to another, and pristine original images are loaded and provided, then ultimately the risk is the same with any normal use of the ``rebuild`` feature, the server is effectively wiped. * When deleting a node, the deletion or cleaning processes may fail if the incorrect deployment image is supplied in the configuration as the node may NOT have been deployed with the supplied image and driver or compatibility issues may exist as a result. Operators will need to be cognizant of that possibility and should plan accordingly to ensure that deployment images are known to be compatible with the hardware in their environment. * Networking - Adoption will assert no new networking configuration to the newly adopted node as that would be considered modifying the node. Operators will need to plan accordingly and have network configuration such that the nodes will be able to network boot. How to use ========== .. 
NOTE:: The power state that the ironic-conductor observes upon the first successful power state check, as part of the transition to the ``manageable`` state will be enforced with a node that has been adopted. This means a node that is in ``power off`` state will, by default, have the power state enforced as ``power off`` moving forward, unless an administrator actively changes the power state using the Bare Metal service. Requirements ------------ Requirements for use are essentially the same as to deploy a node: * Sufficient driver information to allow for a successful power management validation. * Sufficient instance_info to pass deploy interface preparation. Each driver may have additional requirements dependent upon the configuration that is supplied. An example of this would be defining a node to always boot from the network, which will cause the conductor to attempt to retrieve the pertinent files. Inability to do so will result in the adoption failing, and the node being placed in the ``adopt failed`` state. Example ------- This is an example to create a new node, named ``testnode``, with sufficient information to pass basic validation in order to be taken from the ``manageable`` state to ``active`` state:: # Explicitly set the client API version environment variable to # 1.17, which introduces the adoption capability. export OS_BAREMETAL_API_VERSION=1.17 baremetal node create --name testnode \ --driver ipmi \ --driver-info ipmi_address= \ --driver-info ipmi_username= \ --driver-info ipmi_password= \ --driver-info deploy_kernel= \ --driver-info deploy_ramdisk= baremetal port create --node baremetal node set testnode \ --instance-info image_source="http://localhost:8080/blankimage" \ --instance-info capabilities="{\"boot_option\": \"local\"}" baremetal node manage testnode --wait baremetal node adopt testnode --wait .. 
NOTE:: In the above example, the image_source setting must reference a valid image or file, however that image or file can ultimately be empty. .. NOTE:: The above example utilizes a capability that defines the boot operation to be local. It is recommended to define the node as such unless network booting is desired. .. NOTE:: The above example will fail a re-deployment as a fake image is defined and no instance_info/image_checksum value is defined. As such any actual attempt to write the image out will fail as the image_checksum value is only validated at time of an actual deployment operation. .. NOTE:: A user may wish to assign an instance_uuid to a node, which could be used to match an instance in the Compute service. Doing so is not required for the proper operation of the Bare Metal service. baremetal node set --instance-uuid .. NOTE:: In Newton, coupled with API version 1.20, the concept of a network_interface was introduced. A user of this feature may wish to add new nodes with a network_interface of ``noop`` and then change the interface at a later point and time. Troubleshooting =============== Should an adoption operation fail for a node, the error that caused the failure will be logged in the node's ``last_error`` field when viewing the node. This error, in the case of node adoption, will largely be due to failure of a validation step. Validation steps are dependent upon what driver is selected for the node. Any node that is in the ``adopt failed`` state can have the ``adopt`` verb re-attempted. Example:: baremetal node adopt If a user wishes to abort their attempt at adopting, they can then move the node back to ``manageable`` from ``adopt failed`` state by issuing the ``manage`` verb. Example:: baremetal node manage If all else fails the hardware node can be removed from the Bare Metal service. The ``node delete`` command, which is **not** the same as setting the provision state to ``deleted``, can be used while the node is in ``adopt failed`` state. 
This will delete the node without cleaning occurring to preserve the node's current state. Example:: baremetal node delete .. _adoption_with_nova: Adoption with Nova ================== Since there is no mechanism to create bare metal instances in Nova when nodes are adopted into Ironic, the node adoption feature described above cannot be used to add in production nodes to deployments which use Ironic together with Nova. One option to add in production nodes to an Ironic/Nova deployment is to use the fake drivers. The overall idea is that for Nova the nodes are instantiated normally to ensure the instances are properly created in the compute project while Ironic does not touch them. Here are some high level steps to be used as a guideline: * create a bare metal flavor and a hosting project for the instances * enroll the nodes into Ironic, create the ports, move them to manageable * change the hardware type and the interfaces to fake drivers * provide the nodes to make them available * one by one, add the nodes to the placement aggregate and create instances * change the hardware type and the interfaces back to the real ones Make sure you change the drivers to the fake ones **before** providing the nodes as cleaning will otherwise wipe your production servers! The reason to make all nodes available and manage access via the aggregate is that this is much faster than providing nodes one by one and relying on the resource tracker to find them. Enabling them one by one is required to make sure the instance name and the (pre-adoption) name of the server match. The above recipe does not cover Neutron which, depending on your deployment, may need to be handled in addition. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/agent-power.rst0000664000175000017500000000452200000000000021222 0ustar00zuulzuul00000000000000================================= Deploying without BMC Credentials ================================= The Bare Metal service usually requires BMC credentials for all provisioning operations. Starting with the Victoria release series there is limited support for inspection, cleaning and deployments without the credentials. .. warning:: This feature is experimental and only works in a limited scenario. When using it, you have to be prepared to provide BMC credentials in case of a failure or any non-supported actions. How it works ============ The expected workflow is as follows: #. The node is discovered by manually powering it on and gets the `manual-management` hardware type and `agent` power interface. If discovery is not used, a node can be enrolled through the API and then powered on manually. #. The operator moves the node to `manageable`. It works because the `agent` power only requires to be able to connect to the agent. #. The operator moves the node to `available`. Cleaning happens normally via the already running agent. If reboot is needed, it is done by telling the agent to reboot the node in-band. #. A user deploys the node. Deployment happens normally via the already running agent. #. In the end of the deployment, the node is rebooted via the reboot command instead of power off+on. Enabling ======== :doc:`fast-track` is a requirement for this feature to work. After enabling it, adds the ``agent`` power interface and the ``manual-management`` hardware type to the enabled list: .. code-block:: ini [DEFAULT] enabled_hardware_types = manual-management enabled_management_interfaces = noop enabled_power_interfaces = agent [deploy] fast_track = true As usual with the ``noop`` management, enable the networking boot fallback: .. 
code-block:: ini [pxe] enable_netboot_fallback = true If using discovery, :ironic-inspector-doc:`configure discovery in ironic-inspector ` with the default driver set to ``manual-management``. Limitations =========== * Only the ``noop`` network interface is supported. * Undeploy and rescue are not supported, you need to add BMC credentials first. * If any errors happens in the process, recovery will likely require BMC credentials. * Only rebooting is possible through the API, power on/off commands will fail. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/agent-token.rst0000664000175000017500000001112300000000000021201 0ustar00zuulzuul00000000000000.. _agent_token: =========== Agent Token =========== Purpose ======= The concept of agent tokens is to provide a mechanism by which the relationship between an operating deployment of the Bare Metal Service and an instance of the ``ironic-python-agent`` is verified. In a sense, this token can be viewed as a session identifier or authentication token. .. warning:: This functionality does not remove the risk of a man-in-the-middle attack that could occur from connection intercept or when TLS is not used for all communication. This becomes useful in the case of deploying an "edge" node where intermediate networks are not trustworthy. How it works ============ These tokens are provided in one of two ways to the running agent. 1. A pre-generated token which is embedded into virtual media ISOs. 2. A one-time generated token that are provided upon the first "lookup" of the node. In both cases, the tokens are a randomly generated using the Python ``secrets`` library. As of mid-2020, the default length is 43 characters. Once the token has been provided, the token cannot be retrieved or accessed. It remains available to the conductors, and is stored in memory of the ``ironic-python-agent``. .. 
note:: In the case of the token being embedded with virtual media, it is read from a configuration file within the image. Ideally this should be paired with Swift temporary URLs. With the token available in memory in the agent, the token is embedded with ``heartbeat`` operations to the ironic API endpoint. This enables the API to authenticate the heartbeat request, and to refuse heartbeat requests from the ``ironic-python-agent`` that do not include a valid token. As of the Victoria release, use of Agent Token is required for all agents and the previously available setting to force this functionality to be mandatory, ``[DEFAULT]require_agent_token`` no longer has any effect. .. warning:: If the Bare Metal Service is updated, the version of ``ironic-python-agent`` should also be updated to enable this feature. In addition to heartbeats being verified, commands from the ``ironic-conductor`` service to the ``ironic-python-agent`` also include the token, allowing the agent to authenticate the caller. With Virtual Media ------------------ .. 
seqdiag:: :scale: 80 diagram { API; Conductor; Baremetal; Swift; IPA; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> Conductor [label = "Generates a random token"]; Conductor -> Conductor [label = "Generates configuration for IPA ramdisk"]; Conductor -> Swift [label = "IPA image, with configuration is uploaded"]; Conductor -> Baremetal [label = "Attach IPA virtual media in Swift as virtual CD"]; Conductor -> Baremetal [label = "Conductor turns power on"]; Baremetal -> Swift [label = "Baremetal reads virtual media"]; Baremetal -> Baremetal [label = "Boots IPA virtual media image"]; Baremetal -> Baremetal [label = "IPA is started"]; IPA -> Baremetal [label = "IPA loads configuration and agent token into memory"]; IPA -> API [label = "Lookup node"]; API -> IPA [label = "API responds with node UUID and token value of '******'"]; IPA -> API [label = "Heartbeat with agent token"]; } With PXE/iPXE/etc. ------------------ .. seqdiag:: :scale: 80 diagram { API; Conductor; Baremetal; iPXE; IPA; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> Baremetal [label = "Conductor turns power on"]; Baremetal -> iPXE [label = "Baremetal reads kernel/ramdisk and starts boot"]; Baremetal -> Baremetal [label = "Boots IPA iPXE image"]; Baremetal -> Baremetal [label = "IPA is started"]; IPA -> Baremetal [label = "IPA loads configuration"]; IPA -> API [label = "Lookup node"]; API -> Conductor [label = "API requests conductor to generates a random token"]; API -> IPA [label = "API responds with node UUID and token value"]; IPA -> API [label = "Heartbeat with agent token"]; } Agent Configuration =================== An additional setting which may be leveraged with the ``ironic-python-agent`` is a ``agent_token_required`` setting. 
Under normal circumstances, this setting can be asserted via the configuration supplied from the Bare Metal service deployment upon the ``lookup`` action, but can be asserted via the embedded configuration for the agent in the ramdisk. This setting is also available via kernel command line as ``ipa-agent-token-required``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/anaconda-deploy-interface.rst0000664000175000017500000001325700000000000023773 0ustar00zuulzuul00000000000000Deploying with anaconda deploy interface ======================================== Ironic supports deploying an OS with the `anaconda`_ installer. This anaconda deploy interface works with ``pxe`` and ``ipxe`` boot interfaces. Configuration ------------- The anaconda deploy interface is not enabled by default. To enable this, add ``anaconda`` to the value of the ``enabled_deploy_interfaces`` configuration option in ironic.conf. For example: .. code-block:: ini [DEFAULT] ... enabled_deploy_interfaces = direct,anaconda ... This change takes effect after all the ironic conductors have been restarted. The default kickstart template is specified via the configuration option ``[anaconda]default_ks_template``. It is set to this `ks.cfg.template`_ but can be modified to be some other template. .. code-block:: ini [anaconda] default_ks_template = file:///etc/ironic/ks.cfg.template When creating an ironic node, specify ``anaconda`` as the deploy interface. For example: .. code-block:: shell baremetal node create --driver ipmi \ --deploy-interface anaconda \ --boot-interface ipxe You can also set the anaconda deploy interface via ``--deploy-interface`` on an existing node: .. code-block:: shell baremetal node set --deploy-interface anaconda Creating an OS Image -------------------- While anaconda allows installing individual RPMs, the default kickstart file expects an OS tarball to be used as the OS image. 
This ``baremetal.yum`` file contains all the yum/dnf commands that need to be run in order to generate the OS tarball. These commands install packages and package groups that need to be in the image: .. code-block:: ini group install 'Minimal Install' install cloud-init ts run An OS tarball can be created using the following set of commands, along with the above ``baremetal.yum`` file: .. code-block:: shell export CHROOT=/home//os-image mkdir -p ${CHROOT} mkdir -p ${CHROOT}/{dev,proc,run,sys} chown -hR root:root ${CHROOT} mount --bind /var/cache/yum ${CHROOT}/var/cache/yum mount --bind /dev ${CHROOT}/dev mount -t proc proc ${CHROOT}/proc mount -t tmpfs tmpfs ${CHROOT}/run mount -t sysfs sysfs ${CHROOT}/sys dnf -y --installroot=${CHROOT} makecache dnf -y --installroot=${CHROOT} shell baremetal.yum rpm --root ${CHROOT} --import ${CHROOT}/etc/pki/rpm-gpg/RPM-GPG-KEY-* truncate -s 0 ${CHROOT}/etc/machine-id umount ${CHROOT}/var/cache/yum umount ${CHROOT}/dev umount ${CHROOT}/proc umount ${CHROOT}/run umount ${CHROOT}/sys tar cpzf os-image.tar.gz --xattrs --acls --selinux -C ${CHROOT} . Configuring the OS Image in glance ---------------------------------- Anaconda is a two-stage installer -- stage 1 consists of the kernel and ramdisk and stage 2 lives in a squashfs file. All these components can be found in the CentOS/RHEL/Fedora ISO images. The kernel and ramdisk can be found at ``/images/pxeboot/vmlinuz`` and ``/images/pxeboot/initrd.img`` respectively in the ISO. The stage 2 squashfs image can be normally found at ``/LiveOS/squashfs.img`` or ``/images/install.img``. The OS tarball must be configured with the following properties in glance, in order to be used with the anaconda deploy driver: * ``kernel_id`` * ``ramdisk_id`` * ``stage2_id`` * ``disk_file_extension`` (optional) Valid ``disk_file_extension`` values are ``.img``, ``.tar``, ``.tbz``, ``.tgz``, ``.txz``, ``.tar.gz``, ``.tar.bz2``, and ``.tar.xz``. 
When ``disk_file_extension`` property is not set to one of the above valid values the anaconda installer will assume that the image provided is a mountable OS disk. This is an example of adding the anaconda-related images and the OS tarball to glance: .. code-block:: shell openstack image create --file ./vmlinuz --container-format aki \ --disk-format aki --shared anaconda-kernel- openstack image create --file ./initrd.img --container-format ari \ --disk-format ari --shared anaconda-ramdisk- openstack image create --file ./squashfs.img --container-format ari \ --disk-format ari --shared anaconda-stage- openstack image create --file ./os-image.tar.gz --container-format \ compressed --disk-format raw --shared \ --property kernel_id= \ --property ramdisk_id= \ --property stage2_id= disto-name-version \ --property disk_file_extension=.tgz Creating a bare metal server ---------------------------- Apart from uploading a custom kickstart template to glance and associating it with the OS image via the ``ks_template`` property in glance, operators can also set the kickstart template in the ironic node's ``instance_info`` field. The kickstart template set in ``instance_info`` takes precedence over the one specified via the OS image in glance. If no kickstart template is specified (via the node's ``instance_info`` or ``ks_template`` glance image property), the default kickstart template will be used to deploy the OS. This is an example of how to set the kickstart template for a specific ironic node: .. code-block:: shell openstack baremetal node set \ --instance_info ks_template=glance://uuid Limitations ----------- This deploy interface has only been tested with Red Hat based operating systems that use anaconda. Other systems are not supported. .. _`anaconda`: https://fedoraproject.org/wiki/Anaconda .. 
_`ks.cfg.template`: https://opendev.org/openstack/ironic/src/branch/master/ironic/drivers/modules/ks.cfg.template ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/api-audit-support.rst0000664000175000017500000000720400000000000022361 0ustar00zuulzuul00000000000000.. _api-audit-support: ================= API Audit Logging ================= Audit middleware supports delivery of CADF audit events via Oslo messaging notifier capability. Based on `notification_driver` configuration, audit events can be routed to messaging infrastructure (notification_driver = messagingv2) or can be routed to a log file (`[oslo_messaging_notifications]/driver = log`). Audit middleware creates two events per REST API interaction. First event has information extracted from request data and the second one has request outcome (response). Enabling API Audit Logging ========================== Audit middleware is available as part of `keystonemiddleware` (>= 1.6) library. For information regarding how audit middleware functions refer :keystonemiddleware-doc:`here `. Auditing can be enabled for the Bare Metal service by making the following changes to ``/etc/ironic/ironic.conf``. #. To enable audit logging of API requests:: [audit] ... enabled=true #. To customize auditing API requests, the audit middleware requires the audit_map_file setting to be defined. Update the value of configuration setting 'audit_map_file' to set its location. Audit map file configuration options for the Bare Metal service are included in the etc/ironic/ironic_api_audit_map.conf.sample file. To understand CADF format specified in ironic_api_audit_map.conf file refer to `CADF Format. `_:: [audit] ... audit_map_file=/etc/ironic/api_audit_map.conf #. Comma separated list of Ironic REST API HTTP methods to be ignored during audit. It is used only when API audit is enabled. For example:: [audit] ... 
ignore_req_list=GET,POST Sample Audit Event ================== Following is the sample of audit event for ironic node list request. .. code-block:: json { "event_type":"audit.http.request", "timestamp":"2016-06-15 06:04:30.904397", "payload":{ "typeURI":"http://schemas.dmtf.org/cloud/audit/1.0/event", "eventTime":"2016-06-15T06:04:30.903071+0000", "target":{ "id":"ironic", "typeURI":"unknown", "addresses":[ { "url":"http://{ironic_admin_host}:6385", "name":"admin" }, { "url":"http://{ironic_internal_host}:6385", "name":"private" }, { "url":"http://{ironic_public_host}:6385", "name":"public" } ], "name":"ironic" }, "observer":{ "id":"target" }, "tags":[ "correlation_id?value=685f1abb-620e-5d5d-b74a-b4135fb32373" ], "eventType":"activity", "initiator":{ "typeURI":"service/security/account/user", "name":"admin", "credential":{ "token":"***", "identity_status":"Confirmed" }, "host":{ "agent":"python-ironicclient", "address":"10.1.200.129" }, "project_id":"d8f52dd7d9e1475dbbf3ba47a4a83313", "id":"8c1a948bad3948929aa5d5b50627a174" }, "action":"read", "outcome":"pending", "id":"061b7aa7-5879-5225-a331-c002cf23cb6c", "requestPath":"/v1/nodes/?associated=True" }, "priority":"INFO", "publisher_id":"ironic-api", "message_id":"2f61ebaa-2d3e-4023-afba-f9fca6f21fc2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/bios.rst0000664000175000017500000000775000000000000017734 0ustar00zuulzuul00000000000000.. _bios: ================== BIOS Configuration ================== Overview ======== The Bare Metal service supports BIOS configuration for bare metal nodes. It allows administrators to retrieve and apply the desired BIOS settings via CLI or REST API. The desired BIOS settings are applied during manual cleaning. Prerequisites ============= Bare metal servers must be configured by the administrator to be managed via ironic hardware type that supports BIOS configuration. 
Enabling hardware types ----------------------- Enable a specific hardware type that supports BIOS configuration. Refer to :doc:`/install/enabling-drivers` for how to enable a hardware type. Enabling hardware interface --------------------------- To enable the bios interface: .. code-block:: ini [DEFAULT] enabled_bios_interfaces = no-bios Append the actual bios interface name supported by the enabled hardware type to ``enabled_bios_interfaces`` with comma separated values in ``ironic.conf``. All available in-tree bios interfaces are listed in setup.cfg file in the source code tree, for example: .. code-block:: ini ironic.hardware.interfaces.bios = fake = ironic.drivers.modules.fake:FakeBIOS no-bios = ironic.drivers.modules.noop:NoBIOS Retrieve BIOS settings ====================== To retrieve the cached BIOS configuration from a specified node:: $ baremetal node bios setting list BIOS settings are cached on each node cleaning operation or when settings have been applied successfully via BIOS cleaning steps. The return of above command is a table of last cached BIOS settings from specified node. If ``-f json`` is added as suffix to above command, it returns BIOS settings as following:: [ { "setting name": { "name": "setting name", "value": "value" } }, { "setting name": { "name": "setting name", "value": "value" } }, ... 
] To get a specified BIOS setting for a node:: $ baremetal node bios setting show If ``-f json`` is added as suffix to above command, it returns BIOS settings as following:: { "setting name": { "name": "setting name", "value": "value" } } Configure BIOS settings ======================= Two :ref:`manual_cleaning` steps are available for managing nodes' BIOS settings: Factory reset ------------- This cleaning step resets all BIOS settings to factory default for a given node:: { "target":"clean", "clean_steps": [ { "interface": "bios", "step": "factory_reset" } ] } The ``factory_reset`` cleaning step does not require any arguments, as it resets all BIOS settings to factory defaults. Apply BIOS configuration ------------------------ This cleaning step applies a set of BIOS settings for a node:: { "target":"clean", "clean_steps": [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [ { "name": "name", "value": "value" }, { "name": "name", "value": "value" } ] } } ] } The representation of ``apply_configuration`` cleaning step follows the same format of :ref:`manual_cleaning`. The desired BIOS settings can be provided via the ``settings`` argument which contains a list of BIOS options to be applied, each BIOS option is a dictionary with ``name`` and ``value`` keys. To check whether the desired BIOS configuration is set properly, use the command mentioned in the `Retrieve BIOS settings`_ section. .. note:: When applying BIOS settings to a node, vendor-specific driver may take the given BIOS settings from the argument and compare them with the current BIOS settings on the node and only apply when there is a difference. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/boot-from-volume.rst0000664000175000017500000002540400000000000022205 0ustar00zuulzuul00000000000000.. 
_boot-from-volume: ================ Boot From Volume ================ Overview ======== The Bare Metal service supports booting from a Cinder iSCSI volume as of the Pike release. This guide will primarily deal with this use case, but will be updated as more paths for booting from a volume, such as FCoE, are introduced. The boot from volume is supported on both legacy BIOS and UEFI (iPXE binary for EFI booting) boot mode. We need to perform with suitable images which will be created by diskimage-builder tool. How this works - From Ironic's point of view -------------------------------------------- In essence, ironic sets the stage for the process, by providing the required information to the boot interface to facilitate the configuration of the the node OR the iPXE boot templates such that the node CAN be booted. .. seqdiag:: :scale: 80 diagram { User; API; Conductor; Storage; Boot; Network; Deploy; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; User -> API [label = "User or intermediate service such as nova supplies volume target configuration."]; User -> API [label = "Sends deployment request."]; API -> Conductor [label = "API transmits the action to the conductor service"]; Conductor -> Storage [label = "Conductor calls the storage_interface to perform attachment of volume to node"]; Conductor -> Boot [label = "Conductor calls the boot interface signaling preparation of an instance"]; Conductor -> Network [label = "Conductor attaches the machine to network requested by the user VIF"]; Conductor -> Deploy [label = "Conductor starts deployment steps which just turn the power on."]; } In this example, the boot interface does the heavy lifting. 
For drivers the ``irmc`` and ``ilo`` hardware types with hardware type specific boot interfaces, they are able to signal via an out of band mechanism to the baremetal node's BMC that the integrated iSCSI initiators are to connect to the supplied volume target information. In most hardware this would be the network cards of the machine. In the case of the ``ipxe`` boot interface, templates are created on disk which point to the iscsi target information that was either submitted as part of the volume target, or when integrated with Nova, what was requested as the baremetal's boot from volume disk upon requesting the instance. In terms of network access, both interface methods require connectivity to the iscsi target. In the vendor driver specific path, additional network configuration options may be available to allow separation of standard network traffic and instance network traffic. In the iPXE case, this is not possible as the OS userspace re-configures the iSCSI connection after detection inside the OS ramdisk boot. An iPXE user *may* be able to leverage multiple VIFs, one specifically set to be set with ``pxe_enabled`` to handle the initial instance boot and back-end storage traffic where as external facing network traffic occurs on a different interface. This is a common pattern in iSCSI based deployments in the physical realm. Prerequisites ============= Currently booting from a volume requires: - Bare Metal service version 9.0.0 - Bare Metal API microversion 1.33 or later - A driver that utilizes the :doc:`PXE boot mechanism `. Currently booting from a volume is supported by the reference drivers that utilize PXE boot mechanisms when iPXE is enabled. - iPXE is an explicit requirement, as it provides the mechanism that attaches and initiates booting from an iSCSI volume. - Metadata services need to be configured and available for the instance images to obtain configuration such as keys. 
Configuration drives are not supported due to minimum disk extension sizes. Conductor Configuration ======================= In ironic.conf, you can specify a list of enabled storage interfaces. Check ``[DEFAULT]enabled_storage_interfaces`` in your ironic.conf to ensure that your desired interface is enabled. For example, to enable the ``cinder`` and ``noop`` storage interfaces:: [DEFAULT] enabled_storage_interfaces = cinder,noop If you want to specify a default storage interface rather than setting the storage interface on a per node basis, set ``[DEFAULT]default_storage_interface`` in ironic.conf. The ``default_storage_interface`` will be used for any node that doesn't have a storage interface defined. Node Configuration ================== Storage Interface ----------------- You will need to specify what storage interface the node will use to handle storage operations. For example, to set the storage interface to ``cinder`` on an existing node:: baremetal node set --storage-interface cinder $NODE_UUID A default storage interface can be specified in ironic.conf. See the `Conductor Configuration`_ section for details. iSCSI Configuration ------------------- In order for a bare metal node to boot from an iSCSI volume, the ``iscsi_boot`` capability for the node must be set to ``True``. For example, if you want to update an existing node to boot from volume:: baremetal node set --property capabilities=iscsi_boot:True $NODE_UUID You will also need to create a volume connector for the node, so the storage interface will know how to communicate with the node for storage operation. In the case of iSCSI, you will need to provide an iSCSI Qualifying Name (IQN) that is unique to your SAN. 
For example, to create a volume connector for iSCSI:: baremetal volume connector create \ --node $NODE_UUID --type iqn --connector-id iqn.2017-08.org.openstack.$NODE_UUID Image Creation ============== We use ``disk-image-create`` in diskimage-builder tool to create images for boot from volume feature. Some required elements for this mechanism for corresponding boot modes are as following: - Legacy BIOS boot mode: ``iscsi-boot`` element. - UEFI boot mode: ``iscsi-boot`` and ``block-device-efi`` elements. An example below:: export IMAGE_NAME= export DIB_CLOUD_INIT_DATASOURCES="ConfigDrive, OpenStack" disk-image-create centos7 vm cloud-init-datasources dhcp-all-interfaces iscsi-boot dracut-regenerate block-device-efi -o $IMAGE_NAME .. note:: * For CentOS images, we must add dependent element named ``dracut-regenerate`` during image creation. Otherwise, the image creation will fail with an error. * For Ubuntu images, we only support ``iscsi-boot`` element without ``dracut-regenerate`` element during image creation. Advanced Topics =============== Use without the Compute Service ------------------------------- As discussed in other sections, the Bare Metal service has a concept of a `connector` that is used to represent an interface that is intended to be utilized to attach the remote volume. In addition to the connectors, we have a concept of a `target` that can be defined via the API. While a user of this feature through the Compute service would automatically have a new target record created for them, it is not explicitly required, and can be performed manually. A target record can be created using a command similar to the example below:: baremetal volume target create \ --node $NODE_UUID --type iscsi --boot-index 0 --volume $VOLUME_UUID .. Note:: A ``boot-index`` value of ``0`` represents the boot volume for a node. As the ``boot-index`` is per-node in sequential order, only one boot volume is permitted for each node. 
Use Without Cinder ------------------ In the Rocky release, an ``external`` storage interface is available that can be utilized without a Block Storage Service installation. Under normal circumstances the ``cinder`` storage interface interacts with the Block Storage Service to orchestrate and manage attachment and detachment of volumes from the underlying block service system. The ``external`` storage interface contains the logic to allow the Bare Metal service to determine if the Bare Metal node has been requested with a remote storage volume for booting. This is in contrast to the default ``noop`` storage interface which does not contain logic to determine if the node should or could boot from a remote volume. It must be noted that minimal configuration or value validation occurs with the ``external`` storage interface. The ``cinder`` storage interface contains more extensive validation, that is likely un-necessary in a ``external`` scenario. Setting the external storage interface:: baremetal node set --storage-interface external $NODE_UUID Setting a volume:: baremetal volume target create --node $NODE_UUID \ --type iscsi --boot-index 0 --volume-id $VOLUME_UUID \ --property target_iqn="iqn.2010-10.com.example:vol-X" \ --property target_lun="0" \ --property target_portal="192.168.0.123:3260" \ --property auth_method="CHAP" \ --property auth_username="ABC" \ --property auth_password="XYZ" \ Ensure that no image_source is defined:: baremetal node unset \ --instance-info image_source $NODE_UUID Deploy the node:: baremetal node deploy $NODE_UUID Upon deploy, the boot interface for the baremetal node will attempt to either create iPXE configuration OR set boot parameters out-of-band via the management controller. Such action is boot interface specific and may not support all forms of volume target configuration. 
As of the Rocky release, the bare metal service does not support writing an Operating System image to a remote boot from volume target, so that also must be ensured by the user in advance. Records of volume targets are removed upon the node being undeployed, and as such are not persistent across deployments. Cinder Multi-attach ------------------- Volume multi-attach is a function that is commonly performed in computing clusters where dedicated storage subsystems are utilized. For some time now, the Block Storage service has supported the concept of multi-attach. However, the Compute service, as of the Pike release, does not yet have support to leverage multi-attach. Concurrently, multi-attach requires the backend volume driver running as part of the Block Storage service to contain support for multi-attach volumes. When support for storage interfaces was added to the Bare Metal service, specifically for the ``cinder`` storage interface, the concept of volume multi-attach was accounted for, however has not been fully tested, and is unlikely to be fully tested until there is Compute service integration as well as volume driver support. The data model for storage of volume targets in the Bare Metal service has no constraints on the same target volume from being utilized. When interacting with the Block Storage service, the Bare Metal service will prevent the use of volumes that are being reported as ``in-use`` if they do not explicitly support multi-attach. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/building-windows-images.rst0000664000175000017500000000724100000000000023523 0ustar00zuulzuul00000000000000.. 
_building_image_windows: Building images for Windows --------------------------- We can use ``New-WindowsOnlineImage`` in `windows-openstack-imaging-tools`_ tool as an option to create Windows images (whole disk images) corresponding boot modes which will support for Windows NIC Teaming. And allow the utilization of link aggregation when the instance is spawned on hardware servers (Bare metals). Requirements: ~~~~~~~~~~~~~ * A Microsoft Windows Server Operating System along with ``Hyper-V virtualization`` enabled, ``PowerShell`` version >=4 supported, ``Windows Assessment and Deployment Kit``, in short ``Windows ADK``. * The windows Server compatible drivers. * Working git environment. Preparation: ~~~~~~~~~~~~ * Download a Windows Server 2012R2/ 2016 installation ISO. * Install Windows Server 2012R2/ 2016 OS on workstation PC along with following feature: - Enable Hyper-V virtualization. - Install PowerShell 4.0. - Install Git environment & import git proxy (if have). - Create new ``Path`` in Microsoft Windows Server Operating System which support for submodule update via ``git submodule update –init`` command:: - Variable name: Path - Variable value: C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files\Git\bin - Rename virtual switch name in Windows Server 2012R2/ 2016 in ``Virtual Switch Manager`` into `external`. Implementation: ~~~~~~~~~~~~~~~ * ``Step 1``: Create folders: ``C:\`` where output images will be located, ``C:\`` where you need to place the necessary hardware drivers. * ``Step 2``: Copy and extract necessary hardware drivers in ``C:\``. * ``Step 3``: Insert or burn Windows Server 2016 ISO to ``D:\``. * ``Step 4``: Download ``windows-openstack-imaging-tools`` tools. .. code-block:: console git clone https://github.com/cloudbase/windows-openstack-imaging-tools.git * ``Step 5``: Create & running script `create-windows-cloud-image.ps1`: .. 
code-block:: console git submodule update --init Import-Module WinImageBuilder.psm1 $windowsImagePath = "C:\\.qcow2" $VirtIOISOPath = "C:\\virtio.iso" $virtIODownloadLink = "https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/archive-virtio/virtio-win-0.1.133-2/virtio-win.iso" (New-Object System.Net.WebClient).DownloadFile($virtIODownloadLink, $VirtIOISOPath) $wimFilePath = "D:\sources\install.wim" $extraDriversPath = "C:\\" $image = (Get-WimFileImagesInfo -WimFilePath $wimFilePath)[1] $switchName = 'external' New-WindowsOnlineImage -WimFilePath $wimFilePath -ImageName $image.ImageName ` -WindowsImagePath $windowsImagePath -Type 'KVM' -ExtraFeatures @() ` -SizeBytes 20GB -CpuCores 2 -Memory 2GB -SwitchName $switchName ` -ProductKey $productKey -DiskLayout 'BIOS' ` -ExtraDriversPath $extraDriversPath ` -InstallUpdates:$false -AdministratorPassword 'Pa$$w0rd' ` -PurgeUpdates:$true -DisableSwap:$true After executing this command you will get two output files, first one being "C:\\.qcow2", which is the resulting windows whole disk image and "C:\\virtio.iso", which is virtio iso contains all the synthetic drivers for the KVM hypervisor. See `example_windows_images`_ for more details and examples. .. note:: We can change ``SizeBytes``, ``CpuCores`` and ``Memory`` depending on requirements. .. _`example_windows_images`: https://github.com/cloudbase/windows-openstack-imaging-tools/blob/master/Examples .. _`windows-openstack-imaging-tools`: https://github.com/cloudbase/windows-openstack-imaging-tools ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/cleaning.rst0000664000175000017500000004074200000000000020556 0ustar00zuulzuul00000000000000.. _cleaning: ============= Node cleaning ============= Overview ======== Ironic provides two modes for node cleaning: ``automated`` and ``manual``. 
``Automated cleaning`` is automatically performed before the first workload has been assigned to a node and when hardware is recycled from one workload to another. ``Manual cleaning`` must be invoked by the operator. .. _automated_cleaning: Automated cleaning ================== When hardware is recycled from one workload to another, ironic performs automated cleaning on the node to ensure it's ready for another workload. This ensures the tenant will get a consistent bare metal node deployed every time. Ironic implements automated cleaning by collecting a list of cleaning steps to perform on a node from the Power, Deploy, Management, BIOS, and RAID interfaces of the driver assigned to the node. These steps are then ordered by priority and executed on the node when the node is moved to ``cleaning`` state, if automated cleaning is enabled. With automated cleaning, nodes move to ``cleaning`` state when moving from ``active`` -> ``available`` state (when the hardware is recycled from one workload to another). Nodes also traverse cleaning when going from ``manageable`` -> ``available`` state (before the first workload is assigned to the nodes). For a full understanding of all state transitions into cleaning, please see :ref:`states`. Ironic added support for automated cleaning in the Kilo release. .. _enabling-cleaning: Enabling automated cleaning --------------------------- To enable automated cleaning, ensure that your ironic.conf is set as follows: .. code-block:: ini [conductor] automated_clean=true This will enable the default set of cleaning steps, based on your hardware and ironic hardware types used for nodes. This includes, by default, erasing all of the previous tenant's data. You may also need to configure a `Cleaning Network`_. Cleaning steps -------------- Cleaning steps used for automated cleaning are ordered from higher to lower priority, where a larger integer is a higher priority. 
In case of a conflict between priorities across interfaces, the following resolution order is used: Power, Management, Deploy, BIOS, and RAID interfaces. You can skip a cleaning step by setting the priority for that cleaning step to zero or 'None'. You can reorder the cleaning steps by modifying the integer priorities of the cleaning steps. See `How do I change the priority of a cleaning step?`_ for more information. Storage cleaning options ------------------------ Clean steps specific to storage are ``erase_devices``, ``erase_devices_metadata`` and (added in Yoga) ``erase_devices_express``. ``erase_devices`` aims to ensure that the data is removed in the most secure way available. On devices that support hardware assisted secure erasure (many NVMe and some ATA drives) this is the preferred option. If hardware-assisted secure erasure is not available and if ``[deploy]/continue_if_disk_secure_erase_fails`` is set to ``True``, cleaning will fall back to using ``shred`` to overwrite the contents of the device. Otherwise cleaning will fail. It is important to note that ``erase_devices`` may take a very long time (hours or even days) to complete, unless fast, hardware assisted data erasure is supported by all the devices in a system. Generally, it is very difficult (if possible at all) to recover data after performing cleaning with ``erase_devices``. ``erase_devices_metadata`` clean step doesn't provide as strong assurance of irreversible destruction of data as ``erase_devices``. However, it has the advantage of a reasonably quick runtime (seconds to minutes). It operates by destroying metadata of the storage device without erasing every bit of the data itself. Attempts of restoring data after running ``erase_devices_metadata`` may be successful but would certainly require relevant expertise and specialized tools. Lastly, ``erase_devices_express`` combines some of the perks of both ``erase_devices`` and ``erase_devices_metadata``. 
It attempts to utilize hardware assisted data erasure features if available (currently only NVMe devices are supported). In case hardware-assisted data erasure is not available, it falls back to metadata erasure for the device (which is identical to ``erase_devices_metadata``). It can be considered a time optimized mode of storage cleaning, aiming to perform as thorough data erasure as it is possible within a short period of time. This clean step is particularly well suited for environments with hybrid NVMe-HDD storage configuration as it allows fast and secure erasure of data stored on NVMes combined with equally fast but more basic metadata-based erasure of data on HDDs. ``erase_devices_express`` is disabled by default. In order to use it, the following configuration is recommended. .. code-block:: ini [deploy]/erase_devices_priority=0 [deploy]/erase_devices_metadata_priority=0 [conductor]/clean_step_priority_override=deploy.erase_devices_express:5 This ensures that ``erase_devices`` and ``erase_devices_metadata`` are disabled so that storage is not cleaned twice and then assigns a non-zero priority to ``erase_devices_express``, hence enabling it. Any non-zero priority specified in the priority override will work. Also `[deploy]/enable_nvme_secure_erase` should not be disabled (it is on by default). .. show-steps:: :phase: cleaning .. _manual_cleaning: Manual cleaning =============== ``Manual cleaning`` is typically used to handle long running, manual, or destructive tasks that an operator wishes to perform either before the first workload has been assigned to a node or between workloads. When initiating a manual clean, the operator specifies the cleaning steps to be performed. Manual cleaning can only be performed when a node is in the ``manageable`` state. Once the manual cleaning is finished, the node will be put in the ``manageable`` state again. Ironic added support for manual cleaning in the 4.4 (Mitaka series) release.
Setup ----- In order for manual cleaning to work, you may need to configure a `Cleaning Network`_. Starting manual cleaning via API -------------------------------- Manual cleaning can only be performed when a node is in the ``manageable`` state. The REST API request to initiate it is available in API version 1.15 and higher:: PUT /v1/nodes//states/provision (Additional information is available `here `_.) This API will allow operators to put a node directly into ``cleaning`` provision state from ``manageable`` state via 'target': 'clean'. The PUT will also require the argument 'clean_steps' to be specified. This is an ordered list of cleaning steps. A cleaning step is represented by a dictionary (JSON), in the form:: { "interface": "", "step": "", "args": {"": "", ..., "": } } The 'interface' and 'step' keys are required for all steps. If a cleaning step method takes keyword arguments, the 'args' key may be specified. It is a dictionary of keyword variable arguments, with each keyword-argument entry being : . If any step is missing a required keyword argument, manual cleaning will not be performed and the node will be put in ``clean failed`` provision state with an appropriate error message. If, during the cleaning process, a cleaning step determines that it has incorrect keyword arguments, all earlier steps will be performed and then the node will be put in ``clean failed`` provision state with an appropriate error message. An example of the request body for this API:: { "target":"clean", "clean_steps": [{ "interface": "raid", "step": "create_configuration", "args": {"create_nonroot_volumes": false} }, { "interface": "deploy", "step": "erase_devices" }] } In the above example, the node's RAID interface would configure hardware RAID without non-root volumes, and then all devices would be erased (in that order). 
Starting manual cleaning via "baremetal" CLI ------------------------------------------------------ Manual cleaning is available via the ``baremetal node clean`` command, starting with Bare Metal API version 1.15. The argument ``--clean-steps`` must be specified. Its value is one of: - a JSON string - path to a JSON file whose contents are passed to the API - '-', to read from stdin. This allows piping in the clean steps. Using '-' to signify stdin is common in Unix utilities. The following examples assume that the Bare Metal API version was set via the ``OS_BAREMETAL_API_VERSION`` environment variable. (The alternative is to add ``--os-baremetal-api-version 1.15`` to the command.):: export OS_BAREMETAL_API_VERSION=1.15 Examples of doing this with a JSON string:: baremetal node clean \ --clean-steps '[{"interface": "deploy", "step": "erase_devices_metadata"}]' baremetal node clean \ --clean-steps '[{"interface": "deploy", "step": "erase_devices"}]' Or with a file:: baremetal node clean \ --clean-steps my-clean-steps.txt Or with stdin:: cat my-clean-steps.txt | baremetal node clean \ --clean-steps - Cleaning Network ================ If you are using the Neutron DHCP provider (the default) you will also need to ensure you have configured a cleaning network. This network will be used to boot the ramdisk for in-band cleaning. You can use the same network as your tenant network. For steps to set up the cleaning network, please see :ref:`configure-cleaning`. .. _InbandvsOutOfBandCleaning: In-band vs out-of-band ====================== Ironic uses two main methods to perform actions on a node: in-band and out-of-band. Ironic supports using both methods to clean a node. In-band ------- In-band steps are performed by ironic making API calls to a ramdisk running on the node using a deploy interface. Currently, all the deploy interfaces support in-band cleaning. By default, ironic-python-agent ships with a minimal cleaning configuration, only erasing disks.
However, you can add your own cleaning steps and/or override default cleaning steps with a custom Hardware Manager. Out-of-band ----------- Out-of-band steps are actions performed by your management controller, such as IPMI, iLO, or DRAC. Out-of-band steps will be performed by ironic using a power or management interface. Which steps are performed depends on the hardware type and hardware itself. For Out-of-Band cleaning operations supported by iLO hardware types, refer to :ref:`ilo_node_cleaning`. FAQ === How are cleaning steps ordered? ------------------------------- For automated cleaning, cleaning steps are ordered by integer priority, where a larger integer is a higher priority. In case of a conflict between priorities across hardware interfaces, the following resolution order is used: #. Power interface #. Management interface #. Deploy interface #. BIOS interface #. RAID interface For manual cleaning, the cleaning steps should be specified in the desired order. How do I skip a cleaning step? ------------------------------ For automated cleaning, cleaning steps with a priority of 0 or None are skipped. How do I change the priority of a cleaning step? ------------------------------------------------ For manual cleaning, specify the cleaning steps in the desired order. For automated cleaning, it depends on whether the cleaning steps are out-of-band or in-band. Most out-of-band cleaning steps have an explicit configuration option for priority. Changing the priority of an in-band (ironic-python-agent) cleaning step requires use of a custom HardwareManager. The only exception is ``erase_devices``, which can have its priority set in ironic.conf.
For instance, to disable erase_devices, you'd set the following configuration option:: [deploy] erase_devices_priority=0 To enable/disable the in-band disk erase using ``ilo`` hardware type, use the following configuration option:: [ilo] clean_priority_erase_devices=0 The generic hardware manager first identifies whether a device is an NVMe drive or an ATA drive so that it can attempt a platform-specific secure erase method. In case of NVMe drives, it tries to perform a secure format operation by using the ``nvme-cli`` utility. This behavior can be controlled using the following configuration option (by default it is set to True):: [deploy] enable_nvme_secure_erase=True In case of ATA drives, it tries to perform ATA disk erase by using the ``hdparm`` utility. If neither method is supported, it performs software based disk erase using the ``shred`` utility. By default, the number of iterations performed by ``shred`` for software based disk erase is 1. To configure the number of iterations, use the following configuration option:: [deploy] erase_devices_iterations=1 Overriding step priority ------------------------ ``[conductor]clean_step_priority_override`` is a new configuration option which allows specifying priority of each step using multiple configuration values: .. code-block:: ini [conductor] clean_step_priority_override=deploy.erase_devices_metadata:123 clean_step_priority_override=management.reset_bios_to_default:234 clean_step_priority_override=management.clean_priority_reset_ilo:345 This parameter can be specified as many times as required to define priorities for several cleaning steps - the values will be combined. What cleaning step is running? 
------------------------------ To check what cleaning step the node is performing or attempted to perform and failed, run the following command; it will return the value in the node's ``driver_internal_info`` field:: baremetal node show $node_ident -f value -c driver_internal_info The ``clean_steps`` field will contain a list of all remaining steps with their priorities, and the first one listed is the step currently in progress or that the node failed before going into ``clean failed`` state. Should I disable automated cleaning? ------------------------------------ Automated cleaning is recommended for ironic deployments, however, there are some tradeoffs to having it enabled. For instance, ironic cannot deploy a new instance to a node that is currently cleaning, and cleaning can be a time consuming process. To mitigate this, we suggest using NVMe drives with support for NVMe Secure Erase (based on ``nvme-cli`` format command) or ATA drives with support for cryptographic ATA Security Erase, as typically the erase_devices step in the deploy interface takes the longest time to complete of all cleaning steps. Why can't I power on/off a node while it's cleaning? ---------------------------------------------------- During cleaning, nodes may be performing actions that shouldn't be interrupted, such as BIOS or Firmware updates. As a result, operators are forbidden from changing power state via the ironic API while a node is cleaning. Troubleshooting =============== If cleaning fails on a node, the node will be put into ``clean failed`` state. If the failure happens while running a clean step, the node is also placed in maintenance mode to prevent ironic from taking actions on the node. The operator should validate that no permanent damage has been done to the node and no processes are still running on it before removing the maintenance mode. .. note:: Older versions of ironic may put the node to maintenance even when no clean step has been running. 
Nodes in ``clean failed`` will not be powered off, as the node might be in a state such that powering it off could damage the node or remove useful information about the nature of the cleaning failure. A ``clean failed`` node can be moved to ``manageable`` state, where it cannot be scheduled by nova and you can safely attempt to fix the node. To move a node from ``clean failed`` to ``manageable``:: baremetal node manage $node_ident You can now take actions on the node, such as replacing a bad disk drive. Strategies for determining why a cleaning step failed include checking the ironic conductor logs, viewing logs on the still-running ironic-python-agent (if an in-band step failed), or performing general hardware troubleshooting on the node. When the node is repaired, you can move the node back to ``available`` state, to allow it to be scheduled by nova. :: # First, move it out of maintenance mode baremetal node maintenance unset $node_ident # Now, make the node available for scheduling by nova baremetal node provide $node_ident The node will begin automated cleaning from the start, and move to ``available`` state when complete. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/conductor-groups.rst0000664000175000017500000000462600000000000022314 0ustar00zuulzuul00000000000000.. _conductor-groups: ================ Conductor Groups ================ Overview ======== Large scale operators tend to have needs that involve creating well defined and delinated resources. In some cases, these systems may reside close by or in far away locations. Reasoning may be simple or complex, and yet is only known to the deployer and operator of the infrastructure. A common case is the need for delineated high availability domains where it would be much more efficient to manage a datacenter in Antarctica with a conductor in Antarctica, as opposed to a conductor in New York City. 
How it works ============ Starting in ironic 11.1, each node has a ``conductor_group`` field which influences how the ironic conductor calculates (and thus allocates) baremetal nodes under ironic's management. This calculation is performed independently by each operating conductor and as such if a conductor has a ``[conductor]conductor_group`` configuration option defined in its `ironic.conf` configuration file, the conductor will then be limited to only managing nodes with a matching ``conductor_group`` string. .. note:: Any conductor without a ``[conductor]conductor_group`` setting will only manage baremetal nodes without a ``conductor_group`` value set upon node creation. If no such conductor is present when conductor groups are configured, node creation will fail unless a ``conductor_group`` is specified upon node creation. .. warning:: Nodes without a ``conductor_group`` setting can only be managed when a conductor exists that does not have a ``[conductor]conductor_group`` defined. If all conductors have been migrated to use a conductor group, such nodes are effectively "orphaned". How to use ========== A conductor group value may be any case insensitive string up to 255 characters long which matches the ``^[a-zA-Z0-9_\-\.]*$`` regular expression. #. Set the ``[conductor]conductor_group`` option in ironic.conf on one or more, but not all conductors:: [conductor] conductor_group = OperatorDefinedString #. Restart the ironic-conductor service. #. Set the conductor group on one or more nodes:: baremetal node set \ --conductor-group "OperatorDefinedString" #. As desired and as needed, remaining conductors can be updated with the first two steps. Please be mindful of the constraints covered earlier in the document related to ability to manage nodes. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/console.rst0000664000175000017500000002334600000000000020441 0ustar00zuulzuul00000000000000.. _console: ================================= Configuring Web or Serial Console ================================= Overview -------- There are two types of console which are available in Bare Metal service, one is web console (`Node web console`_) which is available directly from web browser, another is serial console (`Node serial console`_). Node web console ---------------- The web console can be configured in Bare Metal service in the following way: * Install shellinabox in ironic conductor node. For RHEL/CentOS, shellinabox package is not present in base repositories, user must enable EPEL repository, you can find more from `FedoraProject page`_. .. note:: shellinabox is no longer maintained by the authorized author. `This `_ is a fork of the project on GitHub that aims to continue with maintenance of the shellinabox project. Installation example: Ubuntu:: sudo apt-get install shellinabox RHEL8/CentOS8/Fedora:: sudo dnf install shellinabox You can find more about shellinabox on the `shellinabox page`_. You can optionally use the SSL certificate in shellinabox. If you want to use the SSL certificate in shellinabox, you should install openssl and generate the SSL certificate. 1. Install openssl, for example: Ubuntu:: sudo apt-get install openssl RHEL8/CentOS8/Fedora:: sudo dnf install openssl 2. 
Generate the SSL certificate, here is an example, you can find more about openssl on the `openssl page`_:: cd /tmp/ca openssl genrsa -des3 -out my.key 1024 openssl req -new -key my.key -out my.csr cp my.key my.key.org openssl rsa -in my.key.org -out my.key openssl x509 -req -days 3650 -in my.csr -signkey my.key -out my.crt cat my.crt my.key > certificate.pem * Customize the console section in the Bare Metal service configuration file (/etc/ironic/ironic.conf), if you want to use SSL certificate in shellinabox, you should specify ``terminal_cert_dir``. for example:: [console] # # Options defined in ironic.drivers.modules.console_utils # # Path to serial console terminal program. Used only by Shell # In A Box console. (string value) #terminal=shellinaboxd # Directory containing the terminal SSL cert (PEM) for serial # console access. Used only by Shell In A Box console. (string # value) terminal_cert_dir=/tmp/ca # Directory for holding terminal pid files. If not specified, # the temporary directory will be used. (string value) #terminal_pid_dir= # Time interval (in seconds) for checking the status of # console subprocess. (integer value) #subprocess_checking_interval=1 # Time (in seconds) to wait for the console subprocess to # start. (integer value) #subprocess_timeout=10 * Append console parameters for bare metal PXE boot in the Bare Metal service configuration file (/etc/ironic/ironic.conf). See the reference for configuration in :ref:`kernel-boot-parameters`. * Enable the ``ipmitool-shellinabox`` console interface, for example: .. code-block:: ini [DEFAULT] enabled_console_interfaces = ipmitool-shellinabox,no-console * Configure node web console. 
If the node uses a hardware type, for example ``ipmi``, set the node's console interface to ``ipmitool-shellinabox``:: baremetal node set --console-interface ipmitool-shellinabox Enable the web console, for example:: baremetal node set \ --driver-info = baremetal node console enable Check whether the console is enabled, for example:: baremetal node validate Disable the web console, for example:: baremetal node console disable baremetal node unset --driver-info The ```` is driver dependent. The actual name of this field can be checked in driver properties, for example:: baremetal driver property list For the ``ipmi`` hardware type, this option is ``ipmi_terminal_port``. Give a customized port number to ````, for example ``8023``, this customized port is used in web console url. Get web console information for a node as follows:: baremetal node console show +-----------------+----------------------------------------------------------------------+ | Property | Value | +-----------------+----------------------------------------------------------------------+ | console_enabled | True | | console_info | {u'url': u'http://:', u'type': u'shellinabox'} | +-----------------+----------------------------------------------------------------------+ You can open web console using above ``url`` through web browser. If ``console_enabled`` is ``false``, ``console_info`` is ``None``, web console is disabled. If you want to launch web console, see the ``Configure node web console`` part. .. note:: An error message you may encounter when enabling the console can read ``Console subprocess failed to start. Timeout or error while waiting for console subprocess to start for node`` along with ``[server] Failed to find any available port!``. This error is coming from shellinabox itself, not from the communication with the BMC. 
One potential cause for this issue is that there are already shellinabox daemons running which block the configured port (remove them if appropriate and retry to enable the console). .. _`shellinabox page`: https://code.google.com/archive/p/shellinabox/ .. _`openssl page`: https://www.openssl.org/ .. _`FedoraProject page`: https://fedoraproject.org/wiki/Infrastructure/Mirroring Node serial console ------------------- Serial consoles for nodes are implemented using `socat`_. It is supported by the ``ipmi`` and ``irmc`` hardware types. Serial consoles can be configured in the Bare Metal service as follows: * Install socat on the ironic conductor node. Also, ``socat`` needs to be in the $PATH environment variable that the ironic-conductor service uses. Installation example: Ubuntu:: sudo apt-get install socat RHEL8/CentOS8/Fedora:: sudo dnf install socat * Append console parameters for bare metal PXE boot in the Bare Metal service configuration file. See the reference on how to configure them in :ref:`kernel-boot-parameters`. * Enable the ``ipmitool-socat`` console interface, for example: .. code-block:: ini [DEFAULT] enabled_console_interfaces = ipmitool-socat,no-console * Configure node console. If the node uses a hardware type, for example ``ipmi``, set the node's console interface to ``ipmitool-socat``:: baremetal node set --console-interface ipmitool-socat Enable the serial console, for example:: baremetal node set --driver-info ipmi_terminal_port= baremetal node console enable Check whether the serial console is enabled, for example:: baremetal node validate Disable the serial console, for example:: baremetal node console disable baremetal node unset --driver-info Serial console information is available from the Bare Metal service. 
Get serial console information for a node from the Bare Metal service as follows:: baremetal node console show +-----------------+----------------------------------------------------------------------+ | Property | Value | +-----------------+----------------------------------------------------------------------+ | console_enabled | True | | console_info | {u'url': u'tcp://:', u'type': u'socat'} | +-----------------+----------------------------------------------------------------------+ If ``console_enabled`` is ``false`` or ``console_info`` is ``None`` then the serial console is disabled. If you want to launch serial console, see the ``Configure node console``. Node serial console of the Bare Metal service is compatible with the serial console of the Compute service. Hence, serial consoles to Bare Metal nodes can be seen and interacted with via the Dashboard service. In order to achieve that, you need to follow the documentation for :nova-doc:`Serial Console ` from the Compute service. Configuring HA ~~~~~~~~~~~~~~ When using Bare Metal serial console under High Availability (HA) configuration, you may consider some settings below. * If you use HAProxy, you may need to set the timeout for both client and server sides with appropriate values. Here is an example of the configuration for the timeout parameter. :: frontend nova_serial_console bind 192.168.20.30:6083 timeout client 10m # This parameter is necessary use_backend nova_serial_console if <...> backend nova_serial_console balance source timeout server 10m # This parameter is necessary option tcpka option tcplog server controller01 192.168.30.11:6083 check inter 2000 rise 2 fall 5 server controller02 192.168.30.12:6083 check inter 2000 rise 2 fall 5 * The Compute service's caching feature may need to be enabled in order to make the Bare Metal serial console work under a HA configuration. Here is an example of caching configuration in ``nova.conf``. .. 
code-block:: ini [cache] enabled = true backend = dogpile.cache.memcached memcache_servers = memcache01:11211,memcache02:11211,memcache03:11211 .. _`socat`: http://www.dest-unreach.org/socat ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/deploy-steps.rst0000664000175000017500000000014600000000000021420 0ustar00zuulzuul00000000000000============ Deploy Steps ============ The deploy steps section has moved to :doc:`node-deployment`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/dhcp-less.rst0000664000175000017500000001077100000000000020657 0ustar00zuulzuul00000000000000Layer 3 or DHCP-less ramdisk booting ==================================== Booting nodes via PXE, while universally supported, suffers from one disadvantage: it requires a direct L2 connectivity between the node and the control plane for DHCP. Using virtual media it is possible to avoid not only the unreliable TFTP protocol, but DHCP altogether. When network data is provided for a node as explained below, the generated virtual media ISO will also serve as a configdrive_, and the network data will be stored in the standard OpenStack location. The simple-init_ element needs to be used when creating the deployment ramdisk. The Glean_ tool will look for a media labeled as ``config-2``. If found, the network information from it will be read, and the node's networking stack will be configured accordingly. .. code-block:: console ironic-python-agent-builder -o /output/ramdisk \ debian-minimal -e simple-init .. warning:: Ramdisks based on distributions with NetworkManager require Glean_ 1.19.0 or newer to work. .. note:: If desired, some interfaces can still be configured to use DHCP. 
Hardware type support --------------------- This feature is known to work with the following hardware types: * :doc:`Redfish ` with ``redfish-virtual-media`` boot * :doc:`iLO ` with ``ilo-virtual-media`` boot Configuring network data ------------------------ When the Bare Metal service is running within OpenStack, no additional configuration is required - the network configuration will be fetched from the Network service. Alternatively, the user can build and pass network configuration in form of a network_data_ JSON to a node via the ``network_data`` field. Node-based configuration takes precedence over the configuration generated by the Network service and also works in standalone mode. .. code-block:: bash baremetal node set --network-data ~/network_data.json An example network data: .. code-block:: json { "links": [ { "id": "port-92750f6c-60a9-4897-9cd1-090c5f361e18", "type": "phy", "ethernet_mac_address": "52:54:00:d3:6a:71" } ], "networks": [ { "id": "network0", "type": "ipv4", "link": "port-92750f6c-60a9-4897-9cd1-090c5f361e18", "ip_address": "192.168.122.42", "netmask": "255.255.255.0", "network_id": "network0", "routes": [] } ], "services": [] } .. note:: Some fields are redundant with the port information. We're looking into simplifying the format, but currently all these fields are mandatory. You'll need the deployed image to support network data, e.g. by pre-installing cloud-init_ or Glean_ on it (most cloud images have the former). Then you can provide the network data when deploying, for example: .. code-block:: bash baremetal node deploy \ --config-drive "{\"network_data\": $(cat ~/network_data.json)}" Some first-boot services, such as Ignition_, don't support network data. You can provide their configuration as part of user data instead: .. code-block:: bash baremetal node deploy \ --config-drive "{\"user_data\": \"... ignition config ...\"}" .. _configdrive: https://docs.openstack.org/nova/queens/user/config-drive.html .. 
_Glean: https://docs.openstack.org/infra/glean/ .. _simple-init: https://docs.openstack.org/diskimage-builder/latest/elements/simple-init/README.html .. _network_data: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html .. _cloud-init: https://cloudinit.readthedocs.io/en/latest/ .. _Ignition: https://coreos.github.io/ignition/ .. _l3-external-ip: Deploying outside of the provisioning network --------------------------------------------- If you need to combine traditional deployments using a provisioning network with virtual media deployments over L3, you may need to provide an alternative IP address for the remote nodes to connect to: .. code-block:: ini [deploy] http_url = external_http_url = You may also need to override the callback URL, which is normally fetched from the service catalog or configured in the ``[service_catalog]`` section: .. code-block:: ini [deploy] external_callback_url = ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8866668 ironic-20.1.0/doc/source/admin/drivers/0000775000175000017500000000000000000000000017713 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/drivers/ansible.rst0000664000175000017500000004266400000000000022076 0ustar00zuulzuul00000000000000======================== Ansible deploy interface ======================== `Ansible`_ is a mature and popular automation tool, written in Python and requiring no agents running on the node being configured. All communications with the node are by default performed over secure SSH transport. The ``ansible`` deploy interface uses Ansible playbooks to define the deployment logic. It is not based on :ironic-python-agent-doc:`Ironic Python Agent (IPA) <>` and does not generally need IPA to be running in the deploy ramdisk. 
Overview ======== The main advantage of this deploy interface is extended flexibility in regards to changing and adapting node deployment logic for specific use cases, via Ansible tooling that is already familiar to operators. It can be used to shorten the usual feature development cycle of * implementing logic in ironic, * implementing logic in IPA, * rebuilding deploy ramdisk, * uploading deploy ramdisk to Glance/HTTP storage, * reassigning deploy ramdisk to nodes, * restarting ironic-conductor service(s) and * running a test deployment by using a "stable" deploy ramdisk and not requiring ironic-conductor restarts (see `Extending playbooks`_). The main disadvantage of this deploy interface is the synchronous manner of performing deployment/cleaning tasks. A separate ``ansible-playbook`` process is spawned for each node being provisioned or cleaned, which consumes one thread from the thread pool available to the ``ironic-conductor`` process and blocks this thread until the node provisioning or cleaning step is finished or fails. This has to be taken into account when planning an ironic deployment that enables this deploy interface. Each action (deploy, clean) is described by a single playbook with roles, which is run whole during deployment, or tag-wise during cleaning. Control of cleaning steps is through tags and auxiliary clean steps file. The playbooks for actions can be set per-node, as can the clean steps file. Features -------- Similar to deploy interfaces relying on :ironic-python-agent-doc:`Ironic Python Agent (IPA) <>`, this deploy interface also depends on the deploy ramdisk calling back to ironic API's ``heartbeat`` endpoint. However, the driver is currently synchronous, so only the first heartbeat is processed and is used as a signal to start ``ansible-playbook`` process. 
User images ~~~~~~~~~~~ Supports whole-disk images and partition images: - compressed images are downloaded to RAM and converted to disk device; - raw images are streamed to disk directly. For partition images the driver will create root partition, and, if requested, ephemeral and swap partitions as set in node's ``instance_info`` by the Compute service or operator. The created partition table will be of ``msdos`` type by default; the node's ``disk_label`` capability is honored if set in node's ``instance_info`` (see also :ref:`choosing_the_disk_label`). Configdrive partition ~~~~~~~~~~~~~~~~~~~~~ Creating a configdrive partition is supported for both whole disk and partition images, on both ``msdos`` and ``GPT`` labeled disks. Root device hints ~~~~~~~~~~~~~~~~~ Root device hints are currently supported in their basic form only, with exact matches (see :ref:`root-device-hints` for more details). If no root device hint is provided for the node, the first device returned as part of ``ansible_devices`` fact is used as root device to create partitions on or write the whole disk image to. Node cleaning ~~~~~~~~~~~~~ Cleaning is supported, both automated and manual. The driver has two default clean steps: - wiping device metadata - disk shredding Their priority can be overridden via ``[deploy]\erase_devices_metadata_priority`` and ``[deploy]\erase_devices_priority`` options, respectively, in the ironic configuration file. Since all cleaning steps of this driver are known to the ironic-conductor service, booting the deploy ramdisk is completely skipped when there are no cleaning steps to perform. .. note:: Aborting cleaning steps is not supported. Logging ~~~~~~~ Logging is implemented as a custom Ansible callback module that makes use of ``oslo.log`` and ``oslo.config`` libraries and can re-use logging configuration defined in the main ironic configuration file to set logging for Ansible events, or use a separate file for this purpose. 
It works best when ``journald`` support for logging is enabled. Requirements ============ Ansible Tested with, and targets, Ansible 2.5.x Bootstrap image requirements ---------------------------- - password-less sudo permissions for the user used by Ansible - python 2.7.x - openssh-server - GNU coreutils - util-linux - parted - gdisk - qemu-utils - python-requests (for ironic callback and streaming image download) - python-netifaces (for ironic callback) A set of scripts to build a suitable deploy ramdisk based on TinyCore Linux and ``tinyipa`` ramdisk, and an element for ``diskimage-builder`` can be found in the ironic-staging-drivers_ project but will eventually be migrated to the new ironic-python-agent-builder_ project. Setting up your environment =========================== #. Install ironic (either as part of OpenStack or standalone) - If using ironic as part of OpenStack, ensure that the Image service is configured to use the Object Storage service as backend, and the Bare Metal service is configured accordingly, see :doc:`Configure the Image service for temporary URLs <../../install/configure-glance-swift>`. #. Install Ansible version as specified in ``ironic/driver-requirements.txt`` file #. Edit ironic configuration file A. Add ``ansible`` to the list of deploy interfaces defined in ``[DEFAULT]\enabled_deploy_interfaces`` option. B. Ensure that a hardware type supporting ``ansible`` deploy interface is enabled in ``[DEFAULT]\enabled_hardware_types`` option. C. Modify options in the ``[ansible]`` section of ironic's configuration file if needed (see `Configuration file`_). #. (Re)start ironic-conductor service #. Build suitable deploy kernel and ramdisk images #. Upload them to Glance or put in your HTTP storage #. Create new or update existing nodes to use the enabled driver of your choice and populate `Driver properties for the Node`_ when different from defaults. #. Deploy the node as usual. 
Ansible-deploy options ---------------------- Configuration file ~~~~~~~~~~~~~~~~~~~ Driver options are configured in ``[ansible]`` section of ironic configuration file, for their descriptions and default values please see `configuration file sample <../../configuration/config.html#ansible>`_. Driver properties for the Node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Set them per-node via ``baremetal node set`` command, for example: .. code-block:: shell baremetal node set \ --deploy-interface ansible \ --driver-info ansible_username=stack \ --driver-info ansible_key_file=/etc/ironic/id_rsa ansible_username User name to use for Ansible to access the node. Default is taken from ``[ansible]/default_username`` option of the ironic configuration file (defaults to ``ansible``). ansible_key_file Private SSH key used to access the node. Default is taken from ``[ansible]/default_key_file`` option of the ironic configuration file. If neither is set, the default private SSH keys of the user running the ``ironic-conductor`` process will be used. ansible_deploy_playbook Playbook to use when deploying this node. Default is taken from ``[ansible]/default_deploy_playbook`` option of the ironic configuration file (defaults to ``deploy.yaml``). ansible_shutdown_playbook Playbook to use to gracefully shutdown the node in-band. Default is taken from ``[ansible]/default_shutdown_playbook`` option of the ironic configuration file (defaults to ``shutdown.yaml``). ansible_clean_playbook Playbook to use when cleaning the node. Default is taken from ``[ansible]/default_clean_playbook`` option of the ironic configuration file (defaults to ``clean.yaml``). ansible_clean_steps_config Auxiliary YAML file that holds description of cleaning steps used by this node, and defines playbook tags in ``ansible_clean_playbook`` file corresponding to each cleaning step. Default is taken from ``[ansible]/default_clean_steps_config`` option of the ironic configuration file (defaults to ``clean_steps.yaml``). 
ansible_python_interpreter Absolute path to the python interpreter on the managed machine. Default is taken from ``[ansible]/default_python_interpreter`` option of the ironic configuration file. Ansible uses ``/usr/bin/python`` by default. Customizing the deployment logic ================================ Expected playbooks directory layout ----------------------------------- The ``[ansible]\playbooks_path`` option in the ironic configuration file is expected to have a standard layout for an Ansible project with some additions:: | \_ inventory \_ add-ironic-nodes.yaml \_ roles \_ role1 \_ role2 \_ ... | \_callback_plugins \_ ... | \_ library \_ ... The extra files relied by this driver are: inventory Ansible inventory file containing a single entry of ``conductor ansible_connection=local``. This basically defines an alias to ``localhost``. Its purpose is to make logging for tasks performed by Ansible locally and referencing the localhost in playbooks more intuitive. This also suppresses warnings produced by Ansible about ``hosts`` file being empty. add-ironic-nodes.yaml This file contains an Ansible play that populates in-memory Ansible inventory with access information received from the ansible-deploy interface, as well as some per-node variables. Include it in all your custom playbooks as the first play. The default ``deploy.yaml`` playbook is using several smaller roles that correspond to particular stages of deployment process: - ``discover`` - e.g. set root device and image target - ``prepare`` - if needed, prepare system, for example create partitions - ``deploy`` - download/convert/write user image and configdrive - ``configure`` - post-deployment steps, e.g. installing the bootloader Some more included roles are: - ``shutdown`` - used to gracefully power the node off in-band - ``clean`` - defines cleaning procedure, with each clean step defined as separate playbook tag. 
Extending playbooks ------------------- Most probably you'd start experimenting like this: #. Create a copy of ``deploy.yaml`` playbook *in the same folder*, name it distinctively. #. Create Ansible roles with your customized logic in ``roles`` folder. A. In your custom deploy playbook, replace the ``prepare`` role with your own one that defines steps to be run *before* image download/writing. This is a good place to set facts overriding those provided/omitted by the driver, like ``ironic_partitions`` or ``ironic_root_device``, and create custom partitions or (software) RAIDs. B. In your custom deploy playbook, replace the ``configure`` role with your own one that defines steps to be run *after* image is written to disk. This is a good place for example to configure the bootloader and add kernel options to avoid additional reboots. C. Use those new roles in your new playbook. #. Assign the custom deploy playbook you've created to the node's ``driver_info/ansible_deploy_playbook`` field. #. Run deployment. A. No ironic-conductor restart is necessary. B. A new deploy ramdisk must be built and assigned to nodes only when you want to use a command/script/package not present in the current deploy ramdisk and you can not or do not want to install those at runtime. Variables you have access to ---------------------------- This driver will pass the single JSON-ified extra var argument to Ansible (as in ``ansible-playbook -e ..``). Those values are then accessible in your plays as well (some of them are optional and might not be defined): .. 
code-block:: yaml ironic: nodes: - ip: "" name: "" user: "" extra: "" image: url: "" disk_format: "" container_format: "" checksum: "" mem_req: "" tags: "" properties: "" configdrive: type: "" location: "" partition_info: label: "" preserve_ephemeral: "" ephemeral_format: "" partitions: "" raid_config: "" ``ironic.nodes`` List of dictionaries (currently of only one element) that will be used by ``add-ironic-nodes.yaml`` play to populate in-memory inventory. It also contains a copy of node's ``extra`` field so you can access it in the playbooks. The Ansible's host is set to node's UUID. ``ironic.image`` All fields of node's ``instance_info`` that start with ``image_`` are passed inside this variable. Some extra notes and fields: - ``mem_req`` is calculated from image size (if available) and config option ``[ansible]extra_memory``. - if ``checksum`` is not in the form ``:``, hashing algorithm is assumed to be ``md5`` (default in Glance). - ``validate_certs`` - boolean (``yes/no``) flag that turns validating image store SSL certificate on or off (default is 'yes'). Governed by ``[ansible]image_store_insecure`` option in ironic configuration file. - ``cafile`` - custom CA bundle to use for validating image store SSL certificate. Takes value of ``[ansible]image_store_cafile`` if that is defined. Currently is not used by default playbooks, as Ansible has no way to specify the custom CA bundle to use for single HTTPS actions, however you can use this value in your custom playbooks to for example upload and register this CA in the ramdisk at deploy time. - ``client_cert`` - cert file for client-side SSL authentication. Takes value of ``[ansible]image_store_certfile`` option if defined. Currently is not used by default playbooks, however you can use this value in your custom playbooks. - ``client_key`` - private key file for client-side SSL authentication. Takes value of ``[ansible]image_store_keyfile`` option if defined. 
Currently is not used by default playbooks, however you can use this value in your custom playbooks. ``ironic.partition_info.partitions`` Optional. List of dictionaries defining partitions to create on the node in the form: .. code-block:: yaml partitions: - name: "" unit: "" size: "" type: "" align: "" format: "" flags: flag_name: "" The driver will populate this list from ``root_gb``, ``swap_mb`` and ``ephemeral_gb`` fields of ``instance_info``. The driver will also prepend the ``bios_grub``-labeled partition when deploying on GPT-labeled disk, and pre-create a 64 MiB partition for configdrive if it is set in ``instance_info``. Please read the documentation included in the ``ironic_parted`` module's source for more info on the module and its arguments. ``ironic.partition_info.ephemeral_format`` Optional. Taken from ``instance_info``, it defines file system to be created on the ephemeral partition. Defaults to the value of ``[pxe]\default_ephemeral_format`` option in ironic configuration file. ``ironic.partition_info.preserve_ephemeral`` Optional. Taken from the ``instance_info``, it specifies if the ephemeral partition must be preserved or rebuilt. Defaults to ``no``. ``ironic.raid_config`` Taken from the ``target_raid_config`` if not empty, it specifies the RAID configuration to apply. As usual for Ansible playbooks, you also have access to standard Ansible facts discovered by ``setup`` module. Included custom Ansible modules ------------------------------- The provided ``playbooks_path/library`` folder includes several custom Ansible modules used by default implementation of ``deploy`` and ``prepare`` roles. You can use these modules in your playbooks as well. ``stream_url`` Streaming download from HTTP(S) source to the disk device directly, tries to be compatible with Ansible's ``get_url`` module in terms of module arguments. Due to the low level of such operation it is not idempotent. 
``ironic_parted`` creates partition tables and partitions with ``parted`` utility. Due to the low level of such operation it is not idempotent. Please read the documentation included in the module's source for more information about this module and its arguments. The name is chosen so that the ``parted`` module included in Ansible is not shadowed. .. _Ansible: https://docs.ansible.com/ansible/latest/index.html .. _ironic-staging-drivers: https://opendev.org/x/ironic-staging-drivers/src/branch/stable/pike/imagebuild .. _ironic-python-agent-builder: https://opendev.org/openstack/ironic-python-agent-builder ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/drivers/ibmc.rst0000664000175000017500000002456700000000000021375 0ustar00zuulzuul00000000000000=============== iBMC driver =============== Overview ======== The ``ibmc`` driver is targeted for Huawei V5 series rack server such as 2288H V5, CH121 V5. The iBMC hardware type enables the user to take advantage of features of `Huawei iBMC`_ to control Huawei server. The ``ibmc`` hardware type supports the following Ironic interfaces: * Management Interface: Boot device management * Power Interface: Power management * `RAID Interface`_: RAID controller and disk management * `Vendor Interface`_: ibmc passthru interfaces Prerequisites ============= The `HUAWEI iBMC Client library`_ should be installed on the ironic conductor node(s). For example, it can be installed with ``pip``:: sudo pip install python-ibmcclient Enabling the iBMC driver ============================ #. Add ``ibmc`` to the list of ``enabled_hardware_types``, ``enabled_power_interfaces``, ``enabled_vendor_interfaces`` and ``enabled_management_interfaces`` in ``/etc/ironic/ironic.conf``. For example:: [DEFAULT] ... 
enabled_hardware_types = ibmc enabled_power_interfaces = ibmc enabled_management_interfaces = ibmc enabled_raid_interfaces = ibmc enabled_vendor_interfaces = ibmc #. Restart the ironic conductor service:: sudo service ironic-conductor restart # Or, for RDO: sudo systemctl restart openstack-ironic-conductor Registering a node with the iBMC driver =========================================== Nodes configured to use the driver should have the ``driver`` property set to ``ibmc``. The following properties are specified in the node's ``driver_info`` field: - ``ibmc_address``: The URL address to the ibmc controller. It must include the authority portion of the URL, and can optionally include the scheme. If the scheme is missing, https is assumed. For example: https://ibmc.example.com. This is required. - ``ibmc_username``: User account with admin/server-profile access privilege. This is required. - ``ibmc_password``: User account password. This is required. - ``ibmc_verify_ca``: If ibmc_address has the **https** scheme, the driver will use a secure (TLS_) connection when talking to the ibmc controller. By default (if this is set to True), the driver will try to verify the host certificates. This can be set to the path of a certificate file or directory with trusted certificates that the driver will use for verification. To disable verifying TLS_, set this to False. This is optional. The ``baremetal node create`` command can be used to enroll a node with the ``ibmc`` driver. For example: .. code-block:: bash baremetal node create --driver ibmc --driver-info ibmc_address=https://example.com \ --driver-info ibmc_username=admin \ --driver-info ibmc_password=password For more information about enrolling nodes see :ref:`enrollment` in the install guide. RAID Interface ============== Currently, only RAID controller which supports OOB management can be managed. See :doc:`/admin/raid` for more information on Ironic RAID support. 
The following properties are supported by the iBMC raid interface implementation, ``ibmc``: Mandatory properties -------------------- * ``size_gb``: Size in gigabytes (integer) for the logical disk. Use ``MAX`` as ``size_gb`` if this logical disk is supposed to use the rest of the space available. * ``raid_level``: RAID level for the logical disk. Valid values are ``JBOD``, ``0``, ``1``, ``5``, ``6``, ``1+0``, ``5+0`` and ``6+0``. And it is possible that some RAID controllers can only support a subset RAID levels. .. NOTE:: RAID level ``2`` is not supported by ``iBMC`` driver. Optional properties ------------------- * ``is_root_volume``: Optional. Specifies whether this disk is a root volume. By default, this is ``False``. * ``volume_name``: Optional. Name of the volume to be created. If this is not specified, it will be N/A. Backing physical disk hints --------------------------- See :doc:`/admin/raid` for more information on backing disk hints. These are machine-independent properties. The hints are specified for each logical disk to help Ironic find the desired disks for RAID configuration. * ``share_physical_disks`` * ``disk_type`` * ``interface_type`` * ``number_of_physical_disks`` Backing physical disks ---------------------- These are HUAWEI RAID controller dependent properties: * ``controller``: Optional. Supported values are: RAID storage id, RAID storage name or RAID controller name. If a bare metal server have more than one controller, this is mandatory. Typical values would look like: * RAID Storage Id: ``RAIDStorage0`` * RAID Storage Name: ``RAIDStorage0`` * RAID Controller Name: ``RAID Card1 Controller``. * ``physical_disks``: Optional. Supported values are: disk-id, disk-name or disk serial number. Typical values for hdd disk would look like: * Disk Id: ``HDDPlaneDisk0`` * Disk Name: ``Disk0``. 
* Disk SerialNumber: ``38DGK77LF77D`` Delete RAID configuration ------------------------- For ``delete_configuration`` step, ``ibmc`` will do: * delete all logical disks * delete all hot-spare disks Logical disks creation priority ------------------------------- Logical Disks creation priority based on three properties: * ``share_physical_disks`` * ``physical_disks`` * ``size_gb`` The logical disks creation priority strictly follow the table below, if multiple logical disks have the same priority, then they will be created with the same order in ``logical_disks`` array. ==================== ========================== ========= Share physical disks Specified Physical Disks Size ==================== ========================== ========= no yes int|max no no int yes yes int yes yes max yes no int yes no max no no max ==================== ========================== ========= Physical disks choice strategy ------------------------------ .. note:: physical-disk-group: a group of physical disks which have been used by some logical-disks with same RAID level. * If no ``physical_disks`` are specified, the "waste least" strategy will be used to choose the physical disks. * waste least disk capacity: when using disks with different capacity, it will cause a waste of disk capacity. This is to avoid with highest priority. * using least total disk capacity: for example, we can create 400G RAID 5 with both 5 100G-disks and 3 200G-disks. 5 100G disks is a better strategy because it uses a 500G capacity totally. While 3 200G-disks are 600G totally. * using least disk count: finally, if waste capacity and total disk capacity are both the same (it rarely happens?), we will choose the one with the minimum number of disks. * when ``share_physical_disks`` option is present, ``ibmc`` driver will create logical disk upon existing physical-disk-group list first. Only when no existing physical-disk-group matches, then it chooses unused physical disks with same strategy described above. 
When multiple existing physical-disk-groups match, it will use the "waste least" strategy too: the bigger the capacity left, the better. For example, to create the logical disk shown below on an ``ibmc`` server which already has two RAID5 logical disks whose shareable capacities are 500G and 300G, the ``ibmc`` driver will choose the second one. .. code-block:: json { "logical_disks": [ { "controller": "RAID Card1 Controller", "raid_level": "5", "size_gb": 100, "share_physical_disks": true } ] } And the ``ibmc`` server has two RAID5 logical disks already. * When ``size_gb`` is set to ``MAX``, the ``ibmc`` driver will automatically work through all possible cases and choose the "best" solution, which has the biggest capacity and uses the fewest disks. For example: to create a RAID 5+0 logical disk with MAX size in a server that has 9 200G disks, it will finally choose "8 disks + span-number 2" but not "9 disks + span-number 3". Although both have 1200G capacity in total, the former uses only 8 disks and the latter uses 9 disks. If you want to choose the latter solution, you can specify the disk count to use by adding the ``number_of_physical_disks`` option. .. code-block:: json { "logical_disks": [ { "controller": "RAID Card1 Controller", "raid_level": "5+0", "size_gb": "MAX" } ] } Examples -------- In a typical scenario we may want to create: * RAID 5, 500G, root OS volume with 3 disks * RAID 5, rest available space, data volume with rest disks ..
code-block:: json { "logical_disks": [ { "volume_name": "os_volume", "controller": "RAID Card1 Controller", "is_root_volume": "True", "physical_disks": [ "Disk0", "Disk1", "Disk2" ], "raid_level": "5", "size_gb": "500" }, { "volume_name": "data_volume", "controller": "RAID Card1 Controller", "raid_level": "5", "size_gb": "MAX" } ] } Vendor Interface ========================================= The ``ibmc`` hardware type provides vendor passthru interfaces shown below: ======================== ============ ====================================== Method Name HTTP Method Description ======================== ============ ====================================== boot_up_seq GET Query boot up sequence get_raid_controller_list GET Query RAID controller summary info ======================== ============ ====================================== .. _Huawei iBMC: https://e.huawei.com/en/products/cloud-computing-dc/servers/accessories/ibmc .. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security .. _HUAWEI iBMC Client library: https://pypi.org/project/python-ibmcclient/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/drivers/idrac.rst0000664000175000017500000010324600000000000021535 0ustar00zuulzuul00000000000000============ iDRAC driver ============ Overview ======== The integrated Dell Remote Access Controller (iDRAC_) is an out-of-band management platform on Dell EMC servers, and is supported directly by the ``idrac`` hardware type. This driver uses the Dell Web Services for Management (WSMAN) protocol and the standard Distributed Management Task Force (DMTF) Redfish protocol to perform all of its functions. iDRAC_ hardware is also supported by the generic ``ipmi`` and ``redfish`` hardware types, though with smaller feature sets. 
Key features of the Dell iDRAC driver include: * Out-of-band node inspection * Boot device management and firmware management * Power management * RAID controller management and RAID volume configuration * BIOS settings configuration Ironic Features --------------- The ``idrac`` hardware type supports the following Ironic interfaces: * `BIOS Interface`_: BIOS management * `Inspect Interface`_: Hardware inspection * `Management Interface`_: Boot device and firmware management * Power Interface: Power management * `RAID Interface`_: RAID controller and disk management * `Vendor Interface`_: BIOS management (WSMAN) and eject virtual media (Redfish) Prerequisites ------------- The ``idrac`` hardware type requires the ``python-dracclient`` library to be installed on the ironic conductor node(s) if an Ironic node is configured to use an ``idrac-wsman`` interface implementation, for example:: sudo pip install 'python-dracclient>=3.1.0' Additionally, the ``idrac`` hardware type requires the ``sushy`` library to be installed on the ironic conductor node(s) if an Ironic node is configured to use an ``idrac-redfish`` interface implementation, for example:: sudo pip install 'python-dracclient>=3.1.0' 'sushy>=2.0.0' Enabling -------- The iDRAC driver supports WSMAN for the bios, inspect, management, power, raid, and vendor interfaces. In addition, it supports Redfish for the bios, inspect, management, power, and raid interfaces. The iDRAC driver allows you to mix and match WSMAN and Redfish interfaces. The ``idrac-wsman`` implementation must be enabled to use WSMAN for an interface. The ``idrac-redfish`` implementation must be enabled to use Redfish for an interface. To enable the ``idrac`` hardware type with the minimum interfaces, all using WSMAN, add the following to your ``/etc/ironic/ironic.conf``: .. 
code-block:: ini [DEFAULT] enabled_hardware_types=idrac enabled_management_interfaces=idrac-wsman enabled_power_interfaces=idrac-wsman To enable all optional features (BIOS, inspection, RAID, and vendor passthru) using Redfish where it is supported and WSMAN where not, use the following configuration: .. code-block:: ini [DEFAULT] enabled_hardware_types=idrac enabled_bios_interfaces=idrac-redfish enabled_inspect_interfaces=idrac-redfish enabled_management_interfaces=idrac-redfish enabled_power_interfaces=idrac-redfish enabled_raid_interfaces=idrac-redfish enabled_vendor_interfaces=idrac-redfish Below is the list of supported interface implementations in priority order: ================ =================================================== Interface Supported Implementations ================ =================================================== ``bios`` ``idrac-wsman``, ``idrac-redfish``, ``no-bios`` ``boot`` ``ipxe``, ``pxe``, ``idrac-redfish-virtual-media`` ``console`` ``no-console`` ``deploy`` ``direct``, ``ansible``, ``ramdisk`` ``inspect`` ``idrac-wsman``, ``idrac``, ``idrac-redfish``, ``inspector``, ``no-inspect`` ``management`` ``idrac-wsman``, ``idrac``, ``idrac-redfish`` ``network`` ``flat``, ``neutron``, ``noop`` ``power`` ``idrac-wsman``, ``idrac``, ``idrac-redfish`` ``raid`` ``idrac-wsman``, ``idrac``, ``idrac-redfish``, ``no-raid`` ``rescue`` ``no-rescue``, ``agent`` ``storage`` ``noop``, ``cinder``, ``external`` ``vendor`` ``idrac-wsman``, ``idrac``, ``idrac-redfish``, ``no-vendor`` ================ =================================================== .. NOTE:: ``idrac`` is the legacy name of the WSMAN interface. It has been deprecated in favor of ``idrac-wsman`` and may be removed in a future release. Protocol-specific Properties ---------------------------- The WSMAN and Redfish protocols require different properties to be specified in the Ironic node's ``driver_info`` field to communicate with the bare metal system's iDRAC. 
The WSMAN protocol requires the following properties: * ``drac_username``: The WSMAN user name to use when communicating with the iDRAC. Usually ``root``. * ``drac_password``: The password for the WSMAN user to use when communicating with the iDRAC. * ``drac_address``: The IP address of the iDRAC. The Redfish protocol requires the following properties: * ``redfish_username``: The Redfish user name to use when communicating with the iDRAC. Usually ``root``. * ``redfish_password``: The password for the Redfish user to use when communicating with the iDRAC. * ``redfish_address``: The URL address of the iDRAC. It must include the authority portion of the URL, and can optionally include the scheme. If the scheme is missing, https is assumed. * ``redfish_system_id``: The Redfish ID of the server to be managed. This should always be: ``/redfish/v1/Systems/System.Embedded.1``. For other Redfish protocol parameters see :doc:`/admin/drivers/redfish`. If using only interfaces which use WSMAN (``idrac-wsman``), then only the WSMAN properties must be supplied. If using only interfaces which use Redfish (``idrac-redfish``), then only the Redfish properties must be supplied. If using a mix of interfaces, where some use WSMAN and others use Redfish, both the WSMAN and Redfish properties must be supplied. Enrolling --------- The following command enrolls a bare metal node with the ``idrac`` hardware type using WSMAN for all interfaces: .. code-block:: bash baremetal node create --driver idrac \ --driver-info drac_username=user \ --driver-info drac_password=pa$$w0rd \ --driver-info drac_address=drac.host The following command enrolls a bare metal node with the ``idrac`` hardware type using Redfish for all interfaces: .. 
code-block:: bash baremetal node create --driver idrac \ --driver-info redfish_username=user \ --driver-info redfish_password=pa$$w0rd \ --driver-info redfish_address=drac.host \ --driver-info redfish_system_id=/redfish/v1/Systems/System.Embedded.1 \ --bios-interface idrac-redfish \ --inspect-interface idrac-redfish \ --management-interface idrac-redfish \ --power-interface idrac-redfish \ --raid-interface idrac-redfish \ --vendor-interface idrac-redfish The following command enrolls a bare metal node with the ``idrac`` hardware type assuming a mix of Redfish and WSMAN interfaces are used: .. code-block:: bash baremetal node create --driver idrac \ --driver-info drac_username=user \ --driver-info drac_password=pa$$w0rd \ --driver-info drac_address=drac.host \ --driver-info redfish_username=user \ --driver-info redfish_password=pa$$w0rd \ --driver-info redfish_address=drac.host \ --driver-info redfish_system_id=/redfish/v1/Systems/System.Embedded.1 \ --bios-interface idrac-redfish \ --inspect-interface idrac-redfish \ --management-interface idrac-redfish \ --power-interface idrac-redfish .. NOTE:: If using WSMAN for the management interface, then WSMAN must be used for the power interface. The same applies to Redfish. It is currently not possible to use Redfish for one and WSMAN for the other. BIOS Interface ============== The BIOS interface implementations supported by the ``idrac`` hardware type allow BIOS to be configured with the standard clean/deploy step approach. Example ------- A clean step to enable ``Virtualization`` and ``SRIOV`` in BIOS of an iDRAC BMC would be as follows:: { "target":"clean", "clean_steps": [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [ { "name": "ProcVirtualization", "value": "Enabled" }, { "name": "SriovGlobalEnable", "value": "Enabled" } ] } } ] } See the `Known Issues`_ for a known issue with ``factory_reset`` clean step. For additional details of BIOS configuration, see :doc:`/admin/bios`. 
Inspect Interface ================= The Dell iDRAC out-of-band inspection process catalogs all the same attributes of the server as the IPMI driver. Unlike IPMI, it does this without requiring the system to be rebooted, or even to be powered on. Inspection is performed using the Dell WSMAN or Redfish protocol directly without affecting the operation of the system being inspected. The inspection discovers the following properties: * ``cpu_arch``: cpu architecture * ``cpus``: number of cpus * ``local_gb``: disk size in gigabytes * ``memory_mb``: memory size in megabytes Extra capabilities: * ``boot_mode``: UEFI or BIOS boot mode. * ``pci_gpu_devices``: number of GPU devices connected to the bare metal. It also creates baremetal ports for each NIC port detected in the system. The ``idrac-wsman`` inspect interface discovers which NIC ports are configured to PXE boot and sets ``pxe_enabled`` to ``True`` on those ports. The ``idrac-redfish`` inspect interface does not currently set ``pxe_enabled`` on the ports. The user should ensure that ``pxe_enabled`` is set correctly on the ports following inspection with the ``idrac-redfish`` inspect interface. Management Interface ==================== The management interface for ``idrac-redfish`` supports: * updating firmware on nodes using a manual cleaning step. See :doc:`/admin/drivers/redfish` for more information on firmware update support. * updating system and getting its inventory using configuration molds. For more information see `Import and export configuration`_. Import and export configuration ------------------------------- The clean and deploy steps provided in this section allow to configure the system and collect the system inventory using configuration mold files. The introduction of this feature in the Wallaby release is experimental. These steps are: * ``export_configuration`` with the ``export_configuration_location`` input parameter to export the configuration from the existing system. 
* ``import_configuration`` with the ``import_configuration_location`` input parameter to import the existing configuration mold into the system. * ``import_export_configuration`` with the ``export_configuration_location`` and ``import_configuration_location`` input parameters. This step combines the previous two steps into one step that first imports existing configuration mold into system, then exports the resulting configuration. The input parameters provided include the URL where the configuration mold is to be stored after the export, or the reference location for an import. For more information on setting up storage and available options see `Storage setup`_. Configuration molds are JSON files that contain three top-level sections: ``bios``, ``raid`` and ``oem``. The following is an example of a configuration mold: .. code-block:: { "bios": { "reset": false, "settings": [ { "name": "ProcVirtualization", "value": "Enabled" }, { "name": "MemTest", "value": "Disabled" } ] } "raid": { "create_nonroot_volumes": true, "create_root_volume": true, "delete_existing": false, "target_raid_config": { "logical_disks": [ { "size_gb": 50, "raid_level": "1+0", "controller": "RAID.Integrated.1-1", "volume_name": "root_volume", "is_root_volume": true, "physical_disks": [ "Disk.Bay.0:Encl.Int.0-1:RAID.Integrated.1-1", "Disk.Bay.1:Encl.Int.0-1:RAID.Integrated.1-1" ] }, { "size_gb": 100, "raid_level": "5", "controller": "RAID.Integrated.1-1", "volume_name": "data_volume", "physical_disks": [ "Disk.Bay.2:Encl.Int.0-1:RAID.Integrated.1-1", "Disk.Bay.3:Encl.Int.0-1:RAID.Integrated.1-1", "Disk.Bay.4:Encl.Int.0-1:RAID.Integrated.1-1" ] } ] } } "oem": { "interface": "idrac-redfish", "data": { "SystemConfiguration": { "Model": "PowerEdge R640", "ServiceTag": "8CY9Z99", "TimeStamp": "Fri Jun 26 08:43:15 2020", "Components": [ { [...] 
"FQDD": "NIC.Slot.1-1-1", "Attributes": [ { "Name": "BlnkLeds", "Value": "15", "Set On Import": "True", "Comment": "Read and Write" }, { "Name": "VirtMacAddr", "Value": "00:00:00:00:00:00", "Set On Import": "False", "Comment": "Read and Write" }, { "Name": "VirtualizationMode", "Value": "NONE", "Set On Import": "True", "Comment": "Read and Write" }, [...] ] } ] } } } Currently, the OEM section is the only section that is supported. The OEM section uses the iDRAC Server Configuration Profile (SCP) and can be edited as necessary if it complies with the SCP. For more information about SCP and its capabilities, see SCP_Reference_Guide_. .. NOTE:: iDRAC BMC connection settings are not exported to avoid overwriting these in another system when using unmodified exported configuration mold in import step. If need to replicate iDRAC BMC connection settings, then add these settings manually to configuration mold for import step. To replicate the system configuration to that of a similar system, perform the following steps: #. Configure a golden, or one to many, system. #. Use the ``export_configuration`` step to export the configuration to the wanted location. #. Adjust the exported configuration mold for other systems to replicate. For example, remove sections that do not need to be replicated such as iDRAC connection settings. The configuration mold can be accessed directly from the storage location. #. Import the selected configuration mold into the other systems using the ``import_configuration`` step. It is not mandatory to use ``export_configuration`` step to create a configuration mold. Upload the file to a designated storage location without using Ironic if it has been created manually or by other means. Storage setup ^^^^^^^^^^^^^ To start using these steps, configure the storage location. The settings can be found in the ``[molds]`` section. Configure the storage type from the ``[molds]storage`` setting. 
Currently, ``swift``, which is enabled by default, and ``http`` are supported. In the setup input parameters, the complete HTTP URL is used. This requires that the containers (for ``swift``) and the directories (for ``http``) are created beforehand, and that read/write access is configured accordingly. .. NOTE:: Use of TLS is strongly advised. This setup configuration allows a user to access these locations outside of Ironic to list, create, update, and delete the configuration molds. For more information see `Swift configuration`_ and `HTTP configuration`_. Swift configuration ~~~~~~~~~~~~~~~~~~~ To use Swift with configuration molds, #. Create the containers to be used for configuration mold storage. #. For Ironic Swift user that is configured in the ``[swift]`` section add read/write access to these containers. HTTP configuration ~~~~~~~~~~~~~~~~~~ To use HTTP server with configuration molds, #. Enable HTTP PUT support. #. Create the directory to be used for the configuration mold storage. #. Configure read/write access for HTTP Basic access authentication and provide user credentials in ``[molds]user`` and ``[molds]password`` fields. The HTTP web server does not support multitenancy and is intended to be used in a stand-alone Ironic, or single-tenant OpenStack environment. RAID Interface ============== See :doc:`/admin/raid` for more information on Ironic RAID support. RAID interface of ``redfish`` hardware type can be used on iDRAC systems. Compared to ``redfish`` RAID interface, using ``idrac-redfish`` adds: * Waiting for real-time operations to be available on RAID controllers. When using ``redfish`` this is not guaranteed and reboots might be intermittently required to complete, * Converting non-RAID disks to RAID mode if there are any, * Clearing foreign configuration, if any, after deleting virtual disks. The following properties are supported by the iDRAC WSMAN and Redfish RAID interface implementation: .. 
NOTE:: When using ``idrac-redfish`` for RAID interface iDRAC firmware greater than 4.40.00.00 is required. Mandatory properties -------------------- * ``size_gb``: Size in gigabytes (integer) for the logical disk. Use ``MAX`` as ``size_gb`` if this logical disk is supposed to use the rest of the space available. * ``raid_level``: RAID level for the logical disk. Valid values are ``0``, ``1``, ``5``, ``6``, ``1+0``, ``5+0`` and ``6+0``. .. NOTE:: ``JBOD`` and ``2`` are not supported, and will fail with reason: 'Cannot calculate spans for RAID level.' Optional properties ------------------- * ``is_root_volume``: Optional. Specifies whether this disk is a root volume. By default, this is ``False``. * ``volume_name``: Optional. Name of the volume to be created. If this is not specified, it will be auto-generated. Backing physical disk hints --------------------------- See :doc:`/admin/raid` for more information on backing disk hints. These are machine-independent information. The hints are specified for each logical disk to help Ironic find the desired disks for RAID configuration. * ``disk_type`` * ``interface_type`` * ``share_physical_disks`` * ``number_of_physical_disks`` Backing physical disks ---------------------- These are Dell RAID controller-specific values and must match the names provided by the iDRAC. * ``controller``: Mandatory. The name of the controller to use. * ``physical_disks``: Optional. The names of the physical disks to use. .. NOTE:: ``physical_disks`` is a mandatory parameter if the property ``size_gb`` is set to ``MAX``. Examples -------- Creation of RAID ``1+0`` logical disk with six disks on one controller: .. 
code-block:: json { "logical_disks": [ { "controller": "RAID.Integrated.1-1", "is_root_volume": "True", "physical_disks": [ "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1", "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1", "Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1", "Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1", "Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1", "Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1"], "raid_level": "1+0", "size_gb": "MAX"}]} Manual RAID Invocation ---------------------- The following command can be used to delete any existing RAID configuration. It deletes all virtual disks/RAID volumes, unassigns all global and dedicated hot spare physical disks, and clears foreign configuration: .. code-block:: bash baremetal node clean --clean-steps \ '[{"interface": "raid", "step": "delete_configuration"}]' ${node_uuid} The following command shows an example of how to set the target RAID configuration: .. code-block:: bash baremetal node set --target-raid-config '{ "logical_disks": [ { "controller": "RAID.Integrated.1-1", "is_root_volume": true, "physical_disks": [ "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1", "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1"], "raid_level": "0", "size_gb": "MAX"}]}' ${node_uuid} The following command can be used to create a RAID configuration: .. code-block:: bash baremetal node clean --clean-steps \ '[{"interface": "raid", "step": "create_configuration"}]' When the physical disk names or controller names are not known, the following Python code example shows how the ``python-dracclient`` can be used to fetch the information directly from the Dell bare metal: .. 
code-block:: python import dracclient.client client = dracclient.client.DRACClient( host="192.168.1.1", username="root", password="calvin") controllers = client.list_raid_controllers() print(controllers) physical_disks = client.list_physical_disks() print(physical_disks) Or using ``sushy`` with Redfish: .. code-block:: python import sushy client = sushy.Sushy('https://192.168.1.1', username='root', password='calvin', verify=False) for s in client.get_system_collection().get_members(): print("System: %(id)s" % {'id': s.identity}) for c in system1.storage.get_members(): print("\tController: %(id)s" % {'id': c.identity}) for d in c.drives: print("\t\tDrive: %(id)s" % {'id': d.identity}) Vendor Interface ================ idrac-wsman ----------- Dell iDRAC BIOS management is available through the Ironic WSMAN vendor passthru interface. ======================== ============ ====================================== Method Name HTTP Method Description ======================== ============ ====================================== ``abandon_bios_config`` ``DELETE`` Abandon a BIOS configuration job. ``commit_bios_config`` ``POST`` Commit a BIOS configuration job submitted through ``set_bios_config``. Required argument: ``reboot`` - indicates whether a reboot job should be automatically created with the config job. Returns a dictionary containing the ``job_id`` key with the ID of the newly created config job, and the ``reboot_required`` key indicating whether the node needs to be rebooted to execute the config job. ``get_bios_config`` ``GET`` Returns a dictionary containing the node's BIOS settings. ``list_unfinished_jobs`` ``GET`` Returns a dictionary containing the key ``unfinished_jobs``; its value is a list of dictionaries. Each dictionary represents an unfinished config job object. ``set_bios_config`` ``POST`` Change the BIOS configuration on a node. Required argument: a dictionary of {``AttributeName``: ``NewValue``}. 
Returns a dictionary containing the ``is_commit_required`` key indicating whether ``commit_bios_config`` needs to be called to apply the changes and the ``is_reboot_required`` value indicating whether the server must also be rebooted. Possible values are ``true`` and ``false``. ======================== ============ ====================================== Examples ^^^^^^^^ Get BIOS Config ~~~~~~~~~~~~~~~ .. code-block:: bash baremetal node passthru call --http-method GET get_bios_config Snippet of output showing virtualization enabled: .. code-block:: json {"ProcVirtualization": { "current_value": "Enabled", "instance_id": "BIOS.Setup.1-1:ProcVirtualization", "name": "ProcVirtualization", "pending_value": null, "possible_values": [ "Enabled", "Disabled"], "read_only": false }} There are a number of items to note from the above snippet: * ``name``: this is the name to use in a call to ``set_bios_config``. * ``current_value``: the current state of the setting. * ``pending_value``: if the value has been set, but not yet committed, the new value is shown here. The change can either be committed or abandoned. * ``possible_values``: shows a list of valid values which can be used in a call to ``set_bios_config``. * ``read_only``: indicates if the value is capable of being changed. Set BIOS Config ~~~~~~~~~~~~~~~ .. code-block:: bash baremetal node passthru call set_bios_config --arg "name=value" Walkthrough of performing a BIOS configuration change: The following section demonstrates how to change BIOS configuration settings, detect that a commit and reboot are required, and act on them accordingly. The two properties that are being changed are: * Enable virtualization technology of the processor * Globally enable SR-IOV .. code-block:: bash baremetal node passthru call set_bios_config \ --arg "ProcVirtualization=Enabled" \ --arg "SriovGlobalEnable=Enabled" This returns a dictionary indicating what actions are required next: .. 
code-block:: json { "is_reboot_required": true, "is_commit_required": true } Commit BIOS Changes ~~~~~~~~~~~~~~~~~~~ The next step is to commit the pending change to the BIOS. Note that in this example, the ``reboot`` argument is set to ``true``. The response indicates that a reboot is no longer required as it has been scheduled automatically by the ``commit_bios_config`` call. If the reboot argument is not supplied, the job is still created, however it remains in the ``scheduled`` state until a reboot is performed. The reboot can be initiated through the Ironic power API. .. code-block:: bash baremetal node passthru call commit_bios_config \ --arg "reboot=true" .. code-block:: json { "job_id": "JID_499377293428", "reboot_required": false } The state of any executing job can be queried: .. code-block:: bash baremetal node passthru call --http-method GET list_unfinished_jobs .. code-block:: json {"unfinished_jobs": [{"status": "Scheduled", "name": "ConfigBIOS:BIOS.Setup.1-1", "until_time": "TIME_NA", "start_time": "TIME_NOW", "message": "Task successfully scheduled.", "percent_complete": "0", "id": "JID_499377293428"}]} Abandon BIOS Changes ~~~~~~~~~~~~~~~~~~~~ Instead of committing, a pending change can be abandoned: .. code-block:: bash baremetal node passthru call --http-method DELETE abandon_bios_config The abandon command does not provide a response body. Change Boot Mode ^^^^^^^^^^^^^^^^ The boot mode of the iDRAC can be changed to: * BIOS - Also called legacy or traditional boot mode. The BIOS initializes the system’s processors, memory, bus controllers, and I/O devices. After initialization is complete, the BIOS passes control to operating system (OS) software. The OS loader uses basic services provided by the system BIOS to locate and load OS modules into system memory. After booting the system, the BIOS and embedded management controllers execute system management algorithms, which monitor and optimize the condition of the underlying hardware. 
BIOS configuration settings enable fine-tuning of the performance, power management, and reliability features of the system. * UEFI - The Unified Extensible Firmware Interface does not change the traditional purposes of the system BIOS. To a large extent, a UEFI-compliant BIOS performs the same initialization, boot, configuration, and management tasks as a traditional BIOS. However, UEFI does change the interfaces and data structures the BIOS uses to interact with I/O device firmware and operating system software. The primary intent of UEFI is to eliminate shortcomings in the traditional BIOS environment, enabling system firmware to continue scaling with industry trends. The UEFI boot mode offers: * Improved partitioning scheme for boot media * Support for media larger than 2 TB * Redundant partition tables * Flexible handoff from BIOS to OS * Consolidated firmware user interface * Enhanced resource allocation for boot device firmware The boot mode can be changed via the WSMAN vendor passthru interface as follows: .. code-block:: bash baremetal node passthru call set_bios_config \ --arg "BootMode=Uefi" baremetal node passthru call commit_bios_config \ --arg "reboot=true" .. code-block:: bash baremetal node passthru call set_bios_config \ --arg "BootMode=Bios" baremetal node passthru call commit_bios_config \ --arg "reboot=true" idrac-redfish ------------- Through the ``idrac-redfish`` vendor passthru interface these methods are available: ================ ============ ============================================== Method Name HTTP Method Description ================ ============ ============================================== ``eject_media`` ``POST`` Eject a virtual media device. If no device is provided then all attached devices will be ejected. Optional argument: ``boot_device`` - the boot device to eject, either, ``cd``, ``dvd``, ``usb`` or ``floppy``. 
================ ============ ============================================== Known Issues ============ Nodes go into maintenance mode ------------------------------ After some period of time, nodes managed by the ``idrac`` hardware type may go into maintenance mode in Ironic. This issue can be worked around by changing the Ironic power state poll interval to 70 seconds. See ``[conductor]sync_power_state_interval`` in ``/etc/ironic/ironic.conf``. PXE reset with "factory_reset" BIOS clean step ---------------------------------------------- When using the ``UEFI boot mode`` with non-default PXE interface, the factory reset can cause the PXE interface to be reset to default, which doesn't allow the server to PXE boot for any further operations. This can cause a ``clean_failed`` state on the node or ``deploy_failed`` if you attempt to deploy a node after this step. For now, the only solution is for the operator to manually restore the PXE settings of the server for it to PXE boot again, properly. The problem is caused by the fact that with the ``UEFI boot mode``, the ``idrac`` uses BIOS settings to manage PXE configuration. This is not the case with the ``BIOS boot mode`` where the PXE configuration is handled as a configuration job on the integrated NIC itself, independently of the BIOS settings. .. _Ironic_RAID: https://docs.openstack.org/ironic/latest/admin/raid.html .. _iDRAC: https://www.dell.com/idracmanuals WSMAN vendor passthru timeout ----------------------------- When iDRAC is not ready and executing WSMAN vendor passthru commands, they take more time as waiting for iDRAC to become ready again and then time out, for example: .. code-block:: bash baremetal node passthru call --http-method GET \ aed58dca-1b25-409a-a32f-3a817d59e1e0 list_unfinished_jobs Timed out waiting for a reply to message ID 547ce7995342418c99ef1ea4a0054572 (HTTP 500) To avoid this need to increase timeout for messaging in ``/etc/ironic/ironic.conf`` and restart Ironic API service. .. 
code-block:: ini [DEFAULT] rpc_response_timeout = 600 Timeout when powering off ------------------------- Some servers might be slow when soft powering off and time out. The default retry count is 6, resulting in 30 seconds timeout (the default retry interval set by ``post_deploy_get_power_state_retry_interval`` is 5 seconds). To resolve this issue, increase the timeout to 90 seconds by setting the retry count to 18 as follows: .. code-block:: ini [agent] post_deploy_get_power_state_retries = 18 Unable to mount remote share with iDRAC firmware before 4.40.40.00 ------------------------------------------------------------------ When using iDRAC firmware 4.40.00.00 and consecutive versions before 4.40.40.00 with virtual media boot and new Virtual Console plug-in type eHTML5, there is an error: "Unable to mount remote share". This is a known issue that is fixed in 4.40.40.00 iDRAC firmware release. If you cannot upgrade, then adjust settings in iDRAC to use plug-in type HTML5. In iDRAC web UI go to Configuration -> Virtual Console and select Plug-in Type to HTML5. During upgrade to 4.40.00.00 or newer iDRAC firmware eHTML5 is automatically selected if default plug-in type has been used and never changed. Systems that have plug-in type changed will keep selected plug-in type after iDRAC firmware upgrade. Firmware update from Swift fails -------------------------------- When using Swift to stage firmware update files in Management interface ``firmware_update`` clean step of ``redfish`` or ``idrac`` hardware type, the cleaning fails with error "An internal error occurred. Unable to complete the specified operation." in iDRAC job. Until this is fixed, use HTTP service to stage firmware files for iDRAC. .. 
_SCP_Reference_Guide: http://downloads.dell.com/manuals/common/dellemc-server-config-profile-refguide.pdf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/drivers/ilo.rst0000664000175000017500000030317600000000000021242 0ustar00zuulzuul00000000000000.. _ilo: ========== iLO driver ========== Overview ======== iLO driver enables to take advantage of features of iLO management engine in HPE ProLiant servers. The ``ilo`` hardware type is targeted for HPE ProLiant Gen8 and Gen9 systems which have `iLO 4 management engine`_. From **Pike** release ``ilo`` hardware type supports ProLiant Gen10 systems which have `iLO 5 management engine`_. iLO5 conforms to `Redfish`_ API and hence hardware type ``redfish`` (see :doc:`redfish`) is also an option for this kind of hardware but it lacks the iLO specific features. For more details and for up-to-date information (like tested platforms, known issues, etc), please check the `iLO driver wiki page `_. For enabling Gen10 systems and getting detailed information on Gen10 feature support in Ironic please check this `Gen10 wiki section`_. Hardware type ============= ProLiant hardware is primarily supported by the ``ilo`` hardware type. ``ilo5`` hardware type is only supported on ProLiant Gen10 and later systems. Both hardware can be used with reference hardware type ``ipmi`` (see :doc:`ipmitool`) and ``redfish`` (see :doc:`redfish`). For information on how to enable the ``ilo`` and ``ilo5`` hardware type, see :ref:`enable-hardware-types`. .. note:: Only HPE ProLiant Gen10 servers supports hardware type ``redfish``. .. warning:: It is important to note that while the HPE Edgeline series of servers may contain iLO adapters, they are known to not be compatible with the ``ilo`` hardware type. The ``redfish`` hardware type should be used instead. 
The hardware type ``ilo`` supports following HPE server features: * `Boot mode support`_ * `UEFI Secure Boot Support`_ * `Node Cleaning Support`_ * `Node Deployment Customization`_ * `Hardware Inspection Support`_ * `Swiftless deploy for intermediate images`_ * `HTTP(S) Based Deploy Support`_ * `Support for iLO driver with Standalone Ironic`_ * `RAID Support`_ * `Disk Erase Support`_ * `Initiating firmware update as manual clean step`_ * `Smart Update Manager (SUM) based firmware update`_ * `Updating security parameters as manual clean step`_ * `Update Minimum Password Length security parameter as manual clean step`_ * `Update Authentication Failure Logging security parameter as manual clean step`_ * `Activating iLO Advanced license as manual clean step`_ * `Removing CA certificates from iLO as manual clean step`_ * `Firmware based UEFI iSCSI boot from volume support`_ * `Certificate based validation in iLO`_ * `Rescue mode support`_ * `Inject NMI support`_ * `Soft power operation support`_ * `BIOS configuration support`_ * `IPv6 support`_ * `Layer 3 or DHCP-less ramdisk booting`_ Apart from above features hardware type ``ilo5`` also supports following features: * `Out of Band RAID Support`_ * `Out of Band Sanitize Disk Erase Support`_ * `Out of Band One Button Secure Erase Support`_ * `UEFI-HTTPS Boot support`_ Hardware interfaces ^^^^^^^^^^^^^^^^^^^ The ``ilo`` hardware type supports following hardware interfaces: * bios Supports ``ilo`` and ``no-bios``. The default is ``ilo``. They can be enabled by using the ``[DEFAULT]enabled_bios_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_bios_interfaces = ilo,no-bios * boot Supports ``ilo-virtual-media``, ``ilo-pxe`` and ``ilo-ipxe``. The default is ``ilo-virtual-media``. The ``ilo-virtual-media`` interface provides security enhanced PXE-less deployment by using iLO virtual media to boot up the bare metal node. 
The ``ilo-pxe`` and ``ilo-ipxe`` interfaces use PXE and iPXE respectively for deployment(just like :ref:`pxe-boot`). These interfaces do not require iLO Advanced license. They can be enabled by using the ``[DEFAULT]enabled_boot_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_boot_interfaces = ilo-virtual-media,ilo-pxe,ilo-ipxe * console Supports ``ilo`` and ``no-console``. The default is ``ilo``. They can be enabled by using the ``[DEFAULT]enabled_console_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_console_interfaces = ilo,no-console .. note:: To use ``ilo`` console interface you need to enable iLO feature 'IPMI/DCMI over LAN Access' on `iLO4 `_ and `iLO5 `_ management engine. * inspect Supports ``ilo`` and ``inspector``. The default is ``ilo``. They can be enabled by using the ``[DEFAULT]enabled_inspect_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_inspect_interfaces = ilo,inspector .. note:: :ironic-inspector-doc:`Ironic Inspector <>` needs to be configured to use ``inspector`` as the inspect interface. * management Supports only ``ilo``. It can be enabled by using the ``[DEFAULT]enabled_management_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_management_interfaces = ilo * power Supports only ``ilo``. It can be enabled by using the ``[DEFAULT]enabled_power_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_power_interfaces = ilo * raid Supports ``agent`` and ``no-raid``. The default is ``no-raid``. They can be enabled by using the ``[DEFAULT]enabled_raid_interfaces`` option in ``ironic.conf`` as given below: .. 
code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_raid_interfaces = agent,no-raid * storage Supports ``cinder`` and ``noop``. The default is ``noop``. They can be enabled by using the ``[DEFAULT]enabled_storage_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_storage_interfaces = cinder,noop .. note:: The storage interface ``cinder`` is supported only when corresponding boot interface of the ``ilo`` hardware type based node is ``ilo-pxe`` or ``ilo-ipxe``. Please refer to :doc:`/admin/boot-from-volume` for configuring ``cinder`` as a storage interface. * rescue Supports ``agent`` and ``no-rescue``. The default is ``no-rescue``. They can be enabled by using the ``[DEFAULT]enabled_rescue_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_rescue_interfaces = agent,no-rescue The ``ilo5`` hardware type supports all the ``ilo`` interfaces described above, except for ``boot`` and ``raid`` interfaces. The details of ``boot`` and ``raid`` interfaces is as under: * raid Supports ``ilo5`` and ``no-raid``. The default is ``ilo5``. They can be enabled by using the ``[DEFAULT]enabled_raid_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo5 enabled_raid_interfaces = ilo5,no-raid * boot Supports ``ilo-uefi-https`` apart from the other boot interfaces supported by ``ilo`` hardware type. This can be enabled by using the ``[DEFAULT]enabled_boot_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo5 enabled_boot_interfaces = ilo-uefi-https,ilo-virtual-media The ``ilo`` and ``ilo5`` hardware type support all standard ``deploy`` and ``network`` interface implementations, see :ref:`enable-hardware-interfaces` for details. The following command can be used to enroll a ProLiant node with ``ilo`` hardware type: .. 
code-block:: console baremetal node create \ --driver ilo \ --deploy-interface direct \ --raid-interface agent \ --rescue-interface agent \ --driver-info ilo_address= \ --driver-info ilo_username= \ --driver-info ilo_password= \ --driver-info deploy_iso= \ --driver-info rescue_iso= .. note:: The fields ``deploy_iso`` and ``rescue_iso`` used to be called ``ilo_deploy_iso`` and ``ilo_rescue_iso`` before the Xena release. The following command can be used to enroll a ProLiant node with ``ilo5`` hardware type: .. code-block:: console baremetal node create \ --driver ilo5 \ --deploy-interface direct \ --raid-interface ilo5 \ --rescue-interface agent \ --driver-info ilo_address= \ --driver-info ilo_username= \ --driver-info ilo_password= \ --driver-info deploy_iso= \ --driver-info rescue_iso= Please refer to :doc:`/install/enabling-drivers` for detailed explanation of hardware type. Node configuration ^^^^^^^^^^^^^^^^^^ * Each node is configured for ``ilo`` and ``ilo5`` hardware type by setting the following ironic node object's properties in ``driver_info``: - ``ilo_address``: IP address or hostname of the iLO. - ``ilo_username``: Username for the iLO with administrator privileges. - ``ilo_password``: Password for the above iLO user. - ``client_port``: (optional) Port to be used for iLO operations if you are using a custom port on the iLO. Default port used is 443. - ``client_timeout``: (optional) Timeout for iLO operations. Default timeout is 60 seconds. - ``ca_file``: (optional) CA certificate file to validate iLO. - ``console_port``: (optional) Node's UDP port for console access. Any unused port on the ironic conductor node may be used. This is required only when ``ilo-console`` interface is used. * The following properties are also required in node object's ``driver_info`` if ``ilo-virtual-media`` boot interface is used: - ``deploy_iso``: The glance UUID of the deploy ramdisk ISO image. 
- ``instance info/boot_iso`` property to be either boot iso Glance UUID or a HTTP(S) URL. This is optional property and is used when ``boot_option`` is set to ``netboot`` or ``ramdisk``. .. note:: When ``boot_option`` is set to ``ramdisk``, the ironic node must be configured to use ``ramdisk`` deploy interface. See :ref:`ramdisk-deploy` for details. .. note:: The ``boot_iso`` property used to be called ``ilo_boot_iso`` before the Xena release. - ``rescue_iso``: The glance UUID of the rescue ISO image. This is optional property and is used when ``rescue`` interface is set to ``agent``. * The following properties are also required in node object's ``driver_info`` if ``ilo-pxe`` or ``ilo-ipxe`` boot interface is used: - ``deploy_kernel``: The glance UUID or a HTTP(S) URL of the deployment kernel. - ``deploy_ramdisk``: The glance UUID or a HTTP(S) URL of the deployment ramdisk. - ``rescue_kernel``: The glance UUID or a HTTP(S) URL of the rescue kernel. This is optional property and is used when ``rescue`` interface is set to ``agent``. - ``rescue_ramdisk``: The glance UUID or a HTTP(S) URL of the rescue ramdisk. This is optional property and is used when ``rescue`` interface is set to ``agent``. * The following properties are also required in node object's ``driver_info`` if ``ilo-uefi-https`` boot interface is used for ``ilo5`` hardware type: - ``deploy_kernel``: The glance UUID or a HTTPS URL of the deployment kernel. - ``deploy_ramdisk``: The glance UUID or a HTTPS URL of the deployment ramdisk. - ``bootloader``: The glance UUID or a HTTPS URL of the bootloader. - ``rescue_kernel``: The glance UUID or a HTTPS URL of the rescue kernel. This is optional property and is used when ``rescue`` interface is set to ``agent``. - ``rescue_ramdisk``: The glance UUID or a HTTP(S) URL of the rescue ramdisk. This is optional property and is used when ``rescue`` interface is set to ``agent``. .. note:: ``ilo-uefi-https`` boot interface is supported by only ``ilo5`` hardware type. 
If the images are not hosted in glance, the references must be HTTPS URLs hosted by secure webserver. This boot interface can be used only when the current boot mode is ``UEFI``. .. note:: The fields ``deploy_kernel``, ``deploy_ramdisk``, ``rescue_kernel``, ``rescue_ramdisk`` and ``bootloader`` used to have an ``ilo_`` prefix before the Xena release. * The following parameters are mandatory in ``driver_info`` if ``ilo-inspect`` inspect interface is used and SNMPv3 inspection (`SNMPv3 Authentication` in `HPE iLO4 User Guide`_) is desired: * ``snmp_auth_user`` : The SNMPv3 user. * ``snmp_auth_prot_password`` : The auth protocol pass phrase. * ``snmp_auth_priv_password`` : The privacy protocol pass phrase. The following parameters are optional for SNMPv3 inspection: * ``snmp_auth_protocol`` : The Auth Protocol. The valid values are "MD5" and "SHA". The iLO default value is "MD5". * ``snmp_auth_priv_protocol`` : The Privacy protocol. The valid values are "AES" and "DES". The iLO default value is "DES". .. note:: If configuration values for ``ca_file``, ``client_port`` and ``client_timeout`` are not provided in the ``driver_info`` of the node, the corresponding config variables defined under ``[ilo]`` section in ironic.conf will be used. Prerequisites ============= * `proliantutils `_ is a python package which contains a set of modules for managing HPE ProLiant hardware. Install ``proliantutils`` module on the ironic conductor node. Minimum version required is 2.8.0:: $ pip install "proliantutils>=2.8.0" * ``ipmitool`` command must be present on the service node(s) where ``ironic-conductor`` is running. On most distros, this is provided as part of the ``ipmitool`` package. Please refer to `Hardware Inspection Support`_ for more information on recommended version. Different configuration for ilo hardware type ============================================= Glance Configuration ^^^^^^^^^^^^^^^^^^^^ 1. 
:glance-doc:`Configure Glance image service with its storage backend as Swift `. 2. Set a temp-url key for Glance user in Swift. For example, if you have configured Glance with user ``glance-swift`` and tenant as ``service``, then run the below command:: swift --os-username=service:glance-swift post -m temp-url-key:mysecretkeyforglance 3. Fill the required parameters in the ``[glance]`` section in ``/etc/ironic/ironic.conf``. Normally you would be required to fill in the following details:: [glance] swift_temp_url_key=mysecretkeyforglance swift_endpoint_url=https://10.10.1.10:8080 swift_api_version=v1 swift_account=AUTH_51ea2fb400c34c9eb005ca945c0dc9e1 swift_container=glance The details can be retrieved by running the below command: .. code-block:: bash $ swift --os-username=service:glance-swift stat -v | grep -i url StorageURL: http://10.10.1.10:8080/v1/AUTH_51ea2fb400c34c9eb005ca945c0dc9e1 Meta Temp-Url-Key: mysecretkeyforglance 4. Swift must be accessible with the same admin credentials configured in Ironic. For example, if Ironic is configured with the below credentials in ``/etc/ironic/ironic.conf``:: [keystone_authtoken] admin_password = password admin_user = ironic admin_tenant_name = service Ensure ``auth_version`` in ``keystone_authtoken`` to 2. Then, the below command should work.: .. code-block:: bash $ swift --os-username ironic --os-password password --os-tenant-name service --auth-version 2 stat Account: AUTH_22af34365a104e4689c46400297f00cb Containers: 2 Objects: 18 Bytes: 1728346241 Objects in policy "policy-0": 18 Bytes in policy "policy-0": 1728346241 Meta Temp-Url-Key: mysecretkeyforglance X-Timestamp: 1409763763.84427 X-Trans-Id: tx51de96a28f27401eb2833-005433924b Content-Type: text/plain; charset=utf-8 Accept-Ranges: bytes 5. Restart the Ironic conductor service:: $ service ironic-conductor restart Web server configuration on conductor ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * The HTTP(S) web server can be configured in many ways. 
For apache web server on Ubuntu, refer `here `_ * Following config variables need to be set in ``/etc/ironic/ironic.conf``: * ``use_web_server_for_images`` in ``[ilo]`` section:: [ilo] use_web_server_for_images = True * ``http_url`` and ``http_root`` in ``[deploy]`` section:: [deploy] # Ironic compute node's http root path. (string value) http_root=/httpboot # Ironic compute node's HTTP server URL. Example: # http://192.1.2.3:8080 (string value) http_url=http://192.168.0.2:8080 ``use_web_server_for_images``: If the variable is set to ``false``, the ``ilo-virtual-media`` boot interface uses swift containers to host the intermediate floppy image and the boot ISO. If the variable is set to ``true``, it uses the local web server for hosting the intermediate files. The default value for ``use_web_server_for_images`` is False. ``http_url``: The value for this variable is prefixed with the generated intermediate files to generate a URL which is attached in the virtual media. ``http_root``: It is the directory location to which ironic conductor copies the intermediate floppy image and the boot ISO. .. note:: HTTPS is strongly recommended over HTTP web server configuration for security enhancement. The ``ilo-virtual-media`` boot interface will send the instance's configdrive over an encrypted channel if web server is HTTPS enabled. However for ``ilo-uefi-https`` boot interface HTTPS webserver is mandatory as this interface only supports HTTPS URLs. Enable driver ============= 1. Build a deploy ISO (and kernel and ramdisk) image, see :ref:`deploy-ramdisk` 2. See `Glance Configuration`_ for configuring glance image service with its storage backend as ``swift``. 3. Upload this image to Glance:: glance image-create --name deploy-ramdisk.iso --disk-format iso --container-format bare < deploy-ramdisk.iso 4. 
Enable hardware type and hardware interfaces in ``/etc/ironic/ironic.conf``:: [DEFAULT] enabled_hardware_types = ilo enabled_bios_interfaces = ilo enabled_boot_interfaces = ilo-virtual-media,ilo-pxe,ilo-ipxe enabled_power_interfaces = ilo enabled_console_interfaces = ilo enabled_raid_interfaces = agent enabled_management_interfaces = ilo enabled_inspect_interfaces = ilo enabled_rescue_interfaces = agent 5. Restart the ironic conductor service:: $ service ironic-conductor restart Optional functionalities for the ``ilo`` hardware type ====================================================== Boot mode support ^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` supports automatic detection and setting of boot mode (Legacy BIOS or UEFI). * When boot mode capability is not configured: - If config variable ``default_boot_mode`` in ``[ilo]`` section of ironic configuration file is set to either 'bios' or 'uefi', then iLO driver uses that boot mode for provisioning the baremetal ProLiant servers. - If the pending boot mode is set on the node then iLO driver uses that boot mode for provisioning the baremetal ProLiant servers. - If the pending boot mode is not set on the node then iLO driver uses 'uefi' boot mode for UEFI capable servers and "bios" when UEFI is not supported. * When boot mode capability is configured, the driver sets the pending boot mode to the configured value. * Only one boot mode (either ``uefi`` or ``bios``) can be configured for the node. * If the operator wants a node to boot always in ``uefi`` mode or ``bios`` mode, then they may use ``capabilities`` parameter within ``properties`` field of an ironic node. 
To configure a node in ``uefi`` mode, then set ``capabilities`` as below:: baremetal node set --property capabilities='boot_mode:uefi' Nodes having ``boot_mode`` set to ``uefi`` may be requested by adding an ``extra_spec`` to the nova flavor:: nova flavor-key ironic-test-3 set capabilities:boot_mode="uefi" nova boot --flavor ironic-test-3 --image test-image instance-1 If ``capabilities`` is used in ``extra_spec`` as above, nova scheduler (``ComputeCapabilitiesFilter``) will match only ironic nodes which have the ``boot_mode`` set appropriately in ``properties/capabilities``. It will filter out rest of the nodes. The above facility for matching in nova can be used in heterogeneous environments where there is a mix of ``uefi`` and ``bios`` machines, and operator wants to provide a choice to the user regarding boot modes. If the flavor doesn't contain ``boot_mode`` then nova scheduler will not consider boot mode as a placement criteria, hence user may get either a BIOS or UEFI machine that matches with user specified flavors. The automatic boot ISO creation for UEFI boot mode has been enabled in Kilo. The manual creation of boot ISO for UEFI boot mode is also supported. For the latter, the boot ISO for the deploy image needs to be built separately and the deploy image's ``boot_iso`` property in glance should contain the glance UUID of the boot ISO. For building boot ISO, add ``iso`` element to the diskimage-builder command to build the image. For example:: disk-image-create ubuntu baremetal iso .. _`iLO UEFI Secure Boot Support`: UEFI Secure Boot Support ^^^^^^^^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` supports secure boot deploy, see :ref:`secure-boot` for details. iLO specific notes: In UEFI secure boot, digitally signed bootloader should be able to validate digital signatures of kernel during boot process. This requires that the bootloader contains the digital signatures of the kernel. 
For the ``ilo-virtual-media`` boot interface, it is recommended that ``boot_iso`` property for user image contains the glance UUID of the boot ISO. If ``boot_iso`` property is not updated in glance for the user image, it would create the ``boot_iso`` using bootloader from the deploy iso. This ``boot_iso`` will be able to boot the user image in UEFI secure boot environment only if the bootloader is signed and can validate digital signatures of user image kernel. For HPE ProLiant Gen9 servers, one can enroll public key using iLO System Utilities UI. Please refer to section ``Accessing Secure Boot options`` in `HP UEFI System Utilities User Guide `_. One can also refer to white paper on `Secure Boot for Linux on HP ProLiant servers `_ for additional details. For more up-to-date information, refer `iLO driver wiki page `_ .. _ilo_node_cleaning: Node Cleaning Support ^^^^^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` and ``ilo5`` supports node cleaning. For more information on node cleaning, see :ref:`cleaning` Supported **Automated** Cleaning Operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * The automated cleaning operations supported are: * ``reset_bios_to_default``: Resets system ROM settings to default. By default, enabled with priority 10. This clean step is supported only on Gen9 and above servers. * ``reset_secure_boot_keys_to_default``: Resets secure boot keys to manufacturer's defaults. This step is supported only on Gen9 and above servers. By default, enabled with priority 20 . * ``reset_ilo_credential``: Resets the iLO password, if ``ilo_change_password`` is specified as part of node's driver_info. By default, enabled with priority 30. * ``clear_secure_boot_keys``: Clears all secure boot keys. This step is supported only on Gen9 and above servers. By default, this step is disabled. * ``reset_ilo``: Resets the iLO. By default, this step is disabled. 
* ``erase_devices``: An inband clean step that performs disk erase on all the disks including the disks visible to OS as well as the raw disks visible to Smart Storage Administrator (SSA). This step supports erasing of the raw disks visible to SSA in Proliant servers only with the ramdisk created using diskimage-builder from Ocata release. By default, this step is disabled. See `Disk Erase Support`_ for more details. * For supported in-band cleaning operations, see :ref:`InbandvsOutOfBandCleaning`. * All the automated cleaning steps have an explicit configuration option for priority. In order to disable or change the priority of the automated clean steps, respective configuration option for priority should be updated in ironic.conf. * Updating clean step priority to 0, will disable that particular clean step and will not run during automated cleaning. * Configuration Options for the automated clean steps are listed under ``[ilo]`` and ``[deploy]`` section in ironic.conf :: [ilo] clean_priority_reset_ilo=0 clean_priority_reset_bios_to_default=10 clean_priority_reset_secure_boot_keys_to_default=20 clean_priority_clear_secure_boot_keys=0 clean_priority_reset_ilo_credential=30 [deploy] erase_devices_priority=0 For more information on node automated cleaning, see :ref:`automated_cleaning` Supported **Manual** Cleaning Operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * The manual cleaning operations supported are: ``activate_license``: Activates the iLO Advanced license. This is an out-of-band manual cleaning step associated with the ``management`` interface. See `Activating iLO Advanced license as manual clean step`_ for user guidance on usage. Please note that this operation cannot be performed using the ``ilo-virtual-media`` boot interface as it needs this type of advanced license already active to use virtual media to boot into to start cleaning operation. Virtual media is an advanced feature. 
If an advanced license is already active and the user wants to overwrite the current license key, for example in case of a multi-server activation key delivered with a flexible-quantity kit or after completing an Activation Key Agreement (AKA), then the driver can still be used for executing this cleaning step. ``clear_ca_certificates``: Removes the CA certificates from iLO. See `Removing CA certificates from iLO as manual clean step`_ for user guidance on usage. ``apply_configuration``: Applies given BIOS settings on the node. See `BIOS configuration support`_. This step is part of the ``bios`` interface. ``factory_reset``: Resets the BIOS settings on the node to factory defaults. See `BIOS configuration support`_. This step is part of the ``bios`` interface. ``create_configuration``: Applies RAID configuration on the node. See :ref:`raid` for more information. This step is part of the ``raid`` interface. ``delete_configuration``: Deletes RAID configuration on the node. See :ref:`raid` for more information. This step is part of the ``raid`` interface. ``update_firmware``: Updates the firmware of the devices. Also an out-of-band step associated with the ``management`` interface. See `Initiating firmware update as manual clean step`_ for user guidance on usage. The supported devices for firmware update are: ``ilo``, ``cpld``, ``power_pic``, ``bios`` and ``chassis``. Please refer to below table for their commonly used descriptions. .. csv-table:: :header: "Device", "Description" :widths: 30, 80 "``ilo``", "BMC for HPE ProLiant servers" "``cpld``", "System programmable logic device" "``power_pic``", "Power management controller" "``bios``", "HPE ProLiant System ROM" "``chassis``", "System chassis device" Some devices firmware cannot be updated via this method, such as: storage controllers, host bus adapters, disk drive firmware, network interfaces and Onboard Administrator (OA). 
``update_firmware_sum``: Updates all or list of user specified firmware components on the node using Smart Update Manager (SUM). It is an inband step associated with the ``management`` interface. See `Smart Update Manager (SUM) based firmware update`_ for more information on usage. ``security_parameters_update``: Updates the Security Parameters. See `Updating security parameters as manual clean step`_ for user guidance on usage. The supported security parameters for this clean step are: ``Password_Complexity``, ``RequiredLoginForiLORBSU``, ``IPMI/DCMI_Over_LAN``, ``RequireHostAuthentication`` and ``Secure_Boot``. ``update_minimum_password_length``: Updates the Minimum Password Length security parameter. See `Update Minimum Password Length security parameter as manual clean step`_ for user guidance on usage. ``update_auth_failure_logging_threshold``: Updates the Authentication Failure Logging security parameter. See `Update Authentication Failure Logging security parameter as manual clean step`_ for user guidance on usage. * iLO with firmware version 1.5 is minimally required to support all the operations. For more information on node manual cleaning, see :ref:`manual_cleaning` Node Deployment Customization ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` and ``ilo5`` supports customization of node deployment via deploy templates, see :doc:`/admin/node-deployment`. The supported deploy steps are: * ``apply_configuration``: Applies given BIOS settings on the node. See `BIOS configuration support`_. This step is part of the ``bios`` interface. * ``factory_reset``: Resets the BIOS settings on the node to factory defaults. See `BIOS configuration support`_. This step is part of the ``bios`` interface. * ``reset_bios_to_default``: Resets system ROM settings to default. This step is supported only on Gen9 and above servers. This step is part of the ``management`` interface. * ``reset_secure_boot_keys_to_default``: Resets secure boot keys to manufacturer's defaults. 
This step is supported only on Gen9 and above servers. This step is part of the ``management`` interface. * ``reset_ilo_credential``: Resets the iLO password. The password need to be specified in ``ilo_password`` argument of the step. This step is part of the ``management`` interface. * ``clear_secure_boot_keys``: Clears all secure boot keys. This step is supported only on Gen9 and above servers. This step is part of the ``management`` interface. * ``reset_ilo``: Resets the iLO. This step is part of the ``management`` interface. * ``update_firmware``: Updates the firmware of the devices. This step is part of the ``management`` interface. See `Initiating firmware update as manual clean step`_ for user guidance on usage. The supported devices for firmware update are: ``ilo``, ``cpld``, ``power_pic``, ``bios`` and ``chassis``. This step is part of ``management`` interface. Please refer to below table for their commonly used descriptions. .. csv-table:: :header: "Device", "Description" :widths: 30, 80 "``ilo``", "BMC for HPE ProLiant servers" "``cpld``", "System programmable logic device" "``power_pic``", "Power management controller" "``bios``", "HPE ProLiant System ROM" "``chassis``", "System chassis device" Some devices firmware cannot be updated via this method, such as: storage controllers, host bus adapters, disk drive firmware, network interfaces and Onboard Administrator (OA). * ``flash_firmware_sum``: Updates all or list of user specified firmware components on the node using Smart Update Manager (SUM). It is an inband step associated with the ``management`` interface. See `Smart Update Manager (SUM) based firmware update`_ for more information on usage. * ``apply_configuration``: Applies RAID configuration on the node. See :ref:`raid` for more information. This step is part of the ``raid`` interface. 
Example of using deploy template with the Compute service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create a deploy template with a single step: .. code-block:: console baremetal deploy template create \ CUSTOM_HYPERTHREADING_ON \ --steps '[{"interface": "bios", "step": "apply_configuration", "args": {"settings": [{"name": "ProcHyperthreading", "value": "Enabled"}]}, "priority": 150}]' Add the trait ``CUSTOM_HYPERTHREADING_ON`` to the node represented by ``$node_ident``: .. code-block:: console baremetal node add trait $node_ident CUSTOM_HYPERTHREADING_ON Update the flavor ``bm-hyperthreading-on`` in the Compute service with the following property: .. code-block:: console openstack flavor set --property trait:CUSTOM_HYPERTHREADING_ON=required bm-hyperthreading-on Creating a Compute instance with this flavor will ensure that the instance is scheduled only to Bare Metal nodes with the ``CUSTOM_HYPERTHREADING_ON`` trait. When an instance is created using the ``bm-hyperthreading-on`` flavor, then the deploy steps of deploy template ``CUSTOM_HYPERTHREADING_ON`` will be executed during the deployment of the scheduled node, causing Hyperthreading to be enabled in the node's BIOS configuration. .. _ilo-inspection: Hardware Inspection Support ^^^^^^^^^^^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` supports hardware inspection. .. note:: * The disk size is returned by RIBCL/RIS only when RAID is preconfigured on the storage. If the storage is Direct Attached Storage, then RIBCL/RIS fails to get the disk size. * The SNMPv3 inspection gets disk size for all types of storages. If RIBCL/RIS is unable to get disk size and SNMPv3 inspection is requested, the proliantutils does SNMPv3 inspection to get the disk size. If proliantutils is unable to get the disk size, it raises an error. This feature is available in proliantutils release version >= 2.2.0. * The iLO must be updated with SNMPv3 authentication details. 
Please refer to the section `SNMPv3 Authentication` in `HPE iLO4 User Guide`_ for setting up authentication details on iLO. The following parameters are mandatory to be given in driver_info for SNMPv3 inspection: * ``snmp_auth_user`` : The SNMPv3 user. * ``snmp_auth_prot_password`` : The auth protocol pass phrase. * ``snmp_auth_priv_password`` : The privacy protocol pass phrase. The following parameters are optional for SNMPv3 inspection: * ``snmp_auth_protocol`` : The Auth Protocol. The valid values are "MD5" and "SHA". The iLO default value is "MD5". * ``snmp_auth_priv_protocol`` : The Privacy protocol. The valid values are "AES" and "DES". The iLO default value is "DES". The inspection process will discover the following essential properties (properties required for scheduling deployment): * ``memory_mb``: memory size * ``cpus``: number of cpus * ``cpu_arch``: cpu architecture * ``local_gb``: disk size Inspection can also discover the following extra capabilities for iLO driver: * ``ilo_firmware_version``: iLO firmware version * ``rom_firmware_version``: ROM firmware version * ``secure_boot``: secure boot is supported or not. The possible values are 'true' or 'false'. The value is returned as 'true' if secure boot is supported by the server. * ``server_model``: server model * ``pci_gpu_devices``: number of gpu devices connected to the bare metal. * ``nic_capacity``: the max speed of the embedded NIC adapter. * ``sriov_enabled``: true, if server has the SRIOV supporting NIC. * ``has_rotational``: true, if server has HDD disk. * ``has_ssd``: true, if server has SSD disk. * ``has_nvme_ssd``: true, if server has NVME SSD disk. * ``cpu_vt``: true, if server supports cpu virtualization. * ``hardware_supports_raid``: true, if RAID can be configured on the server using RAID controller. * ``nvdimm_n``: true, if server has NVDIMM_N type of persistent memory. * ``persistent_memory``: true, if server has persistent memory. 
* ``logical_nvdimm_n``: true, if server has logical NVDIMM_N configured. * ``rotational_drive__rpm``: The capabilities ``rotational_drive_4800_rpm``, ``rotational_drive_5400_rpm``, ``rotational_drive_7200_rpm``, ``rotational_drive_10000_rpm`` and ``rotational_drive_15000_rpm`` are set to true if the server has HDD drives with speed of 4800, 5400, 7200, 10000 and 15000 rpm respectively. * ``logical_raid_level_``: The capabilities ``logical_raid_level_0``, ``logical_raid_level_1``, ``logical_raid_level_2``, ``logical_raid_level_5``, ``logical_raid_level_6``, ``logical_raid_level_10``, ``logical_raid_level_50`` and ``logical_raid_level_60`` are set to true if any of the raid levels among 0, 1, 2, 5, 6, 10, 50 and 60 are configured on the system. * ``overall_security_status``: ``Ok`` or ``Risk`` or ``Ignored`` as returned by iLO security dashboard. iLO computes the overall security status by evaluating the security status for each of the security parameters. Admin needs to fix the actual parameters and then re-inspect so that iLO can recompute the overall security status. If the all security params, whose ``security_status`` is ``Risk``, have the ``Ignore`` field set to ``True``, then iLO sets the overall security status value as ``Ignored``. All the security params must have the ``security_status`` as ``Ok`` for the ``overall_security_status`` to have the value as ``Ok``. * ``last_firmware_scan_status``: ``Ok`` or ``Risk`` as returned by iLO security dashboard. This denotes security status of the last firmware scan done on the system. If it is ``Risk``, the recommendation is to run clean_step ``update_firmware_sum`` without any specific firmware components so that firmware is updated for all the components using latest SPP (Service Provider Pack) ISO and then re-inspect to get the security status again. * ``security_override_switch``: ``Ok`` or ``Risk`` as returned by iLO security dashboard. This is disable/enable login to the iLO using credentials. 
This can be toggled only by physical visit to the bare metal. * ``gpu__count``: Integer value. The capability name is dynamically formed as gpu__count. The vendor name is replaced in the "". If the vendor name is not returned by the hardware, then vendor ID in hexadecimal form is replaced in the capability name. Examples: {'gpu_Nvidia_count': 1}, {'gpu_0x102b_count': 1}. * ``gpu__count``: Integer value. The capability name is formed dynamically by replacing the gpu device name as returned by ilo in "". Examples: {'gpu_Nvidia_Tesla_M10_count': 1}, {'gpu_Embedded_Video_Controller_count': 1} * ``gpu_``: Boolean. The capability name is formed dynamically by replacing the gpu device name as returned by ilo in "". Examples: {'gpu_Nvidia_Tesla_M10': True}, {'gpu_Embedded_Video_Controller': True} .. note:: * The capability ``nic_capacity`` can only be discovered if ipmitool version >= 1.8.15 is used on the conductor. The latest version can be downloaded from `here `__. * The iLO firmware version needs to be 2.10 or above for nic_capacity to be discovered. * To discover IPMI based attributes you need to enable iLO feature 'IPMI/DCMI over LAN Access' on `iLO4 `_ and `iLO5 `_ management engine. * The proliantutils returns only active NICs for Gen10 ProLiant HPE servers. The user would need to delete the ironic ports corresponding to inactive NICs for Gen8 and Gen9 servers as proliantutils returns all the discovered (active and otherwise) NICs for Gen8 and Gen9 servers and ironic ports are created for all of them. Inspection logs a warning if the node under inspection is Gen8 or Gen9. * The security dashboard capabilities are applicable only for Gen10 ProLiant HPE servers and above. To fix the security dashboard parameters value from ``Risk`` to ``Ok``, user need to fix the parameters separately and re-inspect to see the security status of the parameters. 
The operator can specify these capabilities in nova flavor for node to be selected for scheduling:: nova flavor-key my-baremetal-flavor set capabilities:server_model=" Gen8" nova flavor-key my-baremetal-flavor set capabilities:nic_capacity="10Gb" nova flavor-key my-baremetal-flavor set capabilities:ilo_firmware_version=" 2.10" nova flavor-key my-baremetal-flavor set capabilities:has_ssd="true" See :ref:`capabilities-discovery` for more details and examples. Swiftless deploy for intermediate images ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` with ``ilo-virtual-media`` as boot interface can deploy and boot the server with and without ``swift`` being used for hosting the intermediate temporary floppy image (holding metadata for deploy kernel and ramdisk) and the boot ISO. A local HTTP(S) web server on each conductor node needs to be configured. Please refer to `Web server configuration on conductor`_ for more information. The HTTPS web server needs to be enabled (instead of HTTP web server) in order to send management information and images in encrypted channel over HTTPS. .. note:: This feature assumes that the user inputs are on Glance which uses swift as backend. If swift dependency has to be eliminated, please refer to `HTTP(S) Based Deploy Support`_ also. Deploy Process ~~~~~~~~~~~~~~ Please refer to `Netboot in swiftless deploy for intermediate images`_ for partition image support and `Localboot in swiftless deploy for intermediate images`_ for whole disk image support. HTTP(S) Based Deploy Support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The user input for the images given in ``driver_info`` like ``deploy_iso``, ``deploy_kernel`` and ``deploy_ramdisk`` and in ``instance_info`` like ``image_source``, ``kernel``, ``ramdisk`` and ``boot_iso`` may also be given as HTTP(S) URLs. The HTTP(S) web server can be configured in many ways. For the Apache web server on Ubuntu, refer `here `_. 
The web server may reside on a different system than the conductor nodes, but its URL must be reachable by the conductor and the bare metal nodes. Deploy Process ~~~~~~~~~~~~~~ Please refer to `Netboot with HTTP(S) based deploy`_ for partition image boot and `Localboot with HTTP(S) based deploy`_ for whole disk image boot. Support for iLO driver with Standalone Ironic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ It is possible to use ironic as standalone services without other OpenStack services. The ``ilo`` hardware type can be used in standalone ironic. This feature is referred to as ``iLO driver with standalone ironic`` in this document. Configuration ~~~~~~~~~~~~~ The HTTP(S) web server needs to be configured as described in `HTTP(S) Based Deploy Support`_ and `Web server configuration on conductor`_ needs to be configured for hosting intermediate images on conductor as described in `Swiftless deploy for intermediate images`_. Deploy Process ============== .. note:: Network boot is deprecated and will be removed in the Zed release. .. TODO(dtantsur): review these diagrams to exclude netboot. Netboot with glance and swift ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Glance; Conductor; Baremetal; Swift; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Glance [label = "Download user image"]; Conductor -> Glance [label = "Get the metadata for deploy ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for deploy ISO"]; Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"]; Conductor -> Swift [label = "Uploads the FAT32 image"]; Conductor -> Conductor [label = "Generates swift tempURL for FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image swift tempURL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Swift [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> Swift [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to disk"]; Conductor -> Conductor [label = "Generates the boot ISO"]; Conductor -> Swift [label = "Uploads the boot ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for boot ISO"]; Conductor -> iLO [label = "Attaches boot ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets boot device to CDROM"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> iLO [label = "Power on the node"]; iLO -> Swift [label = "Downloads boot ISO"]; iLO -> Baremetal [label = "Boots the instance kernel/ramdisk from iLO virtual media CDROM"]; Baremetal -> Baremetal [label = "Instance 
kernel finds root partition and continues booting from disk"]; } Localboot with glance and swift for partition images ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. seqdiag:: :scale: 80 diagram { Glance; Conductor; Baremetal; Swift; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Glance [label = "Get the metadata for deploy ISO"]; Glance -> Conductor [label = "Returns the metadata for deploy ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for deploy ISO"]; Conductor -> Conductor [label = "Creates the FAT32 image containing ironic API URL and driver name"]; Conductor -> Swift [label = "Uploads the FAT32 image"]; Conductor -> Conductor [label = "Generates swift tempURL for FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image swift tempURL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Swift [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> Swift [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to root partition"]; IPA -> IPA [label = "Installs boot loader"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> Baremetal [label = "Sets boot device to disk"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> iLO [label = "Power on the node"]; Baremetal -> Baremetal [label = "Boot user image from disk"]; } Localboot with glance and swift ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Glance; Conductor; Baremetal; Swift; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Glance [label = "Get the metadata for deploy ISO"]; Glance -> Conductor [label = "Returns the metadata for deploy ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for deploy ISO"]; Conductor -> Conductor [label = "Creates the FAT32 image containing ironic API URL and driver name"]; Conductor -> Swift [label = "Uploads the FAT32 image"]; Conductor -> Conductor [label = "Generates swift tempURL for FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image swift tempURL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Swift [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> Swift [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to disk"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> Baremetal [label = "Sets boot device to disk"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> iLO [label = "Power on the node"]; Baremetal -> Baremetal [label = "Boot user image from disk"]; } Netboot in swiftless deploy for intermediate images ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Glance; Conductor; Baremetal; ConductorWebserver; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Glance [label = "Download user image"]; Conductor -> Glance [label = "Get the metadata for deploy ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for deploy ISO"]; Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"]; Conductor -> ConductorWebserver [label = "Uploads the FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image URL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Swift [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> ConductorWebserver [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to root partition"]; Conductor -> Conductor [label = "Generates the boot ISO"]; Conductor -> ConductorWebserver [label = "Uploads the boot ISO"]; Conductor -> iLO [label = "Attaches boot ISO URL as virtual media CDROM"]; Conductor -> iLO [label = "Sets boot device to CDROM"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> iLO [label = "Power on the node"]; iLO -> ConductorWebserver [label = "Downloads boot ISO"]; iLO -> Baremetal [label = "Boots the instance kernel/ramdisk from iLO virtual media CDROM"]; Baremetal -> Baremetal [label = "Instance kernel finds root partition and continues booting from disk"]; } Localboot in swiftless deploy for 
intermediate images ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. seqdiag:: :scale: 80 diagram { Glance; Conductor; Baremetal; ConductorWebserver; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Glance [label = "Get the metadata for deploy ISO"]; Glance -> Conductor [label = "Returns the metadata for deploy ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for deploy ISO"]; Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"]; Conductor -> ConductorWebserver [label = "Uploads the FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image URL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Swift [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> Swift [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to disk"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> Baremetal [label = "Sets boot device to disk"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> Baremetal [label = "Power on the node"]; Baremetal -> Baremetal [label = "Boot user image from disk"]; } Netboot with HTTP(S) based deploy ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Webserver; Conductor; Baremetal; Swift; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Webserver [label = "Download user image"]; Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"]; Conductor -> Swift [label = "Uploads the FAT32 image"]; Conductor -> Conductor [label = "Generates swift tempURL for FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image swift tempURL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO URL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Webserver [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> Swift [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to disk"]; Conductor -> Conductor [label = "Generates the boot ISO"]; Conductor -> Swift [label = "Uploads the boot ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for boot ISO"]; Conductor -> iLO [label = "Attaches boot ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets boot device to CDROM"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> iLO [label = "Power on the node"]; iLO -> Swift [label = "Downloads boot ISO"]; iLO -> Baremetal [label = "Boots the instance kernel/ramdisk from iLO virtual media CDROM"]; Baremetal -> Baremetal [label = "Instance kernel finds root partition and continues booting from disk"]; } Localboot with HTTP(S) based deploy ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Webserver; Conductor; Baremetal; Swift; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Conductor [label = "Creates the FAT32 image containing ironic API URL and driver name"]; Conductor -> Swift [label = "Uploads the FAT32 image"]; Conductor -> Conductor [label = "Generates swift tempURL for FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image swift tempURL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO URL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Webserver [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> Webserver [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to disk"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> Baremetal [label = "Sets boot device to disk"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> Baremetal [label = "Power on the node"]; Baremetal -> Baremetal [label = "Boot user image from disk"]; } Netboot in standalone ironic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Webserver; Conductor; Baremetal; ConductorWebserver; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Webserver [label = "Download user image"]; Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"]; Conductor -> ConductorWebserver[label = "Uploads the FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image URL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO URL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Webserver [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> ConductorWebserver [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to root partition"]; Conductor -> Conductor [label = "Generates the boot ISO"]; Conductor -> ConductorWebserver [label = "Uploads the boot ISO"]; Conductor -> iLO [label = "Attaches boot ISO URL as virtual media CDROM"]; Conductor -> iLO [label = "Sets boot device to CDROM"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> iLO [label = "Power on the node"]; iLO -> ConductorWebserver [label = "Downloads boot ISO"]; iLO -> Baremetal [label = "Boots the instance kernel/ramdisk from iLO virtual media CDROM"]; Baremetal -> Baremetal [label = "Instance kernel finds root partition and continues booting from disk"]; } Localboot in standalone ironic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Webserver; Conductor; Baremetal; ConductorWebserver; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"]; Conductor -> ConductorWebserver [label = "Uploads the FAT32 image"]; Conductor -> Conductor [label = "Generates URL for FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image URL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO URL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Webserver [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> Webserver [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to disk"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> Baremetal [label = "Sets boot device to disk"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> Baremetal [label = "Power on the node"]; Baremetal -> Baremetal [label = "Boot user image from disk"]; } Activating iLO Advanced license as manual clean step ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ iLO driver can activate the iLO Advanced license key as a manual cleaning step. Any manual cleaning step can only be initiated when a node is in the ``manageable`` state. Once the manual cleaning is finished, the node will be put in the ``manageable`` state again. User can follow steps from :ref:`manual_cleaning` to initiate manual cleaning operation on a node. 
An example of a manual clean step with ``activate_license`` as the only clean step could be:: "clean_steps": [{ "interface": "management", "step": "activate_license", "args": { "ilo_license_key": "ABC12-XXXXX-XXXXX-XXXXX-YZ345" } }] The different attributes of ``activate_license`` clean step are as follows: .. csv-table:: :header: "Attribute", "Description" :widths: 30, 120 "``interface``", "Interface of clean step, here ``management``" "``step``", "Name of clean step, here ``activate_license``" "``args``", "Keyword-argument entry (: ) being passed to clean step" "``args.ilo_license_key``", "iLO Advanced license key to activate enterprise features. This is mandatory." Removing CA certificates from iLO as manual clean step ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ iLO driver can remove the invalidated CA certificates as a manual step. Any manual cleaning step can only be initiated when a node is in the ``manageable`` state. Once the manual cleaning is finished, the node will be put in the ``manageable`` state again. User can follow steps from :ref:`manual_cleaning` to initiate manual cleaning operation on a node. An example of a manual clean step with ``clear_ca_certificates`` as the only clean step could be:: "clean_steps": [{ "interface": "management", "step": "clear_ca_certificates", "args": { "certificate_files" : ["/path/to/certsA", "/path/to/certsB"] } }] The different attributes of ``clear_ca_certificates`` clean step are as follows: .. csv-table:: :header: "Attribute", "Description" :widths: 30, 120 "``interface``", "Interface of clean step, here ``management``" "``step``", "Name of clean step, here ``clear_ca_certificates``" "``args``", "Keyword-argument entry (: ) being passed to clean step" "``args.certificate_files``", "List of CA certificates which are to be removed. " "This is mandatory." 
Initiating firmware update as manual clean step ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ iLO driver can invoke secure firmware update as a manual cleaning step. Any manual cleaning step can only be initiated when a node is in the ``manageable`` state. Once the manual cleaning is finished, the node will be put in the ``manageable`` state again. A user can follow steps from :ref:`manual_cleaning` to initiate manual cleaning operation on a node. An example of a manual clean step with ``update_firmware`` as the only clean step could be:: "clean_steps": [{ "interface": "management", "step": "update_firmware", "args": { "firmware_update_mode": "ilo", "firmware_images":[ { "url": "file:///firmware_images/ilo/1.5/CP024444.scexe", "checksum": "a94e683ea16d9ae44768f0a65942234d", "component": "ilo" }, { "url": "swift://firmware_container/cpld2.3.rpm", "checksum": "", "component": "cpld" }, { "url": "http://my_address:port/firmwares/bios_vLatest.scexe", "checksum": "", "component": "bios" }, { "url": "https://my_secure_address_url/firmwares/chassis_vLatest.scexe", "checksum": "", "component": "chassis" }, { "url": "file:///home/ubuntu/firmware_images/power_pic/pmc_v3.0.bin", "checksum": "", "component": "power_pic" } ] } }] The different attributes of ``update_firmware`` clean step are as follows: .. csv-table:: :header: "Attribute", "Description" :widths: 30, 120 "``interface``", "Interface of clean step, here ``management``" "``step``", "Name of clean step, here ``update_firmware``" "``args``", "Keyword-argument entry (: ) being passed to clean step" "``args.firmware_update_mode``", "Mode (or mechanism) of out-of-band firmware update. Supported value is ``ilo``. This is mandatory." "``args.firmware_images``", "Ordered list of dictionaries of images to be flashed. This is mandatory." Each firmware image block is represented by a dictionary (JSON), in the form:: { "url": "", "checksum": "", "component": "" } All the fields in the firmware image block are mandatory. 
* The different types of firmware url schemes supported are: ``file``, ``http``, ``https`` and ``swift``. .. note:: This feature assumes that while using ``file`` url scheme the file path is on the conductor controlling the node. .. note:: The ``swift`` url scheme assumes the swift account of the ``service`` project. The ``service`` project (tenant) is a special project created in the Keystone system designed for the use of the core OpenStack services. When Ironic makes use of Swift for storage purpose, the account is generally ``service`` and the container is generally ``ironic`` and ``ilo`` driver uses a container named ``ironic_ilo_container`` for their own purpose. .. note:: While using firmware files with a ``.rpm`` extension, make sure the commands ``rpm2cpio`` and ``cpio`` are present on the conductor, as they are utilized to extract the firmware image from the package. * The firmware components that can be updated are: ``ilo``, ``cpld``, ``power_pic``, ``bios`` and ``chassis``. * The firmware images will be updated in the order given by the operator. If there is any error during processing of any of the given firmware images provided in the list, none of the firmware updates will occur. The processing error could happen during image download, image checksum verification or image extraction. The logic is to process each of the firmware files and update them on the devices only if all the files are processed successfully. If, during the update (uploading and flashing) process, an update fails, then the remaining updates, if any, in the list will be aborted. But it is recommended to triage and fix the failure and re-attempt the manual clean step ``update_firmware`` for the aborted ``firmware_images``. The devices for which the firmwares have been updated successfully would start functioning using their newly updated firmware. 
* As troubleshooting guidance on the complete process, check Ironic conductor logs carefully to see if there are any firmware processing or update related errors, which may help in root causing or gaining an understanding of where things were left off or where things failed.
The URL schemes supported are ``http``, ``https`` and ``swift``. * ``checksum``: MD5 checksum of SPP ISO to verify the image. It is mandatory. * ``components``: List of filenames of the firmware components to be flashed. It is optional. If not provided, the firmware update is performed on all the firmware components. The step performs an update on all or a list of firmware components and returns the SUM log files. The log files include ``hpsum_log.txt`` and ``hpsum_detail_log.txt`` which holds the information about firmware components, firmware version for each component and their update status. The log object will be named with the following pattern:: [_]_update_firmware_sum_.tar.gz or [_]_flash_firmware_sum_.tar.gz Refer to :ref:`retrieve_deploy_ramdisk_logs` for more information on enabling and viewing the logs returned from the ramdisk. An example of ``update_firmware_sum`` clean step: .. code-block:: json { "interface": "management", "step": "update_firmware_sum", "args": { "url": "http://my_address:port/SPP.iso", "checksum": "abcdefxyz", "components": ["CP024356.scexe", "CP008097.exe"] } } The step fails if there is any error in the processing of step arguments. The processing error could happen during validation of components' file extension, image download, image checksum verification or image extraction. In case of a failure, check Ironic conductor logs carefully to see if there are any validation or firmware processing related errors which may help in root cause analysis or gaining an understanding of where things were left off or where things failed. You can then fix or work around and then try again. .. warning:: This feature is officially supported only with RHEL and SUSE based IPA ramdisk. Refer to `SUM`_ for supported OS versions for specific SUM version. .. note:: Refer `Guidelines for SPP ISO`_ for steps to get SPP (Service Pack for ProLiant) ISO. 
Updating security parameters as manual clean step ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ iLO driver can invoke security parameters update as a manual clean step. Any manual cleaning step can only be initiated when a node is in the ``manageable`` state. Once the manual cleaning is finished, the node will be put in the ``manageable`` state again. A user can follow steps from :ref:`manual_cleaning` to initiate manual cleaning operation on a node. This feature is only supported for iLO5 based hardware. An example of a manual clean step with ``security_parameters_update`` as the only clean step could be:: "clean_steps": [{ "interface": "management", "step": "security_parameters_update", "args": { "security_parameters":[ { "param": "password_complexity", "enable": "True", "ignore": "False" }, { "param": "require_login_for_ilo_rbsu", "enable": "True", "ignore": "False" }, { "param": "ipmi_over_lan", "enable": "True", "ignore": "False" }, { "param": "secure_boot", "enable": "True", "ignore": "False" }, { "param": "require_host_authentication", "enable": "True", "ignore": "False" } ] } }] The different attributes of ``security_parameters_update`` clean step are as follows: .. csv-table:: :header: "Attribute", "Description" :widths: 30, 120 "``interface``", "Interface of clean step, here ``management``" "``step``", "Name of clean step, here ``security_parameters_update``" "``args``", "Keyword-argument entry (: ) being passed to clean step" "``args.security_parameters``", "Ordered list of dictionaries of security parameters to be updated. This is mandatory." Each security parameter block is represented by a dictionary (JSON), in the form:: { "param": "", "enable": "security parameter to be enabled/disabled", "ignore": "security parameter status to be ignored or not" } In all of these fields, ``param`` field is mandatory. Remaining fields are boolean and are optional. 
If user doesn't pass any value then for ``enable`` field the default will be True and for ``ignore`` field default will be False. * The Security Parameters which are supported for this clean step are: ``Password_Complexity``, ``RequiredLoginForiLORBSU``, ``RequireHostAuthentication``, ``IPMI/DCMI_Over_LAN`` and ``Secure_Boot``. Update Minimum Password Length security parameter as manual clean step ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ iLO driver can invoke ``Minimum Password Length`` security parameter update as a manual clean step. This feature is only supported for iLO5 based hardware. An example of a manual clean step with ``update_minimum_password_length`` as the only clean step could be:: "clean_steps": [{ "interface": "management", "step": "update_minimum_password_length", "args": { "password_length": "8", "ignore": "False" } }] Both the arguments ``password_length`` and ``ignore`` are optional. The accepted values for password_length are 0 to 39. If user doesn't pass any value, the default value for password_length will be 8 and for ignore the default value be False. Update Authentication Failure Logging security parameter as manual clean step ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ iLO driver can invoke ``Authentication Failure Logging`` security parameter update as a manual clean step. This feature is only supported for iLO5 based hardware. An example of a manual clean step with ``Authentication Failure Logging`` as the only clean step could be:: "clean_steps": [{ "interface": "management", "step": "update_auth_failure_logging_threshold", "args": { "logging_threshold": "1", "ignore": "False" } }] Both the arguments ``logging_threshold`` and ``ignore`` are optional. The accepted values for logging_threshold are 0 to 5. If user doesn't pass any value, the default value for logging_threshold will be 1 and for ignore the default value be False. 
If user passes the value of logging_threshold as 0, the Authentication Failure Logging security parameter will be disabled. RAID Support ^^^^^^^^^^^^ The inband RAID functionality is supported by iLO driver. See :ref:`raid` for more information. Bare Metal service update node with following information after successful configuration of RAID: * Node ``properties/local_gb`` is set to the size of root volume. * Node ``properties/root_device`` is filled with ``wwn`` details of root volume. It is used by iLO driver as root device hint during provisioning. * The value of raid level of root volume is added as ``raid_level`` capability to the node's ``capabilities`` parameter within ``properties`` field. The operator can specify the ``raid_level`` capability in nova flavor for node to be selected for scheduling:: nova flavor-key ironic-test set capabilities:raid_level="1+0" nova boot --flavor ironic-test --image test-image instance-1 .. _DIB_raid_support: DIB support for Proliant Hardware Manager ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Install ``ironic-python-agent-builder`` following the guide [1]_ To create an agent ramdisk with ``Proliant Hardware Manager``, use the ``proliant-tools`` element in DIB:: ironic-python-agent-builder -o proliant-agent-ramdisk -e proliant-tools fedora Disk Erase Support ^^^^^^^^^^^^^^^^^^ ``erase_devices`` is an inband clean step supported by iLO driver. It performs erase on all the disks including the disks visible to OS as well as the raw disks visible to the Smart Storage Administrator (SSA). This inband clean step requires ``ssacli`` utility starting from version ``2.60-19.0`` to perform the erase on physical disks. See the `ssacli documentation`_ for more information on ssacli utility and different erase methods supported by SSA. The disk erasure via ``shred`` is used to erase disks visible to the OS and its implementation is available in Ironic Python Agent. 
The raw disks connected to the Smart Storage Controller are erased using Sanitize erase which is a ssacli supported erase method. If Sanitize erase is not supported on the Smart Storage Controller the disks are erased using One-pass erase (overwrite with zeros). This clean step is supported when the agent ramdisk contains the ``Proliant Hardware Manager`` from the proliantutils version 2.3.0 or higher. This clean step is performed as part of automated cleaning and it is disabled by default. See :ref:`InbandvsOutOfBandCleaning` for more information on enabling/disabling a clean step. Install ``ironic-python-agent-builder`` following the guide [1]_ To create an agent ramdisk with ``Proliant Hardware Manager``, use the ``proliant-tools`` element in DIB:: ironic-python-agent-builder -o proliant-agent-ramdisk -e proliant-tools fedora See the `proliant-tools`_ for more information on creating agent ramdisk with ``proliant-tools`` element in DIB. Firmware based UEFI iSCSI boot from volume support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ With Gen9 (UEFI firmware version 1.40 or higher) and Gen10 HPE Proliant servers, the driver supports firmware based UEFI boot of an iSCSI cinder volume. This feature requires the node to be configured to boot in ``UEFI`` boot mode, as well as user image should be ``UEFI`` bootable image, and ``PortFast`` needs to be enabled in switch configuration for immediate spanning tree forwarding state so it wouldn't take much time setting the iSCSI target as persistent device. The driver does not support this functionality when in ``bios`` boot mode. In case the node is configured with ``ilo-pxe`` or ``ilo-ipxe`` as boot interface and the boot mode configured on the bare metal is ``bios``, the iscsi boot from volume is performed using iPXE. See :doc:`/admin/boot-from-volume` for more details. 
To use this feature, configure the boot mode of the bare metal to ``uefi`` and configure the corresponding ironic node using the steps given in :doc:`/admin/boot-from-volume`. In a cloud environment with nodes configured to boot from ``bios`` and ``uefi`` boot modes, the virtual media driver only supports uefi boot mode, and that attempting to use iscsi boot at the same time with a bios volume will result in an error. BIOS configuration support ^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``ilo`` and ``ilo5`` hardware types support ``ilo`` BIOS interface. The support includes providing manual clean steps *apply_configuration* and *factory_reset* to manage supported BIOS settings on the node. See :ref:`bios` for more details and examples. .. note:: Prior to the Stein release the user is required to reboot the node manually in order for the settings to take into effect. Starting with the Stein release, iLO drivers reboot the node after running clean steps related to the BIOS configuration. The BIOS settings are cached and the clean step is marked as success only if all the requested settings are applied without any failure. If application of any of the settings fails, the clean step is marked as failed and the settings are not cached. Configuration ~~~~~~~~~~~~~ Following are the supported BIOS settings and the corresponding brief description for each of the settings. For a detailed description please refer to `HPE Integrated Lights-Out REST API Documentation `_. - ``AdvancedMemProtection``: Configure additional memory protection with ECC (Error Checking and Correcting). Allowed values are ``AdvancedEcc``, ``OnlineSpareAdvancedEcc``, ``MirroredAdvancedEcc``. - ``AutoPowerOn``: Configure the server to automatically power on when AC power is applied to the system. Allowed values are ``AlwaysPowerOn``, ``AlwaysPowerOff``, ``RestoreLastState``. - ``BootMode``: Select the boot mode of the system. 
Allowed values are ``Uefi``, ``LegacyBios`` - ``BootOrderPolicy``: Configure how the system attempts to boot devices per the Boot Order when no bootable device is found. Allowed values are ``RetryIndefinitely``, ``AttemptOnce``, ``ResetAfterFailed``. - ``CollabPowerControl``: Enables the Operating System to request processor frequency changes even if the Power Regulator option on the server configured for Dynamic Power Savings Mode. Allowed values are ``Enabled``, ``Disabled``. - ``DynamicPowerCapping``: Configure when the System ROM executes power calibration during the boot process. Allowed values are ``Enabled``, ``Disabled``, ``Auto``. - ``DynamicPowerResponse``: Enable the System BIOS to control processor performance and power states depending on the processor workload. Allowed values are ``Fast``, ``Slow``. - ``IntelligentProvisioning``: Enable or disable the Intelligent Provisioning functionality. Allowed values are ``Enabled``, ``Disabled``. - ``IntelPerfMonitoring``: Exposes certain chipset devices that can be used with the Intel Performance Monitoring Toolkit. Allowed values are ``Enabled``, ``Disabled``. - ``IntelProcVtd``: Hypervisor or operating system supporting this option can use hardware capabilities provided by Intel's Virtualization Technology for Directed I/O. Allowed values are ``Enabled``, ``Disabled``. - ``IntelQpiFreq``: Set the QPI Link frequency to a lower speed. Allowed values are ``Auto``, ``MinQpiSpeed``. - ``IntelTxt``: Option to modify Intel TXT support. Allowed values are ``Enabled``, ``Disabled``. - ``PowerProfile``: Set the power profile to be used. Allowed values are ``BalancedPowerPerf``, ``MinPower``, ``MaxPerf``, ``Custom``. - ``PowerRegulator``: Determines how to regulate the power consumption. Allowed values are ``DynamicPowerSavings``, ``StaticLowPower``, ``StaticHighPerf``, ``OsControl``. - ``ProcAes``: Enable or disable the Advanced Encryption Standard Instruction Set (AES-NI) in the processor. 
Allowed values are ``Enabled``, ``Disabled``. - ``ProcCoreDisable``: Disable processor cores using Intel's Core Multi-Processing (CMP) Technology. Allowed values are Integers ranging from ``0`` to ``24``. - ``ProcHyperthreading``: Enable or disable Intel Hyperthreading. Allowed values are ``Enabled``, ``Disabled``. - ``ProcNoExecute``: Protect your system against malicious code and viruses. Allowed values are ``Enabled``, ``Disabled``. - ``ProcTurbo``: Enables the processor to transition to a higher frequency than the processor's rated speed using Turbo Boost Technology if the processor has available power and is within temperature specifications. Allowed values are ``Enabled``, ``Disabled``. - ``ProcVirtualization``: Enables or Disables a hypervisor or operating system supporting this option to use hardware capabilities provided by Intel's Virtualization Technology. Allowed values are ``Enabled``, ``Disabled``. - ``SecureBootStatus``: The current state of Secure Boot configuration. Allowed values are ``Enabled``, ``Disabled``. .. note:: This setting is read-only and can't be modified with ``apply_configuration`` clean step. - ``Sriov``: If enabled, SR-IOV support enables a hypervisor to create virtual instances of a PCI-express device, potentially increasing performance. If enabled, the BIOS allocates additional resources to PCI-express devices. Allowed values are ``Enabled``, ``Disabled``. - ``ThermalConfig``: select the fan cooling solution for the system. Allowed values are ``OptimalCooling``, ``IncreasedCooling``, ``MaxCooling`` - ``ThermalShutdown``: Control the reaction of the system to caution level thermal events. Allowed values are ``Enabled``, ``Disabled``. - ``TpmState``: Current TPM device state. Allowed values are ``NotPresent``, ``PresentDisabled``, ``PresentEnabled``. .. note:: This setting is read-only and can't be modified with ``apply_configuration`` clean step. - ``TpmType``: Current TPM device type. 
Allowed values are ``NoTpm``, ``Tpm12``, ``Tpm20``, ``Tm10``. .. note:: This setting is read-only and can't be modified with ``apply_configuration`` clean step. - ``UefiOptimizedBoot``: Enables or Disables the System BIOS boot using native UEFI graphics drivers. Allowed values are ``Enabled``, ``Disabled``. - ``WorkloadProfile``: Change the Workload Profile to accommodate your desired workload. Allowed values are ``GeneralPowerEfficientCompute``, ``GeneralPeakFrequencyCompute``, ``GeneralThroughputCompute``, ``Virtualization-PowerEfficient``, ``Virtualization-MaxPerformance``, ``LowLatency``, ``MissionCritical``, ``TransactionalApplicationProcessing``, ``HighPerformanceCompute``, ``DecisionSupport``, ``GraphicProcessing``, ``I/OThroughput``, ``Custom`` .. note:: This setting is only applicable to ProLiant Gen10 servers with iLO 5 management systems. Certificate based validation in iLO ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The driver supports validation of certificates on the HPE Proliant servers. The path to certificate file needs to be appropriately set in ``ca_file`` in the node's ``driver_info``. To update SSL certificates into iLO, refer to `HPE Integrated Lights-Out Security Technology Brief `_. Use iLO hostname or IP address as a 'Common Name (CN)' while generating Certificate Signing Request (CSR). Use the same value as `ilo_address` while enrolling node to Bare Metal service to avoid SSL certificate validation errors related to hostname mismatch. Rescue mode support ^^^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` supports rescue functionality. Rescue operation can be used to boot nodes into a rescue ramdisk so that the ``rescue`` user can access the node. Please refer to :doc:`/admin/rescue` for detailed explanation of rescue feature. Inject NMI support ^^^^^^^^^^^^^^^^^^ The management interface ``ilo`` supports injection of non-maskable interrupt (NMI) to a bare metal. Following command can be used to inject NMI on a server: ..
code-block:: console baremetal node inject nmi Following command can be used to inject NMI via Compute service: .. code-block:: console openstack server dump create .. note:: This feature is supported on HPE ProLiant Gen9 servers and beyond. Soft power operation support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The power interface ``ilo`` supports soft power off and soft reboot operations on a bare metal. Following commands can be used to perform soft power operations on a server: .. code-block:: console baremetal node reboot --soft \ [--power-timeout ] baremetal node power off --soft \ [--power-timeout ] .. note:: The configuration ``[conductor]soft_power_off_timeout`` is used as a default timeout value when no timeout is provided while invoking hard or soft power operations. .. note:: Server POST state is used to track the power status of HPE ProLiant Gen9 servers and beyond. Out of Band RAID Support ^^^^^^^^^^^^^^^^^^^^^^^^ With Gen10 HPE Proliant servers and later the ``ilo5`` hardware type supports firmware based RAID configuration as a clean step. This feature requires the node to be configured to ``ilo5`` hardware type and its raid interface to be ``ilo5``. See :ref:`raid` for more information. After a successful RAID configuration, the Bare Metal service will update the node with the following information: * Node ``properties/local_gb`` is set to the size of root volume. * Node ``properties/root_device`` is filled with ``wwn`` details of root volume. It is used by iLO driver as root device hint during provisioning. Later the value of raid level of root volume can be added in ``baremetal-with-RAID10`` (RAID10 for raid level 10) resource class. 
And consequently the flavor needs to be updated to request the resource class to create the server using selected node:: baremetal node set test_node --resource-class \ baremetal-with-RAID10 openstack flavor set --property \ resources:CUSTOM_BAREMETAL_WITH_RAID10=1 test-flavor openstack server create --flavor test-flavor --image test-image instance-1 .. note:: Supported raid levels for ``ilo5`` hardware type are: 0, 1, 5, 6, 10, 50, 60 IPv6 support ^^^^^^^^^^^^ With the IPv6 support in ``proliantutils>=2.8.0``, nodes can be enrolled into the baremetal service using the iLO IPv6 addresses. .. code-block:: console baremetal node create --driver ilo --deploy-interface direct \ --driver-info ilo_address=2001:0db8:85a3:0000:0000:8a2e:0370:7334 \ --driver-info ilo_username=test-user \ --driver-info ilo_password=test-password \ --driver-info deploy_iso=test-iso \ --driver-info rescue_iso=test-iso .. note:: No configuration changes (in e.g. ironic.conf) are required in order to support IPv6. Out of Band Sanitize Disk Erase Support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ With Gen10 HPE Proliant servers and later the ``ilo5`` hardware type supports firmware based sanitize disk erase as a clean step. This feature requires the node to be configured to ``ilo5`` hardware type and its management interface to be ``ilo5``. The possible erase patterns it supports are: * For HDD - 'overwrite', 'zero', 'crypto' * For SSD - 'block', 'zero', 'crypto' The default erase patterns are, for HDD, 'overwrite' and for SSD, 'block'. .. note:: On average, a 300GB HDD with default pattern "overwrite" would take approx. 9 hours and a 300GB SSD with default pattern "block" would take approx. 30 seconds to complete the erase. Out of Band One Button Secure Erase Support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ With Gen10 HPE Proliant servers which have been updated with SPP version 2019.03.0 or later the ``ilo5`` hardware type supports firmware based one button secure erase as a clean step.
The One Button Secure Erase resets iLO and deletes all licenses stored there, resets BIOS settings, and deletes all Active Health System (AHS) and warranty data stored on the system. It also erases supported non-volatile storage data and deletes any deployment settings profiles. See `HPE Gen10 Security Reference Guide`_ for more information. Below are the steps to perform this clean step: * Perform the cleaning using 'one_button_secure_erase' clean step .. code-block:: console baremetal node clean $node_ident --clean-steps\ '[{"interface": "management", "step": "one_button_secure_erase"}]' * Once the clean step is triggered, the node goes to 'clean wait' state and the 'maintenance' flag on the node is set to 'True'; then delete the node .. code-block:: console baremetal node delete $node_ident .. note:: * Even after deleting the node, One Button Secure Erase operation would continue on the node. * This clean step should be kept last if multiple clean steps are to be executed. No clean step after this step would be executed. * One Button Secure Erase should be used with extreme caution, and only when a system is being decommissioned. During the erase the iLO network would keep disconnecting and after the erase the user will completely lose iLO access along with the credentials of the server, which needs to be regained by the administrator. The process can take up to a day or two to fully erase and reset all user data. * When you activate One Button Secure Erase, iLO 5 does not allow firmware update or reset operations. .. note:: Do not perform any iLO 5 configuration changes until this process is completed. UEFI-HTTPS Boot support ^^^^^^^^^^^^^^^^^^^^^^^ The UEFI firmware on Gen10 HPE Proliant servers supports booting from secured URLs. With this capability ``ilo5`` hardware with ``ilo-uefi-https`` boot interface supports deploy/rescue features in more secured environments.
If swift is used as glance backend and ironic is configured to use swift to store temporary images, it is required that swift is configured on HTTPS so that the tempurl generated is HTTPS URL. If the webserver is used for hosting the temporary images, then the webserver is required to serve requests on HTTPS. If the images are hosted on a HTTPS webserver or swift configured with HTTPS with custom certificates, the user is required to export SSL certificates into iLO. Refer to `HPE Integrated Lights-Out Security Technology Brief`_ for more information. The following command can be used to enroll a ProLiant node with ``ilo5`` hardware type and ``ilo-uefi-https`` boot interface: .. code-block:: console baremetal node create \ --driver ilo5 \ --boot-interface ilo-uefi-https \ --deploy-interface direct \ --raid-interface ilo5 \ --rescue-interface agent \ --driver-info ilo_address= \ --driver-info ilo_username= \ --driver-info ilo_password= \ --driver-info deploy_kernel= \ --driver-info deploy_ramdisk= \ --driver-info bootloader= Layer 3 or DHCP-less ramdisk booting ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ DHCP-less deploy is supported by ``ilo`` and ``ilo5`` hardware types. However it would work only with ilo-virtual-media boot interface. See :doc:`/admin/dhcp-less` for more information. .. _`ssacli documentation`: https://support.hpe.com/hpsc/doc/public/display?docId=c03909334 .. _`proliant-tools`: https://docs.openstack.org/diskimage-builder/latest/elements/proliant-tools/README.html .. _`HPE iLO4 User Guide`: https://h20566.www2.hpe.com/hpsc/doc/public/display?docId=c03334051 .. _`HPE Gen10 Security Reference Guide`: https://support.hpe.com/hpesc/public/docDisplay?docLocale=en_US&docId=a00018320en_us .. _`iLO 4 management engine`: https://www.hpe.com/us/en/servers/integrated-lights-out-ilo.html .. _`iLO 5 management engine`: https://www.hpe.com/us/en/servers/integrated-lights-out-ilo.html#innovations .. _`Redfish`: https://www.dmtf.org/standards/redfish .. 
_`Gen10 wiki section`: https://wiki.openstack.org/wiki/Ironic/Drivers/iLODrivers/master#Enabling_ProLiant_Gen10_systems_in_Ironic .. _`Guidelines for SPP ISO`: https://h17007.www1.hpe.com/us/en/enterprise/servers/products/service_pack/spp .. _`SUM`: https://h17007.www1.hpe.com/us/en/enterprise/servers/products/service_pack/hpsum/index.aspx .. _`SUM User Guide`: https://h20565.www2.hpe.com/hpsc/doc/public/display?docId=c05210448 .. [1] `ironic-python-agent-builder`: https://docs.openstack.org/ironic-python-agent-builder/latest/install/index.html .. _`HPE Integrated Lights-Out Security Technology Brief`: http://h20564.www2.hpe.com/hpsc/doc/public/display?docId=c04530504 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/drivers/intel-ipmi.rst0000664000175000017500000001267700000000000022531 0ustar00zuulzuul00000000000000================= Intel IPMI driver ================= Overview ======== The ``intel-ipmi`` hardware type is same as the :doc:`ipmitool` hardware type except for the support of Intel Speed Select Performance Profile (Intel SST-PP_) feature. Intel SST-PP allows a server to run different workloads by configuring the CPU to run at 3 distinct operating points or profiles. Intel SST-PP supports three configuration levels: * 0 - Intel SST-PP Base Config * 1 - Intel SST-PP Config 1 * 2 - Intel SST-PP Config 2 The following table shows the list of active cores and their base frequency at different SST-PP config levels: ============== ========= =================== Config Cores Base Freq (GHz) ============== ========= =================== Base 24 2.4 Config 1 20 2.5 Config 2 16 2.7 ============== ========= =================== This configuration is managed by the management interface ``intel-ipmitool`` for IntelIPMI hardware. IntelIPMI manages nodes by using IPMI_ (Intelligent Platform Management Interface) protocol versions 2.0 or 1.5. 
It uses the IPMItool_ utility which is an open-source command-line interface (CLI) for controlling IPMI-enabled devices. Glossary ======== * IPMI - Intelligent Platform Management Interface. * Intel SST-PP - Intel Speed Select Performance Profile. Enabling the IntelIPMI hardware type ==================================== Please see :doc:`/install/configure-ipmi` for the required dependencies. #. To enable ``intel-ipmi`` hardware, add the following configuration to your ``ironic.conf``: .. code-block:: ini [DEFAULT] enabled_hardware_types = intel-ipmi enabled_management_interfaces = intel-ipmitool #. Restart the Ironic conductor service:: sudo service ironic-conductor restart # Or, for RDO: sudo systemctl restart openstack-ironic-conductor Registering a node with the IntelIPMI driver ============================================ Nodes configured to use the IntelIPMI drivers should have the ``driver`` field set to ``intel-ipmi``. All the configuration value required for IntelIPMI is the same as the IPMI hardware type except the management interface which is ``intel-ipmitool``. Refer :doc:`ipmitool` for details. The ``baremetal node create`` command can be used to enroll a node with an IntelIPMI driver. For example:: baremetal node create --driver intel-ipmi \ --driver-info ipmi_address=
\ --driver-info ipmi_username= \ --driver-info ipmi_password= Features of the ``intel-ipmi`` hardware type ============================================ Intel SST-PP ^^^^^^^^^^^^^ A node with Intel SST-PP can be configured to use it via ``configure_intel_speedselect`` deploy step. This deploy accepts: * ``intel_speedselect_config``: Hexadecimal code of Intel SST-PP configuration. Accepted values are '0x00', '0x01', '0x02'. These values correspond to `Intel SST-PP Config Base`, `Intel SST-PP Config 1`, `Intel SST-PP Config 2` respectively. The input value must be a string. * ``socket_count``: Number of sockets in the node. The input value must be a positive integer (1 by default). The deploy step issues an IPMI command with the raw code for each socket in the node to set the requested configuration. A reboot is required to reflect the changes. Each configuration profile is mapped to traits that Ironic understands. Please note that these names are used for example purpose only. Any name can be used. Only the parameter value should match the deploy step ``configure_intel_speedselect``. * 0 - ``CUSTOM_INTEL_SPEED_SELECT_CONFIG_BASE`` * 1 - ``CUSTOM_INTEL_SPEED_SELECT_CONFIG_1`` * 2 - ``CUSTOM_INTEL_SPEED_SELECT_CONFIG_2`` Now to configure a node with Intel SST-PP while provisioning, create deploy templates for each profiles in Ironic. .. 
code-block:: console baremetal deploy template create \ CUSTOM_INTEL_SPEED_SELECT_CONFIG_BASE \ --steps '[{"interface": "management", "step": "configure_intel_speedselect", "args": {"intel_speedselect_config": "0x00", "socket_count": 2}, "priority": 150}]' baremetal deploy template create \ CUSTOM_INTEL_SPEED_SELECT_CONFIG_1 \ --steps '[{"interface": "management", "step": "configure_intel_speedselect", "args": {"intel_speedselect_config": "0x01", "socket_count": 2}, "priority": 150}]' baremetal deploy template create \ CUSTOM_INTEL_SPEED_SELECT_CONFIG_2 \ --steps '[{"interface": "management", "step": "configure_intel_speedselect", "args": {"intel_speedselect_config": "0x02", "socket_count": 2}, "priority": 150}]' All Intel SST-PP capable nodes should have these traits associated. .. code-block:: console baremetal node add trait node-0 \ CUSTOM_INTEL_SPEED_SELECT_CONFIG_BASE \ CUSTOM_INTEL_SPEED_SELECT_CONFIG_1 \ CUSTOM_INTEL_SPEED_SELECT_CONFIG_2 To trigger the Intel SST-PP configuration during node provisioning, one of the traits can be added to the flavor. .. code-block:: console openstack flavor set baremetal --property trait:CUSTOM_INTEL_SPEED_SELECT_CONFIG_1=required Finally create a server with ``baremetal`` flavor to provision a baremetal node with Intel SST-PP profile *Config 1*. .. _IPMI: https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface .. _IPMItool: https://sourceforge.net/projects/ipmitool/ .. 
_SST-PP: https://www.intel.com/content/www/us/en/architecture-and-technology/speed-select-technology-article.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/drivers/ipa.rst0000664000175000017500000001145300000000000021222 0ustar00zuulzuul00000000000000=================== Ironic Python Agent =================== Overview ======== *Ironic Python Agent* (also often called *IPA* or just *agent*) is a Python-based agent which handles *ironic* bare metal nodes in a variety of actions such as inspect, configure, clean and deploy images. IPA is distributed over nodes and runs, inside of a ramdisk, the process of booting this ramdisk on the node. For more information see the :ironic-python-agent-doc:`ironic-python-agent documentation <>`. Drivers ======= Starting with the Kilo release all deploy interfaces (except for fake ones) are using IPA. For nodes using the :ref:`direct-deploy` interface, the conductor prepares a swift temporary URL or a local HTTP URL for the image. IPA then handles the whole deployment process: downloading an image from swift, putting it on the machine and doing any post-deploy actions. Requirements ------------ Using IPA requires it to be present and configured on the deploy ramdisk, see :ref:`deploy-ramdisk` .. _ipa-proxies: Using proxies for image download ================================ Overview -------- When using the :ref:`direct-deploy`, IPA supports using proxies for downloading the user image. For example, this could be used to speed up download by using a caching proxy. Steps to enable proxies ----------------------- #. Configure the proxy server of your choice (for example `Squid `_, `Apache Traffic Server `_). This will probably require you to configure the proxy server to cache the content even if the requested URL contains a query, and to raise the maximum cached file size as images can be pretty big. 
If you have HTTPS enabled in swift (see :swift-doc:`swift deployment guide `), it is possible to configure the proxy server to talk to swift via HTTPS to download the image, store it in the cache unencrypted and return it to the node via HTTPS again. Because the image will be stored unencrypted in the cache, this approach is recommended for images that do not contain sensitive information. Refer to your proxy server's documentation to complete this step. #. Set ``[glance]swift_temp_url_cache_enabled`` in the ironic conductor config file to ``True``. The conductor will reuse the cached swift temporary URLs instead of generating new ones each time an image is requested, so that the proxy server does not create new cache entries for the same image, based on the query part of the URL (as it contains some query parameters that change each time it is regenerated). #. Set ``[glance]swift_temp_url_expected_download_start_delay`` option in the ironic conductor config file to the value appropriate for your hardware. This is the delay (in seconds) from the time of the deploy request (when the swift temporary URL is generated) to when the URL is used for the image download. You can think of it as roughly the time needed for IPA ramdisk to startup and begin download. This value is used to check if the swift temporary URL duration is large enough to let the image download begin. Also if temporary URL caching is enabled, this will determine if a cached entry will still be valid when the download starts. It is used only if ``[glance]swift_temp_url_cache_enabled`` is ``True``. #. Increase ``[glance]swift_temp_url_duration`` option in the ironic conductor config file, as only non-expired links to images will be returned from the swift temporary URLs cache. This means that if ``swift_temp_url_duration=1200`` then after 20 minutes a new image will be cached by the proxy server as the query in its URL will change. 
The value of this option must be greater than or equal to ``[glance]swift_temp_url_expected_download_start_delay``. #. Add one or more of ``image_http_proxy``, ``image_https_proxy``, ``image_no_proxy`` to driver_info properties in each node that will use the proxy. Advanced configuration ====================== Out-of-band vs. in-band power off on deploy ------------------------------------------- After deploying an image onto the node's hard disk, Ironic will reboot the machine into the new image. By default this power action happens ``in-band``, meaning that the ironic-conductor will instruct the IPA ramdisk to power itself off. Some hardware may have a problem with the default approach and would require Ironic to talk directly to the management controller to switch the power off and on again. In order to tell Ironic to do that, you have to update the node's ``driver_info`` field and set the ``deploy_forces_oob_reboot`` parameter with the value of **True**. For example, the below command sets this configuration in a specific node:: baremetal node set --driver-info deploy_forces_oob_reboot=True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/drivers/ipmitool.rst0000664000175000017500000002554000000000000022307 0ustar00zuulzuul00000000000000=========== IPMI driver =========== Overview ======== The ``ipmi`` hardware type manage nodes by using IPMI_ (Intelligent Platform Management Interface) protocol versions 2.0 or 1.5. It uses the IPMItool_ utility which is an open-source command-line interface (CLI) for controlling IPMI-enabled devices. Glossary ======== * IPMI_ - Intelligent Platform Management Interface. * IPMB - Intelligent Platform Management Bus/Bridge. * BMC_ - Baseboard Management Controller. * RMCP - Remote Management Control Protocol. 
Enabling the IPMI hardware type =============================== Please see :doc:`/install/configure-ipmi` for the required dependencies. #. The ``ipmi`` hardware type is enabled by default starting with the Ocata release. To enable it explicitly, add the following to your ``ironic.conf``: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi enabled_management_interfaces = ipmitool,noop enabled_power_interfaces = ipmitool Optionally, enable the :doc:`vendor passthru interface ` and either or both :doc:`console interfaces `: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi enabled_console_interfaces = ipmitool-socat,ipmitool-shellinabox,no-console enabled_management_interfaces = ipmitool,noop enabled_power_interfaces = ipmitool enabled_vendor_interfaces = ipmitool,no-vendor #. Restart the Ironic conductor service. Please see :doc:`/install/enabling-drivers` for more details. Registering a node with the IPMI driver ======================================= Nodes configured to use the IPMItool drivers should have the ``driver`` field set to ``ipmi``. The following configuration value is required and has to be added to the node's ``driver_info`` field: - ``ipmi_address``: The IP address or hostname of the BMC. Other options may be needed to match the configuration of the BMC, the following options are optional, but in most cases, it's considered a good practice to have them set: - ``ipmi_username``: The username to access the BMC; defaults to *NULL* user. - ``ipmi_password``: The password to access the BMC; defaults to *NULL*. - ``ipmi_port``: The remote IPMI RMCP port. By default ipmitool will use the port *623*. .. note:: It is highly recommend that you setup a username and password for your BMC. The ``baremetal node create`` command can be used to enroll a node with an IPMItool-based driver. For example:: baremetal node create --driver ipmi \ --driver-info ipmi_address=
\ --driver-info ipmi_username= \ --driver-info ipmi_password= Advanced configuration ====================== When a simple configuration such as providing the ``address``, ``username`` and ``password`` is not enough, the IPMItool driver contains many other options that can be used to address special usages. Single/Double bridging functionality ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: A version of IPMItool higher or equal to 1.8.12 is required to use the bridging functionality. There are two different bridging functionalities supported by the IPMItool-based drivers: *single* bridge and *dual* bridge. The following configuration values need to be added to the node's ``driver_info`` field so bridging can be used: - ``ipmi_bridging``: The bridging type; default is *no*; other supported values are *single* for single bridge or *dual* for double bridge. - ``ipmi_local_address``: The local IPMB address for bridged requests. Required only if ``ipmi_bridging`` is set to *single* or *dual*. This configuration is optional, if not specified it will be auto discovered by IPMItool. - ``ipmi_target_address``: The destination address for bridged requests. Required only if ``ipmi_bridging`` is set to *single* or *dual*. - ``ipmi_target_channel``: The destination channel for bridged requests. Required only if ``ipmi_bridging`` is set to *single* or *dual*. Double bridge specific options: - ``ipmi_transit_address``: The transit address for bridged requests. Required only if ``ipmi_bridging`` is set to *dual*. - ``ipmi_transit_channel``: The transit channel for bridged requests. Required only if ``ipmi_bridging`` is set to *dual*. The parameter ``ipmi_bridging`` should specify the type of bridging required: *single* or *dual* to access the bare metal node. If the parameter is not specified, the default value will be set to *no*. The ``baremetal node set`` command can be used to set the required bridging information to the Ironic node enrolled with the IPMItool driver. 
For example: * Single Bridging:: baremetal node set \ --driver-info ipmi_local_address=
\ --driver-info ipmi_bridging=single \ --driver-info ipmi_target_channel= \ --driver-info ipmi_target_address= * Double Bridging:: baremetal node set \ --driver-info ipmi_local_address=
\ --driver-info ipmi_bridging=dual \ --driver-info ipmi_transit_channel= \ --driver-info ipmi_transit_address= \ --driver-info ipmi_target_channel= \ --driver-info ipmi_target_address= Changing the version of the IPMI protocol ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The IPMItool-based drivers works with the versions *2.0* and *1.5* of the IPMI protocol. By default, the version *2.0* is used. In order to change the IPMI protocol version in the bare metal node, the following option needs to be set to the node's ``driver_info`` field: - ``ipmi_protocol_version``: The version of the IPMI protocol; default is *2.0*. Supported values are *1.5* or *2.0*. The ``baremetal node set`` command can be used to set the desired protocol version:: baremetal node set --driver-info ipmi_protocol_version= .. warning:: Version *1.5* of the IPMI protocol does not support encryption. Therefore, it is highly recommended that version 2.0 is used. .. _ipmi-cipher-suites: Cipher suites ~~~~~~~~~~~~~ IPMI 2.0 introduces support for encryption and allows setting which cipher suite to use. Traditionally, ``ipmitool`` was using cipher suite 3 by default, but since SHA1 no longer complies with modern security requirement, recent versions (e.g. the one used in RHEL 8.2) are switching to suite 17. Normally, the cipher suite to use is negotiated with the BMC using the special command. On some hardware the negotiation yields incorrect results and IPMI commands fail with :: Error in open session response message : no matching cipher suite Error: Unable to establish IPMI v2 / RMCP+ session Another possible problem is ``ipmitool`` commands taking very long (tens of seconds or even minutes) because the BMC does not support cipher suite negotiation. In both cases you can specify the required suite yourself, e.g. .. 
code-block:: console baremetal node set --driver-info ipmi_cipher_suite=3 In scenarios where the operator can't specify the ``ipmi_cipher_suite`` for each node, the configuration parameter ``[ipmi]/cipher_suite_versions`` can be set to a list of cipher suites that will be used, Ironic will attempt to find a value that can be used from the list provided (from last to first): .. code-block:: ini [ipmi] cipher_suite_versions = ['1','2','3','6','7','8','11','12'] To find the suitable values for this configuration, you can check the field `RMCP+ Cipher Suites` after running an ``ipmitool`` command, e.g: .. code-block:: console $ ipmitool -I lanplus -H $HOST -U $USER -v -R 12 -N 5 lan print # output Set in Progress : Set Complete Auth Type Support : NONE MD2 MD5 PASSWORD OEM Auth Type Enable : Callback : NONE MD2 MD5 PASSWORD OEM IP Address Source : Static Address IP Address : Subnet Mask : MAC Address : RMCP+ Cipher Suites : 0,1,2,3,6,7,8,11,12 .. warning:: Only the cipher suites 3 and 17 are considered secure by the modern standards. Cipher suite 0 means "no security at all". .. _ipmi-priv-level: Using a different privilege level ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By default Ironic requests the ``ADMINISTRATOR`` privilege level of all commands. This is the easiest option, but if it's not available for you, you can change it to ``CALLBACK``, ``OPERATOR`` or ``USER`` this way: .. code-block:: console baremetal node set --driver-info ipmi_priv_level=OPERATOR You must ensure that the user can still change power state and boot devices. Static boot order configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ See :ref:`static-boot-order`. .. TODO(lucasagomes): Write about privilege level .. TODO(lucasagomes): Write about force boot device Vendor Differences ~~~~~~~~~~~~~~~~~~ While the Intelligent Platform Management Interface (IPMI) interface is based upon a defined standard, the Ironic community is aware of at least one vendor which utilizes a non-standard boot device selector. 
In essence, this could be something as simple as different interpretation of the standard. As of October 2020, the known difference is with Supermicro hardware where a selector of ``0x24``, signifying a *REMOTE* boot device in the standard, must be used when a boot operation from the local disk subsystem is requested **in UEFI mode**. This is contrary to BIOS mode where the same BMC's expect the selector to be a value of ``0x08``. Because the BMC does not respond with any sort of error, nor do we want to risk BMC connectivity issues by explicitly querying all BMCs what vendor it may be before every operation, the vendor can automatically be recorded in the ``properties`` field ``vendor``. When this is set to a value of ``supermicro``, Ironic will navigate the UEFI behavior difference enabling the UEFI to be requested with boot to disk. Example:: baremetal node set \ --properties vendor="supermicro" Luckily, Ironic will attempt to perform this detection in power synchronization process, and record this value if not already set. While similar issues may exist when setting the boot mode and target boot device in other vendors' BMCs, we are not aware of them at present. Should you encounter such an issue, please feel free to report this via `Storyboard `_, and be sure to include the ``chassis bootparam get 5`` output value along with the ``mc info`` output from your BMC. Example:: ipmitool -I lanplus -H -U -P \ mc info ipmitool -I lanplus -H -U -P \ chassis bootparam get 5 .. _IPMItool: https://sourceforge.net/projects/ipmitool/ .. _IPMI: https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface .. _BMC: https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface#Baseboard_management_controller ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/drivers/irmc.rst0000664000175000017500000005145600000000000021412 0ustar00zuulzuul00000000000000.. 
_irmc: =========== iRMC driver =========== Overview ======== The iRMC driver enables control FUJITSU PRIMERGY via ServerView Common Command Interface (SCCI). Support for FUJITSU PRIMERGY servers consists of the ``irmc`` hardware type and a few hardware interfaces specific for that hardware type. Prerequisites ============= * Install `python-scciclient `_ and `pysnmp `_ packages:: $ pip install "python-scciclient>=0.7.2" pysnmp Hardware Type ============= The ``irmc`` hardware type is available for FUJITSU PRIMERGY servers. For information on how to enable the ``irmc`` hardware type, see :ref:`enable-hardware-types`. Hardware interfaces ^^^^^^^^^^^^^^^^^^^ The ``irmc`` hardware type overrides the selection of the following hardware interfaces: * bios Supports ``irmc`` and ``no-bios``. The default is ``irmc``. * boot Supports ``irmc-virtual-media``, ``irmc-pxe``, and ``pxe``. The default is ``irmc-virtual-media``. The ``irmc-virtual-media`` boot interface enables the virtual media based deploy with IPA (Ironic Python Agent). .. warning:: We deprecated the ``pxe`` boot interface when used with ``irmc`` hardware type. Support for this interface will be removed in the future. Instead, use ``irmc-pxe``. ``irmc-pxe`` boot interface was introduced in Pike. * console Supports ``ipmitool-socat``, ``ipmitool-shellinabox``, and ``no-console``. The default is ``ipmitool-socat``. * inspect Supports ``irmc``, ``inspector``, and ``no-inspect``. The default is ``irmc``. .. note:: :ironic-inspector-doc:`Ironic Inspector <>` needs to be present and configured to use ``inspector`` as the inspect interface. * management Supports only ``irmc``. * power Supports ``irmc``, which enables power control via ServerView Common Command Interface (SCCI), by default. Also supports ``ipmitool``. * raid Supports ``irmc``, ``no-raid`` and ``agent``. The default is ``no-raid``. For other hardware interfaces, ``irmc`` hardware type supports the Bare Metal reference interfaces. 
For more details about the hardware interfaces and how to enable the desired ones, see :ref:`enable-hardware-interfaces`. Here is a complete configuration example with most of the supported hardware interfaces enabled for ``irmc`` hardware type. .. code-block:: ini [DEFAULT] enabled_hardware_types = irmc enabled_bios_interfaces = irmc enabled_boot_interfaces = irmc-virtual-media,irmc-pxe enabled_console_interfaces = ipmitool-socat,ipmitool-shellinabox,no-console enabled_deploy_interfaces = direct enabled_inspect_interfaces = irmc,inspector,no-inspect enabled_management_interfaces = irmc enabled_network_interfaces = flat,neutron enabled_power_interfaces = irmc enabled_raid_interfaces = no-raid,irmc enabled_storage_interfaces = noop,cinder enabled_vendor_interfaces = no-vendor,ipmitool Here is a command example to enroll a node with ``irmc`` hardware type. .. code-block:: console baremetal node create \ --bios-interface irmc \ --boot-interface irmc-pxe \ --deploy-interface direct \ --inspect-interface irmc \ --raid-interface irmc Node configuration ^^^^^^^^^^^^^^^^^^ * Each node is configured for ``irmc`` hardware type by setting the following ironic node object's properties: - ``driver_info/irmc_address`` property to be ``IP address`` or ``hostname`` of the iRMC. - ``driver_info/irmc_username`` property to be ``username`` for the iRMC with administrator privileges. - ``driver_info/irmc_password`` property to be ``password`` for irmc_username. - ``properties/capabilities`` property to be ``boot_mode:uefi`` if UEFI boot is required. - ``properties/capabilities`` property to be ``secure_boot:true`` if UEFI Secure Boot is required. Please refer to `UEFI Secure Boot Support`_ for more information. * The following properties are also required if ``irmc-virtual-media`` boot interface is used: - ``driver_info/deploy_iso`` property to be either deploy iso file name, Glance UUID, or Image Service URL. 
- ``instance info/boot_iso`` property to be either boot iso file name, Glance UUID, or Image Service URL. This is optional property when ``boot_option`` is set to ``netboot``. .. note:: The ``deploy_iso`` and ``boot_iso`` properties used to be called ``irmc_deploy_iso`` and ``irmc_boot_iso`` accordingly before the Xena release. * All of the nodes are configured by setting the following configuration options in the ``[irmc]`` section of ``/etc/ironic/ironic.conf``: - ``port``: Port to be used for iRMC operations; either 80 or 443. The default value is 443. Optional. - ``auth_method``: Authentication method for iRMC operations; either ``basic`` or ``digest``. The default value is ``basic``. Optional. - ``client_timeout``: Timeout (in seconds) for iRMC operations. The default value is 60. Optional. - ``sensor_method``: Sensor data retrieval method; either ``ipmitool`` or ``scci``. The default value is ``ipmitool``. Optional. * The following options are required if ``irmc-virtual-media`` boot interface is enabled: - ``remote_image_share_root``: Ironic conductor node's ``NFS`` or ``CIFS`` root path. The default value is ``/remote_image_share_root``. - ``remote_image_server``: IP of remote image server. - ``remote_image_share_type``: Share type of virtual media, either ``NFS`` or ``CIFS``. The default is ``CIFS``. - ``remote_image_share_name``: share name of ``remote_image_server``. The default value is ``share``. - ``remote_image_user_name``: User name of ``remote_image_server``. - ``remote_image_user_password``: Password of ``remote_image_user_name``. - ``remote_image_user_domain``: Domain name of ``remote_image_user_name``. * The following options are required if ``irmc`` inspect interface is enabled: - ``snmp_version``: SNMP protocol version; either ``v1``, ``v2c`` or ``v3``. The default value is ``v2c``. Optional. - ``snmp_port``: SNMP port. The default value is ``161``. Optional. - ``snmp_community``: SNMP community required for versions ``v1`` and ``v2c``. 
The default value is ``public``. Optional. - ``snmp_security``: SNMP security name required for version ``v3``. Optional. * Each node can be further configured by setting the following ironic node object's properties which override the parameter values in ``[irmc]`` section of ``/etc/ironic/ironic.conf``: - ``driver_info/irmc_port`` property overrides ``port``. - ``driver_info/irmc_auth_method`` property overrides ``auth_method``. - ``driver_info/irmc_client_timeout`` property overrides ``client_timeout``. - ``driver_info/irmc_sensor_method`` property overrides ``sensor_method``. - ``driver_info/irmc_snmp_version`` property overrides ``snmp_version``. - ``driver_info/irmc_snmp_port`` property overrides ``snmp_port``. - ``driver_info/irmc_snmp_community`` property overrides ``snmp_community``. - ``driver_info/irmc_snmp_security`` property overrides ``snmp_security``. Optional functionalities for the ``irmc`` hardware type ======================================================= UEFI Secure Boot Support ^^^^^^^^^^^^^^^^^^^^^^^^ The hardware type ``irmc`` supports secure boot deploy, see :ref:`secure-boot` for details. .. warning:: Secure boot feature is not supported with ``pxe`` boot interface. .. _irmc_node_cleaning: Node Cleaning Support ^^^^^^^^^^^^^^^^^^^^^ The ``irmc`` hardware type supports node cleaning. For more information on node cleaning, see :ref:`cleaning`. Supported **Automated** Cleaning Operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The automated cleaning operations supported are: * ``restore_irmc_bios_config``: Restores BIOS settings on a baremetal node from backup data. If this clean step is enabled, the BIOS settings of a baremetal node will be backed up automatically before the deployment. By default, this clean step is disabled with priority ``0``. Set its priority to a positive integer to enable it. The recommended value is ``10``. .. 
warning:: ``pxe`` boot interface, when used with ``irmc`` hardware type, does not support this clean step. If the ``irmc`` hardware type is used, it is required to select ``irmc-pxe`` or ``irmc-virtual-media`` as the boot interface in order to make this clean step work. Configuration options for the automated cleaning steps are listed under ``[irmc]`` section in ironic.conf :: clean_priority_restore_irmc_bios_config = 0 For more information on node automated cleaning, see :ref:`automated_cleaning` Boot from Remote Volume ^^^^^^^^^^^^^^^^^^^^^^^ The ``irmc`` hardware type supports the generic PXE-based remote volume booting when using the following boot interfaces: * ``irmc-pxe`` * ``pxe`` In addition, the ``irmc`` hardware type supports remote volume booting without PXE. This is available when using the ``irmc-virtual-media`` boot interface. This feature configures a node to boot from a remote volume by using the API of iRMC. It supports iSCSI and FibreChannel. Configuration ~~~~~~~~~~~~~ In addition to the configuration for generic drivers to :ref:`remote volume boot `, the iRMC driver requires the following configuration: * It is necessary to set physical port IDs to network ports and volume connectors. All cards including those not used for volume boot should be registered. The format of a physical port ID is: ``-`` where: - ````: could be ``LAN``, ``FC`` or ``CNA`` - ````: 0 indicates onboard slot. Use 1 to 9 for add-on slots. - ````: A port number starting from 1. These IDs are specified in a node's ``driver_info[irmc_pci_physical_ids]``. This value is a dictionary. The key is the UUID of a resource (Port or Volume Connector) and its value is the physical port ID. 
For example:: { "1ecd14ee-c191-4007-8413-16bb5d5a73a2":"LAN0-1", "87f6c778-e60e-4df2-bdad-2605d53e6fc0":"CNA1-1" } It can be set with the following command:: baremetal node set $NODE_UUID \ --driver-info irmc_pci_physical_ids={} \ --driver-info irmc_pci_physical_ids/$PORT_UUID=LAN0-1 \ --driver-info irmc_pci_physical_ids/$VOLUME_CONNECTOR_UUID=CNA1-1 * For iSCSI boot, volume connectors with both types ``iqn`` and ``ip`` are required. The configuration with DHCP is not supported yet. * For iSCSI, the size of the storage network is needed. This value should be specified in a node's ``driver_info[irmc_storage_network_size]``. It must be a positive integer < 32. For example, if the storage network is 10.2.0.0/22, use the following command:: baremetal node set $NODE_UUID --driver-info irmc_storage_network_size=22 Supported hardware ~~~~~~~~~~~~~~~~~~ The driver supports the PCI controllers, Fibrechannel Cards, Converged Network Adapters supported by `Fujitsu ServerView Virtual-IO Manager `_. Hardware Inspection Support ^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``irmc`` hardware type provides the iRMC-specific hardware inspection with ``irmc`` inspect interface. .. note:: SNMP requires being enabled in ServerView® iRMC S4 Web Server(Network Settings\SNMP section). Configuration ~~~~~~~~~~~~~ The Hardware Inspection Support in the iRMC driver requires the following configuration: * It is necessary to set ironic configuration with ``gpu_ids`` and ``fpga_ids`` options in ``[irmc]`` section. ``gpu_ids`` and ``fpga_ids`` are lists of ``/`` where: - ````: 4 hexadecimal digits starts with '0x'. - ````: 4 hexadecimal digits starts with '0x'. Here are sample values for ``gpu_ids`` and ``fpga_ids``:: gpu_ids = 0x1000/0x0079,0x2100/0x0080 fpga_ids = 0x1000/0x005b,0x1100/0x0180 * The python-scciclient package requires pyghmi version >= 1.0.22 and pysnmp version >= 4.2.3. They are used by the conductor service on the conductor. 
The latest version of pyghmi can be downloaded from `here `__ and pysnmp can be downloaded from `here `__. Supported properties ~~~~~~~~~~~~~~~~~~~~ The inspection process will discover the following essential properties (properties required for scheduling deployment): * ``memory_mb``: memory size * ``cpus``: number of cpus * ``cpu_arch``: cpu architecture * ``local_gb``: disk size Inspection can also discover the following extra capabilities for iRMC driver: * ``irmc_firmware_version``: iRMC firmware version * ``rom_firmware_version``: ROM firmware version * ``trusted_boot``: The flag whether TPM(Trusted Platform Module) is supported by the server. The possible values are 'True' or 'False'. * ``server_model``: server model * ``pci_gpu_devices``: number of gpu devices connected to the bare metal. Inspection can also set/unset node's traits with the following cpu type for iRMC driver: * ``CUSTOM_CPU_FPGA``: The bare metal contains fpga cpu type. .. note:: * The disk size is returned only when eLCM License for FUJITSU PRIMERGY servers is activated. If the license is not activated, then Hardware Inspection will fail to get this value. * Before inspecting, if the server is powered off, it will be turned on automatically. The system will wait for a few seconds before starting the inspection. After inspection, power status will be restored to the previous state. The operator can specify these capabilities in compute service flavor, for example:: openstack flavor set baremetal-flavor-name --property capabilities:irmc_firmware_version="iRMC S4-8.64F" openstack flavor set baremetal-flavor-name --property capabilities:server_model="TX2540M1F5" openstack flavor set baremetal-flavor-name --property capabilities:pci_gpu_devices="1" See :ref:`capabilities-discovery` for more details and examples. The operator can add a trait in compute service flavor, for example:: baremetal node add trait $NODE_UUID CUSTOM_CPU_FPGA A valid trait must be no longer than 255 characters. 
Standard traits are defined in the os_traits library. A custom trait must start with the prefix ``CUSTOM_`` and use the following characters: A-Z, 0-9 and _. RAID configuration Support ^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``irmc`` hardware type provides the iRMC RAID configuration with ``irmc`` raid interface. .. note:: * RAID implementation for ``irmc`` hardware type is based on eLCM license and SDCard. Otherwise, SP(Service Platform) in lifecycle management must be available. * RAID implementation only supported for RAIDAdapter 0 in Fujitsu Servers. Configuration ~~~~~~~~~~~~~ The RAID configuration support in the iRMC drivers requires the following configuration: * It is necessary to set ironic configuration into Node with JSON file option:: $ baremetal node set \ --target-raid-config Here are some sample values for the JSON file:: { "logical_disks": [ { "size_gb": 1000, "raid_level": "1" } ] } or:: { "logical_disks": [ { "size_gb": 1000, "raid_level": "1", "controller": "FTS RAID Ctrl SAS 6G 1GB (D3116C) (0)", "physical_disks": [ "0", "1" ] } ] } .. note:: RAID 1+0 and 5+0 in iRMC driver does not support property ``physical_disks`` in ``target_raid_config`` during create raid configuration yet. See following example:: { "logical_disks": [ { "size_gb": "MAX", "raid_level": "1+0" } ] } See :ref:`raid` for more details and examples. Supported properties ~~~~~~~~~~~~~~~~~~~~ The RAID configuration using iRMC driver supports following parameters in JSON file: * ``size_gb``: is a mandatory property in Ironic. * ``raid_level``: is a mandatory property in Ironic. Currently, iRMC Server supports following RAID levels: 0, 1, 5, 6, 1+0 and 5+0. * ``controller``: is name of the controller as read by the RAID interface. * ``physical_disks``: are specific values for each raid array in LogicalDrive which the operator wants to set along with ``raid_level``. The RAID configuration is supported as a manual cleaning step. .. 
note:: * iRMC server will power-on after create/delete raid configuration is applied, FGI (Foreground Initialize) will process raid configuration in iRMC server, thus the operation will be completed upon power-on and power-off when RAID is created on the iRMC server. See :ref:`raid` for more details and examples. BIOS configuration Support ^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``irmc`` hardware type provides the iRMC BIOS configuration with ``irmc`` bios interface. .. warning:: ``irmc`` bios interface does not support ``factory_reset``. Starting from version ``0.10.0`` of ``python-scciclient``, the BIOS setting obtained may not be the latest. If you want to get the latest BIOS setting, you need to delete the existing BIOS profile in iRMC. For example:: curl -u user:pass -H "Content-type: application/json" -X DELETE -i http://192.168.0.1/rest/v1/Oem/eLCM/ProfileManagement/BiosConfig Configuration ~~~~~~~~~~~~~ The BIOS configuration in the iRMC driver supports the following settings: - ``boot_option_filter``: Specifies from which drives can be booted. This supports following options: ``UefiAndLegacy``, ``LegacyOnly``, ``UefiOnly``. - ``check_controllers_health_status_enabled``: The UEFI FW checks the controller health status. This supports following options: ``true``, ``false``. - ``cpu_active_processor_cores``: The number of active processor cores 1...n. Option 0 indicates that all available processor cores are active. - ``cpu_adjacent_cache_line_prefetch_enabled``: The processor loads the requested cache line and the adjacent cache line. This supports following options: ``true``, ``false``. - ``cpu_vt_enabled``: Supports the virtualization of platform hardware and several software environments, based on Virtual Machine Extensions to support the use of several software environments using virtual computers. This supports following options: ``true``, ``false``. - ``flash_write_enabled``: The system BIOS can be written. Flash BIOS update is possible. 
This supports following options: ``true``, ``false``. - ``hyper_threading_enabled``: Hyper-threading technology allows a single physical processor core to appear as several logical processors. This supports following options: ``true``, ``false``. - ``keep_void_boot_options_enabled``: Boot Options will not be removed from "Boot Option Priority" list. This supports following options: ``true``, ``false``. - ``launch_csm_enabled``: Specifies whether the Compatibility Support Module (CSM) is executed. This supports following options: ``true``, ``false``. - ``os_energy_performance_override_enabled``: Prevents the OS from overruling any energy efficiency policy setting of the setup. This supports following options: ``true``, ``false``. - ``pci_aspm_support``: Active State Power Management (ASPM) is used to power-manage the PCI Express links, thus consuming less power. This supports following options: ``Disabled``, ``Auto``, ``L0Limited``, ``L1only``, ``L0Force``. - ``pci_above_4g_decoding_enabled``: Specifies if memory resources above the 4GB address boundary can be assigned to PCI devices. This supports following options: ``true``, ``false``. - ``power_on_source``: Specifies whether the switch on sources for the system are managed by the BIOS or the ACPI operating system. This supports following options: ``BiosControlled``, ``AcpiControlled``. - ``single_root_io_virtualization_support_enabled``: Single Root IO Virtualization Support is active. This supports following options: ``true``, ``false``. The BIOS configuration is supported as a manual cleaning step. See :ref:`bios` for more details and examples. Supported platforms =================== This driver supports FUJITSU PRIMERGY RX M4 servers and above. When ``irmc`` power interface is used, Soft Reboot (Graceful Reset) and Soft Power Off (Graceful Power Off) are only available if `ServerView agents `_ are installed. See `iRMC S4 Manual `_ for more details. 
RAID configuration feature supports FUJITSU PRIMERGY servers with RAID-Ctrl-SAS-6G-1GB(D3116C) controller and above. For detail supported controller with OOB-RAID configuration, please see `the whitepaper for iRMC RAID configuration `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/drivers/redfish.rst0000664000175000017500000005517400000000000022105 0ustar00zuulzuul00000000000000============== Redfish driver ============== Overview ======== The ``redfish`` driver enables managing servers compliant with the Redfish_ protocol. Prerequisites ============= * The Sushy_ library should be installed on the ironic conductor node(s). For example, it can be installed with ``pip``:: sudo pip install sushy Enabling the Redfish driver =========================== #. Add ``redfish`` to the list of ``enabled_hardware_types``, ``enabled_power_interfaces``, ``enabled_management_interfaces`` and ``enabled_inspect_interfaces`` as well as ``redfish-virtual-media`` to ``enabled_boot_interfaces`` in ``/etc/ironic/ironic.conf``. For example:: [DEFAULT] ... enabled_hardware_types = ipmi,redfish enabled_boot_interfaces = ipxe,redfish-virtual-media enabled_power_interfaces = ipmitool,redfish enabled_management_interfaces = ipmitool,redfish enabled_inspect_interfaces = inspector,redfish #. Restart the ironic conductor service:: sudo service ironic-conductor restart # Or, for RDO: sudo systemctl restart openstack-ironic-conductor Registering a node with the Redfish driver =========================================== Nodes configured to use the driver should have the ``driver`` property set to ``redfish``. The following properties are specified in the node's ``driver_info`` field: - ``redfish_address``: The URL address to the Redfish controller. It must include the authority portion of the URL, and can optionally include the scheme. If the scheme is missing, https is assumed. 
For example: https://mgmt.vendor.com. This is required. - ``redfish_system_id``: The canonical path to the ComputerSystem resource that the driver will interact with. It should include the root service, version and the unique resource path to the ComputerSystem. This property is only required if target BMC manages more than one ComputerSystem. Otherwise ironic will pick the only available ComputerSystem automatically. For example: /redfish/v1/Systems/1. - ``redfish_username``: User account with admin/server-profile access privilege. Although not required, it is highly recommended. - ``redfish_password``: User account password. Although not required, it is highly recommended. - ``redfish_verify_ca``: If redfish_address has the **https** scheme, the driver will use a secure (TLS_) connection when talking to the Redfish controller. By default (if this is not set or set to True), the driver will try to verify the host certificates. This can be set to the path of a certificate file or directory with trusted certificates that the driver will use for verification. To disable verifying TLS_, set this to False. This is optional. - ``redfish_auth_type``: Redfish HTTP client authentication method. Can be "basic", "session" or "auto". The "auto" mode first tries "session" and falls back to "basic" if session authentication is not supported by the Redfish BMC. Default is set in ironic config as ``[redfish]auth_type``. The ``baremetal node create`` command can be used to enroll a node with the ``redfish`` driver. For example: .. code-block:: bash baremetal node create --driver redfish --driver-info \ redfish_address=https://example.com --driver-info \ redfish_system_id=/redfish/v1/Systems/CX34R87 --driver-info \ redfish_username=admin --driver-info redfish_password=password \ --name node-0 For more information about enrolling nodes see :ref:`enrollment` in the install guide. 
Boot mode support ================= The ``redfish`` hardware type can read current boot mode from the bare metal node as well as set it to either Legacy BIOS or UEFI. .. note:: Boot mode management is the optional part of the Redfish specification. Not all Redfish-compliant BMCs might implement it. In that case it remains the responsibility of the operator to configure proper boot mode to their bare metal nodes. UEFI secure boot ~~~~~~~~~~~~~~~~ Secure boot mode can be automatically set and unset during deployment for nodes in UEFI boot mode, see :ref:`secure-boot` for an explanation how to use it. Two clean and deploy steps are provided for key management: ``management.reset_secure_boot_keys_to_default`` resets secure boot keys to their manufacturing defaults. ``management.clear_secure_boot_keys`` removes all secure boot keys from the node. Out-Of-Band inspection ====================== The ``redfish`` hardware type can inspect the bare metal node by querying Redfish compatible BMC. This process is quick and reliable compared to the way the ``inspector`` hardware type works i.e. booting bare metal node into the introspection ramdisk. .. note:: The ``redfish`` inspect interface relies on the optional parts of the Redfish specification. Not all Redfish-compliant BMCs might serve the required information, in which case bare metal node inspection will fail. .. note:: The ``local_gb`` property cannot always be discovered, for example, when a node does not have local storage or the Redfish implementation does not support the required schema. In this case the property will be set to 0. .. _redfish-virtual-media: Virtual media boot ================== The idea behind virtual media boot is that BMC gets hold of the boot image one way or the other (e.g. by HTTP GET, other methods are defined in the standard), then "inserts" it into node's virtual drive as if it was burnt on a physical CD/DVD. 
The node can then boot from that virtual drive into the operating system residing on the image. The major advantage of virtual media boot feature is that potentially unreliable TFTP image transfer phase of PXE protocol suite is fully eliminated. Hardware types based on the ``redfish`` fully support booting deploy/rescue and user images over virtual media. Ironic builds bootable ISO images, for either UEFI or BIOS (Legacy) boot modes, at the moment of node deployment out of kernel and ramdisk images associated with the ironic node. To boot a node managed by ``redfish`` hardware type over virtual media using BIOS boot mode, it suffice to set ironic boot interface to ``redfish-virtual-media``, as opposed to ``ipmitool``. .. code-block:: bash baremetal node set --boot-interface redfish-virtual-media node-0 .. warning:: Dell hardware requires a non-standard Redfish call to boot from virtual media, thus you **must** use the ``idrac`` hardware type and the ``idrac-redfish-virtual-media`` boot interface with it instead. See :doc:`/admin/drivers/idrac` for more details on this hardware type. If UEFI boot mode is desired, the user should additionally supply EFI System Partition image (ESP_), see `Configuring an ESP image`_ for details. If ``[driver_info]/config_via_floppy`` boolean property of the node is set to ``true``, ironic will create a file with runtime configuration parameters, place into on a FAT image, then insert the image into node's virtual floppy drive. When booting over PXE or virtual media, and user instance requires some specific kernel configuration, the node's ``instance_info[kernel_append_params]`` or ``driver_info[kernel_append_params]`` properties can be used to pass user-specified kernel command line parameters. .. code-block:: bash baremetal node set node-0 \ --driver-info kernel_append_params="nofb nomodeset vga=normal" .. note:: The ``driver_info`` field is supported starting with the Xena release. 
For ramdisk boot, the ``instance_info[ramdisk_kernel_arguments]`` property serves the same purpose. Pre-built ISO images ~~~~~~~~~~~~~~~~~~~~ By default an ISO image is built per node using the deploy kernel and initramfs provided in the configuration or the node's ``driver_info``. Starting with the Wallaby release it's possible to provide a pre-built ISO image: .. code-block:: bash baremetal node set node-0 \ --driver-info deploy_iso=http://url/of/deploy.iso \ --driver-info rescue_iso=http://url/of/rescue.iso .. note:: OpenStack Image service (glance) image IDs and ``file://`` links are also accepted. .. note:: Before the Xena release the parameters were called ``redfish_deploy_iso`` and ``redfish_rescue_iso`` accordingly. The old names are still supported for backward compatibility. No customization is currently done to the image, so e.g. :doc:`/admin/dhcp-less` won't work. `Configuring an ESP image`_ is also unnecessary. Configuring an ESP image ~~~~~~~~~~~~~~~~~~~~~~~~~ An ESP image is an image that contains the necessary bootloader to boot the ISO in UEFI mode. You will need a GRUB2 image file, as well as Shim for secure boot. See :ref:`uefi-pxe-grub` for an explanation how to get them. Then the following script can be used to build an ESP image: .. code-block:: bash DEST=/path/to/esp.img GRUB2=/path/to/grub.efi SHIM=/path/to/shim.efi TEMP_MOUNT=$(mktemp -d) dd if=/dev/zero of=$DEST bs=4096 count=1024 mkfs.fat -s 4 -r 512 -S 4096 $DEST sudo mount $DEST $TEMP_MOUNT sudo mkdir -p $TEMP_MOUNT/EFI/BOOT sudo cp "$SHIM" $TEMP_MOUNT/EFI/BOOT/BOOTX64.efi sudo cp "$GRUB2" $TEMP_MOUNT/EFI/BOOT/GRUBX64.efi sudo umount $TEMP_MOUNT .. note:: If you use an architecture other than x86-64, you'll need to adjust the destination paths. The resulting image should be provided via the ``driver_info/bootloader`` ironic node property in form of an image UUID or a URL: .. 
code-block:: bash baremetal node set --driver-info bootloader= node-0 Alternatively, set the bootloader UUID or URL in the configuration file: .. code-block:: ini [conductor] bootloader = Finally, you need to provide the correct GRUB2 configuration path for your image. In most cases this path will depend on your distribution, more precisely, the distribution you took the GRUB2 image from. For example: CentOS: .. code-block:: ini [DEFAULT] grub_config_path = EFI/centos/grub.cfg Ubuntu: .. code-block:: ini [DEFAULT] grub_config_path = EFI/ubuntu/grub.cfg .. note:: Unlike in the script above, these paths are case-sensitive! .. _redfish-virtual-media-ramdisk: Virtual Media Ramdisk ~~~~~~~~~~~~~~~~~~~~~ The ``ramdisk`` deploy interface can be used in concert with the ``redfish-virtual-media`` boot interface to facilitate the boot of a remote node utilizing pre-supplied virtual media. See :doc:`/admin/ramdisk-boot` for information on how to enable and configure it. Instead of supplying an ``[instance_info]/image_source`` parameter, a ``[instance_info]/boot_iso`` parameter can be supplied. The image will be downloaded by the conductor, and the instance will be booted using the supplied ISO image. In accordance with the ``ramdisk`` deployment interface behavior, once booted the machine will have a ``provision_state`` of ``ACTIVE``. .. code-block:: bash baremetal node set \ --boot-interface redfish-virtual-media \ --deploy-interface ramdisk \ --instance_info boot_iso=http://url/to.iso This initial interface does not support bootloader configuration parameter injection, as such the ``[instance_info]/kernel_append_params`` setting is ignored. Configuration drives are supported starting with the Wallaby release for nodes that have a free virtual USB slot: .. code-block:: bash baremetal node deploy \ --config-drive '{"meta_data": {...}, "user_data": "..."}' or via a link to a raw image: .. 
code-block:: bash baremetal node deploy \ --config-drive http://example.com/config.img Layer 3 or DHCP-less ramdisk booting ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DHCP-less deploy is supported by the Redfish virtual media boot. See :doc:`/admin/dhcp-less` for more information. Firmware update using manual cleaning ===================================== The ``redfish`` hardware type supports updating the firmware on nodes using a manual cleaning step. The firmware update cleaning step allows one or more firmware updates to be applied to a node. If multiple updates are specified, then they are applied sequentially in the order given. The server is rebooted once per update. If a failure occurs, the cleaning step immediately fails which may result in some updates not being applied. If the node is placed into maintenance mode while a firmware update cleaning step is running that is performing multiple firmware updates, the update in progress will complete, and processing of the remaining updates will pause. When the node is taken out of maintenance mode, processing of the remaining updates will continue. When updating the BMC firmware, the BMC may become unavailable for a period of time as it resets. In this case, it may be desirable to have the cleaning step wait after the update has been applied before indicating that the update was successful. This allows the BMC time to fully reset before further operations are carried out against it. To cause the cleaning step to wait after applying an update, an optional ``wait`` argument may be specified in the firmware image dictionary. The value of this argument indicates the number of seconds to wait following the update. If the ``wait`` argument is not specified, then this is equivalent to ``wait 0``, meaning that it will not wait and immediately proceed with the next firmware update if there is one, or complete the cleaning step if not. 
The ``update_firmware`` cleaning step accepts JSON in the following format:: [{ "interface": "management", "step": "update_firmware", "args": { "firmware_images":[ { "url": "", "checksum": "", "source": "", "wait": }, { "url": "" }, ... ] } }] The different attributes of the ``update_firmware`` cleaning step are as follows: .. csv-table:: :header: "Attribute", "Description" :widths: 30, 120 "``interface``", "Interface of the cleaning step. Must be ``management`` for firmware update" "``step``", "Name of cleaning step. Must be ``update_firmware`` for firmware update" "``args``", "Keyword-argument entry (: ) being passed to cleaning step" "``args.firmware_images``", "Ordered list of dictionaries of firmware images to be applied" Each firmware image dictionary is of the form:: { "url": "", "checksum": "", "source": "", "wait": } The ``url`` and ``checksum`` arguments in the firmware image dictionary are mandatory, while the ``source`` and ``wait`` arguments are optional. For ``url`` currently ``http``, ``https``, ``swift`` and ``file`` schemes are supported. ``source`` corresponds to ``[redfish]firmware_source`` and by setting it here, it is possible to override global setting per firmware image in clean step arguments. .. note:: At the present time, targets for the firmware update cannot be specified. In testing, the BMC applied the update to all applicable targets on the node. It is assumed that the BMC knows what components a given firmware image is applicable to. To perform a firmware update, first download the firmware to a web server, Swift or filesystem that the Ironic conductor or BMC has network access to. This could be the ironic conductor web server or another web server on the BMC network. Using a web browser, curl, or similar tool on a server that has network access to the BMC or Ironic conductor, try downloading the firmware to verify that the URLs are correct and that the web server is configured properly. 
Next, construct the JSON for the firmware update cleaning step to be executed. When launching the firmware update, the JSON may be specified on the command line directly or in a file. The following example shows one cleaning step that installs four firmware updates. All except 3rd entry that has explicit ``source`` added, uses setting from ``[redfish]firmware_source`` to determine if and where to stage the files:: [{ "interface": "management", "step": "update_firmware", "args": { "firmware_images":[ { "url": "http://192.0.2.10/BMC_4_22_00_00.EXE", "checksum": "", "wait": 300 }, { "url": "https://192.0.2.10/NIC_19.0.12_A00.EXE", "checksum": "" }, { "url": "file:///firmware_images/idrac/9/PERC_WN64_6.65.65.65_A00.EXE", "checksum": "", "source": "http" }, { "url": "swift://firmware_container/BIOS_W8Y0W_WN64_2.1.7.EXE", "checksum": "" } ] } }] Finally, launch the firmware update cleaning step against the node. The following example assumes the above JSON is in a file named ``firmware_update.json``:: baremetal node clean --clean-steps firmware_update.json In the following example, the JSON is specified directly on the command line:: baremetal node clean --clean-steps '[{"interface": "management", "step": "update_firmware", "args": {"firmware_images":[{"url": "http://192.0.2.10/BMC_4_22_00_00.EXE", "wait": 300}, {"url": "https://192.0.2.10/NIC_19.0.12_A00.EXE"}]}}]' .. note:: Firmware updates may take some time to complete. If a firmware update cleaning step consistently times out, then consider performing fewer firmware updates in the cleaning step or increasing ``clean_callback_timeout`` in ironic.conf to increase the timeout value. .. warning:: Warning: Removing power from a server while it is in the process of updating firmware may result in devices in the server, or the server itself becoming inoperable. 
Retrieving BIOS Settings ======================== When the :doc:`bios interface ` is set to ``redfish``, Ironic will retrieve the node's BIOS settings as described in `BIOS Configuration`_. In addition, via Sushy_, Ironic will get the BIOS Attribute Registry (`BIOS Registry`_) from the node which is a schema providing details on the settings. The following fields will be returned in the BIOS API (``/v1/nodes/{node_ident}/bios``) along with the setting name and value: .. csv-table:: :header: "Field", "Description" :widths: 25, 120 "``attribute_type``", "The type of setting - ``Enumeration``, ``Integer``, ``String``, ``Boolean``, or ``Password``" "``allowable_values``", "A list of allowable values when the attribute_type is ``Enumeration``" "``lower_bound``", "The lowest allowed value when attribute_type is ``Integer``" "``upper_bound``", "The highest allowed value when attribute_type is ``Integer``" "``min_length``", "The shortest string length that the value can have when attribute_type is ``String``" "``max_length``", "The longest string length that the value can have when attribute_type is ``String``" "``read_only``", "The setting is read only and cannot be modified" "``unique``", "The setting is specific to this node" "``reset_required``", "After changing this setting a node reboot is required" Node Vendor Passthru Methods ============================ .. csv-table:: :header: "Method", "Description" :widths: 25, 120 "``create_subscription``", "Create a new subscription on the Node" "``delete_subscription``", "Delete a subscription of a Node" "``get_all_subscriptions``", "List all subscriptions of a Node" "``get_subscription``", "Show a single subscription of a Node" "``eject_vmedia``", "Eject attached virtual media from a Node" Create Subscription ~~~~~~~~~~~~~~~~~~~ .. 
csv-table:: Request :header: "Name", "In", "Type", "Description" :widths: 25, 15, 15, 90 "Destination", "body", "string", "The URI of the destination Event Service" "EventTypes (optional)", "body", "array", "List of types of events that shall be sent to the destination" "Context (optional)", "body", "string", "A client-supplied string that is stored with the event destination subscription" "Protocol (optional)", "body", "string", "The protocol type that the event will use for sending the event to the destination" Example JSON to use in ``create_subscription``:: { "Destination": "https://someurl", "EventTypes": ["Alert"], "Context": "MyProtocol", "Protocol": "Redfish" } Delete Subscription ~~~~~~~~~~~~~~~~~~~ .. csv-table:: Request :header: "Name", "In", "Type", "Description" :widths: 21, 21, 21, 37 "id", "body", "string", "The Id of the subscription generated by the BMC " Example JSON to use in ``delete_subscription``:: { "id": "" } Get Subscription ~~~~~~~~~~~~~~~~ .. csv-table:: Request :header: "Name", "In", "Type", "Description" :widths: 21, 21, 21, 37 "id", "body", "string", "The Id of the subscription generated by the BMC " Example JSON to use in ``get_subscription``:: { "id": "" } Get All Subscriptions ~~~~~~~~~~~~~~~~~~~~~ The ``get_all_subscriptions`` doesn't require any parameters. Eject Virtual Media ~~~~~~~~~~~~~~~~~~~ .. csv-table:: Request :header: "Name", "In", "Type", "Description" :widths: 25, 15, 15, 90 "boot_device (optional)", "body", "string", "Type of the device to eject (all devices by default)" .. _Redfish: http://redfish.dmtf.org/ .. _Sushy: https://opendev.org/openstack/sushy .. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security .. _ESP: https://wiki.ubuntu.com/EFIBootLoaders#Booting_from_EFI .. _`BIOS Registry`: https://redfish.dmtf.org/schemas/v1/AttributeRegistry.v1_3_5.json .. 
_`BIOS Configuration`: https://docs.openstack.org/ironic/latest/admin/bios.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/drivers/snmp.rst0000664000175000017500000001543600000000000021433 0ustar00zuulzuul00000000000000=========== SNMP driver =========== The SNMP hardware type enables control of power distribution units of the type frequently found in data centre racks. PDUs frequently have a management ethernet interface and SNMP support enabling control of the power outlets. The SNMP power interface works with the :ref:`pxe-boot` interface for network deployment and network-configured boot. .. note:: Unlike most of the other power interfaces, the SNMP power interface does not have a corresponding management interface. The SNMP hardware type uses the ``noop`` management interface instead. List of supported devices ========================= This is a non-exhaustive list of supported devices. Any device not listed in this table could possibly work using a similar driver. Please report any device status. ============== ========== ========== ===================== Manufacturer Model Supported? Driver name ============== ========== ========== ===================== APC AP7920 Yes apc_masterswitch APC AP9606 Yes apc_masterswitch APC AP9225 Yes apc_masterswitchplus APC AP7155 Yes apc_rackpdu APC AP7900 Yes apc_rackpdu APC AP7901 Yes apc_rackpdu APC AP7902 Yes apc_rackpdu APC AP7911a Yes apc_rackpdu APC AP7921 Yes apc_rackpdu APC AP7922 Yes apc_rackpdu APC AP7930 Yes apc_rackpdu APC AP7931 Yes apc_rackpdu APC AP7932 Yes apc_rackpdu APC AP7940 Yes apc_rackpdu APC AP7941 Yes apc_rackpdu APC AP7951 Yes apc_rackpdu APC AP7960 Yes apc_rackpdu APC AP7990 Yes apc_rackpdu APC AP7998 Yes apc_rackpdu APC AP8941 Yes apc_rackpdu APC AP8953 Yes apc_rackpdu APC AP8959 Yes apc_rackpdu APC AP8961 Yes apc_rackpdu APC AP8965 Yes apc_rackpdu Aten all? Yes aten CyberPower all? 
Untested cyberpower EatonPower all? Untested eatonpower Teltronix all? Yes teltronix BayTech MRP27 Yes baytech_mrp27 ============== ========== ========== ===================== Software Requirements ===================== - The PySNMP package must be installed, variously referred to as ``pysnmp`` or ``python-pysnmp`` Enabling the SNMP Hardware Type =============================== #. Add ``snmp`` to the list of ``enabled_hardware_types`` in ``ironic.conf``. Also update ``enabled_management_interfaces`` and ``enabled_power_interfaces`` in ``ironic.conf`` as shown below: .. code-block:: ini [DEFAULT] enabled_hardware_types = snmp enabled_management_interfaces = noop enabled_power_interfaces = snmp #. To enable the network boot fallback, update ``enable_netboot_fallback`` in ``ironic.conf``: .. code-block:: ini [pxe] enable_netboot_fallback = True .. note:: It is important to enable the fallback as SNMP hardware type does not support setting of boot devices. When booting in legacy (BIOS) mode, the generated network booting artifact will force booting from local disk. In UEFI mode, Ironic will configure the boot order using UEFI variables. #. Restart the Ironic conductor service. .. code-block:: bash service ironic-conductor restart Ironic Node Configuration ========================= Nodes configured to use the SNMP hardware type should have the ``driver`` field set to the hardware type ``snmp``. The following property values have to be added to the node's ``driver_info`` field: - ``snmp_driver``: PDU manufacturer driver name or ``auto`` to automatically choose ironic snmp driver based on ``SNMPv2-MIB::sysObjectID`` value as reported by PDU. - ``snmp_address``: the IPv4 address of the PDU controlling this node. - ``snmp_port``: (optional) A non-standard UDP port to use for SNMP operations. If not specified, the default port (161) is used. - ``snmp_outlet``: The power outlet on the PDU (1-based indexing). 
- ``snmp_version``: (optional) SNMP protocol version (permitted values ``1``, ``2c`` or ``3``). If not specified, SNMPv1 is chosen. - ``snmp_community``: (Required for SNMPv1/SNMPv2c unless ``snmp_community_read`` and/or ``snmp_community_write`` properties are present in which case the latter take over) SNMP community name parameter for reads and writes to the PDU. - ``snmp_community_read``: SNMP community name parameter for reads to the PDU. Takes precedence over the ``snmp_community`` property. - ``snmp_community_write``: SNMP community name parameter for writes to the PDU. Takes precedence over the ``snmp_community`` property. - ``snmp_user``: (Required for SNMPv3) SNMPv3 User-based Security Model (USM) user name. Synonym for now obsolete ``snmp_security`` parameter. - ``snmp_auth_protocol``: SNMPv3 message authentication protocol ID. Valid values include: ``none``, ``md5``, ``sha`` for all pysnmp versions and additionally ``sha224``, ``sha256``, ``sha384``, ``sha512`` for pysnmp versions 4.4.1 and later. Default is ``none`` unless ``snmp_auth_key`` is provided. In the latter case ``md5`` is the default. - ``snmp_auth_key``: SNMPv3 message authentication key. Must be 8+ characters long. Required when message authentication is used. - ``snmp_priv_protocol``: SNMPv3 message privacy (encryption) protocol ID. Valid values include: ``none``, ``des``, ``3des``, ``aes``, ``aes192``, ``aes256`` for all pysnmp version and additionally ``aes192blmt``, ``aes256blmt`` for pysnmp versions 4.4.3+. Note that message privacy requires using message authentication. Default is ``none`` unless ``snmp_priv_key`` is provided. In the latter case ``des`` is the default. - ``snmp_priv_key``: SNMPv3 message privacy (encryption) key. Must be 8+ characters long. Required when message encryption is used. - ``snmp_context_engine_id``: SNMPv3 context engine ID. Default is the value of authoritative engine ID. - ``snmp_context_name``: SNMPv3 context name. Default is an empty string. 
The following command can be used to enroll a node with the ``snmp`` hardware type: .. code-block:: bash baremetal node create \ --driver snmp --driver-info snmp_driver= \ --driver-info snmp_address= \ --driver-info snmp_outlet= \ --driver-info snmp_community= ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/drivers/xclarity.rst0000664000175000017500000000447200000000000022313 0ustar00zuulzuul00000000000000=============== XClarity driver =============== Overview ======== The ``xclarity`` driver is targeted for IMM 2.0 and IMM 3.0 managed Lenovo servers. The xclarity hardware type enables the user to take advantage of `XClarity Manager`_ by using the `XClarity Python Client`_. Prerequisites ============= * The XClarity Client library should be installed on the ironic conductor node(s). For example, it can be installed with ``pip``:: sudo pip install python-xclarityclient Enabling the XClarity driver ============================ #. Add ``xclarity`` to the list of ``enabled_hardware_types``, ``enabled_power_interfaces`` and ``enabled_management_interfaces`` in ``/etc/ironic/ironic.conf``. For example:: [DEFAULT] ... enabled_hardware_types = ipmi,xclarity enabled_power_interfaces = ipmitool,xclarity enabled_management_interfaces = ipmitool,xclarity #. Restart the ironic conductor service:: sudo service ironic-conductor restart # Or, for RDO: sudo systemctl restart openstack-ironic-conductor Registering a node with the XClarity driver =========================================== Nodes configured to use the driver should have the ``driver`` property set to ``xclarity``. The following properties are specified in the node's ``driver_info`` field and are required: - ``xclarity_manager_ip``: The IP address of the XClarity Controller. - ``xclarity_username``: User account with admin/server-profile access privilege to the XClarity Controller. 
- ``xclarity_password``: User account password corresponding to the xclarity_username to the XClarity Controller. - ``xclarity_hardware_id``: The hardware ID of the XClarity managed server. The ``baremetal node create`` command can be used to enroll a node with the ``xclarity`` driver. For example: .. code-block:: bash baremetal node create --driver xclarity \ --driver-info xclarity_manager_ip=https://10.240.217.101 \ --driver-info xclarity_username=admin \ --driver-info xclarity_password=password \ --driver-info xclarity_hardware_id=hardware_id For more information about enrolling nodes see :ref:`enrollment` in the install guide. .. _`XClarity Manager`: http://www3.lenovo.com/us/en/data-center/software/systems-management/xclarity/ .. _`XClarity Python Client`: http://pypi.org/project/python-xclarityclient/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/drivers.rst0000664000175000017500000001201300000000000020442 0ustar00zuulzuul00000000000000=============================================== Drivers, Hardware Types and Hardware Interfaces =============================================== Generic Interfaces ------------------ .. toctree:: :maxdepth: 2 interfaces/boot interfaces/deploy Hardware Types -------------- .. toctree:: :maxdepth: 1 drivers/ibmc drivers/idrac drivers/ilo drivers/intel-ipmi drivers/ipmitool drivers/irmc drivers/redfish drivers/snmp drivers/xclarity Changing Hardware Types and Interfaces -------------------------------------- Hardware types and interfaces are enabled in the configuration as described in :doc:`/install/enabling-drivers`. 
Usually, a hardware type is configured on enrolling as described in :doc:`/install/enrollment`:: baremetal node create --driver Any hardware interfaces can be specified on enrollment as well:: baremetal node create --driver \ --deploy-interface direct ---interface For the remaining interfaces the default value is assigned as described in :ref:`hardware_interfaces_defaults`. Both the hardware type and the hardware interfaces can be changed later via the node update API. Changing Hardware Interfaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hardware interfaces can be changed by the following command:: baremetal node set \ --deploy-interface direct \ ---interface The modified interfaces must be enabled and compatible with the current node's hardware type. Changing Hardware Type ~~~~~~~~~~~~~~~~~~~~~~ Changing the node's hardware type can pose a problem. When the ``driver`` field is updated, the final result must be consistent, that is, the resulting hardware interfaces must be compatible with the new hardware type. This will not work:: baremetal node create --name test --driver fake-hardware baremetal node set test --driver ipmi This is because the ``fake-hardware`` hardware type defaults to ``fake`` implementations for some or all interfaces, but the ``ipmi`` hardware type is not compatible with them. There are three ways to deal with this situation: #. Provide new values for all incompatible interfaces, for example:: baremetal node set test --driver ipmi \ --boot-interface pxe \ --deploy-interface direct \ --management-interface ipmitool \ --power-interface ipmitool #. Request resetting some of the interfaces to their new defaults by using the ``--reset--interface`` family of arguments, for example:: baremetal node set test --driver ipmi \ --reset-boot-interface \ --reset-deploy-interface \ --reset-management-interface \ --reset-power-interface .. note:: This feature is available starting with ironic 11.1.0 (Rocky series, API version 1.45). #. 
Request resetting all interfaces to their new defaults:: baremetal node set test --driver ipmi --reset-interfaces You can still specify explicit values for some interfaces:: baremetal node set test --driver ipmi --reset-interfaces \ --deploy-interface direct .. note:: This feature is available starting with ironic 11.1.0 (Rocky series, API version 1.45). .. _static-boot-order: Static boot order configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some hardware is known to misbehave when changing the boot device through the BMC. To work around it you can use the ``noop`` management interface implementation with the ``ipmi`` and ``redfish`` hardware types. In this case the Bare Metal service will not change the boot device for you, leaving the pre-configured boot order. For example, in case of the :ref:`pxe-boot`: #. Via any available means configure the boot order on the node as follows: #. Boot from PXE/iPXE on the provisioning NIC. .. warning:: If it is not possible to limit network boot to only provisioning NIC, make sure that no other DHCP/PXE servers are accessible by the node. #. Boot from hard drive. #. Make sure the ``noop`` management interface is enabled, for example: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish enabled_management_interfaces = ipmitool,redfish,noop #. Change the node to use the ``noop`` management interface:: baremetal node set --management-interface noop Unsupported drivers ------------------- The following drivers were declared as unsupported in ironic Newton release and as of Ocata release they are removed from ironic: - AMT driver - available as part of ironic-staging-drivers_ - iBoot driver - available as part of ironic-staging-drivers_ - Wake-On-Lan driver - available as part of ironic-staging-drivers_ - Virtualbox drivers - SeaMicro drivers - MSFT OCS drivers The SSH drivers were removed in the Pike release. 
Similar functionality can be achieved either with VirtualBMC_ or using libvirt drivers from ironic-staging-drivers_. .. _ironic-staging-drivers: http://ironic-staging-drivers.readthedocs.io .. _VirtualBMC: https://opendev.org/openstack/virtualbmc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/fast-track.rst0000664000175000017500000000331300000000000021026 0ustar00zuulzuul00000000000000===================== Fast-Track Deployment ===================== *Fast track* is a mode of operation where the Bare Metal service keeps a machine powered on with the agent running between provisioning operations. It is first booted during in-band inspection or cleaning (whatever happens first) and is only shut down before rebooting into the final instance. Depending on the configuration, this mode can save several reboots and is particularly useful for scenarios where nodes are enrolled, prepared and provisioned within a short period of time. .. warning:: Fast track deployment targets standalone use cases and is only tested with the ``noop`` networking. The case where inspection, cleaning and provisioning networks are different is not supported. Enabling ======== Fast track is off by default and should be enabled in the configuration: .. code-block:: ini [deploy] fast_track = true Starting with the Yoga release series, it can also be enabled or disabled per node: .. code-block:: console baremetal node set --driver-info fast_track=true Inspection ---------- If using :ref:`in-band inspection`, you need to tell ironic-inspector not to power off nodes afterwards. Depending on the inspection mode (managed or unmanaged), you need to configure two places. In ``ironic.conf``: .. code-block:: ini [inspector] power_off = false And in ``inspector.conf``: .. 
code-block:: ini [processing] power_off = false Finally, you need to update the :ironic-inspector-doc:`inspection PXE configuration ` to include the ``ipa-api-url`` kernel parameter, pointing at the **ironic** endpoint, in addition to the existing ``ipa-inspection-callback-url``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/gmr.rst0000664000175000017500000000427700000000000017566 0ustar00zuulzuul00000000000000Bare Metal Service state report (via Guru Meditation Reports) ============================================================= The Bare Metal service contains a mechanism whereby developers and system administrators can generate a report about the state of running Bare Metal executables (ironic-api and ironic-conductor). This report is called a Guru Meditation Report (GMR for short). GMR provides useful debugging information that can be used to obtain an accurate view on the current live state of the system. For example, what threads are running, what configuration parameters are in effect, and more. The eventlet backdoor facility provides an interactive shell interface for any eventlet based process, allowing an administrator to telnet to a pre-defined port and execute a variety of commands. Configuration ------------- The GMR feature is optional and requires the oslo.reports_ package to be installed. For example, using pip:: pip install 'oslo.reports>=1.18.0' .. _oslo.reports: https://opendev.org/openstack/oslo.reports Generating a GMR ---------------- A *GMR* can be generated by sending the *USR2* signal to any Bare Metal process that supports it. The *GMR* will then be output to stderr for that particular process. For example: Suppose that ``ironic-api`` has process ID ``6385``, and was run with ``2>/var/log/ironic/ironic-api-err.log``. 
Then, sending the *USR* signal:: kill -USR2 6385 will trigger the Guru Meditation report to be printed to ``/var/log/ironic/ironic-api-err.log``. Structure of a GMR ------------------ The *GMR* consists of the following sections: Package Shows information about the package to which this process belongs, including version information. Threads Shows stack traces and thread IDs for each of the threads within this process. Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread IDs). Configuration Lists all the configuration options currently accessible via the CONF object for the current process. .. only:: html Sample GMR Report ----------------- Below is a sample GMR report generated for ``ironic-api`` service: .. include:: report.txt :literal: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/hardware-burn-in.rst0000664000175000017500000001435500000000000022144 0ustar00zuulzuul00000000000000.. _hardware-burn-in: ================ Hardware Burn-in ================ Overview ======== Workflows to onboard new hardware often include a stress-testing step to provoke early failures and to avoid that these load-triggered issues only occur when the nodes have already moved to production. These ``burn-in`` tests typically include CPU, memory, disk, and network. With the Xena release, Ironic supports such tests as part of the cleaning framework. The burn-in steps rely on standard tools such as `stress-ng `_ for CPU and memory, or `fio `_ for disk and network. The burn-in cleaning steps are part of the generic hardware manager in the Ironic Python Agent (IPA) and therefore the agent ramdisk does not need to be bundled with a specific :ironic-python-agent-doc:`IPA hardware manager ` to have them available. 
Each burn-in step accepts (or in the case of network: needs) some basic configuration options, mostly to limit the duration of the test and to specify the amount of resources to be used. The options are set on a node's ``driver-info`` and prefixed with ``agent_burnin_``. The options available for the individual tests will be outlined below. CPU burn-in =========== The options, following a `agent_burnin_` + stress-ng stressor (`cpu`) + stress-ng option schema, are: * ``agent_burnin_cpu_timeout`` (default: 24 hours) * ``agent_burnin_cpu_cpu`` (default: 0, meaning all CPUs) to limit the overall runtime and to pick the number of CPUs to stress. For instance, in order to limit the time of the CPU burn-in to 10 minutes do: .. code-block:: console baremetal node set --driver-info agent_burnin_cpu_timeout=600 \ $NODE_NAME_OR_UUID Then launch the test with: .. code-block:: console baremetal node clean --clean-steps '[{"step": "burnin_cpu", \ "interface": "deploy"}]' $NODE_NAME_OR_UUID Memory burn-in ============== The options, following a `agent_burnin_` + stress-ng stressor (`vm`) + stress-ng option schema, are: * ``agent_burnin_vm_timeout`` (default: 24 hours) * ``agent_burnin_vm_vm-bytes`` (default: 98%) to limit the overall runtime and to set the fraction of RAM to stress. For instance, in order to limit the time of the memory burn-in to 1 hour and the amount of RAM to be used to 75% run: .. code-block:: console baremetal node set --driver-info agent_burnin_vm_timeout=3600 \ $NODE_NAME_OR_UUID baremetal node set --driver-info agent_burnin_vm_vm-bytes=75% \ $NODE_NAME_OR_UUID Then launch the test with: .. 
code-block:: console baremetal node clean --clean-steps '[{"step": "burnin_memory", \ "interface": "deploy"}]' $NODE_NAME_OR_UUID Disk burn-in ============ The options, following a `agent_burnin_` + fio stressor (`fio_disk`) + fio option schema, are: * agent_burnin_fio_disk_runtime (default: 0, meaning no time limit) * agent_burnin_fio_disk_loops (default: 4) to set the time limit and the number of iterations when going over the disks. For instance, in order to limit the number of loops to 2 set: .. code-block:: console baremetal node set --driver-info agent_burnin_fio_disk_loops=2 \ $NODE_NAME_OR_UUID Then launch the test with: .. code-block:: console baremetal node clean --clean-steps '[{"step": "burnin_disk", \ "interface": "deploy"}]' $NODE_NAME_OR_UUID Network burn-in =============== Burning in the network needs a little more config, since we need a pair of nodes to perform the test. Therefore, this test needs to set ``agent_burnin_fio_network_config`` JSON which requires a ``role`` field (values: ``reader``, ``writer``) and a ``partner`` field (value is the hostname of the other node to test), like: .. code-block:: console baremetal node set --driver-info agent_burnin_fio_network_config= \ '{"role": "writer", "partner": "$HOST2"}' $NODE_NAME_OR_UUID1 baremetal node set --driver-info agent_burnin_fio_network_config= \ '{"role": "reader", "partner": "$HOST1"}' $NODE_NAME_OR_UUID2 In addition and similar to the other tests, there is a runtime option to be set (only on the writer): .. code-block:: console baremetal node set --driver-info agent_burnin_fio_network_runtime=600 \ $NODE_NAME_OR_UUID Then launch the test with: .. code-block:: console baremetal node clean --clean-steps '[{"step": "burnin_network",\ "interface": "deploy"}]' $NODE_NAME_OR_UUID1 baremetal node clean --clean-steps '[{"step": "burnin_network",\ "interface": "deploy"}]' $NODE_NAME_OR_UUID2 Both nodes will wait for the other node to show up and block while waiting. 
If the partner does not show up, the cleaning timeout will step in. Logging ======= Since most of the burn-in steps are also providing information about the performance of the stressed components, keeping this information for verification or acceptance purposes may be desirable. By default, the output of the burn-in tools goes to the journal of the Ironic Python Agent and is therefore sent back as an archive to the conductor. In order to consume the output of the burn-in steps more easily, or even in real-time, the nodes can be configured to store the output of the individual steps to files in the ramdisk (from where they can be picked up by a logging pipeline). The configuration of the output file is done via one of ``agent_burnin_cpu_outputfile``, ``agent_burnin_vm_outputfile``, ``agent_burnin_fio_disk_outputfile``, and ``agent_burnin_fio_network_outputfile`` parameters which need to be added to a node like: .. code-block:: console baremetal node set --driver-info agent_burnin_cpu_outputfile=\ '/var/log/burnin.cpu' $NODE_NAME_OR_UUID Additional Information ====================== All tests can be aborted at any moment with .. code-block:: console baremetal node abort $NODE_NAME_OR_UUID One can also launch multiple tests which will be run in sequence, e.g.: .. code-block:: console baremetal node clean --clean-steps '[{"step": "burnin_cpu",\ "interface": "deploy"}, {"step": "burnin_memory",\ "interface": "deploy"}]' $NODE_NAME_OR_UUID If desired, configuring ``fast-track`` may be helpful here as it allows keeping the node up between consecutive calls of ``baremetal node clean``. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/index.rst0000664000175000017500000000375500000000000020110 0ustar00zuulzuul00000000000000Administrator's Guide ===================== If you are a system administrator running Ironic, this section contains information that may help you understand how to operate and upgrade the services. .. toctree:: :maxdepth: 1 Ironic Python Agent Node Hardware Inspection Node Deployment Node Cleaning Node Adoption Node Retirement RAID Configuration BIOS Settings Node Rescuing Configuring to boot from volume Multi-tenant Networking Port Groups Configuring Web or Serial Console Enabling Notifications Conductor Groups Upgrade Guide Security Troubleshooting FAQ Power Synchronization Node Multi-Tenancy Fast-Track Deployment Booting a Ramdisk or an ISO Hardware Burn-in Vendor Passthru Drivers, Hardware Types and Hardware Interfaces ----------------------------------------------- .. toctree:: :maxdepth: 3 drivers Advanced Topics --------------- .. toctree:: :maxdepth: 1 Ceph Object Gateway Windows Images Emitting Software Metrics Auditing API Traffic Service State Reporting Agent Token Deploying without BMC Credentials Layer 3 or DHCP-less Ramdisk Booting Tuning Ironic Role Based Access Control Deploying with Anaconda .. toctree:: :hidden: deploy-steps Dashboard Integration --------------------- A plugin for the OpenStack Dashboard (horizon) service is under development. Documentation for that can be found within the ironic-ui project. * :ironic-ui-doc:`Dashboard (horizon) plugin <>` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/inspection.rst0000664000175000017500000001247400000000000021152 0ustar00zuulzuul00000000000000.. 
_inspection: =================== Hardware Inspection =================== Overview -------- Inspection allows Bare Metal service to discover required node properties once required ``driver_info`` fields (for example, IPMI credentials) are set by an operator. Inspection will also create the Bare Metal service ports for the discovered ethernet MACs. Operators will have to manually delete the Bare Metal service ports for which physical media is not connected. This is required due to the `bug 1405131 `_. There are two kinds of inspection supported by Bare Metal service: #. Out-of-band inspection is currently implemented by several hardware types, including ``ilo``, ``idrac`` and ``irmc``. #. `In-band inspection`_ by utilizing the ironic-inspector_ project. The node should be in the ``manageable`` state before inspection is initiated. If it is in the ``enroll`` or ``available`` state, move it to ``manageable`` first:: baremetal node manage Then inspection can be initiated using the following command:: baremetal node inspect .. _capabilities-discovery: Capabilities discovery ---------------------- This is an incomplete list of capabilities we want to discover during inspection. The exact support is hardware and hardware type specific though, the most complete list is provided by the iLO :ref:`ilo-inspection`. 
``secure_boot`` (``true`` or ``false``) whether secure boot is supported for the node ``boot_mode`` (``bios`` or ``uefi``) the boot mode the node is using ``cpu_vt`` (``true`` or ``false``) whether the CPU virtualization is enabled ``cpu_aes`` (``true`` or ``false``) whether the AES CPU extensions are enabled ``max_raid_level`` (integer, 0-10) maximum RAID level supported by the node ``pci_gpu_devices`` (non-negative integer) number of GPU devices on the node The operator can specify these capabilities in nova flavor for node to be selected for scheduling:: nova flavor-key my-baremetal-flavor set capabilities:pci_gpu_devices="> 0" nova flavor-key my-baremetal-flavor set capabilities:secure_boot="true" Please see a specific :doc:`hardware type page ` for the exact list of capabilities this hardware type can discover. .. _in-band inspection: In-band inspection ------------------ In-band inspection involves booting a ramdisk on the target node and fetching information directly from it. This process is more fragile and time-consuming than the out-of-band inspection, but it is not vendor-specific and works across a wide range of hardware. In-band inspection is using the ironic-inspector_ project. It is supported by all hardware types, and used by default, if enabled, by the ``ipmi`` hardware type. The ``inspector`` *inspect* interface has to be enabled to use it: .. code-block:: ini [DEFAULT] enabled_inspect_interfaces = inspector,no-inspect If the ironic-inspector service is not registered in the service catalog, set the following option: .. code-block:: ini [inspector] endpoint_override = http://inspector.example.com:5050 In order to ensure that ports in Bare Metal service are synchronized with NIC ports on the node, the following settings in the ironic-inspector configuration file must be set: .. code-block:: ini [processing] add_ports = all keep_ports = present There are two modes of in-band inspection: `managed inspection`_ and `unmanaged inspection`_. .. 
_ironic-inspector: https://pypi.org/project/ironic-inspector .. _python-ironicclient: https://pypi.org/project/python-ironicclient Managed inspection ~~~~~~~~~~~~~~~~~~ Inspection is *managed* when the Bare Metal conductor fully configures the node for inspection, including setting boot device, boot mode and power state. This is the only way to conduct inspection using :ref:`redfish-virtual-media` or with :doc:`/admin/dhcp-less`. This mode is engaged automatically when the node has sufficient information to configure boot (e.g. ports in case of iPXE). There are a few configuration options that tune managed inspection, the most important is ``extra_kernel_params``, which allows adding kernel parameters for inspection specifically. This is where you can configure :ironic-python-agent-doc:`inspection collectors and other parameters `, for example: .. code-block:: ini [inspector] extra_kernel_params = ipa-inspection-collectors=default,logs ipa-collect-lldp=1 For the callback URL the ironic-inspector endpoint from the service catalog is used. If you want to override the endpoint for callback only, set the following option: .. code-block:: ini [inspector] callback_endpoint_override = https://example.com/baremetal-introspection/v1/continue Unmanaged inspection ~~~~~~~~~~~~~~~~~~~~ Under *unmanaged* inspection we understand in-band inspection orchestrated by ironic-inspector or a third party. This was the only inspection mode before the Ussuri release, and it is still used when the node's boot cannot be configured by the conductor. The options described above do not affect unmanaged inspection. See :ironic-inspector-doc:`ironic-inspector installation guide ` for more information. If you want to **prevent** unmanaged inspection from working, set this option: .. 
code-block:: ini [inspector] require_managed_boot = True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8866668 ironic-20.1.0/doc/source/admin/interfaces/0000775000175000017500000000000000000000000020360 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/interfaces/boot.rst0000664000175000017500000000625300000000000022063 0ustar00zuulzuul00000000000000=============== Boot interfaces =============== The boot interface manages booting of both the deploy ramdisk and the user instances on the bare metal node. The `PXE boot`_ interface is generic and works with all hardware that supports booting from network. Alternatively, several vendors provide *virtual media* implementations of the boot interface. They work by pushing an ISO image to the node's `management controller`_, and do not require either PXE or iPXE. Check your driver documentation at :doc:`../drivers` for details. .. _pxe-boot: PXE boot -------- The ``pxe`` boot interface uses PXE_ or iPXE_ to deliver the target kernel/ramdisk pair. PXE uses relatively slow and unreliable TFTP protocol for transfer, while iPXE uses HTTP. The downside of iPXE is that it's less common, and usually requires bootstrapping using PXE first. The ``pxe`` boot interface works by preparing a PXE/iPXE environment for a node on the file system, then instructing the DHCP provider (for example, the Networking service) to boot the node from it. See :ref:`direct-deploy-example` for a better understanding of the whole deployment process. .. note:: Both PXE and iPXE are configured differently, when UEFI boot is used instead of conventional BIOS boot. This is particularly important for CPU architectures that do not have BIOS support at all. The ``pxe`` boot interface is used by default for many hardware types, including ``ipmi``. 
Some hardware types, notably ``ilo`` and ``irmc`` have their specific implementations of the PXE boot interface. Additional configuration is required for this boot interface - see :doc:`/install/configure-pxe` for details. Common options -------------- Enable persistent boot device for deploy/clean operation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For (i)PXE booting, Ironic uses non-persistent boot order changes for clean/deploy by default. For some drivers, persistent changes are far more costly than non-persistent ones, so this approach can bring a performance benefit. In order to control this behavior, however, Ironic provides the ``force_persistent_boot_device`` flag in the node's ``driver_info``. It allows the values ``Default`` (make all changes but the last one upon deployment non-persistent), ``Always`` (make all changes persistent), and ``Never`` (make all boot order changes non-persistent). For example in order to have only persistent changes one would need to set something like:: $ openstack baremetal node set --driver-info force_persistent_boot_device='Always' .. note:: It is recommended to check if the node's state has not changed as there is no way of locking the node between these commands. .. note:: The values 'True'/'False' for the option 'force_persistent_boot_device' in the node's driver info for the (i)PXE drivers are deprecated and support for them may be removed in a future release. The former default value 'False' is replaced by the new value 'Default', the value 'True' is replaced by 'Always'. .. _PXE: https://en.wikipedia.org/wiki/Preboot_Execution_Environment .. _iPXE: https://en.wikipedia.org/wiki/IPXE .. 
_management controller: https://en.wikipedia.org/wiki/Out-of-band_management ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/interfaces/deploy.rst0000664000175000017500000001145500000000000022414 0ustar00zuulzuul00000000000000================= Deploy Interfaces ================= A *deploy* interface plays a critical role in the provisioning process. It orchestrates the whole deployment and defines how the image gets transferred to the target disk. .. _direct-deploy: Direct deploy ============= With ``direct`` deploy interface, the deploy ramdisk fetches the image from an HTTP location. It can be an object storage (swift or RadosGW) temporary URL or a user-provided HTTP URL. The deploy ramdisk then copies the image to the target disk. See :ref:`direct deploy diagram ` for a detailed explanation of how this deploy interface works. You can specify this deploy interface when creating or updating a node:: baremetal node create --driver ipmi --deploy-interface direct baremetal node set --deploy-interface direct .. note:: For historical reasons the ``direct`` deploy interface is sometimes called ``agent``. This is because before the Kilo release **ironic-python-agent** used to only support this deploy interface. .. _image_download_source: Deploy with custom HTTP servers ------------------------------- The ``direct`` deploy interface can also be configured to use with custom HTTP servers set up at ironic conductor nodes, images will be cached locally and made accessible by the HTTP server. To use this deploy interface with a custom HTTP server, set ``image_download_source`` to ``http`` in the ``[agent]`` section. .. code-block:: ini [agent] ... image_download_source = http ... This configuration affects *glance* and ``file://`` images. If you want ``http(s)://`` images to also be cached and served locally, use instead: .. code-block:: ini [agent] image_download_source = local .. 
note:: This option can also be set per node in ``driver_info``:: baremetal node set --driver-info image_download_source=local or per instance in ``instance_info``:: baremetal node set --instance-info image_download_source=local You need to set up a workable HTTP server at each conductor node which with ``direct`` deploy interface enabled, and check http related options in the ironic configuration file to match the HTTP server configurations. .. code-block:: ini [deploy] http_url = http://example.com http_root = /httpboot .. note:: See also: :ref:`l3-external-ip`. Each HTTP server should be configured to follow symlinks for images accessible from HTTP service. Please refer to configuration option ``FollowSymLinks`` if you are using Apache HTTP server, or ``disable_symlinks`` if Nginx HTTP server is in use. .. _ansible-deploy: Ansible deploy ============== This interface is similar to ``direct`` in the sense that the image is downloaded by the ramdisk directly from the image store (not from ironic-conductor host), but the logic of provisioning the node is held in a set of Ansible playbooks that are applied by the ``ironic-conductor`` service handling the node. While somewhat more complex to set up, this deploy interface provides greater flexibility in terms of advanced node preparation during provisioning. This interface is supported by most but not all hardware types declared in ironic. However this deploy interface is not enabled by default. To enable it, add ``ansible`` to the list of enabled deploy interfaces in ``enabled_deploy_interfaces`` option in the ``[DEFAULT]`` section of ironic's configuration file: .. code-block:: ini [DEFAULT] ... enabled_deploy_interfaces = direct,ansible ... Once enabled, you can specify this deploy interface when creating or updating a node: .. 
code-block:: shell baremetal node create --driver ipmi --deploy-interface ansible baremetal node set --deploy-interface ansible For more information about this deploy interface, its features and how to use it, see :doc:`Ansible deploy interface <../drivers/ansible>`. .. toctree:: :hidden: ../drivers/ansible Anaconda deploy =============== The ``anaconda`` deploy interface is another option for highly customized deployments. See :doc:`/admin/anaconda-deploy-interface` for more details. .. _ramdisk-deploy: Ramdisk deploy ============== The ramdisk interface is intended to provide a mechanism to "deploy" an instance where the item to be deployed is in reality a ramdisk. It is documented separately, see :doc:`/admin/ramdisk-boot`. .. _custom-agent-deploy: Custom agent deploy =================== The ``custom-agent`` deploy interface is designed for operators who want to completely orchestrate writing the instance image using :ironic-python-agent-doc:`in-band deploy steps from a custom agent image `. If you use this deploy interface, you are responsible to provide all necessary deploy steps with priorities between 61 and 99 (see :ref:`node-deployment-core-steps` for information on priorities). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/metrics.rst0000664000175000017500000001016500000000000020440 0ustar00zuulzuul00000000000000.. _metrics: ========================= Emitting Software Metrics ========================= Beginning with the Newton (6.1.0) release, the ironic services support emitting internal performance data to `statsd `_. This allows operators to graph and understand performance bottlenecks in their system. This guide assumes you have a statsd server setup. For information on using and configuring statsd, please see the `statsd `_ README and documentation. 
These performance measurements, herein referred to as "metrics", can be emitted from the Bare Metal service, including ironic-api, ironic-conductor, and ironic-python-agent. By default, none of the services will emit metrics. Configuring the Bare Metal Service to Enable Metrics ==================================================== Enabling metrics in ironic-api and ironic-conductor --------------------------------------------------- The ironic-api and ironic-conductor services can be configured to emit metrics to statsd by adding the following to the ironic configuration file, usually located at ``/etc/ironic/ironic.conf``:: [metrics] backend = statsd If a statsd daemon is installed and configured on every host running an ironic service, listening on the default UDP port (8125), no further configuration is needed. If you are using a remote statsd server, you must also supply connection information in the ironic configuration file:: [metrics_statsd] # Point this at your environments' statsd host statsd_host = 192.0.2.1 statsd_port = 8125 Enabling metrics in ironic-python-agent --------------------------------------- The ironic-python-agent process receives its configuration in the response from the initial lookup request to the ironic-api service. This means to configure ironic-python-agent to emit metrics, you must enable the agent metrics backend in your ironic configuration file on all ironic-conductor hosts:: [metrics] agent_backend = statsd In order to reliably emit metrics from the ironic-python-agent, you must provide a statsd server that is reachable from both the configured provisioning and cleaning networks. 
The agent statsd connection information is configured in the ironic configuration file as well:: [metrics_statsd] # Point this at a statsd host reachable from the provisioning and cleaning nets agent_statsd_host = 198.51.100.2 agent_statsd_port = 8125 Types of Metrics Emitted ======================== The Bare Metal service emits timing metrics for every API method, as well as for most driver methods. These metrics measure how long a given method takes to execute. A deployer with metrics enabled should expect between 100 and 500 distinctly named data points to be emitted from the Bare Metal service. This will increase if the metrics.preserve_host option is set to true or if multiple drivers are used in the Bare Metal deployment. This estimate may be used to determine if a deployer needs to scale their metrics backend to handle the additional load before enabling metrics. To see which metrics have changed names or have been removed between releases, refer to the `ironic release notes `_. .. note:: With the default statsd configuration, each timing metric may create additional metrics due to how statsd handles timing metrics. For more information, see statds documentation on `metric types `_. The ironic-python-agent ramdisk emits timing metrics for every API method. Deployers who use custom HardwareManagers can emit custom metrics for their hardware. For more information on custom HardwareManagers, and emitting metrics from them, please see the :ironic-python-agent-doc:`ironic-python-agent documentation <>`. Adding New Metrics ================== If you're a developer, and would like to add additional metrics to ironic, please see the :ironic-lib-doc:`ironic-lib developer documentation <>` for details on how to use the metrics library. A release note should also be created each time a metric is changed or removed to alert deployers of the change. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/multitenancy.rst0000664000175000017500000002417600000000000021515 0ustar00zuulzuul00000000000000.. _multitenancy: ======================================= Multi-tenancy in the Bare Metal service ======================================= Overview ======== It is possible to use dedicated tenant networks for provisioned nodes, which extends the current Bare Metal service capabilities of providing flat networks. This works in conjunction with the Networking service to allow provisioning of nodes in a separate provisioning network. The result of this is that multiple tenants can use nodes in an isolated fashion. However, this configuration does not support trunk ports belonging to multiple networks. Concepts ======== .. _network-interfaces: Network interfaces ------------------ Network interface is one of the driver interfaces that manages network switching for nodes. There are 3 network interfaces available in the Bare Metal service: - ``noop`` interface is used for standalone deployments, and does not perform any network switching; - ``flat`` interface places all nodes into a single provider network that is pre-configured on the Networking service and physical equipment. Nodes remain physically connected to this network during their entire life cycle. - ``neutron`` interface provides tenant-defined networking through the Networking service, separating tenant networks from each other and from the provisioning and cleaning provider networks. Nodes will move between these networks during their life cycle. This interface requires Networking service support for the switches attached to the baremetal servers so they can be programmed. Local link connection --------------------- The Bare Metal service allows ``local_link_connection`` information to be associated with Bare Metal ports. 
This information is provided to the Networking service's ML2 driver when a Virtual Interface (VIF) is attached. The ML2 driver uses the information to plug the specified port to the tenant network. .. list-table:: ``local_link_connection`` fields :header-rows: 1 * - Field - Description * - ``switch_id`` - Required. Identifies a switch and can be a MAC address or an OpenFlow-based ``datapath_id``. * - ``port_id`` - Required. Port ID on the switch/Smart NIC, for example, Gig0/1, rep0-0. * - ``switch_info`` - Optional. Used to distinguish different switch models or other vendor-specific identifier. Some ML2 plugins may require this field. * - ``hostname`` - Required in case of a Smart NIC port. Hostname of Smart NIC device. .. note:: This isn't applicable to Infiniband ports because the network topology is discoverable by the Infiniband Subnet Manager. If specified, local_link_connection information will be ignored. If port is Smart NIC port then: 1. ``port_id`` is the representor port name on the Smart NIC. 2. ``switch_id`` is not mandatory. .. _multitenancy-physnets: Physical networks ----------------- A Bare Metal port may be associated with a physical network using its ``physical_network`` field. The Bare Metal service uses this information when mapping between virtual ports in the Networking service and physical ports and port groups in the Bare Metal service. A port's physical network field is optional, and if not set then any virtual port may be mapped to that port, provided that no free Bare Metal port with a suitable physical network assignment exists. The physical network of a port group is defined by the physical network of its constituent ports. The Bare Metal service ensures that all ports in a port group have the same value in their physical network field. 
When attaching a virtual interface (VIF) to a node, the following ordered criteria are used to select a suitable unattached port or port group: * Require ports or port groups to not have a physical network or to have a physical network that matches one of the VIF's allowed physical networks. * Prefer ports and port groups that have a physical network to ports and port groups that do not have a physical network. * Prefer port groups to ports. Prefer ports with PXE enabled. Configuring the Bare Metal service ================================== See the :ref:`configure-tenant-networks` section in the installation guide for the Bare Metal service. Configuring nodes ================= #. Ensure that your python-ironicclient version and requested API version are sufficient for your requirements. * Multi-tenancy support was added in API version 1.20, and is supported by python-ironicclient version 1.5.0 or higher. * Physical network support for ironic ports was added in API version 1.34, and is supported by python-ironicclient version 1.15.0 or higher. * Smart NIC support for ironic ports was added in API version 1.53, and is supported by python-ironicclient version 2.7.0 or higher. The following examples assume you are using python-ironicclient version 2.7.0 or higher. Export the following variable:: export OS_BAREMETAL_API_VERSION= #. The node's ``network_interface`` field should be set to a valid network interface. Valid interfaces are listed in the ``[DEFAULT]/enabled_network_interfaces`` configuration option in the ironic-conductor's configuration file. Set it to ``neutron`` to use the Networking service's ML2 driver:: baremetal node create --network-interface neutron --driver ipmi .. note:: If the ``[DEFAULT]/default_network_interface`` configuration option is set, the ``--network-interface`` option does not need to be specified when creating the node. #. 
To update an existing node's network interface to ``neutron``, use the following commands:: baremetal node set $NODE_UUID_OR_NAME \ --network-interface neutron #. Create a port as follows:: baremetal port create $HW_MAC_ADDRESS --node $NODE_UUID \ --local-link-connection switch_id=$SWITCH_MAC_ADDRESS \ --local-link-connection switch_info=$SWITCH_HOSTNAME \ --local-link-connection port_id=$SWITCH_PORT \ --pxe-enabled true \ --physical-network physnet1 An Infiniband port requires client ID, while local link connection information will be populated by Infiniband Subnet Manager. The client ID consists of <12-byte vendor prefix>:<8 byte port GUID>. There is no standard process for deriving the port's MAC address ($HW_MAC_ADDRESS); it is vendor specific. For example, Mellanox ConnectX Family Devices prefix is ff:00:00:00:00:00:02:00:00:02:c9:00. If port GUID was f4:52:14:03:00:38:39:81 the client ID would be ff:00:00:00:00:00:02:00:00:02:c9:00:f4:52:14:03:00:38:39:81. Mellanox ConnectX Family Device's HW_MAC_ADDRESS consists of 6 bytes; the port GUID's lower 3 and higher 3 bytes. In this example it would be f4:52:14:38:39:81. Putting it all together, create an Infiniband port as follows:: baremetal port create $HW_MAC_ADDRESS --node $NODE_UUID \ --pxe-enabled true \ --extra client-id=$CLIENT_ID \ --physical-network physnet1 #. Create a Smart NIC port as follows:: baremetal port create $HW_MAC_ADDRESS --node $NODE_UUID \ --local-link-connection hostname=$HOSTNAME \ --local-link-connection port_id=$REP_NAME \ --pxe-enabled true \ --physical-network physnet1 \ --is-smartnic A Smart NIC port requires ``hostname`` which is the hostname of the Smart NIC, and ``port_id`` which is the representor port name within the Smart NIC. #. Check the port configuration:: baremetal port show $PORT_UUID After these steps, the provisioning of the created node will happen in the provisioning network, and then the node will be moved to the tenant network that was requested. 
Configuring the Networking service ================================== In addition to configuring the Bare Metal service some additional configuration of the Networking service is required to ensure ports for bare metal servers are correctly programmed. This configuration will be determined by the Bare Metal service network interfaces you have enabled and which top of rack switches you have in your environment. ``flat`` network interface -------------------------- In order for Networking service ports to correctly operate with the Bare Metal service ``flat`` network interface the ``baremetal`` ML2 mechanism driver from `networking-baremetal `_ needs to be loaded into the Networking service configuration. This driver understands that the switch should be already configured by the admin, and will mark the networking service ports as successfully bound as nothing else needs to be done. #. Install the ``networking-baremetal`` library .. code-block:: console $ pip install networking-baremetal #. Enable the ``baremetal`` driver in the Networking service ML2 configuration file .. code-block:: ini [ml2] mechanism_drivers = ovs,baremetal ``neutron`` network interface ----------------------------- The ``neutron`` network interface allows the Networking service to program the physical top of rack switches for the bare metal servers. To do this an ML2 mechanism driver which supports the ``baremetal`` VNIC type for the make and model of top of rack switch in the environment must be installed and enabled. This is a list of known top of rack ML2 mechanism drivers which work with the ``neutron`` network interface: Cisco Nexus 9000 series To install and configure this ML2 mechanism driver see `Nexus Mechanism Driver Installation Guide `_. FUJITSU CFX2000 ``networking-fujitsu`` ML2 driver supports this switch. The documentation is available `here `_. 
Networking Generic Switch This is an ML2 mechanism driver built for testing against virtual bare metal environments and some switches that are not covered by hardware specific ML2 mechanism drivers. More information is available in the project's `README `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/node-deployment.rst0000664000175000017500000003210200000000000022070 0ustar00zuulzuul00000000000000=============== Node Deployment =============== .. contents:: :depth: 2 Overview ======== Node deployment is performed by the Bare Metal service to prepare a node for use by a workload. The exact work flow used depends on a number of factors, including the hardware type and interfaces assigned to a node. Deploy Steps ============ The Bare Metal service implements deployment by collecting a list of deploy steps to perform on a node from the Power, Deploy, Management, BIOS, and RAID interfaces of the driver assigned to the node. These steps are then ordered by priority and executed on the node when the node is moved to the ``deploying`` state. Nodes move to the ``deploying`` state when attempting to move to the ``active`` state (when the hardware is prepared for use by a workload). For a full understanding of all state transitions into deployment, please see :doc:`/user/states`. The Bare Metal service added support for deploy steps in the Rocky release. Order of execution ------------------ Deploy steps are ordered from higher to lower priority, where a larger integer is a higher priority. If the same priority is used by deploy steps on different interfaces, the following resolution order is used: Power, Management, Deploy, BIOS, and RAID interfaces. .. _node-deployment-core-steps: Agent steps ----------- All deploy interfaces based on ironic-python-agent (i.e. 
``direct``, ``ansible`` and any derivatives) expose the following deploy steps: ``deploy.deploy`` (priority 100) In this step the node is booted using a provisioning image. ``deploy.write_image`` (priority 80) An out-of-band (``ansible``) or in-band (``direct``) step that downloads and writes the image to the node. ``deploy.tear_down_agent`` (priority 40) In this step the provisioning image is shut down. ``deploy.switch_to_tenant_network`` (priority 30) In this step networking for the node is switched from provisioning to tenant networks. ``deploy.boot_instance`` (priority 20) In this step the node is booted into the user image. Additionally, the ``direct`` deploy interface has: ``deploy.prepare_instance_boot`` (priority 60) In this step the boot device is configured and the bootloader is installed. .. note:: For the ``ansible`` deploy interface these steps are done in ``deploy.write_image``. Accordingly, the following priority ranges can be used for custom deploy steps: > 100 Out-of-band steps to run before deployment. 81 to 99 In-band deploy steps to run before the image is written. 61 to 79 In-band deploy steps to run after the image is written but before the bootloader is installed. 41 to 59 In-band steps to run after the image is written and the bootloader is installed. 21 to 39 Out-of-band steps to run after the provisioning image is shut down. 1 to 19 Any steps that are run when the user instance is already running. In-band steps ------------- More deploy steps can be provided by the ramdisk, see :ironic-python-agent-doc:`IPA hardware managers documentation ` for a listing. .. _standalone-deploy-steps: Requesting steps ---------------- Starting with Bare Metal API version 1.69 a user can optionally supply deploy steps for node deployment when invoking deployment or rebuilding. Overlapping steps will take precedence over `Agent steps`_ and `Deploy Templates`_ steps. Using "baremetal" client deploy steps can be passed via ``--deploy-steps`` argument. 
The argument ``--deploy-steps`` is one of: - a JSON string - path to a JSON file whose contents are passed to the API - '-', to read from stdin. This allows piping in the deploy steps. An example by passing a JSON string: .. code-block:: console baremetal node deploy \ --deploy-steps '[{"interface": "bios", "step": "apply_configuration", "args": {"settings": [{"name": "LogicalProc", "value": "Enabled"}]}, "priority": 150}]' Format of JSON for deploy steps argument is described in `Deploy step format`_ section. .. note:: Starting with `ironicclient` 4.6.0 you can provide a YAML file for ``--deploy-steps``. Excluding the default steps --------------------------- Starting with the Xena release, you can use the new :ref:`custom-agent-deploy` interface to exclude the default step ``write_image`` and skip bootloader installation in the ``prepare_instance_boot`` step. Writing a Deploy Step --------------------- Please refer to :doc:`/contributor/deploy-steps`. FAQ --- What deploy step is running? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To check what deploy step the node is performing or attempted to perform and failed, run the following command; it will return the value in the node's ``driver_internal_info`` field:: baremetal node show -f value -c driver_internal_info The ``deploy_steps`` field will contain a list of all remaining steps with their priorities, and the first one listed is the step currently in progress or that the node failed before going into ``deploy failed`` state. Troubleshooting --------------- If deployment fails on a node, the node will be put into the ``deploy failed`` state until the node is deprovisioned. A deprovisioned node is moved to the ``available`` state after the cleaning process has been performed successfully. 
Strategies for determining why a deploy step failed include checking the ironic conductor logs, checking logs from the ironic-python-agent that have been stored on the ironic conductor, or performing general hardware troubleshooting on the node. Deploy Templates ================ Starting with the Stein release, with Bare Metal API version 1.55, deploy templates offer a way to define a set of one or more deploy steps to be executed with particular sets of arguments and priorities. Each deploy template has a name, which must be a valid trait. Traits can be either standard or custom. Standard traits are listed in the :os-traits-doc:`os_traits library <>`. Custom traits must meet the following requirements: * prefixed with ``CUSTOM_`` * contain only upper case characters A to Z, digits 0 to 9, or underscores * no longer than 255 characters in length Deploy step format ------------------ An invocation of a deploy step is defined in a deploy template as follows:: { "interface": "", "step": "", "args": { "": "", "": "" }, "priority": } A deploy template contains a list of one or more such steps. Each combination of `interface` and `step` may only be specified once in a deploy template. Matching deploy templates ------------------------- During deployment, if any of the traits in a node's ``instance_info.traits`` field match the name of a deploy template, then the steps from that deploy template will be added to the list of steps to be executed by the node. When using the Compute service, any traits in the instance's flavor properties or image properties are stored in ``instance_info.traits`` during deployment. See :ref:`scheduling-traits` for further information on how traits are used for scheduling when the Bare Metal service is used with the Compute service. Note that there is no ongoing relationship between a node and any templates that are matched during deployment. The set of matching deploy templates is checked at deployment time. 
Any subsequent updates to or deletion of those templates will not be reflected in the node's configuration unless it is redeployed or rebuilt. Similarly, if a node is rebuilt and the set of matching deploy templates has changed since the initial deployment, then the resulting configuration of the node may be different from the initial deployment. Overriding default deploy steps ------------------------------- A deploy step is enabled by default if it has a non-zero default priority. A default deploy step may be overridden in a deploy template. If the step's priority is a positive integer it will be executed with the specified priority and arguments. If the step's priority is zero, the step will not be executed. If the :ref:`deploy.deploy step ` is included in a deploy template, it can only be assigned a priority of zero to disable it. Creating a deploy template via API ---------------------------------- A deploy template can be created using the Bare Metal API:: POST /v1/deploy_templates Here is an example of the body of a request to create a deploy template with a single step: .. code-block:: json { "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "priority": 150 } ] } Further information on this API is available `here `__. Creating a deploy template via "baremetal" client ----------------------------------------------------------- A deploy template can be created via the ``baremetal deploy template create`` command, starting with ``python-ironicclient`` 2.7.0. The argument ``--steps`` must be specified. Its value is one of: - a JSON string - path to a JSON file whose contents are passed to the API - '-', to read from stdin. This allows piping in the deploy steps. Example of creating a deploy template with a single step using a JSON string: .. 
code-block:: console baremetal deploy template create \ CUSTOM_HYPERTHREADING_ON \ --steps '[{"interface": "bios", "step": "apply_configuration", "args": {"settings": [{"name": "LogicalProc", "value": "Enabled"}]}, "priority": 150}]' Or with a file: .. code-block:: console baremetal deploy template create \ CUSTOM_HYPERTHREADING_ON \ --steps my-deploy-steps.txt Or with stdin: .. code-block:: console cat my-deploy-steps.txt | baremetal deploy template create \ CUSTOM_HYPERTHREADING_ON \ --steps - Example of use with the Compute service --------------------------------------- .. note:: The deploy steps used in this example are for example purposes only. In the following example, we first add the trait ``CUSTOM_HYPERTHREADING_ON`` to the node represented by ````: .. code-block:: console baremetal node add trait CUSTOM_HYPERTHREADING_ON We also update the flavor ``bm-hyperthreading-on`` in the Compute service with the following property: .. code-block:: console openstack flavor set --property trait:CUSTOM_HYPERTHREADING_ON=required bm-hyperthreading-on Creating a Compute instance with this flavor will ensure that the instance is scheduled only to Bare Metal nodes with the ``CUSTOM_HYPERTHREADING_ON`` trait. We could then create a Bare Metal deploy template with the name ``CUSTOM_HYPERTHREADING_ON`` and a deploy step that enables Hyperthreading: .. code-block:: json { "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "priority": 150 } ] } When an instance is created using the ``bm-hyperthreading-on`` flavor, then the deploy steps of deploy template ``CUSTOM_HYPERTHREADING_ON`` will be executed during the deployment of the scheduled node, causing Hyperthreading to be enabled in the node's BIOS configuration. To make this example more dynamic, let's add a second trait ``CUSTOM_HYPERTHREADING_OFF`` to the node: .. 
code-block:: console baremetal node add trait CUSTOM_HYPERTHREADING_OFF We could also update a second flavor, ``bm-hyperthreading-off``, with the following property: .. code-block:: console openstack flavor set --property trait:CUSTOM_HYPERTHREADING_OFF=required bm-hyperthreading-off Finally, we create a deploy template with the name ``CUSTOM_HYPERTHREADING_OFF`` and a deploy step that disables Hyperthreading: .. code-block:: json { "name": "CUSTOM_HYPERTHREADING_OFF", "steps": [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [ { "name": "LogicalProc", "value": "Disabled" } ] }, "priority": 150 } ] } Creating a Compute instance with the ``bm-hyperthreading-off`` flavor will cause the scheduled node to have Hyperthreading disabled in the BIOS during deployment. We now have a way to create Compute instances with different configurations, by choosing between different Compute flavors, supported by a single Bare Metal node that is dynamically configured during deployment. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/node-multitenancy.rst0000664000175000017500000001300000000000000022420 0ustar00zuulzuul00000000000000================== Node Multi-Tenancy ================== This guide explains the steps needed to enable node multi-tenancy. This feature enables non-admins to perform API actions on nodes, limited by policy configuration. 
The Bare Metal service supports two kinds of non-admin users: * Owner: owns specific nodes and performs administrative actions on them * Lessee: receives temporary and limited access to a node Setting the Owner and Lessee ============================ Non-administrative access to a node is controlled through a node's ``owner`` or ``lessee`` attribute:: baremetal node set --owner 080925ee2f464a2c9dce91ee6ea354e2 node-7 baremetal node set --lessee 2a210e5ff114c8f2b6e994218f51a904 node-10 Configuring the Bare Metal Service Policy ========================================= By default, the Bare Metal service policy is configured so that a node owner or lessee has no access to any node APIs. However, the policy :doc:`policy file ` contains rules that can be used to enable node API access:: # Owner of node #"is_node_owner": "project_id:%(node.owner)s" # Lessee of node #"is_node_lessee": "project_id:%(node.lessee)s" An administrator can then modify the policy file to expose individual node APIs as follows:: # Change Node provision status # PUT /nodes/{node_ident}/states/provision #"baremetal:node:set_provision_state": "rule:is_admin" "baremetal:node:set_provision_state": "rule:is_admin or rule:is_node_owner or rule:is_node_lessee" # Update Node records # PATCH /nodes/{node_ident} #"baremetal:node:update": "rule:is_admin or rule:is_node_owner" In addition, it is safe to expose the ``baremetal:node:list`` rule, as the node list function now filters non-admins by owner and lessee:: # Retrieve multiple Node records, filtered by owner # GET /nodes # GET /nodes/detail #"baremetal:node:list": "rule:baremetal:node:get" "baremetal:node:list": "" Note that ``baremetal:node:list_all`` permits users to see all nodes regardless of owner/lessee, so it should remain restricted to admins. 
Ports ----- Port APIs can be similarly exposed to node owners and lessees:: # Retrieve Port records # GET /ports/{port_id} # GET /nodes/{node_ident}/ports # GET /nodes/{node_ident}/ports/detail # GET /portgroups/{portgroup_ident}/ports # GET /portgroups/{portgroup_ident}/ports/detail #"baremetal:port:get": "rule:is_admin or rule:is_observer" "baremetal:port:get": "rule:is_admin or rule:is_observer or rule:is_node_owner or rule:is_node_lessee" # Retrieve multiple Port records, filtered by owner # GET /ports # GET /ports/detail #"baremetal:port:list": "rule:baremetal:port:get" "baremetal:port:list": "" Allocations ----------- Allocations respect node tenancy as well. A restricted allocation creates an allocation tied to a project, and that can only match nodes where that project is the owner or lessee. Here is a sample set of allocation policy rules that allow non-admins to use allocations effectively:: # Retrieve Allocation records # GET /allocations/{allocation_id} # GET /nodes/{node_ident}/allocation #"baremetal:allocation:get": "rule:is_admin or rule:is_observer" "baremetal:allocation:get": "rule:is_admin or rule:is_observer or rule:is_allocation_owner" # Retrieve multiple Allocation records, filtered by owner # GET /allocations #"baremetal:allocation:list": "rule:baremetal:allocation:get" "baremetal:allocation:list": "" # Retrieve multiple Allocation records # GET /allocations #"baremetal:allocation:list_all": "rule:baremetal:allocation:get" # Create Allocation records # POST /allocations #"baremetal:allocation:create": "rule:is_admin" # Create Allocation records that are restricted to an owner # POST /allocations #"baremetal:allocation:create_restricted": "rule:baremetal:allocation:create" "baremetal:allocation:create_restricted": "" # Delete Allocation records # DELETE /allocations/{allocation_id} # DELETE /nodes/{node_ident}/allocation #"baremetal:allocation:delete": "rule:is_admin" "baremetal:allocation:delete": "rule:is_admin or rule:is_allocation_owner" # 
Change name and extra fields of an allocation # PATCH /allocations/{allocation_id} #"baremetal:allocation:update": "rule:is_admin" "baremetal:allocation:update": "rule:is_admin or rule:is_allocation_owner" Deployment and Metalsmith ------------------------- Provisioning a node requires a specific set of APIs to be made available. The following policy specifications are enough to allow a node owner to use :metalsmith-doc:`Metalsmith ` to deploy upon a node:: "baremetal:node:get": "rule:is_admin or rule:is_observer or rule:is_node_owner" "baremetal:node:list": "" "baremetal:node:update_extra": "rule:is_admin or rule:is_node_owner" "baremetal:node:update_instance_info": "rule:is_admin or rule:is_node_owner" "baremetal:node:validate": "rule:is_admin or rule:is_node_owner" "baremetal:node:set_provision_state": "rule:is_admin or rule:is_node_owner" "baremetal:node:vif:list": "rule:is_admin or rule:is_node_owner" "baremetal:node:vif:attach": "rule:is_admin or rule:is_node_owner" "baremetal:node:vif:detach": "rule:is_admin or rule:is_node_owner" "baremetal:allocation:get": "rule:is_admin or rule:is_observer or rule:is_allocation_owner" "baremetal:allocation:list": "" "baremetal:allocation:create_restricted": "" "baremetal:allocation:delete": "rule:is_admin or rule:is_allocation_owner" "baremetal:allocation:update": "rule:is_admin or rule:is_allocation_owner" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/notifications.rst0000664000175000017500000007335600000000000021656 0ustar00zuulzuul00000000000000.. _deploy-notifications: ============= Notifications ============= Ironic, when configured to do so, will emit notifications over a message bus that indicate different events that occur within the service. These can be consumed by any external service. Examples may include a billing or usage system, a monitoring data store, or other OpenStack services. 
This page describes how to enable notifications and the different kinds of notifications that ironic may emit. The external consumer will see notifications emitted by ironic as JSON objects structured in the following manner:: { "priority": , "event_type": , "timestamp": , "publisher_id": , "message_id": , "payload": } Configuration ============= To enable notifications with ironic, there are two configuration options in ironic.conf that must be adjusted. The first option is the ``notification_level`` option in the ``[DEFAULT]`` section of the configuration file. This can be set to "debug", "info", "warning", "error", or "critical", and determines the minimum priority level for which notifications are emitted. For example, if the option is set to "warning", all notifications with priority level "warning", "error", or "critical" are emitted, but not notifications with priority level "debug" or "info". For information about the semantics of each log level, see the OpenStack logging standards [1]_. If this option is unset, no notifications will be emitted. The priority level of each available notification is documented below. The second option is the ``transport_url`` option in the ``[oslo_messaging_notifications]`` section of the configuration. This determines the message bus used when sending notifications. If this is unset, the default transport used for RPC is used. All notifications are emitted on the "ironic_versioned_notifications" topic in the message bus. Generally, each type of message that traverses the message bus is associated with a topic describing what the message is about. For more information, see the documentation of your chosen message bus, such as the RabbitMQ documentation [2]_. Note that notifications may be lossy, and there's no guarantee that a notification will make it across the message bus to a consumer. Versioning ========== Each notification has an associated version in the "ironic_object.version" field of the payload. 
Consumers are guaranteed that microversion bumps will add new fields, while macroversion bumps are backwards-incompatible and may have fields removed. Versioned notifications are emitted by default to the `ironic_versioned_notifications` topic. This can be changed and it is configurable in the ironic.conf with the `versioned_notifications_topics` config option. Available notifications ======================= .. TODO(mariojv) Add some form of tabular formatting below The notifications that ironic emits are described here. They are listed (alphabetically) by service first, then by event_type. All examples below show payloads before serialization to JSON. ------------------------ ironic-api notifications ------------------------ Resources CRUD notifications ---------------------------- These notifications are emitted from API service when ironic resources are modified as part of create, update, or delete (CRUD) [3]_ procedures. All CRUD notifications are emitted at INFO level, except for "error" status that is emitted at ERROR level. 
List of CRUD notifications for chassis: * ``baremetal.chassis.create.start`` * ``baremetal.chassis.create.end`` * ``baremetal.chassis.create.error`` * ``baremetal.chassis.update.start`` * ``baremetal.chassis.update.end`` * ``baremetal.chassis.update.error`` * ``baremetal.chassis.delete.start`` * ``baremetal.chassis.delete.end`` * ``baremetal.chassis.delete.error`` Example of chassis CRUD notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"ChassisCRUDPayload", "ironic_object.version":"1.0", "ironic_object.data":{ "created_at": "2016-04-10T10:13:03+00:00", "description": "bare 28", "extra": {}, "updated_at": "2016-04-27T21:11:03+00:00", "uuid": "1910f669-ce8b-43c2-b1d8-cf3d65be815e" } }, "event_type":"baremetal.chassis.update.end", "publisher_id":"ironic-api.hostname02" } List of CRUD notifications for deploy template: * ``baremetal.deploy_template.create.start`` * ``baremetal.deploy_template.create.end`` * ``baremetal.deploy_template.create.error`` * ``baremetal.deploy_template.update.start`` * ``baremetal.deploy_template.update.end`` * ``baremetal.deploy_template.update.error`` * ``baremetal.deploy_template.delete.start`` * ``baremetal.deploy_template.delete.end`` * ``baremetal.deploy_template.delete.error`` Example of deploy template CRUD notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"DeployTemplateCRUDPayload", "ironic_object.version":"1.0", "ironic_object.data":{ "created_at": "2019-02-10T10:13:03+00:00", "extra": {}, "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "priority": 150 } ], "updated_at": "2019-02-27T21:11:03+00:00", "uuid": "1910f669-ce8b-43c2-b1d8-cf3d65be815e" } }, "event_type":"baremetal.deploy_template.update.end", "publisher_id":"ironic-api.hostname02" } List of CRUD notifications for node: * 
``baremetal.node.create.start`` * ``baremetal.node.create.end`` * ``baremetal.node.create.error`` * ``baremetal.node.update.start`` * ``baremetal.node.update.end`` * ``baremetal.node.update.error`` * ``baremetal.node.delete.start`` * ``baremetal.node.delete.end`` * ``baremetal.node.delete.error`` Example of node CRUD notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"NodeCRUDPayload", "ironic_object.version":"1.13", "ironic_object.data":{ "chassis_uuid": "db0eef9d-45b2-4dc0-94a8-fc283c01171f", "clean_step": None, "conductor_group": "", "console_enabled": False, "created_at": "2016-01-26T20:41:03+00:00", "deploy_step": None, "description": "my sample node", "driver": "ipmi", "driver_info": { "ipmi_address": "192.168.0.111", "ipmi_username": "root"}, "extra": {}, "inspection_finished_at": None, "inspection_started_at": None, "instance_info": {}, "instance_uuid": None, "last_error": None, "lessee": None, "maintenance": False, "maintenance_reason": None, "fault": None, "bios_interface": "no-bios", "boot_interface": "pxe", "console_interface": "no-console", "deploy_interface": "direct", "inspect_interface": "no-inspect", "management_interface": "ipmitool", "network_interface": "flat", "power_interface": "ipmitool", "raid_interface": "no-raid", "rescue_interface": "no-rescue", "storage_interface": "noop", "vendor_interface": "no-vendor", "name": None, "owner": None, "power_state": "power off", "properties": { "memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10, "cpus": 8}, "protected": False, "protected_reason": None, "provision_state": "deploying", "provision_updated_at": "2016-01-27T20:41:03+00:00", "resource_class": None, "retired": None, "retired_reason": None, "target_power_state": None, "target_provision_state": "active", "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX"], "updated_at": "2016-01-27T20:41:03+00:00", "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123" } }, "event_type":"baremetal.node.update.end", 
"publisher_id":"ironic-api.hostname02" } List of CRUD notifications for port: * ``baremetal.port.create.start`` * ``baremetal.port.create.end`` * ``baremetal.port.create.error`` * ``baremetal.port.update.start`` * ``baremetal.port.update.end`` * ``baremetal.port.update.error`` * ``baremetal.port.delete.start`` * ``baremetal.port.delete.end`` * ``baremetal.port.delete.error`` Example of port CRUD notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"PortCRUDPayload", "ironic_object.version":"1.3", "ironic_object.data":{ "address": "77:66:23:34:11:b7", "created_at": "2016-02-11T15:23:03+00:00", "node_uuid": "5b236cab-ad4e-4220-b57c-e827e858745a", "extra": {}, "is_smartnic": True, "local_link_connection": {}, "physical_network": "physnet1", "portgroup_uuid": "bd2f385e-c51c-4752-82d1-7a9ec2c25f24", "pxe_enabled": True, "updated_at": "2016-03-27T20:41:03+00:00", "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123" } }, "event_type":"baremetal.port.update.end", "publisher_id":"ironic-api.hostname02" } List of CRUD notifications for port group: * ``baremetal.portgroup.create.start`` * ``baremetal.portgroup.create.end`` * ``baremetal.portgroup.create.error`` * ``baremetal.portgroup.update.start`` * ``baremetal.portgroup.update.end`` * ``baremetal.portgroup.update.error`` * ``baremetal.portgroup.delete.start`` * ``baremetal.portgroup.delete.end`` * ``baremetal.portgroup.delete.error`` Example of portgroup CRUD notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"PortgroupCRUDPayload", "ironic_object.version":"1.0", "ironic_object.data":{ "address": "11:44:32:87:61:e5", "created_at": "2017-01-11T11:33:03+00:00", "node_uuid": "5b236cab-ad4e-4220-b57c-e827e858745a", "extra": {}, "mode": "7", "name": "portgroup-node-18", "properties": {}, "standalone_ports_supported": True, "updated_at": "2017-01-31T11:41:07+00:00", "uuid": "db033a40-bfed-4c84-815a-3db26bb268bb", } }, 
"event_type":"baremetal.portgroup.update.end", "publisher_id":"ironic-api.hostname02" } List of CRUD notifications for volume connector: * ``baremetal.volumeconnector.create.start`` * ``baremetal.volumeconnector.create.end`` * ``baremetal.volumeconnector.create.error`` * ``baremetal.volumeconnector.update.start`` * ``baremetal.volumeconnector.update.end`` * ``baremetal.volumeconnector.update.error`` * ``baremetal.volumeconnector.delete.start`` * ``baremetal.volumeconnector.delete.end`` * ``baremetal.volumeconnector.delete.error`` Example of volume connector CRUD notification:: { "priority": "info", "payload": { "ironic_object.namespace": "ironic", "ironic_object.name": "VolumeConnectorCRUDPayload", "ironic_object.version": "1.0", "ironic_object.data": { "connector_id": "iqn.2017-05.org.openstack:01:d9a51732c3f", "created_at": "2017-05-11T05:57:36+00:00", "extra": {}, "node_uuid": "4dbb4e69-99a8-4e13-b6e8-dd2ad4a20caf", "type": "iqn", "updated_at": "2017-05-11T08:28:58+00:00", "uuid": "19b9f3ab-4754-4725-a7a4-c43ea7e57360" } }, "event_type": "baremetal.volumeconnector.update.end", "publisher_id":"ironic-api.hostname02" } List of CRUD notifications for volume target: * ``baremetal.volumetarget.create.start`` * ``baremetal.volumetarget.create.end`` * ``baremetal.volumetarget.create.error`` * ``baremetal.volumetarget.update.start`` * ``baremetal.volumetarget.update.end`` * ``baremetal.volumetarget.update.error`` * ``baremetal.volumetarget.delete.start`` * ``baremetal.volumetarget.delete.end`` * ``baremetal.volumetarget.delete.error`` Example of volume target CRUD notification:: { "priority": "info", "payload": { "ironic_object.namespace": "ironic", "ironic_object.version": "1.0", "ironic_object.name": "VolumeTargetCRUDPayload" "ironic_object.data": { "boot_index": 0, "created_at": "2017-05-11T09:38:59+00:00", "extra": {}, "node_uuid": "4dbb4e69-99a8-4e13-b6e8-dd2ad4a20caf", "properties": { "access_mode": "rw", "auth_method": "CHAP" "auth_password": "***", 
"auth_username": "urxhQCzAKr4sjyE8DivY", "encrypted": false, "qos_specs": null, "target_discovered": false, "target_iqn": "iqn.2010-10.org.openstack:volume-f0d9b0e6-b242-9105-91d4-a20331693ad8", "target_lun": 1, "target_portal": "192.168.12.34:3260", "volume_id": "f0d9b0e6-b042-4105-91d4-a20331693ad8", }, "updated_at": "2017-05-11T09:52:04+00:00", "uuid": "82a45833-9c58-4ec1-943c-2091ab10e47b", "volume_id": "f0d9b0e6-b242-9105-91d4-a20331693ad8", "volume_type": "iscsi" } }, "event_type": "baremetal.volumetarget.update.end", "publisher_id":"ironic-api.hostname02" } Node maintenance notifications ------------------------------ These notifications are emitted from API service when maintenance mode is changed via API service. List of maintenance notifications for a node: * ``baremetal.node.maintenance_set.start`` * ``baremetal.node.maintenance_set.end`` * ``baremetal.node.maintenance_set.error`` "start" and "end" notifications have INFO level, "error" has ERROR. Example of node maintenance notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"NodePayload", "ironic_object.version":"1.15", "ironic_object.data":{ "clean_step": None, "conductor_group": "", "console_enabled": False, "created_at": "2016-01-26T20:41:03+00:00", "deploy_step": None, "description": "my sample node", "driver": "ipmi", "extra": {}, "inspection_finished_at": None, "inspection_started_at": None, "instance_info": {}, "instance_uuid": None, "last_error": None, "lessee": None, "maintenance": True, "maintenance_reason": "hw upgrade", "fault": None, "bios_interface": "no-bios", "boot_interface": "pxe", "console_interface": "no-console", "deploy_interface": "direct", "inspect_interface": "no-inspect", "management_interface": "ipmitool", "network_interface": "flat", "power_interface": "ipmitool", "raid_interface": "no-raid", "rescue_interface": "no-rescue", "storage_interface": "noop", "vendor_interface": "no-vendor", "name": None, "owner": None, 
"power_state": "power off", "properties": { "memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10, "cpus": 8}, "protected": False, "protected_reason": None, "provision_state": "available", "provision_updated_at": "2016-01-27T20:41:03+00:00", "resource_class": None, "retired": None, "retired_reason": None, "target_power_state": None, "target_provision_state": None, "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX"], "updated_at": "2016-01-27T20:41:03+00:00", "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123" } }, "event_type":"baremetal.node.maintenance_set.start", "publisher_id":"ironic-api.hostname02" } ------------------------------ ironic-conductor notifications ------------------------------ Node console notifications ------------------------------ These notifications are emitted by the ironic-conductor service when conductor service starts or stops console for the node. The notification event types for a node console are: * ``baremetal.node.console_set.start`` * ``baremetal.node.console_set.end`` * ``baremetal.node.console_set.error`` * ``baremetal.node.console_restore.start`` * ``baremetal.node.console_restore.end`` * ``baremetal.node.console_restore.error`` ``console_set`` action is used when start or stop console is initiated. The ``console_restore`` action is used when the console was already enabled, but a driver must restart the console because an ironic-conductor was restarted. This may also be sent when an ironic-conductor takes over a node that was being managed by another ironic-conductor. "start" and "end" notifications have INFO level, "error" has ERROR. 
Example of node console notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"NodePayload", "ironic_object.version":"1.15", "ironic_object.data":{ "clean_step": None, "conductor_group": "", "console_enabled": True, "created_at": "2016-01-26T20:41:03+00:00", "deploy_step": None, "description": "my sample node", "driver": "ipmi", "extra": {}, "inspection_finished_at": None, "inspection_started_at": None, "instance_info": {}, "instance_uuid": None, "last_error": None, "lessee": None, "maintenance": False, "maintenance_reason": None, "fault": None, "bios_interface": "no-bios", "boot_interface": "pxe", "console_interface": "no-console", "deploy_interface": "direct", "inspect_interface": "no-inspect", "management_interface": "ipmitool", "network_interface": "flat", "power_interface": "ipmitool", "raid_interface": "no-raid", "rescue_interface": "no-rescue", "storage_interface": "noop", "vendor_interface": "no-vendor", "name": None, "owner": None, "power_state": "power off", "properties": { "memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10, "cpus": 8}, "protected": False, "protected_reason": None, "provision_state": "available", "provision_updated_at": "2016-01-27T20:41:03+00:00", "resource_class": None, "retired": None, "retired_reason": None, "target_power_state": None, "target_provision_state": None, "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX"], "updated_at": "2016-01-27T20:41:03+00:00", "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123" } }, "event_type":"baremetal.node.console_set.end", "publisher_id":"ironic-conductor.hostname01" } baremetal.node.power_set ------------------------ * ``baremetal.node.power_set.start`` is emitted by the ironic-conductor service when it begins a power state change. It has notification level "info". * ``baremetal.node.power_set.end`` is emitted when ironic-conductor successfully completes a power state change task. It has notification level "info". 
* ``baremetal.node.power_set.error`` is emitted by ironic-conductor when it fails to set a node's power state. It has notification level "error". This can occur when ironic fails to retrieve the old power state prior to setting the new one on the node, or when it fails to set the power state if a change is requested. Here is an example payload for a notification with this event type. The "to_power" payload field indicates the power state to which the ironic-conductor is attempting to change the node:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"NodeSetPowerStatePayload", "ironic_object.version":"1.15", "ironic_object.data":{ "clean_step": None, "conductor_group": "", "console_enabled": False, "created_at": "2016-01-26T20:41:03+00:00", "deploy_step": None, "description": "my sample node", "driver": "ipmi", "extra": {}, "inspection_finished_at": None, "inspection_started_at": None, "instance_uuid": "d6ea00c1-1f94-4e95-90b3-3462d7031678", "last_error": None, "lessee": None, "maintenance": False, "maintenance_reason": None, "fault": None, "bios_interface": "no-bios", "boot_interface": "pxe", "console_interface": "no-console", "deploy_interface": "direct", "inspect_interface": "no-inspect", "management_interface": "ipmitool", "network_interface": "flat", "power_interface": "ipmitool", "raid_interface": "no-raid", "rescue_interface": "no-rescue", "storage_interface": "noop", "vendor_interface": "no-vendor", "name": None, "owner": None, "power_state": "power off", "properties": { "memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10, "cpus": 8}, "protected": False, "protected_reason": None "provision_state": "available", "provision_updated_at": "2016-01-27T20:41:03+00:00", "resource_class": None, "retired": None, "retired_reason": None, "target_power_state": None, "target_provision_state": None, "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX"], "updated_at": "2016-01-27T20:41:03+00:00", "uuid": 
"1be26c0b-03f2-4d2e-ae87-c02d7f33c123", "to_power": "power on" } }, "event_type":"baremetal.node.power_set.start", "publisher_id":"ironic-conductor.hostname01" } baremetal.node.power_state_corrected ------------------------------------ * ``baremetal.node.power_state_corrected.success`` is emitted by ironic-conductor when the power state on the baremetal hardware is different from the previous known power state of the node and the database is corrected to reflect this new power state. It has notification level "info". Here is an example payload for a notification with this event_type. The "from_power" payload field indicates the previous power state on the node, prior to the correction:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"NodeCorrectedPowerStatePayload", "ironic_object.version":"1.15", "ironic_object.data":{ "clean_step": None, "conductor_group": "", "console_enabled": False, "created_at": "2016-01-26T20:41:03+00:00", "deploy_step": None, "description": "my sample node", "driver": "ipmi", "extra": {}, "inspection_finished_at": None, "inspection_started_at": None, "instance_uuid": "d6ea00c1-1f94-4e95-90b3-3462d7031678", "last_error": None, "lessee": None, "maintenance": False, "maintenance_reason": None, "fault": None, "bios_interface": "no-bios", "boot_interface": "pxe", "console_interface": "no-console", "deploy_interface": "direct", "inspect_interface": "no-inspect", "management_interface": "ipmitool", "network_interface": "flat", "power_interface": "ipmitool", "raid_interface": "no-raid", "rescue_interface": "no-rescue", "storage_interface": "noop", "vendor_interface": "no-vendor", "name": None, "owner": None, "power_state": "power off", "properties": { "memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10, "cpus": 8}, "protected": False, "protected_reason": None, "provision_state": "available", "provision_updated_at": "2016-01-27T20:41:03+00:00", "resource_class": None, "retired": None, "retired_reason": 
None, "target_power_state": None, "target_provision_state": None, "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX"], "updated_at": "2016-01-27T20:41:03+00:00", "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123", "from_power": "power on" } }, "event_type":"baremetal.node.power_state_corrected.success", "publisher_id":"ironic-conductor.cond-hostname02" } baremetal.node.provision_set ---------------------------- * ``baremetal.node.provision_set.start`` is emitted by the ironic-conductor service when it begins a provision state transition. It has notification level INFO. * ``baremetal.node.provision_set.end`` is emitted when ironic-conductor successfully completes a provision state transition. It has notification level INFO. * ``baremetal.node.provision_set.success`` is emitted when ironic-conductor successfully changes provision state instantly, without any intermediate work required (example is AVAILABLE to MANAGEABLE). It has notification level INFO. * ``baremetal.node.provision_set.error`` is emitted by ironic-conductor when it changes provision state as result of error event processing. It has notification level ERROR. Here is an example payload for a notification with this event type. 
The "previous_provision_state" and "previous_target_provision_state" payload fields indicate a node's provision states before state change, "event" is the FSM (finite state machine) event that triggered the state change:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"NodeSetProvisionStatePayload", "ironic_object.version":"1.16", "ironic_object.data":{ "clean_step": None, "conductor_group": "", "console_enabled": False, "created_at": "2016-01-26T20:41:03+00:00", "deploy_step": None, "description": "my sample node", "driver": "ipmi", "driver_internal_info": { "is_whole_disk_image": True}, "extra": {}, "inspection_finished_at": None, "inspection_started_at": None, "instance_info": {}, "instance_uuid": None, "last_error": None, "lessee": None, "maintenance": False, "maintenance_reason": None, "fault": None, "bios_interface": "no-bios", "boot_interface": "pxe", "console_interface": "no-console", "deploy_interface": "direct", "inspect_interface": "no-inspect", "management_interface": "ipmitool", "network_interface": "flat", "power_interface": "ipmitool", "raid_interface": "no-raid", "rescue_interface": "no-rescue", "storage_interface": "noop", "vendor_interface": "no-vendor", "name": None, "owner": None, "power_state": "power off", "properties": { "memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10, "cpus": 8}, "protected": False, "protected_reason": None, "provision_state": "deploying", "provision_updated_at": "2016-01-27T20:41:03+00:00", "resource_class": None, "retired": None, "retired_reason": None, "target_power_state": None, "target_provision_state": "active", "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX"], "updated_at": "2016-01-27T20:41:03+00:00", "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123", "previous_provision_state": "available", "previous_target_provision_state": None, "event": "deploy" } }, "event_type":"baremetal.node.provision_set.start", "publisher_id":"ironic-conductor.hostname01" } .. 
[1] https://wiki.openstack.org/wiki/LoggingStandards#Log_level_definitions .. [2] https://www.rabbitmq.com/documentation.html .. [3] https://en.wikipedia.org/wiki/Create,_read,_update_and_delete ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/portgroups.rst0000664000175000017500000001536100000000000021221 0ustar00zuulzuul00000000000000=================== Port groups support =================== The Bare Metal service supports static configuration of port groups (bonds) in the instances via configdrive. See `kernel documentation on bonding`_ to see why it may be useful and how it is setup in linux. The sections below describe how to make use of them in the Bare Metal service. Switch-side configuration ------------------------- If port groups are desired in the ironic deployment, they need to be configured on the switches. It needs to be done manually, and the mode and properties configured on the switch have to correspond to the mode and properties that will be configured on the ironic side, as bonding mode and properties may be named differently on your switch, or have possible values different from the ones described in `kernel documentation on bonding`_. Please refer to your switch configuration documentation for more details. Provisioning and cleaning cannot make use of port groups if they need to boot the deployment ramdisk via (i)PXE. If your switches or desired port group configuration do not support port group fallback, which will allow port group members to be used by themselves, you need to set port group's ``standalone_ports_supported`` value to be ``False`` in ironic, as it is ``True`` by default. Physical networks ----------------- If any port in a port group has a physical network, then all ports in that port group must have the same physical network. 
In order to change the physical network of the ports in a port group, all ports must first be removed from the port group, before changing their physical networks (to the same value), then adding them back to the port group. See :ref:`physical networks ` for further information on using physical networks in the Bare Metal service. Port groups configuration in the Bare Metal service --------------------------------------------------- Port group configuration is supported in ironic API microversions 1.26, the CLI commands below specify it for completeness. #. When creating a port group, the node to which it belongs must be specified, along with, optionally, its name, address, mode, properties, and if it supports fallback to standalone ports:: baremetal port group create \ --node $NODE_UUID --name test --address fa:ab:25:48:fd:ba --mode 802.3ad \ --property miimon=100 --property xmit_hash_policy="layer2+3" \ --support-standalone-ports A port group can also be updated with ``baremetal port group set`` command, see its help for more details. If an address is not specified, the port group address on the deployed instance will be the same as the address of the neutron port that is attached to the port group. If the neutron port is not attached, the port group will not be configured. .. note:: In standalone mode, port groups have to be configured manually. It can be done either statically inside the image, or by generating the configdrive and adding it to the node's ``instance_info``. For more information on how to configure bonding via configdrive, refer to `cloud-init documentation `_ and `code `_. cloud-init version 0.7.7 or later is required for bonding configuration to work. The following is a simple sample for configuring bonding via configdrive: When booting an instance, it needs to add user-data file for configuring bonding via ``--user-data`` option. For example: .. 
code-block:: json { "networks": [ { "type": "physical", "name": "eth0", "mac_address": "fa:ab:25:48:fd:ba" }, { "type": "physical", "name": "eth1", "mac_address": "fa:ab:25:48:fd:ab" }, { "type": "bond", "name": "bond0", "bond_interfaces": [ "eth0", "eth1" ], "mode": "active-backup" } ] } If the port group's address is not explicitly set in standalone mode, it will be set automatically by the process described in `kernel documentation on bonding`_. During interface attachment, port groups have higher priority than ports, so they will be used first. (It is not yet possible to specify which one is desired, a port group or a port, in an interface attachment request). Port groups that don't have any ports will be ignored. The mode and properties values are described in the `kernel documentation on bonding`_. The default port group mode is ``active-backup``, and this default can be changed by setting the ``[DEFAULT]default_portgroup_mode`` configuration option in the ironic API service configuration file. #. Associate ports with the created port group. It can be done on port creation:: baremetal port create \ --node $NODE_UUID --address fa:ab:25:48:fd:ba --port-group test Or by updating an existing port:: baremetal port set $PORT_UUID --port-group $PORT_GROUP_UUID When updating a port, the node associated with the port has to be in ``enroll``, ``manageable``, or ``inspecting`` states. A port group can have the same or different address as individual ports. #. Boot an instance (or node directly, in case of using standalone ironic) providing an image that has cloud-init version 0.7.7 or later and supports bonding. When the deployment is done, you can check that the port group is set up properly by running the following command in the instance:: cat /proc/net/bonding/bondX where ``X`` is a number autogenerated by cloud-init for each configured port group, in no particular order. It starts with 0 and increments by 1 for every configured port group. .. 
_`kernel documentation on bonding`: https://www.kernel.org/doc/Documentation/networking/bonding.txt Link aggregation/teaming on windows ----------------------------------- Portgroups are supported for Windows Server images, which can created by :ref:`building_image_windows` instruction. You can customise an instance after it is launched along with `script file `_ in ``Configuration`` of ``Instance`` and selected ``Configuration Drive`` option. Then ironic virt driver will generate network metadata and add all the additional information, such as bond mode, transmit hash policy, MII link monitoring interval, and of which links the bond consists. The information in InstanceMetadata will be used afterwards to generate the config drive. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/power-sync.rst0000664000175000017500000001446200000000000021104 0ustar00zuulzuul00000000000000===================== Power Synchronization ===================== Baremetal Power Sync ==================== Each Baremetal conductor process runs a periodic task which synchronizes the power state of the nodes between its database and the actual hardware. If the value of the :oslo.config:option:`conductor.force_power_state_during_sync` option is set to ``true`` the power state in the database will be forced on the hardware and if it is set to ``false`` the hardware state will be forced on the database. If this periodic task is enabled, it runs at an interval defined by the :oslo.config:option:`conductor.sync_power_state_interval` config option for those nodes which are not in maintenance. The requests sent to Baseboard Management Controllers (BMCs) are done with a parallelism controlled by :oslo.config:option:`conductor.sync_power_state_workers`. The motivation to send out requests to BMCs in parallel is to handle misbehaving BMCs which may delay or even block the synchronization otherwise. .. 
note:: In deployments with many nodes and IPMI as the configured BMC protocol, the default values of a 60 seconds power sync interval and 8 worker threads may lead to a high rate of required retries due to client-side UDP packet loss (visible via the corresponding warnings in the conductor logs). While Ironic automatically retries to get the power status for the affected nodes, the failure rate may be reduced by increasing the power sync cycle, e.g. to 300 seconds, and/or by reducing the number of power sync workers, e.g. to 2. Pleae keep in mind, however, that depending on the concrete setup increasing the power sync interval may have an impact on other components relying on up-to-date power states. Compute-Baremetal Power Sync ============================ Each ``nova-compute`` process in the Compute service runs a periodic task which synchronizes the power state of servers between its database and the compute driver. If enabled, it runs at an interval defined by the `sync_power_state_interval` config option on the ``nova-compute`` process. In case of the compute driver being baremetal driver, this sync will happen between the databases of the compute and baremetal services. Since the sync happens on the ``nova-compute`` process, the state in the compute database will be forced on the baremetal database in case of inconsistencies. Hence a node which was put down using the compute service API cannot be brought up through the baremetal service API since the power sync task will regard the compute service's knowledge of the power state as the source of truth. In order to get around this disadvantage of the compute-baremetal power sync, baremetal service does power state change callbacks to the compute service using external events. Power State Change Callbacks to the Compute Service --------------------------------------------------- Whenever the Baremetal service changes the power state of a node, it can issue a notification to the Compute service. 
The Compute service will consume this notification and update the power state of the instance in its database. By conveying all the power state changes to the compute service, the baremetal service becomes the source of truth thus preventing the compute service from forcing wrong power states on the physical instance during the compute-baremetal power sync. It also adds the possibility of bringing up/down a physical instance through the baremetal service API even if it was put down/up through the compute service API. This change requires the :oslo.config:group:`nova` section and the necessary authentication options like the :oslo.config:option:`nova.auth_url` to be defined in the configuration file of the baremetal service. If it is not configured the baremetal service will not be able to send notifications to the compute service and it will fall back to the behaviour of the compute service forcing power states on the baremetal service during the power sync. See :oslo.config:group:`nova` group for more details on the available config options. In case of baremetal stand alone deployments where there is no compute service running, the :oslo.config:option:`nova.send_power_notifications` config option should be set to ``False`` to disable power state change callbacks to the compute service. .. note:: The baremetal service sends notifications to the compute service only if the target power state is ``power on`` or ``power off``. Other error and ``None`` states will be ignored. In situations where the power state change is originally coming from the compute service, the notification will still be sent by the baremetal service and it will be a no-op on the compute service side with a debug log stating the node is already powering on/off. .. 
note:: Although an exclusive lock is used when sending notifications to the compute service, there can still be a race condition if the compute-baremetal power sync happens to happen a nano-second before the power state change event is received from the baremetal service in which case the power state from compute service's database will be forced on the node. .. _power-fault: Power fault and recovery ======================== When `Baremetal Power Sync`_ is enabled, and the Bare Metal service loses access to a node (usually because of invalid credentials, BMC issues or networking interruptions), the node enters ``maintenance`` mode and its ``fault`` field is set to ``power failure``. The exact reason is stored in the ``maintenance_reason`` field. As always with maintenance mode, only a subset of operations will work on such nodes, and both the Compute service and the Ironic's native allocation API will refuse to pick them. Any in-progress operations will either pause or fail. The conductor responsible for the node will try to recover the connection periodically (with the interval configured by the :oslo.config:option:`conductor.power_failure_recovery_interval` option). If the power sync is successful, the ``fault`` field is unset and the node leaves the maintenance mode. .. note:: This only applies to automatic maintenance mode with the ``fault`` field set. Maintenance mode set manually is never left automatically. Alternatively, you can disable maintenance mode yourself once the problem is resolved:: baremetal node maintenance unset ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/radosgw.rst0000664000175000017500000000447600000000000020450 0ustar00zuulzuul00000000000000.. _radosgw support: =========================== Ceph Object Gateway support =========================== Overview ======== Ceph project is a powerful distributed storage system. 
It contains object store and provides a RADOS Gateway Swift API which is compatible with OpenStack Swift API. Ironic added support for RADOS Gateway temporary URL in the Mitaka release. Configure Ironic and Glance with RADOS Gateway ============================================== #. Install Ceph storage with RADOS Gateway. See `Ceph documentation `_. #. Configure RADOS Gateway to use keystone for authentication. See `Integrating with OpenStack Keystone `_ #. Register RADOS Gateway endpoint in the keystone catalog, with the same format swift uses, as the ``object-store`` service. URL example: ``http://rados.example.com:8080/swift/v1/AUTH_$(project_id)s``. In the ceph configuration, make sure radosgw is configured with the following value:: rgw swift account in url = True #. Configure Glance API service for RADOS Swift API as backend. Edit the configuration file for the Glance API service (is typically located at ``/etc/glance/glance-api.conf``):: [glance_store] stores = file, http, swift default_store = swift default_swift_reference=ref1 swift_store_config_file=/etc/glance/glance-swift-creds.conf swift_store_container = glance swift_store_create_container_on_put = True In the file referenced in ``swift_store_config_file`` option, add the following:: [ref1] user = : key = user_domain_id = default project_domain_id = default auth_version = 3 auth_address = http://keystone.example.com/identity Values for user and key options correspond to keystone credentials for RADOS Gateway service user. Note: RADOS Gateway uses FastCGI protocol for interacting with HTTP server. Read your HTTP server documentation if you want to enable HTTPS support. #. Restart Glance API service and upload all needed images. #. If you're using custom container name in RADOS, change Ironic configuration file on the conductor host(s) as follows:: [glance] swift_container = glance #. Restart Ironic conductor service(s). 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/raid.rst0000664000175000017500000004543200000000000017716 0ustar00zuulzuul00000000000000.. _raid: ================== RAID Configuration ================== Overview ======== Ironic supports RAID configuration for bare metal nodes. It allows operators to specify the desired RAID configuration via the OpenStackClient CLI or REST API. The desired RAID configuration is applied on the bare metal during manual cleaning. The examples described here use the OpenStackClient CLI; please see the `REST API reference `_ for their corresponding REST API requests. Prerequisites ============= The bare metal node needs to use a hardware type that supports RAID configuration. RAID interfaces may implement RAID configuration either in-band or out-of-band. Software RAID is supported on all hardware, although with some caveats - see `Software RAID`_ for details. In-band RAID configuration (including software RAID) is done using the Ironic Python Agent ramdisk. For in-band hardware RAID configuration, a hardware manager which supports RAID should be bundled with the ramdisk. Whether a node supports RAID configuration could be found using the CLI command ``baremetal node validate ``. In-band RAID is usually implemented by the ``agent`` RAID interface. Build agent ramdisk which supports RAID configuration ===================================================== For doing in-band hardware RAID configuration, Ironic needs an agent ramdisk bundled with a hardware manager which supports RAID configuration for your hardware. For example, the :ref:`DIB_raid_support` should be used for HPE Proliant Servers. .. note:: For in-band software RAID, the agent ramdisk does not need to be bundled with a hardware manager as the generic hardware manager in the Ironic Python Agent already provides (basic) support for software RAID. 
RAID configuration JSON format ============================== The desired RAID configuration and current RAID configuration are represented in JSON format. Target RAID configuration ------------------------- This is the desired RAID configuration on the bare metal node. Using the OpenStackClient CLI (or REST API), the operator sets ``target_raid_config`` field of the node. The target RAID configuration will be applied during manual cleaning. Target RAID configuration is a dictionary having ``logical_disks`` as the key. The value for the ``logical_disks`` is a list of JSON dictionaries. It looks like:: { "logical_disks": [ {}, {}, ... ] } If the ``target_raid_config`` is an empty dictionary, it unsets the value of ``target_raid_config`` if the value was set with previous RAID configuration done on the node. Each dictionary of logical disk contains the desired properties of logical disk supported by the hardware type. These properties are discoverable by:: baremetal driver raid property list Mandatory properties ^^^^^^^^^^^^^^^^^^^^ These properties must be specified for each logical disk and have no default values: - ``size_gb`` - Size (Integer) of the logical disk to be created in GiB. ``MAX`` may be specified if the logical disk should use all of the remaining space available. This can be used only when backing physical disks are specified (see below). - ``raid_level`` - RAID level for the logical disk. Ironic supports the following RAID levels: 0, 1, 2, 5, 6, 1+0, 5+0, 6+0. Optional properties ^^^^^^^^^^^^^^^^^^^ These properties have default values and they may be overridden in the specification of any logical disk. None of these options are supported for software RAID. - ``volume_name`` - Name of the volume. Should be unique within the Node. If not specified, volume name will be auto-generated. - ``is_root_volume`` - Set to ``true`` if this is the root volume. 
At most one logical disk can have this set to ``true``; the other logical disks must have this set to ``false``. The ``root device hint`` will be saved, if the RAID interface is capable of retrieving it. This is ``false`` by default. Backing physical disk hints ^^^^^^^^^^^^^^^^^^^^^^^^^^^ These hints are specified for each logical disk to let Ironic find the desired disks for RAID configuration. This is machine-independent information. This serves the use-case where the operator doesn't want to provide individual details for each bare metal node. None of these options are supported for software RAID. - ``share_physical_disks`` - Set to ``true`` if this logical disk can share physical disks with other logical disks. The default value is ``false``, except for software RAID which always shares disks. - ``disk_type`` - ``hdd`` or ``ssd``. If this is not specified, disk type will not be a criterion to find backing physical disks. - ``interface_type`` - ``sata`` or ``scsi`` or ``sas``. If this is not specified, interface type will not be a criterion to find backing physical disks. - ``number_of_physical_disks`` - Integer, number of disks to use for the logical disk. Defaults to minimum number of disks required for the particular RAID level, except for software RAID which always spans all disks. Backing physical disks ^^^^^^^^^^^^^^^^^^^^^^ These are the actual machine-dependent information. This is suitable for environments where the operator wants to automate the selection of physical disks with a 3rd-party tool based on a wider range of attributes (eg. S.M.A.R.T. status, physical location). The values for these properties are hardware dependent. - ``controller`` - The name of the controller as read by the RAID interface. In order to trigger the setup of a Software RAID via the Ironic Python Agent, the value of this property needs to be set to ``software``. - ``physical_disks`` - A list of physical disks to use as read by the RAID interface. 
For software RAID ``physical_disks`` is a list of device hints in the same format as used for :ref:`root-device-hints`. The number of provided hints must match the expected number of backing devices (repeat the same hint if necessary). .. note:: If properties from both "Backing physical disk hints" or "Backing physical disks" are specified, they should be consistent with each other. If they are not consistent, then the RAID configuration will fail (because the appropriate backing physical disks could not be found). .. _raid-config-examples: Examples for ``target_raid_config`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ *Example 1*. Single RAID disk of RAID level 5 with all of the space available. Make this the root volume to which Ironic deploys the image: .. code-block:: json { "logical_disks": [ { "size_gb": "MAX", "raid_level": "5", "is_root_volume": true } ] } *Example 2*. Two RAID disks. One with RAID level 5 of 100 GiB and make it root volume and use SSD. Another with RAID level 1 of 500 GiB and use HDD: .. code-block:: json { "logical_disks": [ { "size_gb": 100, "raid_level": "5", "is_root_volume": true, "disk_type": "ssd" }, { "size_gb": 500, "raid_level": "1", "disk_type": "hdd" } ] } *Example 3*. Single RAID disk. I know which disks and controller to use: .. code-block:: json { "logical_disks": [ { "size_gb": 100, "raid_level": "5", "controller": "Smart Array P822 in Slot 3", "physical_disks": ["6I:1:5", "6I:1:6", "6I:1:7"], "is_root_volume": true } ] } *Example 4*. Using backing physical disks: .. 
code-block:: json { "logical_disks": [ { "size_gb": 50, "raid_level": "1+0", "controller": "RAID.Integrated.1-1", "volume_name": "root_volume", "is_root_volume": true, "physical_disks": [ "Disk.Bay.0:Encl.Int.0-1:RAID.Integrated.1-1", "Disk.Bay.1:Encl.Int.0-1:RAID.Integrated.1-1" ] }, { "size_gb": 100, "raid_level": "5", "controller": "RAID.Integrated.1-1", "volume_name": "data_volume", "physical_disks": [ "Disk.Bay.2:Encl.Int.0-1:RAID.Integrated.1-1", "Disk.Bay.3:Encl.Int.0-1:RAID.Integrated.1-1", "Disk.Bay.4:Encl.Int.0-1:RAID.Integrated.1-1" ] } ] } *Example 5*. Software RAID with two RAID devices: .. code-block:: json { "logical_disks": [ { "size_gb": 100, "raid_level": "1", "controller": "software" }, { "size_gb": "MAX", "raid_level": "0", "controller": "software" } ] } *Example 6*. Software RAID, limiting backing block devices to exactly two devices with the size exceeding 100 GiB: .. code-block:: json { "logical_disks": [ { "size_gb": "MAX", "raid_level": "0", "controller": "software", "physical_disks": [ {"size": "> 100"}, {"size": "> 100"} ] } ] } Current RAID configuration -------------------------- After target RAID configuration is applied on the bare metal node, Ironic populates the current RAID configuration. This is populated in the ``raid_config`` field in the Ironic node. This contains the details about every logical disk after they were created on the bare metal node. It contains details like RAID controller used, the backing physical disks used, WWN of each logical disk, etc. It also contains information about each physical disk found on the bare metal node. To get the current RAID configuration:: baremetal node show Workflow ======== * Operator configures the bare metal node with a hardware type that has a ``RAIDInterface`` other than ``no-raid``. For instance, for Software RAID, this would be ``agent``. 
The CLI command can also accept the input from standard input::
If there are two, the first one has to be a RAID-1, while the RAID level for the second one can be 0, 1, 1+0, 5, or 6. As the first RAID device will be the deployment device, enforcing a RAID-1 reduces the risk of ending up with a non-booting node in case of a disk failure. * Building RAID will fail if the target disks are already partitioned. Wipe the disks using e.g. the ``erase_devices_metadata`` clean step before building RAID:: [{ "interface": "raid", "step": "delete_configuration" }, { "interface": "deploy", "step": "erase_devices_metadata" }, { "interface": "raid", "step": "create_configuration" }] * The final instance image must have the ``mdadm`` utility installed and needs to be able to detect software RAID devices at boot time (which is usually done by having the RAID drivers embedded in the image's initrd). * Regular cleaning will not remove RAID configuration (similarly to hardware RAID). To destroy RAID run the ``delete_configuration`` manual clean step. * There is no support for partition images, only whole-disk images are supported with Software RAID. See :doc:`/install/configure-glance-images`. This includes flavors requesting dynamic creation of swap filesystems. Swap should be pre-allocated inside of a disk image partition layout. * Images utilizing LVM for their root filesystem are not supported. Patches are welcome to explicitly support such functionality. * If the root filesystem UUID is not known to Ironic via metadata, then the disk image layout **MUST** have the first partition consist of the root filesystem. Ironic is agnostic if the partition table is a DOS MBR or a GPT partition. Starting in Ironic 14.0.0 (Ussuri), the root filesystem UUID can be set and passed through to Ironic through the Glance Image Service ``properties`` sub-field ``rootfs_uuid`` for the image to be deployed. Starting in Ironic 16.1.0 (Wallaby), similar functionality is available via the baremetal node ``instance_info`` field value ``image_rootfs_uuid``. 
See :doc:`/install/standalone` for more details on standalone usage including an example command. * In UEFI mode, the Ironic Python Agent creates EFI system partitions (ESPs) for the bootloader and the boot configuration (grub.cfg or grubenv) on all holder devices. The content of these partitions is populated upon deployment from the deployed user image. Depending on how the partitions are mounted, the content of the partitions may get out of sync, e.g. when new kernels are installed or the bootloader is updated, so measures to keep these partitions in sync need to be taken. Note that starting with the Victoria release, the Ironic Python Agent configures a RAID-1 mirror for the ESPs, so no additional measures to ensure consistency of the ESPs should be required any longer. * In BIOS mode, the Ironic Python Agent installs the boot loader onto all disks. While nothing is required for kernel or grub package updates, re-installing the bootloader on one disk, e.g. during a disk replacement, may require to re-install the bootloader on all disks. Otherwise, there is a risk of an incompatibility of the grub components stored on the device (i.e. stage1/boot.img in the MBR and stage1.5/core.img in the MBR gap) with the ones stored in /boot (stage2). This incompatibility can render the node unbootable if the wrong disk is selected for booting. * Linux kernel device naming is not consistent across reboots for RAID devices and may be numbered in a distribution specific pattern. Operators will need to be mindful of this if a root device hint is utilized. A particular example of this is that the first "md0" device on a Ubuntu based ramdisk may start as device "md0", whereas on a Centos or Red Hat Enterprise Linux based ramdisk may start at device "md127". After a reboot, these device names may change entirely. .. NOTE:: :ref:`Root device hints ` should not be explicitly required to utilize software RAID. 
The operator can specify the `raid_level` capability in the nova flavor for a node to be selected for scheduling::
It is supported by the ``pxe``, ``ipxe``, ``redfish-virtual-media`` and ``ilo-virtual-media`` boot interfaces.
code-block:: shell baremetal node create --driver ipmi \ --deploy-interface ramdisk \ --boot-interface ipxe or update an existing node: .. code-block:: shell baremetal node set --deploy-interface ramdisk You can also use it with :ref:`redfish virtual media ` instead of iPXE. Creating a ramdisk ------------------ A ramdisk can be created using the ``ironic-ramdisk-base`` element from ironic-python-agent-builder_, e.g. with Debian: .. code-block:: shell export ELEMENTS_PATH=/opt/stack/ironic-python-agent-builder/dib disk-image-create -o /output/ramdisk \ debian-minimal ironic-ramdisk-base openssh-server dhcp-all-interfaces You should consider using the following elements: * openssh-server_ to install the SSH server since it's not provided by default by some minimal images. * devuser_ or dynamic-login_ to provide SSH access. * dhcp-all-interfaces_ or simple-init_ to configure networking. The resulting files (``/output/ramdisk.kernel`` and ``/output/ramdisk.initramfs`` in this case) can then be used when `Booting a ramdisk`_. Booting a ramdisk ----------------- Pass the kernel and ramdisk as normally, also providing the ramdisk as an image source, for example, .. code-block:: shell baremetal node set \ --instance-info kernel=http://path/to/ramdisk.kernel \ --instance-info ramdisk=http://path/to/ramdisk.initramfs baremetal node deploy .. note:: Before the Xena release, the ``image_source`` field was also required:: --instance-info image_source=http://path/to/ramdisk.initramfs Booting an ISO -------------- The ``ramdisk`` deploy interface can also be used to boot an ISO image. For example, .. code-block:: shell baremetal node set \ --instance-info boot_iso=http://path/to/boot.iso baremetal node deploy .. warning:: This feature, when utilized with the ``ipxe`` ``boot_interface``, will only allow a kernel and ramdisk to be booted from the supplied ISO file. 
Any additional contents, such as additional ramdisk contents or installer package files will be unavailable after the boot of the Operating System. Operators wishing to leverage this functionality for actions such as OS installation should explore use of the standard ``ramdisk`` ``deploy_interface`` along with the ``instance_info/kernel_append_params`` setting to pass arbitrary settings such as a mirror URL for the initial ramdisk to load data from. This is a limitation of iPXE and the overall boot process of the operating system where memory allocated by iPXE is released. By default the Bare Metal service will cache the ISO locally and serve from its HTTP server. If you want to avoid that, set the following: .. code-block:: shell baremetal node set \ --instance-info ramdisk_image_download_source=http ISO images are also cached across deployments, similarly to how it is done for normal instance images. The URL together with the last modified response header are used to determine if an image needs updating. Limitations ----------- The intended use case is for advanced scientific and ephemeral workloads where the step of writing an image to the local storage is not required or desired. As such, this interface does come with several caveats: * Configuration drives are not supported with network boot, only with Redfish virtual media. * Disk image contents are not written to the bare metal node. * Users and Operators who intend to leverage this interface should expect to leverage a metadata service, custom ramdisk images, or the ``instance_info/ramdisk_kernel_arguments`` parameter to add options to the kernel boot command line. * When using PXE/iPXE boot, bare metal nodes must continue to have network access to PXE and iPXE network resources. 
This behaviour may not always be desired, e.g. if the virtual media is installing to the hard drive and then rebooting. In order to instead set the virtual media to be one-time boot, Ironic provides the ``force_persistent_boot_device`` flag in the node's ``driver_info``, which can be set to ``Never``::
_simple-init: https://docs.openstack.org/diskimage-builder/latest/elements/simple-init/README.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/report.txt0000664000175000017500000004447600000000000020330 0ustar00zuulzuul00000000000000/usr/local/lib/python2.7/dist-packages/pecan/__init__.py:122: RuntimeWarning: `static_root` is only used when `debug` is True, ignoring RuntimeWarning ======================================================================== ==== Guru Meditation ==== ======================================================================== |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| ======================================================================== ==== Package ==== ======================================================================== product = None vendor = None version = None ======================================================================== ==== Threads ==== ======================================================================== ------ Thread #140512155997952 ------ /usr/local/lib/python2.7/dist-packages/eventlet/hubs/hub.py:346 in run `self.wait(sleep_time)` /usr/local/lib/python2.7/dist-packages/eventlet/hubs/poll.py:82 in wait `sleep(seconds)` ======================================================================== ==== Green Threads ==== ======================================================================== ------ Green Thread ------ /usr/local/bin/ironic-api:10 in `sys.exit(main())` /opt/stack/ironic/ironic/cmd/api.py:48 in main `launcher.wait()` /usr/local/lib/python2.7/dist-packages/oslo_service/service.py:586 in wait `self._respawn_children()` /usr/local/lib/python2.7/dist-packages/oslo_service/service.py:570 in _respawn_children `eventlet.greenthread.sleep(self.wait_interval)` /usr/local/lib/python2.7/dist-packages/eventlet/greenthread.py:34 in sleep `hub.switch()` 
/usr/local/lib/python2.7/dist-packages/eventlet/hubs/hub.py:294 in switch `return self.greenlet.switch()` ------ Green Thread ------ No Traceback! ======================================================================== ==== Processes ==== ======================================================================== Process 124840 (under 48114) [ run by: ubuntu (1000), state: running ] Process 124849 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124850 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124851 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124852 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124853 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124854 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124855 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124856 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124857 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124858 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124859 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124860 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124861 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124862 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124863 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124864 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124865 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124866 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124867 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124868 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124869 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124870 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 
124871 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124872 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124873 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124874 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124875 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124876 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124877 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124878 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124879 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124880 (under 124840) [ run by: ubuntu (1000), state: sleeping ] ======================================================================== ==== Configuration ==== ======================================================================== agent: agent_api_version = v1 deploy_logs_collect = always deploy_logs_local_path = /home/ubuntu/ironic-bm-logs/deploy_logs deploy_logs_storage_backend = local deploy_logs_swift_container = ironic_deploy_logs_container deploy_logs_swift_days_to_expire = 30 manage_agent_boot = True memory_consumed_by_agent = 0 post_deploy_get_power_state_retries = 6 post_deploy_get_power_state_retry_interval = 5 stream_raw_images = True api: api_workers = None enable_ssl_api = False host_ip = 0.0.0.0 max_limit = 1000 port = 6385 public_endpoint = None ramdisk_heartbeat_timeout = 30 restrict_lookup = True audit: audit_map_file = /etc/ironic/api_audit_map.conf enabled = False ignore_req_list = namespace = openstack audit_middleware_notifications: driver = None topics = None transport_url = *** conductor: api_url = http://10.223.197.220:6385 automated_clean = True check_provision_state_interval = 60 clean_callback_timeout = 1800 configdrive_swift_container = ironic_configdrive_container configdrive_use_swift = False deploy_callback_timeout = 1800 force_power_state_during_sync = True heartbeat_interval = 10 
heartbeat_timeout = 60 inspect_timeout = 1800 node_locked_retry_attempts = 3 node_locked_retry_interval = 1 periodic_max_workers = 8 power_state_sync_max_retries = 3 send_sensor_data = False send_sensor_data_interval = 600 send_sensor_data_types = ALL sync_local_state_interval = 180 sync_power_state_interval = 60 workers_pool_size = 100 console: subprocess_checking_interval = 1 subprocess_timeout = 10 terminal = shellinaboxd terminal_cert_dir = None terminal_pid_dir = None cors: allow_credentials = True allow_headers = allow_methods = DELETE GET HEAD OPTIONS PATCH POST PUT TRACE allowed_origin = None expose_headers = max_age = 3600 cors.subdomain: allow_credentials = True allow_headers = allow_methods = DELETE GET HEAD OPTIONS PATCH POST PUT TRACE allowed_origin = None expose_headers = max_age = 3600 database: backend = sqlalchemy connection = *** connection_debug = 0 connection_trace = False db_inc_retry_interval = True db_max_retries = 20 db_max_retry_interval = 10 db_retry_interval = 1 idle_timeout = 3600 max_overflow = 50 max_pool_size = 5 max_retries = 10 min_pool_size = 1 mysql_engine = InnoDB mysql_sql_mode = TRADITIONAL pool_timeout = None retry_interval = 10 slave_connection = *** sqlite_synchronous = True use_db_reconnect = False default: api_paste_config = api-paste.ini auth_strategy = keystone bindir = /opt/stack/ironic/ironic/bin client_socket_timeout = 900 config-dir = config-file = /etc/ironic/ironic.conf control_exchange = ironic debug = True debug_tracebacks_in_api = False default_boot_interface = None default_console_interface = None default_deploy_interface = None default_inspect_interface = None default_log_levels = amqp=WARNING amqplib=WARNING eventlet.wsgi.server=INFO glanceclient=WARNING iso8601=WARNING keystoneauth.session=INFO keystonemiddleware.auth_token=INFO oslo_messaging=INFO paramiko=WARNING qpid.messaging=INFO requests=WARNING sqlalchemy=WARNING stevedore=INFO urllib3.connectionpool=WARNING openstack=WARNING 
default_management_interface = None default_network_interface = None default_portgroup_mode = active-backup default_power_interface = None default_raid_interface = None default_vendor_interface = None enabled_boot_interfaces = pxe enabled_console_interfaces = no-console enabled_deploy_interfaces = direct iscsi enabled_hardware_types = ipmi redfish enabled_inspect_interfaces = no-inspect enabled_management_interfaces = ipmitool redfish enabled_network_interfaces = flat noop enabled_power_interfaces = ipmitool redfish enabled_raid_interfaces = agent no-raid enabled_vendor_interfaces = no-vendor force_raw_images = True graceful_shutdown_timeout = 60 grub_config_template = /opt/stack/ironic/ironic/common/grub_conf.template hash_partition_exponent = 5 hash_ring_reset_interval = 180 host = ubuntu instance_format = [instance: %(uuid)s] instance_uuid_format = [instance: %(uuid)s] isolinux_bin = /usr/lib/syslinux/isolinux.bin isolinux_config_template = /opt/stack/ironic/ironic/common/isolinux_config.template log-config-append = None log-date-format = %Y-%m-%d %H:%M:%S log-dir = None log-file = None log_options = True logging_context_format_string = %(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s logging_debug_format_suffix = from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d logging_default_format_string = %(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s logging_exception_prefix = %(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s max_header_line = 16384 my_ip = 10.223.197.220 notification_level = None parallel_image_downloads = False pecan_debug = False publish_errors = False pybasedir = /opt/stack/ironic/ironic rate_limit_burst = 0 rate_limit_except_level = CRITICAL rate_limit_interval = 0 rootwrap_config = 
/etc/ironic/rootwrap.conf rpc_backend = rabbit rpc_response_timeout = 60 state_path = /var/lib/ironic syslog-log-facility = LOG_USER tcp_keepidle = 600 tempdir = /tmp transport_url = *** use-journal = False use-syslog = False use_stderr = False watch-log-file = False wsgi_default_pool_size = 100 wsgi_keep_alive = True wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f deploy: continue_if_disk_secure_erase_fails = False default_boot_option = local erase_devices_metadata_priority = None erase_devices_priority = 0 http_root = /opt/stack/data/ironic/httpboot http_url = http://10.223.197.220:3928 power_off_after_deploy_failure = True shred_final_overwrite_with_zeros = True shred_random_overwrite_iterations = 1 dhcp: dhcp_provider = neutron disk_partitioner: check_device_interval = 1 check_device_max_retries = 20 disk_utils: bios_boot_partition_size = 1 dd_block_size = 1M efi_system_partition_size = 200 iscsi_verify_attempts = 3 drac: query_raid_config_job_status_interval = 120 glance: allowed_direct_url_schemes = auth_section = None auth_strategy = keystone auth_type = password cafile = /opt/stack/data/ca-bundle.pem certfile = None glance_api_insecure = False glance_api_servers = None glance_cafile = None glance_num_retries = 0 insecure = False keyfile = None swift_account = AUTH_cb13c4492d124b01b4659a97d627955c swift_api_version = v1 swift_container = glance swift_endpoint_url = http://10.223.197.220:8080 swift_store_multiple_containers_seed = 0 swift_temp_url_cache_enabled = False swift_temp_url_duration = 3600 swift_temp_url_expected_download_start_delay = 0 swift_temp_url_key = *** timeout = None ilo: ca_file = None clean_priority_clear_secure_boot_keys = 0 clean_priority_erase_devices = None clean_priority_reset_bios_to_default = 10 clean_priority_reset_ilo = 0 clean_priority_reset_ilo_credential = 30 clean_priority_reset_secure_boot_keys_to_default = 20 client_port = 443 client_timeout = 60 
default_boot_mode = auto power_retry = 6 power_wait = 2 swift_ilo_container = ironic_ilo_container swift_object_expiry_timeout = 900 use_web_server_for_images = False inspector: auth_section = None auth_type = password cafile = /opt/stack/data/ca-bundle.pem certfile = None enabled = False insecure = False keyfile = None service_url = None status_check_period = 60 timeout = None ipmi: min_command_interval = 5 retry_timeout = 60 irmc: auth_method = basic client_timeout = 60 port = 443 remote_image_server = None remote_image_share_name = share remote_image_share_root = /remote_image_share_root remote_image_share_type = CIFS remote_image_user_domain = remote_image_user_name = None remote_image_user_password = *** sensor_method = ipmitool snmp_community = public snmp_port = 161 snmp_security = None snmp_version = v2c ironic_lib: fatal_exception_format_errors = False root_helper = sudo ironic-rootwrap /etc/ironic/rootwrap.conf iscsi: portal_port = 3260 keystone: region_name = RegionOne keystone_authtoken: admin_password = *** admin_tenant_name = admin admin_token = *** admin_user = None auth-url = http://10.223.197.220/identity_admin auth_admin_prefix = auth_host = 127.0.0.1 auth_port = 5000 auth_protocol = https auth_section = None auth_type = password www_authenticate_uri = http://10.223.197.220/identity auth_version = None cache = None cafile = /opt/stack/data/ca-bundle.pem certfile = None check_revocations_for_cached = False default-domain-id = None default-domain-name = None delay_auth_decision = False domain-id = None domain-name = None enforce_token_bind = permissive hash_algorithms = md5 http_connect_timeout = None http_request_max_retries = 3 identity_uri = None include_service_catalog = True insecure = False keyfile = None memcache_pool_conn_get_timeout = 10 memcache_pool_dead_retry = 300 memcache_pool_maxsize = 10 memcache_pool_socket_timeout = 3 memcache_pool_unused_timeout = 60 memcache_secret_key = *** memcache_security_strategy = None 
memcache_use_advanced_pool = False memcached_servers = 10.223.197.220:11211 password = *** project-domain-id = None project-domain-name = Default project-id = None project-name = service region_name = None revocation_cache_time = 10 service_token_roles = service service_token_roles_required = False signing_dir = /var/cache/ironic/api token_cache_time = 300 trust-id = None user-domain-id = None user-domain-name = Default user-id = None username = ironic metrics: agent_backend = noop agent_global_prefix = None agent_prepend_host = False agent_prepend_host_reverse = True agent_prepend_uuid = False backend = noop global_prefix = None prepend_host = False prepend_host_reverse = True metrics_statsd: agent_statsd_host = localhost agent_statsd_port = 8125 statsd_host = localhost statsd_port = 8125 neutron: auth_section = None auth_strategy = keystone auth_type = password cafile = /opt/stack/data/ca-bundle.pem certfile = None cleaning_network = private cleaning_network_security_groups = insecure = False keyfile = None port_setup_delay = 15 provisioning_network = None provisioning_network_security_groups = retries = 3 timeout = None url = None url_timeout = 30 oslo_concurrency: disable_process_locking = False lock_path = None oslo_messaging_notifications: driver = topics = notifications transport_url = *** oslo_messaging_rabbit: amqp_auto_delete = False amqp_durable_queues = False conn_pool_min_size = 2 conn_pool_ttl = 1200 fake_rabbit = False heartbeat_rate = 2 heartbeat_timeout_threshold = 60 kombu_compression = None kombu_failover_strategy = round-robin kombu_missing_consumer_retry_timeout = 60 kombu_reconnect_delay = 1.0 rabbit_ha_queues = False rabbit_host = localhost rabbit_hosts = localhost:5672 rabbit_interval_max = 30 rabbit_login_method = AMQPLAIN rabbit_password = *** rabbit_port = 5672 rabbit_qos_prefetch_count = 0 rabbit_retry_backoff = 2 rabbit_retry_interval = 1 rabbit_transient_queues_ttl = 1800 rabbit_userid = guest rabbit_virtual_host = / rpc_conn_pool_size 
= 30 ssl = False ssl_ca_file = ssl_cert_file = ssl_key_file = ssl_version = oslo_versionedobjects: fatal_exception_format_errors = False pxe: default_ephemeral_format = ext4 image_cache_size = 20480 image_cache_ttl = 10080 images_path = /var/lib/ironic/images/ instance_master_path = /var/lib/ironic/master_images ipxe_boot_script = /opt/stack/ironic/ironic/drivers/modules/boot.ipxe ipxe_enabled = True ipxe_timeout = 0 ipxe_use_swift = False pxe_append_params = nofb nomodeset vga=normal console=ttyS0 systemd.journald.forward_to_console=yes pxe_bootfile_name = undionly.kpxe pxe_bootfile_name_by_arch: pxe_config_template = /opt/stack/ironic/ironic/drivers/modules/ipxe_config.template pxe_config_template_by_arch: tftp_master_path = /opt/stack/data/ironic/tftpboot/master_images tftp_root = /opt/stack/data/ironic/tftpboot tftp_server = 10.223.197.220 uefi_pxe_bootfile_name = ipxe.efi uefi_pxe_config_template = /opt/stack/ironic/ironic/drivers/modules/ipxe_config.template seamicro: action_timeout = 10 max_retry = 3 service_catalog: auth_section = None auth_type = password cafile = /opt/stack/data/ca-bundle.pem certfile = None insecure = False keyfile = None timeout = None snmp: power_timeout = 10 reboot_delay = 0 swift: auth_section = None auth_type = password cafile = /opt/stack/data/ca-bundle.pem certfile = None insecure = False keyfile = None swift_max_retries = 2 timeout = None virtualbox: port = 18083 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/rescue.rst0000664000175000017500000000671600000000000020267 0ustar00zuulzuul00000000000000.. _rescue: =========== Rescue Mode =========== Overview ======== The Bare Metal Service supports putting nodes in rescue mode using hardware types that support rescue interfaces. The hardware types utilizing ironic-python-agent with ``PXE``/``Virtual Media`` based boot interface can support rescue operation when configured appropriately. .. 
note:: The rescue operation is currently supported only when tenant networks use DHCP to obtain IP addresses. Rescue operation can be used to boot nodes into a rescue ramdisk so that the ``rescue`` user can access the node, in order to provide the ability to access the node in case access to OS is not possible. For example, if there is a need to perform manual password reset or data recovery in the event of some failure, rescue operation can be used. Configuring The Bare Metal Service ================================== Configure the Bare Metal Service appropriately so that the service has the information needed to boot the ramdisk before a user tries to initiate rescue operation. This will differ somewhat between different deploy environments, but an example of how to do this is outlined below: #. Create and configure ramdisk that supports rescue operation. Please see :doc:`/install/deploy-ramdisk` for detailed instructions to build a ramdisk. #. Configure a network to use for booting nodes into the rescue ramdisk in neutron, and note the UUID or name of this network. This is required if you're using the neutron DHCP provider and have Bare Metal Service managing ramdisk booting (the default). This can be the same network as your cleaning or tenant network (for flat network). For an example of how to configure new networks with Bare Metal Service, see the :doc:`/install/configure-networking` documentation. #. Add the unique name or UUID of your rescue network to ``ironic.conf``: .. code-block:: ini [neutron] rescuing_network= .. note:: This can be set per node via driver_info['rescuing_network'] #. Restart the ironic conductor service. #. Specify a rescue kernel and ramdisk or rescue ISO compatible with the node's driver for pxe based boot interface or virtual-media based boot interface respectively. Example for pxe based boot interface: .. 
code-block:: console baremetal node set $NODE_UUID \ --driver-info rescue_ramdisk=$RESCUE_INITRD_UUID \ --driver-info rescue_kernel=$RESCUE_VMLINUZ_UUID See :doc:`/install/configure-glance-images` for details. If you are not using Image service, it is possible to provide images to Bare Metal service via hrefs. After this, The Bare Metal Service should be ready for ``rescue`` operation. Test it out by attempting to rescue an active node and connect to the instance using ssh, as given below: .. code-block:: console baremetal node rescue $NODE_UUID \ --rescue-password --wait ssh rescue@$INSTANCE_IP_ADDRESS To move a node back to active state after using rescue mode you can use ``unrescue``. Please unmount any filesystems that were manually mounted before proceeding with unrescue. The node unrescue can be done as given below: .. code-block:: console baremetal node unrescue $NODE_UUID ``rescue`` and ``unrescue`` operations can also be triggered via the Compute Service using the following commands: .. code-block:: console openstack server rescue --password openstack server unrescue ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/retirement.rst0000664000175000017500000000445000000000000021150 0ustar00zuulzuul00000000000000.. _retirement: =============== Node retirement =============== Overview ======== Retiring nodes is a natural part of a server’s life cycle, for instance when the end of the warranty is reached and the physical space is needed for new deliveries to install replacement capacity. However, depending on the type of the deployment, removing nodes from service can be a full workflow by itself as it may include steps like moving applications to other hosts, cleaning sensitive data from disks or the BMC, or tracking the dismantling of servers from their racks. 
Ironic provides some means to support such workflows by allowing to tag nodes as ``retired`` which will prevent any further scheduling of instances, but will still allow for other operations, such as cleaning, to happen (this marks an important difference to nodes which have the ``maintenance`` flag set). How to use ========== When it is known that a node shall be retired, set the ``retired`` flag on the node with:: baremetal node set --retired node-001 This can be done irrespective of the state the node is in, so in particular while the node is ``active``. .. NOTE:: An exception are nodes which are in ``available``. For backwards compatibility reasons, these nodes need to be moved to ``manageable`` first. Trying to set the ``retired`` flag for ``available`` nodes will result in an error. Optionally, a reason can be specified when a node is retired, e.g.:: baremetal node set --retired node-001 \ --retired-reason "End of warranty for delivery abc123" Upon instance deletion, an ``active`` node with the ``retired`` flag set will not move to ``available``, but to ``manageable``. The node will hence not be eligible for scheduling of new instances. Equally, nodes with ``retired`` set to True cannot move from ``manageable`` to ``available``: the ``provide`` verb is blocked. This is to prevent accidental re-use of nodes tagged for removal from the fleet. In order to move these nodes to ``available`` none the less, the ``retired`` field needs to be removed first. This can be done via:: baremetal node unset --retired node-001 In order to facilitate the identification of nodes marked for retirement, e.g. 
by other teams, ironic also allows to list all nodes which have the ``retired`` flag set:: baremetal node list --retired ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/secure-rbac.rst0000664000175000017500000002673500000000000021177 0ustar00zuulzuul00000000000000=========== Secure RBAC =========== Suggested Reading ================= It is likely an understatement to say that policy enforcement is a complex subject. It requires operational context to craft custom policy to meet general use needs. Part of this is why the Secure RBAC effort was started, to provide consistency and a "good" starting place for most users who need a higher level of granularity. That being said, it would likely help anyone working to implement customization of these policies to consult some reference material in hopes of understanding the context. * `Keystone Adminstrator Guide - Service API Protection `_ * `Ironic Scoped Role Based Access Control Specification `_ Historical Context - How we reached our access model ---------------------------------------------------- Ironic has reached the access model through an evolution the API and the data stored. Along with the data stored, the enforcement of policy based upon data stored in these fields. * `Ownership Information Storage `_ * `Allow Node owners to Administer `_ * `Allow Leasable Nodes `_ System Scoped ============= System scoped authentication is intended for "administrative" activities such as those crossing tenants/projects, as all tenants/projects should be visible to ``system`` scoped users in Ironic. System scoped requests do not have an associated ``project_id`` value for the Keystone request authorization token utilized to speak with Ironic. These requests are translated through `keystonemiddleware `_ into values which tell Ironic what to do. 
Or to be more precise, tell the policy enforcement framework the information necessary to make decisions. System scoped requests very much align with the access controls of Ironic before the Secure RBAC effort. The original custom role ``baremetal_admin`` privileges are identical to a system scoped ``admin``'s privileges. Similarly ``baremetal_observer`` is identical to a system scoped ``reader``. In these concepts, the ``admin`` is allowed to create/delete objects/items. The ``reader`` is allowed to read details about items and is intended for users who may need an account with read-only access for or front-line support purposes. In addition to these concepts, a ``member`` role exists in the Secure RBAC use model. Ironic does support this role, and in general ``member`` role users in a system scope are able to perform basic updates/changes, with the exception of special fields like those to disable cleaning. Project Scoped ============== Project scoped authentication is when a request token and associated records indicate an associated ``project_id`` value. Legacy Behavior --------------- The legacy behavior of API service is that all requests are treated as project scoped requests where access is governed using an "admin project". This behavior is *deprecated*. The new behavior is a delineation of access through ``system`` scoped and ``project`` scoped requests. In essence, what would have served as an "admin project", is now ``system`` scoped usage. Previously, Ironic API, by default, responded with access denied or permitted based upon the admin project and associated role. These responses would generate an HTTP 403 if the project was incorrect or if a user role. .. NOTE:: While Ironic has had the concept of an ``owner`` and a ``lessee``, they are *NOT* used by default. They require custom policy configuration files to be used in the legacy operating mode. 
Supported Endpoints ------------------- * /nodes * /nodes//ports * /nodes//portgroups * /nodes//volume/connectors * /nodes//volume/targets * /nodes//allocation * /ports * /portgroups * /volume/connectors * /volume/targets * /allocations How Project Scoped Works ------------------------ Ironic has two project use models where access is generally more delegative to an ``owner`` and access to a ``lessee`` is generally more utilitarian. The purpose of an owner, is more to enable the System Operator to delegate much of the administrative activity of a Node to the owner. This may be because they physically own the hardware, or they are in charge of the node. Regardless of the use model that the fields and mechanics support, these fields are to support humans, and possibly services where applicable. The purpose of a lessee is more for a *tenant* in their *project* to be able to have access to perform basic actions with the API. In some cases that may be to reprovision or rebuild a node. Ultimately that is the lessee's prerogative, but by default there are actions and field updates that cannot be performed by default. This is also governed by access level within a project. These policies are applied in the way data is viewed and how data can be updated. Generally, an inability to view a node is an access permission issue in term of the project ID being correct for owner/lessee. The ironic project has attempted to generally codify what we believe is reasonable, however operators may wish to override these policy settings. For details general policy setting details, please see :doc:`/configuration/policy`. Field value visibility restrictions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Ironic's API, by default has a concept of filtering node values to prevent sensitive data from being leaked. System scoped users are subjected to basic restrictions, whereas project scoped users are, by default, examined further and against additional policies. 
This threshold is controlled with the ``baremetal:node:get:filter_threshold``. By default, the following fields are masked on Nodes and are controlled by the associated policies. By default, owners are able to see insight into the infrastructure, whereas lessee users *CANNOT* view these fields by default. * ``last_error`` - ``baremetal:node:get:last_error`` * ``reservation`` - ``baremetal:node:get:reservation`` * ``driver_internal_info`` - ``baremetal:node:get:driver_internal_info`` * ``driver_info`` - ``baremetal:node:get:driver_info`` Field update restrictions ~~~~~~~~~~~~~~~~~~~~~~~~~ Some of the fields in this list are restricted to System scoped users, or even only System Administrators. Some of these default restrictions are likely obvious. Owners can't change the owner. Lessee's can't change the owner. * ``driver_info`` - ``baremetal:node:update:driver_info`` * ``properties`` - ``baremetal:node:update:properties`` * ``chassis_uuid`` - ``baremetal:node:update:chassis_uuid`` * ``instance_uuid`` - ``baremetal:node:update:instance_uuid`` * ``lessee`` - ``baremetal:node:update:lessee`` * ``owner`` - ``baremetal:node:update:owner`` * ``driver`` - ``baremetal:node:update:driver_interfaces`` * ``*_interface`` - ``baremetal:node:update:driver_interfaces`` * ``network_data`` - ``baremetal:node:update:network_data`` * ``conductor_group`` - ``baremetal:node:update:conductor_group`` * ``name`` - ``baremetal:node:update:name`` * ``retired`` - ``baremetal:node:update:driver_info`` * ``retired_reason`` - ``baremetal:node:update:retired`` .. WARNING:: The ``chassis_uuid`` field is a write-once-only field. As such it is restricted to system scoped administrators. More information is available on these fields in :doc:`/configuration/policy`. Allocations ~~~~~~~~~~~ The ``allocations`` endpoint of the API is somewhat different than other endpoints as it allows for the allocation of physical machines to an admin. 
In this context, there is not already an ``owner`` or ``project_id`` to leverage to control access for the creation process, any project member does have the inherent privilege of requesting an allocation. That being said, their allocation request will require physical nodes to be owned or leased to the ``project_id`` through the ``node`` fields ``owner`` or ``lessee``. Ability to override the owner is restricted to system scoped users by default and any new allocation being requested with a specific owner, if made in ``project`` scope, will have the ``project_id`` recorded as the owner of the allocation. Ultimately, an operational behavior difference exists between the ``owner`` and ``lessee`` rights in terms of allocations. With the standard access rights, ``lessee`` users are able to create allocations if they own nodes which are not allocated or deployed, but they cannot reprovision nodes when using only a ``member`` role. This limitation is not the case for project-scoped users with the ``admin`` role. .. WARNING:: The allocation endpoint's use is restricted to project scoped interactions until ``[oslo_policy]enforce_new_defaults`` has been set to ``True`` using the ``baremetal:allocation:create_pre_rbac`` policy rule. This is in order to prevent endpoint misuse. Afterwards all project scoped allocations will automatically populate an owner. System scoped request are not subjected to this restriction, and operators may change the default restriction via the ``baremetal:allocation:create_restricted`` policy. Practical differences --------------------- Most users, upon implementing the use of ``system`` scoped authentication should not notice a difference as long as their authentication token is properly scoped to ``system`` and with the appropriate role for their access level. 
For most users who used a ``baremetal`` project, or other custom project via a custom policy file, along with a custom role name such as ``baremetal_admin``, this will require changing the user to be a ``system`` scoped user with ``admin`` privileges. The most noticeable difference for API consumers is the HTTP 403 access code is now mainly a HTTP 404 access code. The access concept has changed from "Does the user broadly have access to the API?" to "Does user have access to the node, and then do they have access to the specific resource?". What is an owner or lessee? --------------------------- An ``owner`` or ``lessee`` is the project which has been assigned baremetal resources. Generally these should be service projects as opposed to a project dedicated to a specific user. This will help prevent the need to involve a ``system`` scoped administrator from having to correct ownership records should a project need to be removed due to an individual's departure. The underlying ``project_id`` is used to represent and associate the owner or lessee. How do I assign an owner? ------------------------- .. code-block:: console # baremetal node set --owner .. note:: With the default access policy, an ``owner`` is able to change the assigned ``lessee`` of a node. However the ``lessee`` is unable to do the same. How do I assign a lessee? ------------------------- .. code-block:: console # baremetal node set --lessee What is the difference between an owner and lessee? --------------------------------------------------- This is largely covered in `How Project Scoped Works`_ although as noted it is largely in means of access. A ``lessee`` is far more restrictive and an ``owner`` may revoke access to ``lessee``. Access to the underlying baremetal node is not exclusive between the ``owner`` and ``lessee``, and this use model expects that some level of communication takes place between the appropriate parties. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/security.rst0000664000175000017500000002660300000000000020645 0ustar00zuulzuul00000000000000.. _security: ================= Security Overview ================= While the Bare Metal service is intended to be a secure application, it is important to understand what it does and does not cover today. Deployers must properly evaluate their use case and take the appropriate actions to secure their environment(s). This document is intended to provide an overview of what risks an operator of the Bare Metal service should be aware of. It is not intended as a How-To guide for securing a data center or an OpenStack deployment. .. TODO: add "Security Considerations for Network Boot" section .. TODO: add "Credential Storage and Management" section .. TODO: add "Multi-tenancy Considerations" section REST API: user roles and policy settings ======================================== .. WARNING:: This information is presently in flux as of the Wallaby release with the implementation of ``Secure RBAC`` where ``system`` and ``project`` scoped requests are able to be parsed and default access controls support a delineation of roles and responsibilities through the roles. Please see :doc:`/admin/secure-rbac`. Beginning with the Newton (6.1.0) release, the Bare Metal service allows operators significant control over API access: * Access may be restricted to each method (GET, PUT, etc) for each REST resource. Defaults are provided with the release and defined in code. * Access may be divided between an "administrative" role with full access and "observer" role with read-only access. By default, these roles are assigned the names ``baremetal_admin`` and ``baremetal_observer``, respectively. * By default, passwords and instance secrets are hidden in ``driver_info`` and ``instance_info``, respectively. 
In case of debugging or diagnosing, the behavior can be overridden by changing the policy file. To allow password in ``driver_info`` unmasked for users with administrative privileges, apply following changes to policy configuration file:: "show_password": "rule:is_admin" And restart the Bare Metal API service to take effect. Please check :doc:`/configuration/policy` for more details. Prior to the Newton (6.1.0) release, the Bare Metal service only supported two policy options: * API access may be secured by a simple policy rule: users with administrative privileges may access all API resources, whereas users without administrative privileges may only access public API resources. * Passwords contained in the ``driver_info`` field may be hidden from all API responses with the ``show_password`` policy setting. This defaults to always hide passwords, regardless of the user's role. You can override it with policy configuration as described above. Multi-tenancy ============= There are two aspects of multitenancy to consider when evaluating a deployment of the Bare Metal Service: interactions between tenants on the network, and actions one tenant can take on a machine that will affect the next tenant. Network Interactions -------------------- Interactions between tenants' workloads running simultaneously on separate servers include, but are not limited to: IP spoofing, packet sniffing, and network man-in-the-middle attacks. By default, the Bare Metal service provisions all nodes on a "flat" network, and does not take any precautions to avoid or prevent interaction between tenants. This can be addressed by integration with the OpenStack Identity, Compute, and Networking services, so as to provide tenant-network isolation. Additional documentation on `network multi-tenancy `_ is available. 
Lingering Effects ----------------- Interactions between tenants placed sequentially on the same server include, but are not limited to: changes in BIOS settings, modifications to firmware, or files left on disk or peripheral storage devices (if these devices are not erased between uses). By default, the Bare Metal service will erase (clean) the local disk drives during the "cleaning" phase, after deleting an instance. It *does not* reset BIOS or reflash firmware or peripheral devices. This can be addressed through customizing the utility ramdisk used during the "cleaning" phase. See details in the `Firmware security`_ section. Firmware security ================= When the Bare Metal service deploys an operating system image to a server, that image is run natively on the server without virtualization. Any user with administrative access to the deployed instance has administrative access to the underlying hardware. Most servers' default settings do not prevent a privileged local user from gaining direct access to hardware devices. Such a user could modify device or firmware settings, and potentially flash new firmware to the device, before deleting their instance and allowing the server to be allocated to another user. If the ``[conductor]/automated_clean`` configuration option is enabled (and the ``[deploy]/erase_devices_priority`` configuration option is not zero), the Bare Metal service will securely erase all local disk devices within a machine during instance deletion. However, the service does not ship with any code that will validate the integrity of, or make any modifications to, system or device firmware or firmware settings. Operators are encouraged to write their own hardware manager plugins for the ``ironic-python-agent`` ramdisk. This should include custom ``clean steps`` that would be run during the :ref:`cleaning` process, as part of Node de-provisioning. 
The ``clean steps`` would perform the specific actions necessary within that environment to ensure the integrity of each server's firmware. Ideally, an operator would work with their hardware vendor to ensure that proper firmware security measures are put in place ahead of time. This could include: - installing signed firmware for BIOS and peripheral devices - using a TPM (Trusted Platform Module) to validate signatures at boot time - booting machines in `UEFI secure boot mode`_, rather than BIOS mode, to validate kernel signatures - disabling local (in-band) access from the host OS to the management controller (BMC) - disabling modifications to boot settings from the host OS Additional references: - :ref:`cleaning` .. _secure-boot: UEFI secure boot mode ===================== Some hardware types support turning `UEFI secure boot`_ dynamically when deploying an instance. Currently these are :doc:`/admin/drivers/ilo`, :doc:`/admin/drivers/irmc` and :doc:`/admin/drivers/redfish`. Support for the UEFI secure boot is declared by adding the ``secure_boot`` capability in the ``capabilities`` parameter in the ``properties`` field of a node. ``secure_boot`` is a boolean parameter and takes value as ``true`` or ``false``. To enable ``secure_boot`` on a node add it to ``capabilities``:: baremetal node set --property capabilities='secure_boot:true' Alternatively use :doc:`/admin/inspection` to automatically populate the secure boot capability. .. warning:: UEFI secure boot only works in UEFI boot mode, see :ref:`boot_mode_support` for how to turn it on and off. Compatible images ----------------- Use element ``ubuntu-signed`` or ``fedora`` to build signed deploy ISO and user images with `diskimage-builder `_. 
The below command creates files named cloud-image-boot.iso, cloud-image.initrd, cloud-image.vmlinuz and cloud-image.qcow2 in the current working directory:: cd ./bin/disk-image-create -o cloud-image ubuntu-signed baremetal iso Ensure the public key of the signed image is loaded into bare metal to deploy signed images. Enabling with OpenStack Compute ------------------------------- Nodes having ``secure_boot`` set to ``true`` may be requested by adding an ``extra_spec`` to the nova flavor:: openstack flavor set --property capabilities:secure_boot="true" openstack server create --flavor --image instance-1 If ``capabilities`` is used in ``extra_spec`` as above, nova scheduler (``ComputeCapabilitiesFilter``) will match only ironic nodes which have the ``secure_boot`` set appropriately in ``properties/capabilities``. It will filter out rest of the nodes. The above facility for matching in nova can be used in heterogeneous environments where there is a mix of machines supporting and not supporting UEFI secure boot, and operator wants to provide a choice to the user regarding secure boot. If the flavor doesn't contain ``secure_boot`` then nova scheduler will not consider secure boot mode as a placement criteria, hence user may get a secure boot capable machine that matches with user specified flavors but deployment would not use its secure boot capability. Secure boot deploy would happen only when it is explicitly specified through flavor. Enabling standalone ------------------- To request secure boot for an instance in standalone mode (without OpenStack Compute), you need to add the capability directly to the node's ``instance_info``:: baremetal node set --instance-info capabilities='{"secure_boot": "true"}' .. 
_UEFI secure boot: https://en.wikipedia.org/wiki/Unified_Extensible_Firmware_Interface#Secure_boot Other considerations ==================== Internal networks ----------------- Access to networks which the Bare Metal service uses internally should be prohibited from outside. These networks are the ones used for management (with the nodes' BMC controllers), provisioning, cleaning (if used) and rescuing (if used). This can be done with physical or logical network isolation, traffic filtering, etc. Management interface technologies --------------------------------- Some nodes support more than one management interface technology (vendor and IPMI for example). If you use only one modern technology for out-of-band node access, it is recommended that you disable IPMI since the IPMI protocol is not secure. If IPMI is enabled, in most cases a local OS administrator is able to work in-band with IPMI settings without specifying any credentials, as this is a DCMI specification requirement. Tenant network isolation ------------------------ If you use tenant network isolation, services (TFTP or HTTP) that handle the nodes' boot files should serve requests only from the internal networks that are used for the nodes being deployed and cleaned. TFTP protocol does not support per-user access control at all. For HTTP, there is no generic and safe way to transfer credentials to the node. Also, tenant network isolation is not intended to work with network-booting a node by default, once the node has been provisioned. API endpoints for RAM disk use ------------------------------ There are `two (unauthorized) endpoints `_ in the Bare Metal API that are intended for use by the ironic-python-agent RAM disk. They are not intended for public use. These endpoints can potentially cause security issues. Access to these endpoints from external or untrusted networks should be prohibited. 
An easy way to do this is to: * set up two groups of API services: one for external requests, the second for deploy RAM disks' requests. * to disable unauthorized access to these endpoints in the (first) API services group that serves external requests, the following lines should be added to the :ironic-doc:`policy.yaml file `:: # Send heartbeats from IPA ramdisk "baremetal:node:ipa_heartbeat": "rule:is_admin" # Access IPA ramdisk functions "baremetal:driver:ipa_lookup": "rule:is_admin" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/troubleshooting.rst0000664000175000017500000012716200000000000022227 0ustar00zuulzuul00000000000000.. _troubleshooting: ====================== Troubleshooting Ironic ====================== Nova returns "No valid host was found" Error ============================================ Sometimes Nova Conductor log file "nova-conductor.log" or a message returned from Nova API contains the following error:: NoValidHost: No valid host was found. There are not enough hosts available. "No valid host was found" means that the Nova Scheduler could not find a bare metal node suitable for booting the new instance. This in turn usually means some mismatch between resources that Nova expects to find and resources that Ironic advertised to Nova. A few things should be checked in this case: #. Make sure that enough nodes are in ``available`` state, not in maintenance mode and not already used by an existing instance. Check with the following command:: baremetal node list --provision-state available --no-maintenance --unassociated If this command does not show enough nodes, use generic ``baremetal node list`` to check other nodes. For example, nodes in ``manageable`` state should be made available:: baremetal node provide The Bare metal service automatically puts a node in maintenance mode if there are issues with accessing its management interface. 
See :ref:`power-fault` for details. The ``node validate`` command can be used to verify that all required fields are present. The following command should not return anything:: baremetal node validate | grep -E '(power|management)\W*False' Maintenance mode will be also set on a node if automated cleaning has failed for it previously. #. Make sure that you have Compute services running and enabled:: $ openstack compute service list --service nova-compute +----+--------------+-------------+------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +----+--------------+-------------+------+---------+-------+----------------------------+ | 7 | nova-compute | example.com | nova | enabled | up | 2017-09-04T13:14:03.000000 | +----+--------------+-------------+------+---------+-------+----------------------------+ By default, a Compute service is disabled after 10 consecutive build failures on it. This is to ensure that new build requests are not routed to a broken Compute service. If it is the case, make sure to fix the source of the failures, then re-enable it:: openstack compute service set --enable nova-compute #. Starting with the Pike release, check that all your nodes have the ``resource_class`` field set using the following command:: baremetal node list --fields uuid name resource_class Then check that the flavor(s) are configured to request these resource classes via their properties:: openstack flavor show -f value -c properties For example, if your node has resource class ``baremetal-large``, it will be matched by a flavor with property ``resources:CUSTOM_BAREMETAL_LARGE`` set to ``1``. See :doc:`/install/configure-nova-flavors` for more details on the correct configuration. #. Upon scheduling, Nova will query the Placement API service for the available resource providers (in the case of Ironic: nodes with a given resource class). 
If placement does not have any allocation candidates for the requested resource class, the request will result in a "No valid host was found" error. It is hence sensible to check if Placement is aware of resource providers (nodes) for the requested resource class with:: $ openstack allocation candidate list --resource CUSTOM_BAREMETAL_LARGE='1' +---+-----------------------------+--------------------------------------+-------------------------------+ | # | allocation | resource provider | inventory used/capacity | +---+-----------------------------+--------------------------------------+-------------------------------+ | 1 | CUSTOM_BAREMETAL_LARGE=1 | 2f7b9c69-c1df-4e40-b94e-5821a4ea0453 | CUSTOM_BAREMETAL_LARGE=0/1 | +---+-----------------------------+--------------------------------------+-------------------------------+ For Ironic, the resource provider is the UUID of the available Ironic node. If this command returns an empty list (or does not contain the targeted resource provider), the operator needs to understand first, why the resource tracker has not reported this provider to placement. Potential explanations include: * the resource tracker cycle has not finished yet and the resource provider will appear once it has (the time to finish the cycle scales linearly with the number of nodes the corresponding ``nova-compute`` service manages); * the node is in a state where the resource tracker does not consider it to be eligible for scheduling, e.g. when the node has ``maintenance`` set to ``True``; make sure the target nodes are in ``available`` and ``maintenance`` is ``False``; #. If you do not use scheduling based on resource classes, then the node's properties must have been set either manually or via inspection. For each node with ``available`` state check that the ``properties`` JSON field has valid values for the keys ``cpus``, ``cpu_arch``, ``memory_mb`` and ``local_gb``. 
Example of valid properties:: $ baremetal node show --fields properties +------------+------------------------------------------------------------------------------------+ | Property | Value | +------------+------------------------------------------------------------------------------------+ | properties | {u'memory_mb': u'8192', u'cpu_arch': u'x86_64', u'local_gb': u'41', u'cpus': u'4'} | +------------+------------------------------------------------------------------------------------+ .. warning:: If you're using exact match filters in the Nova Scheduler, make sure the flavor and the node properties match exactly. #. The Nova flavor that you are using does not match any properties of the available Ironic nodes. Use :: openstack flavor show to compare. The extra specs in your flavor starting with ``capability:`` should match ones in ``node.properties['capabilities']``. .. note:: The format of capabilities is different in Nova and Ironic. E.g. in Nova flavor:: $ openstack flavor show -c properties +------------+----------------------------------+ | Field | Value | +------------+----------------------------------+ | properties | capabilities:boot_option='local' | +------------+----------------------------------+ But in Ironic node:: $ baremetal node show --fields properties +------------+-----------------------------------------+ | Property | Value | +------------+-----------------------------------------+ | properties | {u'capabilities': u'boot_option:local'} | +------------+-----------------------------------------+ #. After making changes to nodes in Ironic, it takes time for those changes to propagate from Ironic to Nova. Check that :: openstack hypervisor stats show correctly shows total amount of resources in your system. You can also check ``openstack hypervisor show `` to see the status of individual Ironic nodes as reported to Nova. #. Figure out which Nova Scheduler filter ruled out your nodes. 
Check the ``nova-scheduler`` logs for lines containing something like:: Filter ComputeCapabilitiesFilter returned 0 hosts The name of the filter that removed the last hosts may give some hints on what exactly was not matched. See :nova-doc:`Nova filters documentation ` for more details. #. If none of the above helped, check Ironic conductor log carefully to see if there are any conductor-related errors which are the root cause for "No valid host was found". If there are any "Error in deploy of node : [Errno 28] ..." error messages in Ironic conductor log, it means the conductor run into a special error during deployment. So you can check the log carefully to fix or work around and then try again. Patching the Deploy Ramdisk =========================== When debugging a problem with deployment and/or inspection you may want to quickly apply a change to the ramdisk to see if it helps. Of course you can inject your code and/or SSH keys during the ramdisk build (depends on how exactly you've built your ramdisk). But it's also possible to quickly modify an already built ramdisk. Create an empty directory and unpack the ramdisk content there: .. code-block:: bash $ mkdir unpack $ cd unpack $ gzip -dc /path/to/the/ramdisk | cpio -id The last command will result in the whole Linux file system tree unpacked in the current directory. Now you can modify any files you want. The actual location of the files will depend on the way you've built the ramdisk. .. note:: On a systemd-based system you can use the ``systemd-nspawn`` tool (from the ``systemd-container`` package) to create a lightweight container from the unpacked filesystem tree:: $ sudo systemd-nspawn --directory /path/to/unpacked/ramdisk/ /bin/bash This will allow you to run commands within the filesystem, e.g. use package manager. 
If the ramdisk is also systemd-based, and you have login credentials set up, you can even boot a real ramdisk environment with :: $ sudo systemd-nspawn --directory /path/to/unpacked/ramdisk/ --boot After you've done the modifications, pack the whole content of the current directory back:: $ find . | cpio -H newc -o | gzip -c > /path/to/the/new/ramdisk .. note:: You don't need to modify the kernel (e.g. ``tinyipa-master.vmlinuz``), only the ramdisk part. API Errors ========== The `debug_tracebacks_in_api` config option may be set to return tracebacks in the API response for all 4xx and 5xx errors. .. _retrieve_deploy_ramdisk_logs: Retrieving logs from the deploy ramdisk ======================================= When troubleshooting deployments (especially in case of a deploy failure) it's important to have access to the deploy ramdisk logs to be able to identify the source of the problem. By default, Ironic will retrieve the logs from the deploy ramdisk when the deployment fails and save them on the local filesystem at ``/var/log/ironic/deploy``. To change this behavior, operators can make the following changes to ``/etc/ironic/ironic.conf`` under the ``[agent]`` group: * ``deploy_logs_collect``: Whether Ironic should collect the deployment logs on deployment. Valid values for this option are: * ``on_failure`` (**default**): Retrieve the deployment logs upon a deployment failure. * ``always``: Always retrieve the deployment logs, even if the deployment succeeds. * ``never``: Disable retrieving the deployment logs. * ``deploy_logs_storage_backend``: The name of the storage backend where the logs will be stored. Valid values for this option are: * ``local`` (**default**): Store the logs in the local filesystem. * ``swift``: Store the logs in Swift. * ``deploy_logs_local_path``: The path to the directory where the logs should be stored, used when the ``deploy_logs_storage_backend`` is configured to ``local``. By default logs will be stored at **/var/log/ironic/deploy**. 
* ``deploy_logs_swift_container``: The name of the Swift container to store the logs, used when the deploy_logs_storage_backend is configured to "swift". By default **ironic_deploy_logs_container**. * ``deploy_logs_swift_days_to_expire``: Number of days before a log object is marked as expired in Swift. If None, the logs will be kept forever or until manually deleted. Used when the deploy_logs_storage_backend is configured to "swift". By default **30** days. When the logs are collected, Ironic will store a *tar.gz* file containing all the logs according to the ``deploy_logs_storage_backend`` configuration option. All log objects will be named with the following pattern:: [_]_.tar.gz .. note:: The *instance_uuid* field is not required for deploying a node when Ironic is configured to be used in standalone mode. If present it will be appended to the name. Accessing the log data ---------------------- When storing in the local filesystem ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When storing the logs in the local filesystem, the log files can be found at the path configured in the ``deploy_logs_local_path`` configuration option. For example, to find the logs from the node ``5e9258c4-cfda-40b6-86e2-e192f523d668``: .. code-block:: bash $ ls /var/log/ironic/deploy | grep 5e9258c4-cfda-40b6-86e2-e192f523d668 5e9258c4-cfda-40b6-86e2-e192f523d668_88595d8a-6725-4471-8cd5-c0f3106b6898_2016-08-08-13:52:12.tar.gz 5e9258c4-cfda-40b6-86e2-e192f523d668_db87f2c5-7a9a-48c2-9a76-604287257c1b_2016-08-08-14:07:25.tar.gz .. note:: When saving the logs to the filesystem, operators may want to enable some form of rotation for the logs to avoid disk space problems. When storing in Swift ~~~~~~~~~~~~~~~~~~~~~ When using Swift, operators can associate the objects in the container with the nodes in Ironic and search for the logs for the node ``5e9258c4-cfda-40b6-86e2-e192f523d668`` using the **prefix** parameter. For example: .. 
code-block:: bash $ swift list ironic_deploy_logs_container -p 5e9258c4-cfda-40b6-86e2-e192f523d668 5e9258c4-cfda-40b6-86e2-e192f523d668_88595d8a-6725-4471-8cd5-c0f3106b6898_2016-08-08-13:52:12.tar.gz 5e9258c4-cfda-40b6-86e2-e192f523d668_db87f2c5-7a9a-48c2-9a76-604287257c1b_2016-08-08-14:07:25.tar.gz To download a specific log from Swift, do: .. code-block:: bash $ swift download ironic_deploy_logs_container "5e9258c4-cfda-40b6-86e2-e192f523d668_db87f2c5-7a9a-48c2-9a76-604287257c1b_2016-08-08-14:07:25.tar.gz" 5e9258c4-cfda-40b6-86e2-e192f523d668_db87f2c5-7a9a-48c2-9a76-604287257c1b_2016-08-08-14:07:25.tar.gz [auth 0.341s, headers 0.391s, total 0.391s, 0.531 MB/s] The contents of the log file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The log is just a ``.tar.gz`` file that can be extracted as: .. code-block:: bash $ tar xvf The contents of the file may differ slightly depending on the distribution that the deploy ramdisk is using: * For distributions using ``systemd`` there will be a file called **journal** which contains all the system logs collected via the ``journalctl`` command. * For other distributions, the ramdisk will collect all the contents of the ``/var/log`` directory. For all distributions, the log file will also contain the output of the following commands (if present): ``ps``, ``df``, ``ip addr`` and ``iptables``. Here's one example when extracting the content of a log file for a distribution that uses ``systemd``: .. code-block:: bash $ tar xvf 5e9258c4-cfda-40b6-86e2-e192f523d668_88595d8a-6725-4471-8cd5-c0f3106b6898_2016-08-08-13:52:12.tar.gz df ps journal ip_addr iptables .. _troubleshooting-stp: DHCP during PXE or iPXE is inconsistent or unreliable ===================================================== This can be caused by the spanning tree protocol delay on some switches. The delay prevents the switch port moving to forwarding mode during the nodes attempts to PXE, so the packets never make it to the DHCP server. 
To resolve this issue you should set the switch port that connects to your baremetal nodes as an edge or PortFast type port. Configured in this way the switch port will move to forwarding mode as soon as the link is established. An example on how to do that for a Cisco Nexus switch is: .. code-block:: bash $ config terminal $ (config) interface eth1/11 $ (config-if) spanning-tree port type edge Why does X issue occur when I am using LACP bonding with iPXE? ============================================================== If you are using iPXE, an unfortunate aspect of its design and interaction with networking is an automatic response as a Link Aggregation Control Protocol (or LACP) peer to remote switches. iPXE does this for only the single port which is used for network booting. In theory, this may help establish the port link-state faster with some switch vendors, but the official reasoning as far as the Ironic Developers are aware is not documented for iPXE. The end result of this is that once iPXE has stopped responding to LACP messages from the peer port, which occurs as part of the process of booting a ramdisk and iPXE handing over control to a full operating-system, switches typically begin a timer to determine how to handle the failure. This is because, depending on the mode of LACP, this can be interpreted as a switch or network fabric failure. This may demonstrate as any number of behaviors or issues from ramdisks finding they are unable to acquire DHCP addresses over the network interface to downloads abruptly stalling, to even minor issues such as LLDP port data being unavailable in introspection. Overall: * Ironic's agent doesn't officially support LACP and the Ironic community generally believes this may cause more problems than it would solve. 
During the Victoria development cycle, we added retry logic for most actions in an attempt to navigate the worst-known default hold-down timers to help ensure a deployment does not fail due to a short-lived transitory network connectivity failure in the form of a switch port having moved to a temporary blocking state. Where applicable and possible, many of these patches have been backported to supported releases. These patches also require that the switchport has an eventual fallback to a non-bonded mode. If the port remains in a blocking state, then traffic will be unable to flow and the deployment is likely to time out. * If you must use LACP, consider ``passive`` LACP negotiation settings in the network switch as opposed to ``active``. The difference being with passive the connected workload is likely a server where it should likely request the switch to establish the Link Aggregate. This is instead of being treated as if it's possibly another switch. * Consult your switch vendor's support forums. Some vendors have recommended port settings for booting machines using iPXE with their switches. IPMI errors =========== When working with IPMI, several settings need to be enabled depending on vendors. Enable IPMI over LAN -------------------- Machines may not have IPMI access over LAN enabled by default. This could cause the IPMI port to be unreachable through ipmitool, as shown: .. code-block:: bash $ ipmitool -I lan -H ipmi_host -U ipmi_user -P ipmi_pass chassis power status Error: Unable to establish LAN session To fix this, enable `IPMI over lan` setting using your BMC tool or web app. Troubleshooting lanplus interface --------------------------------- When working with lanplus interfaces, you may encounter the following error: .. 
code-block:: bash $ ipmitool -I lanplus -H ipmi_host -U ipmi_user -P ipmi_pass power status Error in open session response message : insufficient resources for session Error: Unable to establish IPMI v2 / RMCP+ session To fix that issue, please enable `RMCP+ Cipher Suite3 Configuration` setting using your BMC tool or web app. Why are my nodes stuck in a "-ing" state? ========================================= The Ironic conductor uses states ending with ``ing`` as a signifier that the conductor is actively working on something related to the node. Often, this means there is an internal lock or ``reservation`` set on the node and the conductor is downloading, uploading, or attempting to perform some sort of Input/Output operation - see `Why does API return "Node is locked by host"?`_ for details. In the case the conductor gets stuck, these operations should timeout, but there are cases in operating systems where operations are blocked until completion. These sorts of operations can vary based on the specific environment and operating configuration. What can cause these sorts of failures? --------------------------------------- Typical causes of such failures are going to be largely rooted in the concept of ``iowait``, either in the form of downloading from a remote host or reading or writing to the disk of the conductor. An operator can use the `iostat `_ tool to identify the percentage of CPU time spent waiting on storage devices. The fields that will be particularly important are the ``iowait``, ``await``, and ``tps`` ones, which can be read about in the ``iostat`` manual page. 
In the case of network file systems, for backing components such as image caches or distributed ``tftpboot`` or ``httpboot`` folders, IO operations failing on these can, depending on operating system and underlying client settings, cause threads to be stuck in a blocking wait state, which is realistically undetectable short of the operating system logging connectivity errors or even lock manager access errors. For example with `nfs `_, the underlying client recovery behavior, in terms of ``soft``, ``hard``, ``softreval``, ``nosoftreval``, will largely impact this behavior, but also NFS server settings can impact this behavior. A solid sign that this is a failure is when an ``ls /path/to/nfs`` command hangs for a period of time. In such cases, the Storage Administrator should be consulted and network connectivity investigated for errors before trying to recover to proceed. The bad news for IO related failures ------------------------------------ If the node has a populated ``reservation`` field, and has not timed out or proceeded to a ``fail`` state, then the conductor process will likely need to be restarted. This is because the worker thread is hung with-in the conductor. Manual intervention with-in Ironic's database is *not* advised to try and "un-wedge" the machine in this state, and restarting the conductor is encouraged. .. note:: Ironic's conductor, upon restart, clears reservations for nodes which were previously managed by the conductor before restart. If a distributed or network file system is in use, it is highly recommended that the operating system of the node running the conductor be rebooted as the running conductor may not even be able to exit in the state of an IO failure, again dependent upon site and server configuration. File Size != Disk Size ---------------------- An easy misconception to make is that a 2.4 GB file means that only 2.4 GB is written to disk. 
But if that file's virtual size is 20 GB, or 100 GB, things can become very problematic and extend the amount of time the node spends in ``deploying`` and ``deploy wait`` states. Again, these sorts of cases will depend upon the exact configuration of the deployment, but hopefully these are areas where these actions can occur. * Conversion to raw image files upon download to the conductor, from the ``[DEFAULT]force_raw_images`` option. Users using Glance may also experience issues here as the conductor will cache the image to be written which takes place when the ``[agent]image_download_source`` is set to ``http`` instead of ``swift``. .. note:: The QCOW2 image conversion utility does consume quite a bit of memory when converting images or writing them to the end storage device. This is because the files are not sequential in nature, and must be re-assembled from an internal block mapping. Internally Ironic limits this to 1GB of RAM. Operators performing large numbers of deployments may wish to disable raw images in these sorts of cases in order to minimize the conductor becoming a limiting factor due to memory and network IO. Why are my nodes stuck in a "wait" state? ========================================= The Ironic conductor uses states containing ``wait`` as a signifier that the conductor is waiting for a callback from another component, such as the Ironic Python Agent or the Inspector. If this feedback does not arrive, the conductor will time out and the node will eventually move to a ``failed`` state. Depending on the configuration and the circumstances, however, a node can stay in a ``wait`` state for a long time or even never time out. The list of such wait states includes: * ``clean wait`` for cleaning, * ``inspect wait`` for introspection, * ``rescue wait`` for rescuing, and * ``wait call-back`` for deploying. 
Communication issues between the conductor and the node ------------------------------------------------------- One of the most common issues when nodes seem to be stuck in a wait state occurs when the node never received any instructions or does not react as expected: the conductor moved the node to a wait state but the node will never call back. Examples include wrong ciphers which will make ipmitool get stuck or BMCs in a state where they accept commands, but don't do the requested task (or only a part of it, like shutting off, but not starting). It is useful in these cases to see via a ping or the console if and which action the node is performing. If the node does not seem to react to the requests sent by the conductor, it may be worthwhile to try the corresponding action out-of-band, e.g. confirm that power on/off commands work when directly sent to the BMC. The section on `IPMI errors`_ above gives some additional points to check. In some situations, a BMC reset may be necessary. Ironic Python Agent stuck ------------------------- Nodes can also remain in a wait state when the component the conductor is waiting for gets stuck, e.g. when a hardware manager enters a loop or is waiting for an event that is never happening. In these cases, it might be helpful to connect to the IPA and inspect its logs, see the trouble shooting guide of the :ironic-python-agent-doc:`ironic-python-agent (IPA) <>` on how to do this. Stopping the operation ---------------------- Cleaning, inspection and rescuing can be stopped while in ``clean wait``, ``inspect wait`` and ``rescue wait`` states using the ``abort`` command. It will move the node to the corresponding failure state (``clean failed``, ``inspect failed`` or ``rescue failed``):: baremetal node abort Deploying can be aborted while in the ``wait call-back`` state by starting an undeploy (normally resulting in cleaning):: baremetal node undeploy See :doc:`/user/states` for more details. .. 
note:: Since the Bare Metal service is not doing anything actively in waiting states, the nodes are not moved to failed states on conductor restart. Deployments fail with "failed to update MAC address" ==================================================== The design of the integration with the Networking service (neutron) is such that once virtual ports have been created in the API, their MAC address must be updated in order for the DHCP server to be able to appropriately reply. This can sometimes result in errors being raised indicating that the MAC address is already in use. This is because at some point in the past, a virtual interface was orphaned either by accident or by some unexpected glitch, and a previous entry is still present in Neutron. This error looks something like this when reported in the ironic-conductor log output.: Failed to update MAC address on Neutron port 305beda7-0dd0-4fec-b4d2-78b7aa4e8e6a.: MacAddressInUseClient: Unable to complete operation for network 1e252627-6223-4076-a2b9-6f56493c9bac. The mac address 52:54:00:7c:c4:56 is in use. Because we have no idea about this entry, we fail the deployment process as we can't make a number of assumptions in order to attempt to automatically resolve the conflict. How did I get here? ------------------- Originally this was a fairly easy issue to encounter. The retry logic path which resulted between the Orchestration (heat) and Compute (nova) services, could sometimes result in additional un-necessary ports being created. Bugs of this class have been largely resolved since the Rocky development cycle. Since then, the way this can become encountered is due to Networking (neutron) VIF attachments not being removed or deleted prior to deleting a port in the Bare Metal service. Ultimately, the key of this is that the port is being deleted. 
Under most operating circumstances, there really is no need to delete the port, and VIF attachments are stored on the port object, so deleting the port *CAN* result in the VIF not being cleaned up from Neutron. Under normal circumstances, when deleting ports, a node should be in a stable state, and the node should not be provisioned. If the ``baremetal port delete`` command fails, this may indicate that a known VIF is still attached. Generally if they are transitory from cleaning, provisioning, rescuing, or even inspection, getting the node to the ``available`` state will unblock your delete operation, that is unless there is a tenant VIF attachment. In that case, the vif will need to be removed from with-in the Bare Metal service using the ``baremetal node vif detach`` command. A port can also be checked to see if there is a VIF attachment by consulting the port's ``internal_info`` field. .. warning:: The ``maintenance`` flag can be used to force the node's port to be deleted, however this will disable any check that would normally block the user from issuing a delete and accidentally orphaning the VIF attachment record. How do I resolve this? ---------------------- Generally, you need to identify the port with the offending MAC address. Example: .. code-block:: console $ openstack port list --mac-address 52:54:00:7c:c4:56 From the command's output, you should be able to identify the ``id`` field. Using that, you can delete the port. Example: .. code-block:: console $ openstack port delete .. warning:: Before deleting a port, you should always verify that it is no longer in use or no longer seems applicable/operable. If there are multiple deployments of the Bare Metal service with a single Neutron, there is a possibility that an inventory typo, or possibly even a duplicate MAC address, exists, which could also produce the same basic error message. 
My test VM image does not deploy -- mount point does not exist ============================================================== What is likely occurring ------------------------ The image attempting to be deployed likely is a partition image where the file system that the user wishes to boot from lacks the required folders, such as ``/dev`` and ``/proc``, which are required to install a bootloader for a Linux OS image. It should be noted that similar errors can also occur with whole disk images where we are attempting to set up the UEFI bootloader configuration. That being said, in this case, the image is likely invalid or contains an unexpected internal structure. Users performing testing may choose something that they believe will work based on it working for virtual machines. These images are often attractive for testing as they are generic and include basic support for establishing networking and possibly installing user keys. Unfortunately, these images often lack drivers and firmware required for many different types of physical hardware which makes using them very problematic. Additionally, images such as `Cirros `_ do not have any contents in the root filesystem (i.e. an empty filesystem), as they are designed for the ``ramdisk`` to write the contents to disk upon boot. How do I not encounter this issue? ---------------------------------- We generally recommend using `diskimage-builder `_ or vendor supplied images. Centos, Ubuntu, Fedora, and Debian all publish operating system images which do generally include drivers and firmware for physical hardware. Many of these published "cloud" images also support auto-configuration of networking AND population of user keys. 
Issues with autoconfigured TLS ============================== These issues will manifest as an error in ``ironic-conductor`` logs looking similar to (lines are wrapped for readability):: ERROR ironic.drivers.modules.agent_client [-] Failed to connect to the agent running on node d7c322f0-0354-4008-92b4-f49fb2201001 for invoking command clean.get_clean_steps. Error: HTTPSConnectionPool(host='192.168.123.126', port=9999): Max retries exceeded with url: /v1/commands/?wait=true&agent_token= (Caused by SSLError(SSLError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:897)'),)): requests.exceptions.SSLError: HTTPSConnectionPool(host='192.168.123.126', port=9999): Max retries exceeded with url: /v1/commands/?wait=true&agent_token= (Caused by SSLError(SSLError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:897)'),)) The cause of the issue is that the Bare Metal service cannot access the ramdisk with the TLS certificate provided by the ramdisk on first heartbeat. You can inspect the stored certificate in ``/var/lib/ironic/certificates/.crt``. You can try connecting to the ramdisk using the IP address in the log message:: curl -vL https://:9999/v1/commands \ --cacert /var/lib/ironic/certificates/.crt You can get the detailed information about the certificate using openSSL:: openssl x509 -text -noout -in /var/lib/ironic/certificates/.crt Clock skew ---------- One possible source of the problem is a discrepancy between the hardware clock on the node and the time on the machine with the Bare Metal service. It can be detected by comparing the ``Not Before`` field in the ``openssl`` output with the timestamp of a log message. The recommended solution is to enable the NTP support in ironic-python-agent by passing the ``ipa-ntp-server`` argument with an address of an NTP server reachable by the node. If it is not possible, you need to ensure the correct hardware time on the machine. 
Keep in mind a potential issue with timezones: an ability to store timezone in hardware is pretty recent and may not be available. Since ironic-python-agent is likely operating in UTC, the hardware clock should also be set in UTC. .. note:: Microsoft Windows uses local time by default, so a machine that has previously run Windows will likely have wrong time. I changed ironic.conf, and now I can't edit my nodes. ===================================================== Whenever a node is created in ironic, default interfaces are identified as part of driver composition. This maybe sourced from explicit default values which have been set in ``ironic.conf`` or by the interface order for the enabled interfaces list. The result of this is that the ``ironic-conductor`` cannot spawn a ``task`` using the composed driver, as a portion of the driver is no longer enabled. This makes it difficult to edit or update the node if the settings have been changed. For example, with networking interfaces, if you have ``default_network_interface=neutron`` or ``enabled_network_interfaces=neutron,flat`` in your ``ironic.conf``, nodes would have been created with the ``neutron`` network interface. This is because ``default_network_interface`` overrides the setting for new nodes, and that setting is **saved** to the database nodes table. Similarly, the order of ``enabled_network_interfaces`` takes priority, and the first entry in the list is generally set to the default for the node upon creation, and that record is **saved** to the database nodes table. The only case where driver composition does *not* calculate a default is if an explicit value is provided upon the creation of the node. Example failure --------------- A node in this state, when the ``network_interface`` was saved as ``neutron``, yet the ``neutron`` interface is no longer enabled will fail basic state transition requests: .. 
code-block:: console $ baremetal node manage 7164efca-37ab-1213-1112-b731cf795a5a Could not find the following interface in the 'ironic.hardware.interfaces.network' entrypoint: neutron. Valid interfaces are ['flat']. (HTTP 400) How to fix this? ---------------- Revert the changes you made to ``ironic.conf``. This applies to any changes to any ``default_*_interface`` options or the order of interfaces for the ``enabled_*_interfaces`` options. Once the conductor has been restarted with the updated configuration, you should now be able to update the interface using the ``baremetal node set`` command. In this example we use the ``network_interface`` as this is most commonly where it is encountered: .. code-block:: console $ baremetal node set $NAME_OR_UUID --network-interface flat .. note:: There are additional paths one can take to remedy this sort of issue, however we encourage operators to be mindful of operational consistency when making major configuration changes. Once you have updated the saved interfaces, you should be able to safely re-apply the ``ironic.conf`` configuration change that alters which interfaces are enabled by the conductor. I'm getting Out of Memory errors ================================ This issue, also known as "the OOMKiller got my conductor" case, is where your OS system memory reaches a point where the operating system engages measures to shed active memory consumption in order to prevent a complete failure of the machine. Unfortunately this can cause unpredictable behavior. How did I get here? ------------------- One of the major consumers of memory in a host running an ironic-conductor is transformation of disk images using the ``qemu-img`` tool. This tool, because the disk images it works with are both compressed and out of linear block order, requires a considerable amount of memory to efficiently re-assemble and write-out a disk to a device, or to simply convert the format such as to a ``raw`` image. 
By default, ironic's configuration limits this conversion to 1 GB of RAM for the process, but each conversion does cause additional buffer memory to be used, which increases overall system memory pressure. Generally memory pressure alone from buffers will not cause an out of memory condition, but the multiple conversions or deployments running at the same time CAN cause extreme memory pressure and risk the system running out of memory. How do I resolve this? ---------------------- This can be addressed a few different ways: * Use raw images, however these images can be substantially larger and require more data to be transmitted "over the wire". * Add more physical memory. * Add swap space. * Reduce concurrency, possibly via another conductor or changing the nova-compute.conf ``max_concurrent_builds`` parameter. * Or finally, adjust the ``[DEFAULT]minimum_required_memory`` parameter in your ironic.conf file. The default should be considered a "default of last resort" and you may need to reserve additional memory. You may also wish to adjust the ``[DEFAULT]minimum_memory_wait_retries`` and ``[DEFAULT]minimum_memory_wait_time`` parameters. Why does API return "Node is locked by host"? ============================================= This error usually manifests as HTTP error 409 on the client side: Node d7e2aed8-50a9-4427-baaa-f8f595e2ceb3 is locked by host 192.168.122.1, please retry after the current operation is completed. It happens, because an operation that modifies a node is requested, while another such operation is running. The conflicting operation may be user requested (e.g. a provisioning action) or related to the internal processes (e.g. changing power state during :doc:`power-sync`). The reported host name corresponds to the conductor instance that holds the lock. Normally, these errors are transient and safe to retry after a few seconds. If the lock is held for significant time, these are the steps you can take. 
First of all, check the current ``provision_state`` of the node: ``verifying`` means that the conductor is trying to access the node's BMC. If it happens for minutes, it means that the BMC is either unreachable or misbehaving. Double-check the information in ``driver_info``, especially the BMC address and credentials. If the access details seem correct, try resetting the BMC using, for example, its web UI. ``deploying``/``inspecting``/``cleaning`` means that the conductor is doing some active work. It may include downloading or converting images, executing synchronous out-of-band deploy or clean steps, etc. A node can stay in this state for minutes, depending on various factors. Consult the conductor logs. ``available``/``manageable``/``wait call-back``/``clean wait`` means that some background process is holding the lock. Most commonly it's the power synchronization loop. Similarly to the ``verifying`` state, it may mean that the BMC access is broken or too slow. The conductor logs will provide you insights on what is happening. To trace the process using conductor logs: #. Isolate the relevant log parts. Lock messages come from the ``ironic.conductor.task_manager`` module. You can also check the ``ironic.common.states`` module for any state transitions: .. code-block:: console $ grep -E '(ironic.conductor.task_manager|ironic.common.states|NodeLocked)' \ conductor.log > state.log #. Find the first instance of ``NodeLocked``. It may look like this (stripping timestamps and request IDs here and below for readability):: DEBUG ironic.conductor.task_manager [-] Attempting to get exclusive lock on node d7e2aed8-50a9-4427-baaa-f8f595e2ceb3 (for node update) __init__ /usr/lib/python3.6/site-packages/ironic/conductor/task_manager.py:233 DEBUG ironic_lib.json_rpc.server [-] RPC error NodeLocked: Node d7e2aed8-50a9-4427-baaa-f8f595e2ceb3 is locked by host 192.168.57.53, please retry after the current operation is completed. 
_handle_error /usr/lib/python3.6/site-packages/ironic_lib/json_rpc/server.py:179 The events right before this failure will provide you a clue on why the lock is held. #. Find the last successful **exclusive** locking event before the failure, for example:: DEBUG ironic.conductor.task_manager [-] Attempting to get exclusive lock on node d7e2aed8-50a9-4427-baaa-f8f595e2ceb3 (for provision action manage) __init__ /usr/lib/python3.6/site-packages/ironic/conductor/task_manager.py:233 DEBUG ironic.conductor.task_manager [-] Node d7e2aed8-50a9-4427-baaa-f8f595e2ceb3 successfully reserved for provision action manage (took 0.01 seconds) reserve_node /usr/lib/python3.6/site-packages/ironic/conductor/task_manager.py:350 DEBUG ironic.common.states [-] Exiting old state 'enroll' in response to event 'manage' on_exit /usr/lib/python3.6/site-packages/ironic/common/states.py:307 DEBUG ironic.common.states [-] Entering new state 'verifying' in response to event 'manage' on_enter /usr/lib/python3.6/site-packages/ironic/common/states.py:313 This is your root cause, the lock is held because of the BMC credentials verification. #. Find when the lock is released (if at all). The messages look like this:: DEBUG ironic.conductor.task_manager [-] Successfully released exclusive lock for provision action manage on node d7e2aed8-50a9-4427-baaa-f8f595e2ceb3 (lock was held 60.02 sec) release_resources /usr/lib/python3.6/site-packages/ironic/conductor/task_manager.py:447 The message tells you the reason the lock was held (``for provision action manage``) and the amount of time it was held (60.02 seconds, which is way too much for accessing a BMC). Unfortunately, due to the way the conductor is designed, it is not possible to gracefully break a stuck lock held in ``*-ing`` states. As the last resort, you may need to restart the affected conductor. See `Why are my nodes stuck in a "-ing" state?`_. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/tuning.rst0000664000175000017500000002671600000000000020307 0ustar00zuulzuul00000000000000============= Tuning Ironic ============= Memory Utilization ================== Memory utilization is a difficult thing to tune in Ironic as largely we may be asked by API consumers to perform work for which the underlying tools require large amounts of memory. The biggest example of this is image conversion. Images not in a raw format need to be written out to disk for conversion (when requested) which requires the conversion process to generate an in-memory map to re-assemble the image contents into a coherent stream of data. This entire process also stresses the kernel buffers and cache. This ultimately comes down to a trade-off of Memory versus Performance, similar to the trade-off of Performance versus Cost. On a plus side, an idle Ironic deployment does not need much in the way of memory. On the down side, a highly bursty environment where a large number of concurrent deployments may be requested should consider two aspects: * How is the ironic-api service/process set up? Will more processes be launched automatically? * Are images prioritized for storage size on disk? Or are they compressed and require format conversion? API === Ironic's API should have a fairly stable memory footprint with activity, however depending on how the webserver is running the API, additional processes can be launched. Under normal conditions, as of Ironic 15.1, the ``ironic-api`` service/process consumes approximately 270MB of memory per worker. Depending on how the process is being launched, the number of workers and maximum request threads per worker may differ. Naturally there are configuration and performance trade-offs. * Directly as a native python process, i.e. execute ``ironic-api`` processes. 
Each single worker allows for multiple requests to be handled and threaded at the same time which can allow high levels of request concurrency. As of the Victoria cycle, a direct invocation of the ``ironic-api`` program will only launch a maximum of four workers. * Launched via a wrapper such as Apache+uWSGI may allow for multiple distinct worker processes, but these workers typically limit the number of request processing threads that are permitted to execute. This means requests can stack up in the front-end webserver and be released to the ``ironic-api`` as prior requests complete. In environments with long running synchronous calls, such as use of the vendor passthru interface, this can be very problematic. When the webserver is launched by the API process directly, the default is based upon the number of CPU sockets in your machine. When launching using uwsgi, this will entirely vary upon your configuration, but balancing workers/threads based upon your load and needs is highly advisable. Each worker process is unique and consumes far more memory than a comparable number of worker threads. At the same time, the scheduler will focus on worker processes as the threads are greenthreads. .. note:: Host operating systems featuring in-memory de-duplication should see an improvement in the overall memory footprint with multiple processes, but this is not something the development team has measured and will vary based upon multiple factors. One important item to note: each Ironic API service/process *does* keep a copy of the hash ring as generated from the database *in-memory*. This is done to help allocate load across a cluster in-line with how individual nodes and their responsible conductors are allocated across the cluster. In other words, your amount of memory WILL increase corresponding to the number of nodes managed by each ironic conductor. 
It is important to understand that features such as `conductor groups <./conductor-groups.rst>`_ means that only matching portions of nodes will be considered for the hash ring if needed. Conductor ========= A conductor process will launch a number of other processes, as required, in order to complete the requested work. Ultimately this means it can quickly consume large amounts of memory because it was asked to complete a substantial amount of work all at once. The ``ironic-conductor`` from ironic 15.1 consumes by default about 340MB of RAM in an idle configuration. This process, by default, operates as a single process. Additional processes can be launched, but they must have unique resolvable hostnames and addresses for JSON-RPC or use a central oslo.messaging supported message bus in order for Webserver API to Conductor API communication to be functional. Typically, the most memory intensive operation that can be triggered is a image conversion for deployment, which is limited to 1GB of RAM per conversion process. Most deployments, by default, do have a concurrency limit depending on their Compute (See `nova.conf `_ setting ``max_concurrent_builds``) configuration. However, this is only per ``nova-compute`` worker, so naturally this concurrency will scale with additional workers. Stand-alone users can easily request deployments exceeding the Compute service default maximum concurrent builds. As such, if your environment is used this way, you may wish to carefully consider your deployment architecture. With a single nova-compute process talking to a single conductor, asked to perform ten concurrent deployments of images requiring conversion, the memory needed may exceed 10GB. This does however, entirely depend upon image block structure and layout, and what deploy interface is being used. 
Database ======== Query load upon the database is one of the biggest potential bottlenecks which can cascade across a deployment and ultimately degrade service to an Ironic user. Often, depending on load, query patterns, periodic tasks, and so on and so forth, additional indexes may be needed to help provide hints to the database so it can most efficiently attempt to reduce the number of rows which need to be examined in order to return a result set. Adding indexes -------------- This example below is specific to MariaDB/MySQL, but the syntax should be easy to modify for operators using PostgreSQL. .. code-block:: sql use ironic; create index owner_idx on nodes (owner) LOCK = SHARED; create index lessee_idx on nodes (lessee) LOCK = SHARED; create index driver_idx on nodes (driver) LOCK = SHARED; create index provision_state_idx on nodes (provision_state) LOCK = SHARED; create index reservation_idx on nodes (reservation) LOCK = SHARED; create index conductor_group_idx on nodes (conductor_group) LOCK = SHARED; create index resource_class_idx on nodes (resource_class) LOCK = SHARED; .. note:: The indexes noted have been added automatically by Xena versions of Ironic and later. They are provided here as an example and operators can add them manually with prior versions of Ironic. The database upgrade for the Xena release of Ironic which adds these indexes is only aware of being able to skip index creation if it already exists on MySQL/MariaDB. .. note:: It may be possible to use "LOCK = NONE". Basic testing indicates this takes a little bit longer, but shouldn't result in the database table becoming write locked during the index creation. If the database engine cannot support this, then the index creation will fail. Database platforms also have a concept of what is called a "compound index" where the index is aligned with the exact query pattern being submitted to the database.
The database is able to use this compound index to attempt to drastically reduce the result set generation time for the remainder of the query. As of the composition of this document, we do not ship compound indexes in Ironic as we feel the most general benefit is single column indexes, and depending on data present, an operator may wish to explore compound indexes with their database administrator, as compound indexes can also have negative performance impacts if improperly constructed. .. code-block:: sql use ironic; create index my_custom_app_query_index on nodes (reservation, provision_state, driver); The risk, and *WHY* you should engage a Database Administrator, is depending on your configuration, the actual index may need to include one or more additional fields such as owner or lessee which may be added on to the index. At the same time, queries with fewer field matches, or in different orders will exhibit different performance as the compound index may not be able to be consulted. Indexes will not fix everything ------------------------------- Indexes are not a magical cure-all for all API or database performance issues, but they are an incredibly important part depending on data access and query patterns. The underlying object layer and data conversions including record pagination do add a substantial amount of overhead to what may otherwise return as a result set on a manual database query. In Ironic's case, due to the object model and the need to extract multiple pieces of data at varying levels of the data model to handle cases such as upgrades, the entire result set is downloaded and transformed which is an overhead you do not experience with a command line database client. BMC interaction =============== In its default configuration, Ironic runs a periodic task to synchronize the power state of the managed physical nodes with the Ironic database.
For the hardware type ``ipmi`` (see :doc:`/admin/drivers/ipmitool`) and depending on the number of nodes, the network connectivity, and the parallelism of these queries, this synchronization may fail and retries will be triggered. Please find more details on the power synchronization and which options to adapt in case too many power sync failures occur in the section on :doc:`/admin/power-sync`. What can I do? ============== Previously in this document, we've already suggested some architectural constraints and limitations, but there are some things that can be done to maximize performance. Again, this will vary greatly depending on your use. * Use the ``direct`` deploy interface. This offloads any final image conversion to the host running the ``ironic-python-agent``. Additionally, if Swift or other object storage such as RadosGW is used, downloads can be completely separated from the host running the ``ironic-conductor``. * Use small/compact "raw" images. Qcow2 files are generally compressed and require substantial amounts of memory to decompress and stream. * Tune the internal memory limit for the conductor using the ``[DEFAULT]minimum_required_memory`` setting. This will help the conductor throttle back memory intensive operations. The default should prevent Out-of-Memory operations, but under extreme memory pressure this may still be sub-optimal. Before changing this setting, it is highly advised to consult with your resident "Unix wizard" or even the Ironic development team in upstream IRC. This feature was added in the Wallaby development cycle. * If network bandwidth is the problem you are seeking to solve for, you may wish to explore a mix of the ``direct`` deploy interface and caching proxies. Such a configuration can be highly beneficial in wide area deployments. See :ref:`Using proxies for image download `.
* If you're making use of large configuration drives, you may wish to ensure you're using Swift to store them as opposed to housing them inside of the database. The entire object and contents are returned whenever Ironic needs to evaluate the entire node, which can become a performance impact. For more information on configuration drives, please see :ref:`Enabling the configuration drive `. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/upgrade-guide.rst0000664000175000017500000003450400000000000021517 0ustar00zuulzuul00000000000000.. _upgrade-guide: ================================ Bare Metal Service Upgrade Guide ================================ This document outlines various steps and notes for operators to consider when upgrading their ironic-driven clouds from previous versions of OpenStack. The Bare Metal (ironic) service is tightly coupled with the ironic driver that is shipped with the Compute (nova) service. Some special considerations must be taken into account when upgrading your cloud. Both offline and rolling upgrades are supported. Plan your upgrade ================= * Rolling upgrades are available starting with the Pike release; that is, when upgrading from Ocata. This means that it is possible to do an upgrade with minimal to no downtime of the Bare Metal API. * Upgrades are only supported between two consecutive named releases. This means that you cannot upgrade Ocata directly into Queens; you need to upgrade into Pike first. * The `release notes `_ should always be read carefully when upgrading the Bare Metal service. Specific upgrade steps and considerations are documented there. * The Bare Metal service should always be upgraded before the Compute service. .. note:: The ironic virt driver in nova always uses a specific version of the ironic REST API. 
This API version may be one that was introduced in the same development cycle, so upgrading nova first may result in nova being unable to use the Bare Metal API. * Make a backup of your database. Ironic does not support downgrading of the database. Hence, in case of upgrade failure, restoring the database from a backup is the only choice. * Before starting your upgrade, it is best to ensure that all nodes have reached, or are in, a stable ``provision_state``. Nodes in states with long running processes such as deploying or cleaning, may fail, and may require manual intervention to return them to the available hardware pool. This is most likely in cases where a timeout has occurred or a service was terminated abruptly. For a visual diagram detailing states and possible state transitions, please see :ref:`states`. Offline upgrades ================ In an offline (or cold) upgrade, the Bare Metal service is not available during the upgrade, because all the services have to be taken down. When upgrading the Bare Metal service, the following steps should always be taken in this order: #. upgrade the ironic-python-agent image #. update ironic code, without restarting services #. run database schema migrations via ``ironic-dbsync upgrade`` #. restart ironic-conductor and ironic-api services Once the above is done, do the following: * update any applicable configuration options to stop using any deprecated features or options, and perform any required work to transition to alternatives. All the deprecated features and options will be supported for one release cycle, so should be removed before your next upgrade is performed. * upgrade python-ironicclient along with any other services connecting to the Bare Metal service as a client, such as nova-compute * run the ``ironic-dbsync online_data_migrations`` command to make sure that data migrations are applied. 
The command lets you limit the impact of the data migrations with the ``--max-count`` option, which limits the number of migrations executed in one run. You should complete all of the migrations as soon as possible after the upgrade. .. warning:: You will not be able to start an upgrade to the release after this one, until this has been completed for the current release. For example, as part of upgrading from Ocata to Pike, you need to complete Pike's data migrations. If this is not done, you will not be able to upgrade to Queens -- it will not be possible to execute Queens' database schema updates. Rolling upgrades ================ To reduce downtime, the services can be upgraded in a rolling fashion, meaning to upgrade one or a few services at a time to minimize impact. Rolling upgrades are available starting with the Pike release. This feature makes it possible to upgrade between releases, such as Ocata to Pike, with minimal to no downtime of the Bare Metal API. Requirements ------------ To facilitate an upgrade in a rolling fashion, you need to have a highly-available deployment consisting of at least two ironic-api and two ironic-conductor services. Use of a load balancer to balance requests across the ironic-api services is recommended, as it allows for a minimal impact to end users. Concepts -------- There are four aspects of the rolling upgrade process to keep in mind: * API and RPC version pinning, and versioned object backports * online data migrations * graceful service shutdown * API load balancer draining API & RPC version pinning and versioned object backports ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Through careful RPC versioning, newer services are able to talk to older services (and vice-versa). The ``[DEFAULT]/pin_release_version`` configuration option is used for this. It should be set (pinned) to the release version that the older services are using.
The newer services will backport RPC calls and objects to their appropriate versions from the pinned release. If the ``IncompatibleObjectVersion`` exception occurs, it is most likely due to an incorrect or unspecified ``[DEFAULT]/pin_release_version`` configuration value. For example, when ``[DEFAULT]/pin_release_version`` is not set to the older release version, no conversion will happen during the upgrade. For the ironic-api service, the API version is pinned via the same ``[DEFAULT]/pin_release_version`` configuration option as above. When pinned, the new ironic-api services will not service any API requests with Bare Metal API versions that are higher than what the old ironic-api services support. HTTP status code 406 is returned for such requests. This prevents new features (available in new API versions) from being used until after the upgrade has been completed. Online data migrations ~~~~~~~~~~~~~~~~~~~~~~ To make database schema migrations less painful to execute, we have implemented process changes to facilitate upgrades. * All data migrations are banned from schema migration scripts. * Schema migration scripts only update the database schema. * Data migrations must be done at the end of the rolling upgrade process, after the schema migration and after the services have been upgraded to the latest release. All data migrations are performed using the ``ironic-dbsync online_data_migrations`` command. It can be run as a background process so that it does not interrupt running services; however it must be run to completion for a cold upgrade if the intent is to make use of new features immediately. (You would also execute the same command with services turned off if you are doing a cold upgrade). This data migration must be completed. If not, you will not be able to upgrade to future releases. For example, if you had upgraded from Ocata to Pike but did not do the data migrations, you will not be able to upgrade from Pike to Queens. 
(More precisely, you will not be able to apply Queens' schema migrations.) Graceful conductor service shutdown ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ironic-conductor service is a Python process listening for messages on a message queue. When the operator sends the SIGTERM signal to the process, the service stops consuming messages from the queue, so that no additional work is picked up. It completes any outstanding work and then terminates. During this process, messages can be left on the queue and will be processed after the Python process starts back up. This gives us a way to shutdown a service using older code, and start up a service using newer code with minimal impact. .. note:: This was tested with RabbitMQ messaging backend and may vary with other backends. Nodes that are being acted upon by an ironic-conductor process, which are not in a stable state, may encounter failures. Node failures that occur during an upgrade are likely due to timeouts, resulting from delays involving messages being processed and acted upon by a conductor during long running, multi-step processes such as deployment or cleaning. API load balancer draining ~~~~~~~~~~~~~~~~~~~~~~~~~~ If you are using a load balancer for the ironic-api services, we recommend that you redirect requests to the new API services and drain off of the ironic-api services that have not yet been upgraded. Rolling upgrade process ----------------------- Before maintenance window ~~~~~~~~~~~~~~~~~~~~~~~~~ * Upgrade the ironic-python-agent image * Using the new release (ironic code), execute the required database schema updates by running the database upgrade command: ``ironic-dbsync upgrade``. These schema change operations should have minimal or no effect on performance, and should not cause any operations to fail (but please check the release notes). 
You can: * install the new release on an existing system * install the new release in a new virtualenv or a container At this point, new columns and tables may exist in the database. These database schema changes are done in a way that both the old and new (N and N+1) releases can perform operations against the same schema. .. note:: Ironic bases its API, RPC and object storage format versions on the ``[DEFAULT]/pin_release_version`` configuration option. It is advisable to automate the deployment of changes in configuration files to make the process less error prone and repeatable. During maintenance window ~~~~~~~~~~~~~~~~~~~~~~~~~ #. All ironic-conductor services should be upgraded first. Ensure that at least one ironic-conductor service is running at all times. For every ironic-conductor, either one by one or a few at a time: * shut down the service. Messages from the ironic-api services to the conductors are load-balanced by the message queue and a hash-ring, so the only thing you need to worry about is to shut the service down gracefully (using ``SIGTERM`` signal) to make sure it will finish all the requests being processed before shutting down. * upgrade the installed version of ironic and dependencies * set the ``[DEFAULT]/pin_release_version`` configuration option value to the version you are upgrading from (that is, the old version). Based on this setting, the new ironic-conductor services will downgrade any RPC communication and data objects to conform to the old service. For example, if you are upgrading from Ocata to Pike, set this value to ``ocata``. * start the service #. The next service to upgrade is ironic-api. Ensure that at least one ironic-api service is running at all times. You may want to start another temporary instance of the older ironic-api to handle the load while you are upgrading the original ironic-api services. 
For every ironic-api service, either one by one or a few at a time: * in HA deployment you are typically running them behind a load balancer (for example HAProxy), so you need to take the service instance out of the balancer * shut it down * upgrade the installed version of ironic and dependencies * set the ``[DEFAULT]/pin_release_version`` configuration option value to the version you are upgrading from (that is, the old version). Based on this setting, the new ironic-api services will downgrade any RPC communication and data objects to conform to the old service. In addition, the new services will return HTTP status code 406 for any requests with newer API versions that the old services did not support. This prevents new features (available in new API versions) from being used until after the upgrade has been completed. For example, if you are upgrading from Ocata to Pike, set this value to ``ocata``. * restart the service * add it back into the load balancer After upgrading all the ironic-api services, the Bare Metal service is running in the new version but with downgraded RPC communication and database object storage formats. New features (in new API versions) are not supported, because they could fail when objects are in the downgraded object formats and some internal RPC API functions may still not be available. #. For all the ironic-conductor services, one at a time: * remove the ``[DEFAULT]/pin_release_version`` configuration option setting * restart the ironic-conductor service #. For all the ironic-api services, one at a time: * remove the ``[DEFAULT]/pin_release_version`` configuration option setting * restart the ironic-api service After maintenance window ~~~~~~~~~~~~~~~~~~~~~~~~ Now that all the services are upgraded, the system is able to use the latest version of the RPC protocol and able to access all the features of the new release. 
* Update any applicable configuration options to stop using any deprecated features or options, and perform any required work to transition to alternatives. All the deprecated features and options will be supported for one release cycle, so should be removed before your next upgrade is performed. * Upgrade ``python-ironicclient`` along with other services connecting to the Bare Metal service as a client, such as ``nova-compute``. .. warning:: A ``nova-compute`` instance tries to attach VIFs to all active instances on start up. Make sure that for all active nodes there is at least one running ``ironic-conductor`` process to manage them. Otherwise the instances will be moved to the ``ERROR`` state on the ``nova-compute`` start up. * Run the ``ironic-dbsync online_data_migrations`` command to make sure that data migrations are applied. The command lets you limit the impact of the data migrations with the ``--max-count`` option, which limits the number of migrations executed in one run. You should complete all of the migrations as soon as possible after the upgrade. .. warning:: Note that you will not be able to start an upgrade to the next release after this one, until this has been completed for the current release. For example, as part of upgrading from Ocata to Pike, you need to complete Pike's data migrations. If this not done, you will not be able to upgrade to Queens -- it will not be possible to execute Queens' database schema updates. .. toctree:: :hidden: upgrade-to-hardware-types.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/upgrade-to-hardware-types.rst0000664000175000017500000002420000000000000023771 0ustar00zuulzuul00000000000000Upgrading to Hardware Types =========================== Starting with the Rocky release, the Bare Metal service does not support *classic drivers* any more. 
If you still use *classic drivers*, please upgrade to *hardware types* immediately. Please see :doc:`/install/enabling-drivers` for details on *hardware types* and *hardware interfaces*. Planning the upgrade -------------------- It is necessary to figure out which hardware types and hardware interfaces correspond to which classic drivers used in your deployment. The following table lists the classic drivers with their corresponding hardware types and the boot, deploy, inspect, management, and power hardware interfaces: ===================== ==================== ==================== ============== ========== ========== ========= Classic Driver Hardware Type Boot Deploy Inspect Management Power ===================== ==================== ==================== ============== ========== ========== ========= agent_ilo ilo ilo-virtual-media direct ilo ilo ilo agent_ipmitool ipmi pxe direct inspector ipmitool ipmitool agent_ipmitool_socat ipmi pxe direct inspector ipmitool ipmitool agent_irmc irmc irmc-virtual-media direct irmc irmc irmc iscsi_ilo ilo ilo-virtual-media iscsi ilo ilo ilo iscsi_irmc irmc irmc-virtual-media iscsi irmc irmc irmc pxe_drac idrac pxe iscsi idrac idrac idrac pxe_drac_inspector idrac pxe iscsi inspector idrac idrac pxe_ilo ilo ilo-pxe iscsi ilo ilo ilo pxe_ipmitool ipmi pxe iscsi inspector ipmitool ipmitool pxe_ipmitool_socat ipmi pxe iscsi inspector ipmitool ipmitool pxe_irmc irmc irmc-pxe iscsi irmc irmc irmc pxe_snmp snmp pxe iscsi no-inspect fake snmp ===================== ==================== ==================== ============== ========== ========== ========= .. note:: The ``inspector`` *inspect* interface was only used if explicitly enabled in the configuration. Otherwise, ``no-inspect`` was used. .. note:: ``pxe_ipmitool_socat`` and ``agent_ipmitool_socat`` use ``ipmitool-socat`` *console* interface (the default for the ``ipmi`` hardware type), while ``pxe_ipmitool`` and ``agent_ipmitool`` use ``ipmitool-shellinabox``. 
See Console_ for details. For out-of-tree drivers you may need to reach out to their maintainers or figure out the appropriate interfaces by researching the source code. Configuration ------------- You will need to enable hardware types and interfaces that correspond to your currently enabled classic drivers. For example, if you have the following configuration in your ``ironic.conf``: .. code-block:: ini [DEFAULT] enabled_drivers = pxe_ipmitool,agent_ipmitool You will have to add this configuration as well: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi enabled_boot_interfaces = pxe enabled_deploy_interfaces = iscsi,direct enabled_management_interfaces = ipmitool enabled_power_interfaces = ipmitool .. note:: For every interface type there is an option ``default_<interface_type>_interface``, where ``<interface_type>`` is the interface type name. For example, one can make all nodes use the ``direct`` deploy method by default by setting: .. code-block:: ini [DEFAULT] default_deploy_interface = direct Migrating nodes --------------- After the required items are enabled in the configuration, each node's ``driver`` field has to be updated to a new value. You may need to also set new values for some or all interfaces: .. code-block:: console export OS_BAREMETAL_API_VERSION=1.31 for uuid in $(baremetal node list --driver pxe_ipmitool -f value -c UUID); do baremetal node set $uuid --driver ipmi --deploy-interface iscsi done for uuid in $(baremetal node list --driver agent_ipmitool -f value -c UUID); do baremetal node set $uuid --driver ipmi --deploy-interface direct done See :doc:`/install/enrollment` for more details on setting hardware types and interfaces. .. warning:: It is not recommended to change the interfaces for ``active`` nodes. If absolutely needed, the nodes have to be put in the maintenance mode first: .. 
code-block:: console baremetal node maintenance set $UUID \ --reason "Changing driver and/or hardware interfaces" # do the update, validate its correctness baremetal node maintenance unset $UUID Other interfaces ---------------- Care has to be taken to migrate from classic drivers using non-default interfaces. This chapter covers a few of the most commonly used. Ironic Inspector ~~~~~~~~~~~~~~~~ Some classic drivers, notably ``pxe_ipmitool``, ``agent_ipmitool`` and ``pxe_drac_inspector``, use ironic-inspector_ for their *inspect* interface. The same functionality is available for all hardware types, but the appropriate ``inspect`` interface has to be enabled in the Bare Metal service configuration file, for example: .. code-block:: ini [DEFAULT] enabled_inspect_interfaces = inspector,no-inspect See :doc:`/install/enabling-drivers` for more details. .. note:: The configuration option ``[inspector]enabled`` does not affect hardware types. Then you can tell your nodes to use this interface, for example: .. code-block:: console export OS_BAREMETAL_API_VERSION=1.31 for uuid in $(baremetal node list --driver ipmi -f value -c UUID); do baremetal node set $uuid --inspect-interface inspector done .. note:: A node configured with the IPMI hardware type will use the inspector inspection implementation automatically if it is enabled. This is not the case for most of the vendor drivers. .. _ironic-inspector: https://docs.openstack.org/ironic-inspector/ Console ~~~~~~~ Several classic drivers, notably ``pxe_ipmitool_socat`` and ``agent_ipmitool_socat``, use a socat-based serial console implementation. For the ``ipmi`` hardware type it is used by default, if enabled in the configuration file: .. code-block:: ini [DEFAULT] enabled_console_interfaces = ipmitool-socat,no-console If you want to use the ``shellinabox`` implementation instead, it has to be enabled as well: .. 
code-block:: ini [DEFAULT] enabled_console_interfaces = ipmitool-shellinabox,no-console Then you need to update some or all nodes to use it explicitly. For example, to update all nodes use: .. code-block:: console export OS_BAREMETAL_API_VERSION=1.31 for uuid in $(baremetal node list --driver ipmi -f value -c UUID); do baremetal node set $uuid --console-interface ipmitool-shellinabox done RAID ~~~~ Many classic drivers, including ``pxe_ipmitool`` and ``agent_ipmitool``, use the IPA-based in-band RAID implementation by default. For the hardware types it is not used by default. To use it, you need to enable it in the configuration first: .. code-block:: ini [DEFAULT] enabled_raid_interfaces = agent,no-raid Then you can update those nodes that support in-band RAID to use the ``agent`` RAID interface. For example, to update all nodes use: .. code-block:: console export OS_BAREMETAL_API_VERSION=1.31 for uuid in $(baremetal node list --driver ipmi -f value -c UUID); do baremetal node set $uuid --raid-interface agent done .. note:: The ability of a node to use the ``agent`` RAID interface depends on the ramdisk (more specifically, a :ironic-python-agent-doc:`hardware manager ` used in it), not on the driver. Network and storage ~~~~~~~~~~~~~~~~~~~ The network and storage interfaces have always been dynamic, and thus do not require any special treatment during upgrade. Vendor ~~~~~~ Classic drivers are allowed to use the ``VendorMixin`` functionality to combine and expose several node or driver vendor passthru methods from different vendor interface implementations in one driver. **This is no longer possible with hardware types.** With hardware types, a vendor interface can only have a single active implementation from the list of vendor interfaces supported by a given hardware type. Ironic no longer has in-tree drivers (both classic and hardware types) that rely on this ``VendorMixin`` functionality support. 
However if you are using an out-of-tree classic driver that depends on it, you'll need to do the following in order to use vendor passthru methods from different vendor passthru implementations: #. While creating a new hardware type to replace your classic driver, specify all vendor interface implementations your classic driver was using to build its ``VendorMixin`` as supported vendor interfaces (property ``supported_vendor_interfaces`` of the Python class that defines your hardware type). #. Ensure all required vendor interfaces are enabled in the ironic configuration file under the ``[DEFAULT]enabled_vendor_interfaces`` option. You should also consider setting the ``[DEFAULT]default_vendor_interface`` option to specify the vendor interface for nodes that do not have one set explicitly. #. Before invoking a specific vendor passthru method, make sure that the node's vendor interface is set to the interface with the desired vendor passthru method. For example, if you want to invoke the vendor passthru method ``vendor_method_foo()`` from the ``vendor_foo`` vendor interface: .. code-block:: shell # set the vendor interface to 'vendor_foo' baremetal node set <node> --vendor-interface vendor_foo # invoke the vendor passthru method baremetal node passthru call <node> vendor_method_foo ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/admin/vendor-passthru.rst0000664000175000017500000001407400000000000022141 0ustar00zuulzuul00000000000000Vendor Passthru =============== The bare metal service allows drivers to expose a vendor-specific API known as *vendor passthru*. Node Vendor Passthru -------------------- Drivers may implement a passthrough API, which is accessible via the ``/v1/nodes/<node ident>/vendor_passthru?method={METHOD}`` endpoint. Beyond basic checking, Ironic does not introspect the message body and simply "passes it through" to the relevant driver. 
A method: * can support one or more HTTP methods (for example, GET, POST) * is asynchronous or synchronous + For asynchronous methods, a 202 (Accepted) HTTP status code is returned to indicate that the request was received, accepted and is being acted upon. No body is returned in the response. + For synchronous methods, a 200 (OK) HTTP status code is returned to indicate that the request was fulfilled. The response may include a body. * can require an exclusive lock on the node. This only occurs if the method doesn't specify require_exclusive_lock=False in the decorator. If an exclusive lock is held on the node, other requests for the node will be delayed and may fail with an HTTP 409 (Conflict) error code. This endpoint exposes a node's driver directly, and as such, it is expressly not part of Ironic's standard REST API. There is only a single HTTP endpoint exposed, and the semantics of the message body are determined solely by the driver. Ironic makes no guarantees about backwards compatibility; this is solely up to the discretion of each driver's author. To get information about all the methods available via the vendor_passthru endpoint for a particular node, use CLI: .. code-block:: console $ baremetal node passthru list +-----------------------+------------------------+-------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+ | Name | Supported HTTP methods | Async | Description | Response is attachment | +-----------------------+------------------------+-------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+ | create_subscription | POST | False | Creates a subscription on a node. 
Required argument: a dictionary of {'destination': 'destination_url'} | False | | delete_subscription | DELETE | False | Delete a subscription on a node. Required argument: a dictionary of {'id': 'subscription_bmc_id'} | False | | eject_vmedia | POST | True | Eject a virtual media device. If no device is provided then all attached devices will be ejected. Optional arguments: 'boot_device' - the boot device to eject, either 'cd', 'dvd', 'usb', or 'floppy' | False | | get_all_subscriptions | GET | False | Returns all subscriptions on the node. | False | | get_subscription | GET | False | Get a subscription on the node. Required argument: a dictionary of {'id': 'subscription_bmc_id'} | False | +-----------------------+------------------------+-------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------+ The response will contain information for each method, such as the method's name, a description, the HTTP methods supported, and whether it's asynchronous or synchronous. You can call a method with CLI, for example: .. code-block:: console $ baremetal node passthru call eject_vmedia Driver Vendor Passthru ---------------------- Drivers may implement an API for requests not related to any node, at ``/v1/drivers//vendor_passthru?method={METHOD}``. A method: * can support one or more HTTP methods (for example, GET, POST) * is asynchronous or synchronous + For asynchronous methods, a 202 (Accepted) HTTP status code is returned to indicate that the request was received, accepted and is being acted upon. No body is returned in the response. + For synchronous methods, a 200 (OK) HTTP status code is returned to indicate that the request was fulfilled. The response may include a body. .. 
note:: Unlike methods in `Node Vendor Passthru`_, a request does not lock any resource, so it will not delay other requests and will not fail with an HTTP 409 (Conflict) error code. Ironic makes no guarantees about the semantics of the message BODY sent to this endpoint. That is left up to each driver's author. To get information about all the methods available via the driver vendor_passthru endpoint, use CLI: .. code-block:: console $ baremetal driver passthru list redfish The response will contain information for each method, such as the method's name, a description, the HTTP methods supported, and whether it's asynchronous or synchronous. .. warning:: Currently only the methods available in the default interfaces of the hardware type are available. You can call a method with CLI, for example: .. code-block:: console $ baremetal driver passthru call ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8866668 ironic-20.1.0/doc/source/cli/0000775000175000017500000000000000000000000015714 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/cli/index.rst0000664000175000017500000000024000000000000017551 0ustar00zuulzuul00000000000000Command References ================== Here are references for commands not elsewhere documented. .. toctree:: :maxdepth: 1 ironic-dbsync ironic-status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/cli/ironic-dbsync.rst0000664000175000017500000001430100000000000021210 0ustar00zuulzuul00000000000000============= ironic-dbsync ============= The :command:`ironic-dbsync` utility is used to create the database schema tables that the ironic services will use for storage. It can also be used to upgrade existing database tables when migrating between different versions of ironic. 
The `Alembic library `_ is used to perform the database migrations. Options ======= This is a partial list of the most useful options. To see the full list, run the following:: ironic-dbsync --help .. program:: ironic-dbsync .. option:: -h, --help Show help message and exit. .. option:: --config-dir Path to a config directory with configuration files. .. option:: --config-file Path to a configuration file to use. .. option:: -d, --debug Print debugging output. .. option:: --version Show the program's version number and exit. .. option:: upgrade, stamp, revision, version, create_schema, online_data_migrations The :ref:`command ` to run. Usage ===== Options for the various :ref:`commands ` for :command:`ironic-dbsync` are listed when the :option:`-h` or :option:`--help` option is used after the command. For example:: ironic-dbsync create_schema --help Information about the database is read from the ironic configuration file used by the API server and conductor services. This file must be specified with the :option:`--config-file` option:: ironic-dbsync --config-file /path/to/ironic.conf create_schema The configuration file defines the database backend to use with the *connection* database option:: [database] connection=mysql+pymysql://root@localhost/ironic If no configuration file is specified with the :option:`--config-file` option, :command:`ironic-dbsync` assumes an SQLite database. .. _dbsync_cmds: Command Options =============== :command:`ironic-dbsync` is given a command that tells the utility what actions to perform. These commands can take arguments. Several commands are available: .. _create_schema: create_schema ------------- .. program:: create_schema .. option:: -h, --help Show help for create_schema and exit. This command will create database tables based on the most current version. It assumes that there are no existing tables. 
An example of creating database tables with the most recent version:: ironic-dbsync --config-file=/etc/ironic/ironic.conf create_schema online_data_migrations ---------------------- .. program:: online_data_migrations .. option:: -h, --help Show help for online_data_migrations and exit. .. option:: --max-count The maximum number of objects (a positive value) to migrate. Optional. If not specified, all the objects will be migrated (in batches of 50 to avoid locking the database for long periods of time). .. option:: --option If a migration accepts additional parameters, they can be passed via this argument. It can be specified several times. This command will migrate objects in the database to their most recent versions. This command must be successfully run (return code 0) before upgrading to a future release. It returns: * 1 (not completed) if there are still pending objects to be migrated. Before upgrading to a newer release, this command must be run until 0 is returned. * 0 (success) after migrations are finished or there are no data to migrate * 127 (error) if max-count is not a positive value or an option is invalid * 2 (error) if the database is not compatible with this release. This command needs to be run using the previous release of ironic, before upgrading and running it with this release. revision -------- .. program:: revision .. option:: -h, --help Show help for revision and exit. .. option:: -m , --message The message to use with the revision file. .. option:: --autogenerate Compares table metadata in the application with the status of the database and generates migrations based on this comparison. This command will create a new revision file. You can use the :option:`--message` option to comment the revision. This is really only useful for ironic developers making changes that require database changes. This revision file is used during database migration and will specify the changes that need to be made to the database tables. 
Further discussion is beyond the scope of this document. stamp ----- .. program:: stamp .. option:: -h, --help Show help for stamp and exit. .. option:: --revision The revision number. This command will 'stamp' the revision table with the version specified with the :option:`--revision` option. It will not run any migrations. upgrade ------- .. program:: upgrade .. option:: -h, --help Show help for upgrade and exit. .. option:: --revision The revision number to upgrade to. This command will upgrade existing database tables to the most recent version, or to the version specified with the :option:`--revision` option. Before this ``upgrade`` is invoked, the command :command:`ironic-dbsync online_data_migrations` must have been successfully run using the previous version of ironic (if you are doing an upgrade as opposed to a new installation of ironic). If it wasn't run, the database will not be compatible with this recent version of ironic, and this command will return 2 (error). If there are no existing tables, then new tables are created, beginning with the oldest known version, and successively upgraded using all of the database migration files, until they are at the specified version. Note that this behavior is different from the :ref:`create_schema` command that creates the tables based on the most recent version. An example of upgrading to the most recent table versions:: ironic-dbsync --config-file=/etc/ironic/ironic.conf upgrade .. note:: This command is the default if no command is given to :command:`ironic-dbsync`. .. warning:: The upgrade command is not compatible with SQLite databases since it uses ALTER TABLE commands to upgrade the database tables. SQLite supports only a limited subset of ALTER TABLE. version ------- .. program:: version .. option:: -h, --help Show help for version and exit. This command will output the current database version. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/cli/ironic-status.rst0000664000175000017500000000360300000000000021254 0ustar00zuulzuul00000000000000============= ironic-status ============= Synopsis ======== :: ironic-status [] Description =========== :program:`ironic-status` is a tool that provides routines for checking the status of a Ironic deployment. Options ======= The standard pattern for executing a :program:`ironic-status` command is:: ironic-status [] Run without arguments to see a list of available command categories:: ironic-status Categories are: * ``upgrade`` Detailed descriptions are below. You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: ironic-status upgrade These sections describe the available categories and arguments for :program:`ironic-status`. Upgrade ~~~~~~~ .. _ironic-status-checks: ``ironic-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. This command expects to have complete configuration and access to databases and services. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **12.0.0 (Stein)** * Adds a check for compatibility of the object versions with the release of ironic. **Wallaby** * Adds a check to validate the configured policy file is not JSON based as JSON based policies have been deprecated. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/conf.py0000664000175000017500000001236600000000000016454 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys import eventlet # NOTE(dims): monkey patch subprocess to prevent failures in latest eventlet # See https://github.com/eventlet/eventlet/issues/398 try: eventlet.monkey_patch(subprocess=True) except TypeError: pass # -- General configuration ---------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.join(os.path.abspath('.'), '_exts')) # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
extensions = ['sphinx.ext.viewcode', 'sphinx.ext.graphviz', 'sphinxcontrib.seqdiag', 'sphinxcontrib.apidoc', 'sphinxcontrib.rsvgconverter', 'oslo_config.sphinxext', 'oslo_config.sphinxconfiggen', 'oslo_policy.sphinxext', 'oslo_policy.sphinxpolicygen', 'automated_steps', 'openstackdocstheme', 'web_api_docstring' ] # sphinxcontrib.apidoc options apidoc_module_dir = '../../ironic' apidoc_output_dir = 'contributor/api' apidoc_excluded_paths = [ 'db/sqlalchemy/alembic/env', 'db/sqlalchemy/alembic/versions/*', 'drivers/modules/ansible/playbooks*', 'hacking', 'tests', ] apidoc_separate_modules = True openstackdocs_repo_name = 'openstack/ironic' openstackdocs_use_storyboard = True openstackdocs_pdf_link = True openstackdocs_projects = [ 'bifrost', 'cinder', 'glance', 'ironic', 'ironic-inspector', 'ironic-lib', 'ironic-neutron-agent', 'ironic-python-agent', 'ironic-ui', 'keystone', 'keystonemiddleware', 'metalsmith', 'networking-baremetal', 'neutron', 'nova', 'oslo.messaging', 'oslo.reports', 'oslo.versionedobjects', 'oslotest', 'osprofiler', 'os-traits', 'python-ironicclient', 'python-ironic-inspector-client', 'python-openstackclient', 'swift', ] wsme_protocols = ['restjson'] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. copyright = u'OpenStack Foundation' config_generator_config_file = '../../tools/config/ironic-config-generator.conf' sample_config_basename = '_static/ironic' policy_generator_config_file = '../../tools/policy/ironic-policy-generator.conf' sample_policy_basename = '_static/ironic' # A list of ignored prefixes for module index sorting. 
modindex_common_prefix = ['ironic.'] # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of glob-style patterns that should be excluded when looking for # source files. They are matched against the source file names relative to the # source directory, using slashes as directory separators on all platforms. exclude_patterns = ['api/ironic.drivers.modules.ansible.playbooks.*', 'api/ironic.tests.*'] # Ignore the following warning: WARNING: while setting up extension # wsmeext.sphinxext: directive 'autoattribute' is already registered, # it will be overridden. suppress_warnings = ['app.add_directive'] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'openstackdocs' # Output file base name for HTML help builder. htmlhelp_basename = 'Ironicdoc' latex_use_xindy = False # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). 
latex_documents = [ ( 'index', 'doc-ironic.tex', u'Ironic Documentation', u'OpenStack Foundation', 'manual' ), ] # Allow deeper levels of nesting for \begin...\end stanzas latex_elements = {'maxlistdepth': 10} # -- Options for seqdiag ------------------------------------------------------ seqdiag_html_image_format = "SVG" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8866668 ironic-20.1.0/doc/source/configuration/0000775000175000017500000000000000000000000020014 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/configuration/config.rst0000664000175000017500000000044000000000000022011 0ustar00zuulzuul00000000000000===================== Configuration Options ===================== The following is an overview of all available configuration options in Ironic. For a sample configuration file, refer to :doc:`sample-config`. .. show-options:: :config-file: tools/config/ironic-config-generator.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/configuration/index.rst0000664000175000017500000000102600000000000021654 0ustar00zuulzuul00000000000000======================= Configuration Reference ======================= Many aspects of the Bare Metal service are specific to the environment it is deployed in. The following pages describe configuration options that can be used to adjust the service to your particular situation. .. toctree:: :maxdepth: 1 Configuration Options Policies .. only:: html Sample files ------------ .. 
toctree:: :maxdepth: 1 Sample Config File Sample Policy File ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/configuration/policy.rst0000664000175000017500000000117600000000000022052 0ustar00zuulzuul00000000000000======== Policies ======== .. warning:: JSON formatted policy files were deprecated in the Wallaby development cycle due to the Victoria deprecation by the ``olso.policy`` library. Use the `oslopolicy-convert-json-to-yaml`__ tool to convert the existing JSON to YAML formatted policy file in backward compatible way. .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html The following is an overview of all available policies in Ironic. For a sample configuration file, refer to :doc:`sample-policy`. .. show-policy:: :config-file: tools/policy/ironic-policy-generator.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/configuration/sample-config.rst0000664000175000017500000000111600000000000023271 0ustar00zuulzuul00000000000000========================= Sample Configuration File ========================= The following is a sample Ironic configuration for adaptation and use. For a detailed overview of all available configuration options, refer to :doc:`config`. The sample configuration can also be viewed in :download:`file form `. .. important:: The sample configuration file is auto-generated from Ironic when this documentation is built. You must ensure your version of Ironic matches the version of this documentation. .. 
literalinclude:: /_static/ironic.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/configuration/sample-policy.rst0000664000175000017500000000062600000000000023330 0ustar00zuulzuul00000000000000============= Ironic Policy ============= The following is a sample Ironic policy file, autogenerated from Ironic when this documentation is built. To prevent conflicts, ensure your version of Ironic aligns with the version of this documentation. The sample policy can also be downloaded as a :download:`file `. .. literalinclude:: /_static/ironic.policy.yaml.sample ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8906667 ironic-20.1.0/doc/source/contributor/0000775000175000017500000000000000000000000017517 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/adding-new-job.rst0000664000175000017500000000452700000000000023046 0ustar00zuulzuul00000000000000.. _adding-new-job: ================ Adding a new Job ================ Are you familiar with Zuul? =========================== Before start trying to figure out how Zuul works, take some time and read about `Zuul Config `_ and the `Zuul Best Practices `_. .. _zuul_config: https://zuul-ci.org/docs/zuul/user/config.html .. _zuul_best_practices: https://docs.openstack.org/infra/manual/creators.html#zuul-best-practices Where can I find the existing jobs? =================================== The jobs for the Ironic project are defined under the zuul.d_ folder in the root directory, that contains three files, whose function is described below. * ironic-jobs.yaml_: Contains the configuration of each Ironic Job converted to Zuul v3. * legacy-ironic-jobs.yaml_: Contains the configuration of each Ironic Job that haven't been converted to Zuul v3 yet. 
* project.yaml_: Contains the jobs that will run during check and gate phase. .. _zuul.d: https://opendev.org/openstack/ironic/src/branch/master/zuul.d .. _ironic-jobs.yaml: https://opendev.org/openstack/ironic/src/branch/master/zuul.d/ironic-jobs.yaml .. _legacy-ironic-jobs.yaml: https://opendev.org/openstack/ironic/src/branch/master/zuul.d/legacy-ironic-jobs.yaml .. _project.yaml: https://opendev.org/openstack/ironic/src/branch/master/zuul.d/project.yaml Create a new Job ================ Identify among the existing jobs the one that most closely resembles the scenario you want to test, the existing job will be used as `parent` in your job definition. Now you will only need to either overwrite or add variables to your job definition under the `vars` section to represent the desired scenario. The code block below shows the minimal structure of a new job definition that you need to add to ironic-jobs.yaml_. .. code-block:: yaml - job: name: description: parent: vars: : After having the definition of your new job you just need to add the job name to the project.yaml_ under `check` and `gate`. Only jobs that are voting should be in the `gate` section. .. code-block:: yaml - project: check: jobs: - gate: queue: ironic jobs: - ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/architecture.rst0000664000175000017500000000771100000000000022741 0ustar00zuulzuul00000000000000.. _architecture: =================== System Architecture =================== High Level description ====================== An Ironic deployment will be composed of the following components: - An admin-only RESTful `API service`_, by which privileged users, such as cloud operators and other services within the cloud control plane, may interact with the managed bare metal servers. - A `Conductor service`_, which does the bulk of the work. Functionality is exposed via the `API service`_. 
The Conductor and API services communicate via RPC. - A Database and `DB API`_ for storing the state of the Conductor and Drivers. - A Deployment Ramdisk or Deployment Agent, which provide control over the hardware which is not available remotely to the Conductor. A ramdisk should be built which contains one of these agents, eg. with `diskimage-builder`_. This ramdisk can be booted on-demand. .. note:: The agent is never run inside a tenant instance. .. _`architecture_drivers`: Drivers ======= The internal driver API provides a consistent interface between the Conductor service and the driver implementations. A driver is defined by a *hardware type* deriving from the AbstractHardwareType_ class, defining supported *hardware interfaces*. See :doc:`/install/enabling-drivers` for a more detailed explanation. See :doc:`drivers` for an explanation on how to write new hardware types and interfaces. Driver-Specific Periodic Tasks ------------------------------ Drivers may run their own periodic tasks, i.e. actions run repeatedly after a certain amount of time. Such a task is created by using the periodic_ decorator on an interface method. For example .. code-block:: python from futurist import periodics class FakePower(base.PowerInterface): @periodics.periodic(spacing=42) def task(self, manager, context): pass # do something Here the ``spacing`` argument is a period in seconds for a given periodic task. For example 'spacing=5' means every 5 seconds. Starting with the Yoga cycle, there is also a new decorator :py:func:`ironic.conductor.periodics.node_periodic` to create periodic tasks that handle nodes. See :ref:`deploy steps documentation ` for an example. Driver-Specific Steps --------------------- Drivers may have specific steps that may need to be executed or offered to a user to execute in order to perform specific configuration tasks. These steps should ideally be located on the management interface to enable consistent user experience of the hardware type. 
What should be avoided is duplication of existing interfaces such as the deploy interface to enable vendor specific cleaning or deployment steps. Message Routing =============== Each Conductor registers itself in the database upon start-up, and periodically updates the timestamp of its record. Contained within this registration is a list of the drivers which this Conductor instance supports. This allows all services to maintain a consistent view of which Conductors and which drivers are available at all times. Based on their respective driver, all nodes are mapped across the set of available Conductors using a `consistent hashing algorithm`_. Node-specific tasks are dispatched from the API tier to the appropriate conductor using conductor-specific RPC channels. As Conductor instances join or leave the cluster, nodes may be remapped to different Conductors, thus triggering various driver actions such as take-over or clean-up. .. _API service: webapi.html .. _AbstractHardwareType: api/ironic.drivers.hardware_type.html#ironic.drivers.hardware_type.AbstractHardwareType .. _Conductor service: api/ironic.conductor.manager.html .. _DB API: api/ironic.db.api.html .. _diskimage-builder: https://docs.openstack.org/diskimage-builder/latest/ .. _consistent hashing algorithm: https://docs.openstack.org/tooz/latest/user/tutorial/hashring.html .. _periodic: https://docs.openstack.org/futurist/latest/reference/index.html#futurist.periodics.periodic ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/bios_develop.rst0000664000175000017500000001013700000000000022725 0ustar00zuulzuul00000000000000.. _bios_develop: Developing BIOS Interface ========================= To support a driver specific BIOS interface it is necessary to create a class inheriting from the ``BIOSInterface`` class: .. 
code-block:: python from ironic.drivers import base class ExampleBIOS(base.BIOSInterface): def get_properties(self): return {} def validate(self, task): pass See :doc:`/contributor/drivers` for a detailed explanation of hardware type and interface. The ``get_properties`` and ``validate`` are methods that all driver interfaces have. The hardware interface that supports BIOS settings should also implement the following three methods: * Implement a method named ``cache_bios_settings``. This method stores BIOS settings to the ``bios_settings`` table during cleaning operations and updates the ``bios_settings`` table when ``apply_configuration`` or ``factory_reset`` are successfully called. .. code-block:: python from ironic.drivers import base driver_client = importutils.try_import('driver.client') class ExampleBIOS(base.BIOSInterface): def __init__(self): if driver_client is None: raise exception.DriverLoadError( driver=self.__class__.__name__, reason=_("Unable to import driver library")) def cache_bios_settings(self, task): node_id = task.node.id node_info = driver_common.parse_driver_info(task.node) settings = driver_client.get_bios_settings(node_info) create_list, update_list, delete_list, nochange_list = ( objects.BIOSSettingList.sync_node_setting(settings)) if len(create_list) > 0: objects.BIOSSettingList.create( task.context, node_id, create_list) if len(update_list) > 0: objects.BIOSSettingList.save( task.context, node_id, update_list) if len(delete_list) > 0: delete_names = [] for setting in delete_list: delete_names.append(setting.name) objects.BIOSSettingList.delete( task.context, node_id, delete_names) .. note:: ``driver.client`` is vendor specific library to control and manage the bare metal hardware, for example: python-dracclient, sushy. * Implement a method named ``factory_reset``. This method needs to use the ``clean_step`` decorator. It resets BIOS settings to factory default on the given node. 
It calls ``cache_bios_settings`` automatically to update existing ``bios_settings`` table once successfully executed. .. code-block:: python class ExampleBIOS(base.BIOSInterface): @base.clean_step(priority=0) def factory_reset(self, task): node_info = driver_common.parse_driver_info(task.node) driver_client.reset_bios_settings(node_info) * Implement a method named ``apply_configuration``. This method needs to use the clean_step decorator. It takes the given BIOS settings and applies them on the node. It also calls ``cache_bios_settings`` automatically to update existing ``bios_settings`` table after successfully applying given settings on the node. .. code-block:: python class ExampleBIOS(base.BIOSInterface): @base.clean_step(priority=0, argsinfo={ 'settings': { 'description': ( 'A list of BIOS settings to be applied' ), 'required': True } }) def apply_configuration(self, task, settings): node_info = driver_common.parse_driver_info(task.node) driver_client.apply_bios_settings(node_info, settings) The ``settings`` parameter is a list of BIOS settings to be configured. for example:: [ { "setting name": { "name": "String", "value": "String" } }, { "setting name": { "name": "String", "value": "String" } }, ... ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/bugs.rst0000664000175000017500000001236200000000000021215 0ustar00zuulzuul00000000000000Bug Reporting and Triaging Guide ================================ StoryBoard ---------- All ironic projects use StoryBoard_ for tracking both bugs and enhancement requests (RFE). The `ironic project group`_ lists all our projects. .. note:: Ironic is developed as part of OpenStack and therefore uses the ``openstack/`` namespace. StoryBoard is somewhat different from traditional bug tracking systems because every *story* is not linked to a project itself, but rather through its *tasks*. 
A story represents an issue you are facing or an enhancement you want to see, while tasks represent individual action items which can span several projects. When creating a story, you'll also need to create the first task. If unsure, create a task against ``openstack/ironic``. Reporting Guide --------------- We are constantly receiving a lot of requests, so it's important to file a meaningful story for it to be acted upon. A good story: * specifies **why** a change is needed. In case of a bug - what you expected to happen. * explains how to reproduce the described condition. .. note:: Please try to provide a reproducer based on unit tests, :ref:`devstack ` or bifrost_. While we try our best to support users using other installers and distributions, it may be non-trivial without deep knowledge of them. If you're using a commercial distribution or a product, please try contacting support first. * should be understandable without additional context. For example, if you see an exception, we will need the full traceback. Other commonly required things are: * the contents of the node in question (use ``baremetal node show ``) * debug logging related to the event, ideally with logs from the ramdisk * versions of ironic, ironic-python-agent, and any other coupled components. * should not be too verbose either. Unfortunately, we cannot process a few days worth of system logs to find the problems, we expect your collaboration. * is not a question or a support request. Please see :doc:`contributing` for the ways to contact us. * provides a way to contact the reporter. Please follow the comments and expect follow-up emails, but ideally also be on IRC for questions. An enhancement request additionally: * benefits the overall project, not just one consumer. If you have a case that is specific to your requirements, think about ways to make ironic extensible to be able to cover it. * does not unnecessary increase the project scope. 
Consider if your idea can be implemented without changing ironic or its projects, maybe it actually should? Triaging Guide -------------- The bug triaging process involves checking new stories to make sure they are actionable by the team. This guide is mostly targeting the project team, but we would appreciate if reporters could partly self-triage their own requests. * Determine if the request is valid and complete. Use the checklist in the `Reporting Guide`_ for that. * Is the request a bug report or an enhancement request (an RFE)? The difference is often subtle, the key question to answer is if the described behavior is expected. Add an ``rfe`` tag to all enhancement requests and propose it for the "RFE Review" section of the `weekly meeting`_. * Does the RFE obviously require a spec_? Usually this is decided when an RFE is reviewed during the meeting, but some requests are undoubtedly complex, involve changing a lot of critical parts and thus demand a spec. Add a ``needs-spec`` tag to enhancement requests that obviously need a spec. Otherwise leave it until the meeting. * Apply additional tags: * All hardware type specific stories should receive a corresponding tag (e.g. ``ipmi``, ``idrac``, etc). * API-related stories should have an ``api`` tag. * CI issues should have a ``gate`` tag. The next actions **must only** be done by a core team member (or an experienced full-time contributor appoined by the PTL): * Can the RFE be automatically approved? It happens if the RFE requests an implementation of a driver feature that is already implemented for other drivers and does not pose additional complexity. If the RFE can be automatically approved, apply the ``rfe-approved`` tag. If unsure, never apply the tag! Talk to the PTL instead. * Does the RFE have a corresponding spec approved? If yes, apply the ``rfe-approved`` tag. * In the end, apply the ``ironic-triaged`` tag to make the story as triaged. 
Expiring Bugs ------------- While we hope to fix all issues that our consumers hit, it is unfortunately not realistic. Stories **may** be closed by marking all their tasks ``INVALID`` in the following cases: * No solution has been proposed in 1 calendar year. * Additional information has been requested from the reporter, and no update has been provided in 1 calendar month. * The request no longer aligns with the direction of the project. .. note:: As usual, common sense should be applied when closing stories. .. _StoryBoard: https://storyboard.openstack.org .. _ironic project group: https://storyboard.openstack.org/#!/project_group/ironic .. _bifrost: https://docs.openstack.org/bifrost .. _spec: https://specs.openstack.org/openstack/ironic-specs/ .. _weekly meeting: https://wiki.openstack.org/wiki/Meetings/Ironic ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/community.rst0000664000175000017500000000507100000000000022300 0ustar00zuulzuul00000000000000==================== Bare Metal Community ==================== This document provides information on how to reach out to the community for questions, bug reports or new code contributions. Useful Links ============ Bug/Task tracker https://storyboard.openstack.org/#!/project/openstack/ironic Code Hosting https://opendev.org/openstack/ironic Code Review https://review.opendev.org/#/q/status:open+project:openstack/ironic,n,z Weekly Meeting Agenda https://wiki.openstack.org/wiki/Meetings/Ironic#Agenda_for_next_meeting Asking Questions ================ There are two many venues where all discussions happen: IRC and mailing lists. Internet Relay Chat 'IRC' ------------------------- Daily contributor discussions take place on IRC in the ``#openstack-ironic`` channel on the OFTC IRC network. Please feel free to connect to ``ircs://irc.oftc.net:6697`` and join our channel! 
Note that while we have community members from everywhere in the world, we're the most active from roughly 6am to 12am. If you don't get an answer to your question, try the `Mailing list`_. Additional information on getting connected can be found in the `OpenStack community contribution guide `_. Mailing list ------------ We use the *openstack-discuss* mailing list for asynchronous communications and longer discussions. Navigate to http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss to subscribe or view the archives. When sending a message please prefix the *Subject* line with ``[ironic]`` so that we don't miss it. Reporting Bugs ============== LaunchPad --------- Most of the tools used for OpenStack require a Launchpad_ ID for authentication. Ironic previously used to track work on Launchpad, but we have not done so since migrating to Storyboard_. .. _Launchpad: https://launchpad.net Storyboard ---------- The ironic project moved from Launchpad to `StoryBoard `_ for work and task tracking. This provides an aggregate view called a "Project Group" and individual "Projects". A good starting place is the `project group `_ representing the whole of the ironic community, as opposed to the `ironic project `_ storyboard which represents ironic as a repository. See :doc:`bugs` for more details on how we track bugs. Contributing Code ================= .. seealso:: * :doc:`contributing` - basic information on new code contributions * :doc:`/contributor/index` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/contributing.rst0000664000175000017500000003536100000000000022770 0ustar00zuulzuul00000000000000.. _code-contribution-guide: ============================ So You Want to Contribute... ============================ This document provides some necessary points for developers to consider when writing and reviewing Ironic code. 
The checklist will help developers get things right. Please make sure to check the :doc:`community page ` first. Contributing Code ================= If you're completely new to OpenStack and want to contribute to the ironic project, please start by familiarizing yourself with the `Infra Team's Developer Guide `_. This will help you get your accounts set up in Launchpad and Gerrit, familiarize you with the workflow for the OpenStack continuous integration and testing systems, and help you with your first commit. Everything Ironic ----------------- Ironic is a community of projects centered around the primary project repository 'ironic', which help facilitate the deployment and management of bare metal resources. This means there are a number of different repositories that fall into the responsibility of the project team and the community. Some of the repositories may not seem strictly hardware related, but they may be tools or things to just make an aspect easier. Related Projects ---------------- There are several projects that are tightly integrated with ironic and which are developed by the same community. .. seealso:: * :bifrost-doc:`Bifrost Documentation <>` * :ironic-inspector-doc:`Ironic Inspector Documentation <>` * :ironic-lib-doc:`Ironic Lib Documentation <>` * :ironic-python-agent-doc:`Ironic Python Agent (IPA) Documentation <>` * :python-ironicclient-doc:`Ironic Client Documentation <>` * :python-ironic-inspector-client-doc:`Ironic Inspector Client Documentation <>` Adding New Features =================== Ironic tracks new features using RFEs (Requests for Feature Enhancements) instead of blueprints. These are stories with 'rfe' tag, and they should be submitted before a spec or code is proposed. When a member of the `ironic-core team `_ decides that the proposal is worth implementing, a spec (if needed) and code should be submitted, referencing the RFE task or story ID number. 
Contributors are welcome to submit a spec and/or code before the RFE is approved, however those patches will not land until the RFE is approved. Feature Submission Process -------------------------- #. Submit a bug report on the `ironic StoryBoard `_. There are two fields that must be filled: 'Title' and 'Description'. 'Tasks' can be added and are associated with a project. If you can't describe it in a sentence or two, it may mean that you are either trying to capture more than one RFE at once, or that you are having a hard time defining what you are trying to solve at all. This may also be a sign that your feature may require a specification document. #. Describe the proposed change in the 'Description' field. The description should provide enough details for a knowledgeable developer to understand what is the existing problem in the current platform that needs to be addressed, or what is the enhancement that would make the platform more capable, both from a functional and a non-functional standpoint. #. Submit the story, add an 'rfe' tag to it and assign yourself or whoever is going to work on this feature. #. As soon as a member of the team acknowledges the story, we will move the story to the 'Review' state. As time goes on, Discussion about the RFE, and whether to approve it will occur. #. Contributors will evaluate the RFE and may advise the submitter to file a spec in the ironic-specs repository to elaborate on the feature request. Typically this is when an RFE requires extra scrutiny, more design discussion, etc. For the spec submission process, please see the `Ironic Specs Process`_. A specific task should be created to track the creation of a specification. #. If a spec is not required, once the discussion has happened and there is positive consensus among the ironic-core team on the RFE, the RFE is 'approved', and its tag will move from 'rfe' to 'rfe-approved'. This means that the feature is approved and the related code may be merged. #. 
If a spec is required, the spec must be submitted (with a new task as part of the story referenced as 'Task' in the commit message), reviewed, and merged before the RFE will be 'approved' (and the tag changed to 'rfe-approved'). #. The tasks then goes through the usual process -- first to 'Review' when the spec/code is being worked on, then 'Merged' when it is implemented. #. If the RFE is rejected, the ironic-core team will move the story to "Invalid" status. Change Tracking --------------- We track our stories and tasks in Storyboard. https://storyboard.openstack.org/#!/project/ironic When working on an RFE, please be sure to tag your commits properly: "Story: #xxxx" or "Task: #xxxx". It is also helpful to set a consistent review topic, such as "story/xxxx" for all patches related to the RFE. If the RFE spans across several projects (e.g. ironic and python-ironicclient), but the main work is going to happen within ironic, please use the same story for all the code you're submitting, there is no need to create a separate RFE in every project. .. note:: **RFEs may only be approved by members of the ironic-core team**. .. note:: While not strictly required for minor changes and fixes, it is highly preferred by the Ironic community that any change which needs to be backported, have a recorded Story and Task in Storyboard. Managing Change Sets -------------------- If you would like some help, or if you (or some members of your team) are unable to continue working on the feature, updating and maintaining the changes, please let the rest of the ironic community know. You could leave a comment in one or more of the changes/patches, bring it up in IRC, the weekly meeting, or on the OpenStack development email list. Communicating this will make other contributors aware of the situation and allow for others to step forward and volunteer to continue with the work. 
In the event that a contributor leaves the community, do not expect the contributor's changes to be continued unless someone volunteers to do so. Getting Your Patch Merged ------------------------- Within the Ironic project, we generally require two core reviewers to sign-off (+2) change sets. We also will generally recognize non-core (+1) reviewers, and sometimes even reverse our decision to merge code based upon their reviews. We recognize that some repositories have less visibility, as such it is okay to ask for a review in our IRC channel. Please be prepared to stay in IRC for a little while in case we have questions. Sometimes we may also approve patches with a single core reviewer. This is generally discouraged, but sometimes necessary. When we do so, we try to explain why we do so. As a patch submitter, it equally helps us to understand why the change is important. Generally, more detail and context helps us understand the change faster. Timeline Expectations --------------------- As with any large project, it does take time for features and changes to be merged in any of the project repositories. This is largely due to limited review bandwidth coupled with varying reviewer priorities and focuses. When establishing an understanding of complexity, the following things should be kept in mind. * Generally, small and minor changes can gain consensus and merge fairly quickly. These sorts of changes would be: bug fixes, minor documentation updates, follow-up changes. * Medium changes generally consist of driver feature parity changes, where one driver is working to match functionality of another driver. * These changes generally only require an RFE for the purposes of tracking and correlating the change. * Documentation updates are expected to be submitted with or immediately following the initial change set. * Larger or controversial changes generally take much longer to merge. 
This is often due to the necessity of reviewers to gain additional context and for change sets to be iterated upon to reach a state where there is consensus. These sorts of changes include: database, object, internal interface additions, RPC, rest API changes. * These changes will very often require specifications to reach consensus, unless there are pre-existing patterns or code already present. * These changes may require many reviews and iterations, and can also expect to be impacted by merge conflicts as other code or features are merged. * These changes must typically be split into a series of changes. Reviewers typically shy away from larger single change sets due to increased difficulty in reviewing. * Do not expect any API or user-visible data model changes to merge after the API client freeze. Some substrate changes may merge if not user visible. * You should expect complex features, such as cross-project features or integration, to take longer than a single development cycle to land. * Building consensus is vital. * Often these changes are controversial or have multiple considerations that need to be worked through in the specification process, which may cause the design to change. As such, it may take months to reach consensus over design. * These features are best broken into larger chunks and tackled in an incremental fashion. Live Upgrade Related Concerns ----------------------------- See :doc:`/contributor/rolling-upgrades`. Driver Internal Info ~~~~~~~~~~~~~~~~~~~~ The ``driver_internal_info`` node field was introduced in the Kilo release. It allows driver developers to store internal information that can not be modified by end users. Here is the list of existing common and agent driver attributes: * Common attributes: * ``is_whole_disk_image``: A Boolean value to indicate whether the user image contains ramdisk/kernel. * ``clean_steps``: An ordered list of clean steps that will be performed on the node. 
* ``deploy_steps``: An ordered list of deploy steps that will be performed on the node. Support for deploy steps was added in the ``11.1.0`` release. * ``instance``: A list of dictionaries containing the disk layout values. * ``root_uuid_or_disk_id``: A String value of the bare metal node's root partition uuid or disk id. * ``persistent_boot_device``: A String value of device from ``ironic.common.boot_devices``. * ``is_next_boot_persistent``: A Boolean value to indicate whether the next boot device is ``persistent_boot_device``. * Agent driver attributes: * ``agent_url``: A String value of IPA API URL so that Ironic can talk to IPA ramdisk. * ``hardware_manager_version``: A String value of the version of the hardware manager in IPA ramdisk. * ``target_raid_config``: A Dictionary containing the target RAID configuration. This is a copy of the same name attribute in Node object. But this one is never actually saved into DB and is only read by IPA ramdisk. .. note:: These are only some fields in use. Other vendor drivers might expose more ``driver_internal_info`` properties, please check their development documentation and/or module docstring for details. It is important for developers to make sure these properties follow the precedent of prefixing their variable names with a specific interface name (e.g., ilo_bar, drac_xyz), so as to minimize or avoid any conflicts between interfaces. Ironic Specs Process -------------------- Specifications must follow the template which can be found at `specs/template.rst `_, which is quite self-documenting. Specifications are proposed by adding them to the `specs/approved` directory, adding a soft link to it from the `specs/not-implemented` directory, and posting it for review to Gerrit. For more information, please see the `README `_. The same `Gerrit process `_ as with source code, using the repository `ironic-specs `_, is used to add new specifications. 
All approved specifications are available at: https://specs.openstack.org/openstack/ironic-specs. If a specification has been approved but not completed within one or more releases since the approval, it may be re-reviewed to make sure it still makes sense as written. Ironic specifications are part of the `RFE (Requests for Feature Enhancements) process <#adding-new-features>`_. You are welcome to submit patches associated with an RFE, but they will have a -2 ("do not merge") until the specification has been approved. This is to ensure that the patches don't get accidentally merged beforehand. You will still be able to get reviewer feedback and push new patch sets, even with a -2. The `list of core reviewers `_ for the specifications is small but mighty. (This is not necessarily the same list of core reviewers for code patches.) Changes to existing specs ------------------------- For approved but not-completed specs: - cosmetic cleanup, fixing errors, and changing the definition of a feature can be done to the spec. For approved and completed specs: - changing a previously approved and completed spec should only be done for cosmetic cleanup or fixing errors. - changing the definition of the feature should be done in a new spec. Please see the `Ironic specs process wiki page `_ for further reference. Project Team Leader Duties ========================== The ``Project Team Leader`` or ``PTL`` is elected each development cycle by the contributors to the ironic community. Think of this person as your primary contact if you need to try and rally the project, or have a major issue that requires attention. They serve a role that is mainly oriented towards trying to drive the technical discussion forward and managing the idiosyncrasies of the project. With this responsibility, they are considered a "public face" of the project and are generally obliged to try and provide "project updates" and outreach communication. 
All common PTL duties are enumerated here in the `PTL guide `_. Tasks like release management or preparation for a release are generally delegated with-in the team. Even outreach can be delegated, and specifically there is no rule stating that any member of the community can't propose a release, clean-up release notes or documentation, or even get on the occasional stage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/debug-ci-failures.rst0000664000175000017500000000206200000000000023540 0ustar00zuulzuul00000000000000.. _debug-ci-failures: ===================== Debugging CI failures ===================== If you see `FAILURE` in one or more jobs for your patch please don't panic. This guide may help you to find the initial reason for the failure. When clicking in the failed job you will be redirect to the Zuul web page that contains all the information about the job build. Zuul Web Page ============= The page has three tabs: `Summary`, `Logs` and `Console`. * Summary: Contains overall information about the build of the job, if the job build failed it will contain a general output of the failure. * Logs: Contains all configurations and log files about all services that were used in the job. This will give you an overall idea of the failures and you can identify services that may be involved. The `job-output` file can give an overall idea of the failures and what services may be involved. * Console: Contains all the playbooks that were executed, by clicking in the arrow before each playbook name you can find the roles and commands that were executed. 
When clicking on the failed job you will be redirected to the Zuul web page
} } }, { "priority": 100, "interface": "deploy", "step": "deploy", "argsinfo": null } ], "deploy_step_index": 1 } } In-band deploy steps (deploy steps that are run inside the ramdisk) have to be implemented in a custom :ironic-python-agent-doc:`IPA hardware manager `. All in-band deploy steps must have priorities between 41 and 99, see :ref:`node-deployment-core-steps` for details. Clean steps basics ------------------ Clean steps are written similarly to deploy steps, but are executed during :doc:`cleaning `. Steps with priority > 0 are executed during automated cleaning, all steps can be executed explicitly during manual cleaning. Unlike deploy steps, clean steps are commonly found in these interfaces: ``bios`` Steps that apply BIOS settings, see `Implementing BIOS settings`_. ``deploy`` Steps that undo the effect of deployment (e.g. erase disks). ``management`` Additional steps that use the node's BMC, such as out-of-band firmware update or BMC reset. ``raid`` Steps that build or tear down RAID, see `Implementing RAID`_. .. note:: When designing a new step for your driver, try to make it consistent with existing steps on other drivers. Just as deploy steps, in-band clean steps have to be implemented in a custom :ironic-python-agent-doc:`IPA hardware manager `. Asynchronous steps ------------------ If the step returns ``None``, ironic assumes its execution is finished and proceeds to the next step. Many steps are executed asynchronously; in this case you need to inform ironic that the step is not finished. There are several possibilities: Combined in-band and out-of-band step ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If your step starts as out-of-band and then proceeds as in-band (i.e. inside the agent), you only need to return ``CLEANWAIT``/``DEPLOYWAIT`` from the step. .. 
code-block:: python from ironic.drivers import base from ironic.drivers.modules import agent from ironic.drivers.modules import agent_base from ironic.drivers.modules import agent_client from ironic.drivers.modules import deploy_utils class MyDeploy(agent.CustomAgentDeploy): ... @base.deploy_step(priority=80) def my_deploy(self, task): ... return deploy_utils.get_async_step_return_state(task.node) # Usually you can use a more high-level pattern: @base.deploy_step(priority=60) def my_deploy2(self, task): new_step = {'interface': 'deploy', 'step': 'my_deploy2', 'args': {...}} client = agent_client.get_client(task) return agent_base.execute_step(task, new_step, 'deploy', client=client) .. warning:: This approach only works for steps implemented on a ``deploy`` interface that inherits agent deploy. Execution on reboot ~~~~~~~~~~~~~~~~~~~ Some steps are executed out-of-band, but require a reboot to complete. Use the following pattern: .. code-block:: python from ironic.drivers import base from ironic.drivers.modules import deploy_utils class MyManagement(base.ManagementInterface): ... @base.clean_step(priority=0) def my_action(self, task): ... # Tell ironic that... deploy_utils.set_async_step_flags( node, # ... we're waiting for IPA to come back after reboot reboot=True, # ... the current step is done skip_current_step=True) return deploy_utils.reboot_to_finish_step(task) .. _deploy-steps-polling: Polling for completion ~~~~~~~~~~~~~~~~~~~~~~~ Finally, you may want to poll the BMC until the operation is complete. Often enough, this also involves a reboot. In this case you can use the :py:func:`ironic.conductor.periodics.node_periodic` decorator to create a periodic task that operates on relevant nodes: .. code-block:: python from ironic.common import states from ironic.common import utils from ironic.conductor import periodics from ironic.drivers import base from ironic.drivers.modules import deploy_utils _STATUS_CHECK_INTERVAL = ... 
# Mark this node as running my_action.
BIOS settings are implemented via deploy and clean steps in the ``bios`` interface.
The ``cache_bios_settings`` decorator is used to ensure that the settings cached in the ironic database are updated.
you can install it using pip system-wide
You can upgrade it individually, if you need to:: pip install -U virtualenv --user Running Unit Tests Locally ========================== If you haven't already, Ironic source code should be pulled directly from git:: # from your home or source directory cd ~ git clone https://opendev.org/openstack/ironic cd ironic Running Unit and Style Tests ---------------------------- All unit tests should be run using tox. To run Ironic's entire test suite:: # to run the py3 unit tests, and the style tests tox To run a specific test or tests, use the "-e" option followed by the tox target name. For example:: # run the unit tests under py36 and also run the pep8 tests tox -epy36 -epep8 You may pass options to the test programs using positional arguments. To run a specific unit test, this passes the desired test (regex string) to `stestr `_:: # run a specific test for Python 3.6 tox -epy36 -- test_conductor Debugging unit tests -------------------- In order to break into the debugger from a unit test we need to insert a breaking point to the code: .. code-block:: python import pdb; pdb.set_trace() Then run ``tox`` with the debug environment as one of the following:: tox -e debug tox -e debug test_file_name tox -e debug test_file_name.TestClass tox -e debug test_file_name.TestClass.test_name For more information see the :oslotest-doc:`oslotest documentation `. Database Setup -------------- The unit tests need a local database setup, you can use ``tools/test-setup.sh`` to set up the database the same way as setup in the OpenStack test systems. Additional Tox Targets ---------------------- There are several additional tox targets not included in the default list, such as the target which builds the documentation site. See the ``tox.ini`` file for a complete listing of tox targets. 
These can be run directly by specifying the target name:: # generate the documentation pages locally tox -edocs # generate the sample configuration file tox -egenconfig Exercising the Services Locally =============================== In addition to running automated tests, sometimes it can be helpful to actually run the services locally, without needing a server in a remote datacenter. If you would like to exercise the Ironic services in isolation within your local environment, you can do this without starting any other OpenStack services. For example, this is useful for rapidly prototyping and debugging interactions over the RPC channel, testing database migrations, and so forth. Here we describe two ways to install and configure the dependencies, either run directly on your local machine or encapsulated in a virtual machine or container. Step 1: Create a Python virtualenv ---------------------------------- #. If you haven't already downloaded the source code, do that first:: cd ~ git clone https://opendev.org/openstack/ironic cd ironic #. Create the Python virtualenv:: tox -evenv --notest --develop -r #. Activate the virtual environment:: . .tox/venv/bin/activate #. Install the `openstack` client command utility:: pip install python-openstackclient #. Install the `baremetal` client:: pip install python-ironicclient .. note:: You can install python-ironicclient from source by cloning the git repository and running `pip install .` while in the root of the cloned repository. #. Export some ENV vars so the client will connect to the local services that you'll start in the next section:: export OS_AUTH_TYPE=none export OS_ENDPOINT=http://localhost:6385/ Next, install and configure system dependencies. Step 2: Install System Dependencies Locally -------------------------------------------- This step will install MySQL on your local system. 
This may not be desirable in some situations (eg, you're developing from a laptop and do not want to run a MySQL server on it all the time). If you want to use SQLite, skip it and do not set the ``connection`` option. #. Install mysql-server: Ubuntu/Debian:: sudo apt-get install mysql-server RHEL/CentOS/Fedora:: sudo dnf install mariadb mariadb-server sudo systemctl start mariadb.service openSUSE/SLE:: sudo zypper install mariadb sudo systemctl start mysql.service If using MySQL, you need to create the initial database:: mysql -u root -pMYSQL_ROOT_PWD -e "create schema ironic" .. note:: if you choose not to install mysql-server, ironic will default to using a local sqlite database. The database will then be stored in ``ironic/ironic.sqlite``. #. Create a configuration file within the ironic source directory:: # generate a sample config tox -egenconfig # copy sample config and modify it as necessary cp etc/ironic/ironic.conf.sample etc/ironic/ironic.conf.local # disable auth since we are not running keystone here sed -i "s/#auth_strategy = keystone/auth_strategy = noauth/" etc/ironic/ironic.conf.local # use the 'fake-hardware' test hardware type sed -i "s/#enabled_hardware_types = .*/enabled_hardware_types = fake-hardware/" etc/ironic/ironic.conf.local # use the 'fake' deploy and boot interfaces sed -i "s/#enabled_deploy_interfaces = .*/enabled_deploy_interfaces = fake/" etc/ironic/ironic.conf.local sed -i "s/#enabled_boot_interfaces = .*/enabled_boot_interfaces = fake/" etc/ironic/ironic.conf.local # enable both fake and ipmitool management and power interfaces sed -i "s/#enabled_management_interfaces = .*/enabled_management_interfaces = fake,ipmitool/" etc/ironic/ironic.conf.local sed -i "s/#enabled_power_interfaces = .*/enabled_power_interfaces = fake,ipmitool/" etc/ironic/ironic.conf.local # change the periodic sync_power_state_interval to a week, to avoid getting NodeLocked exceptions sed -i "s/#sync_power_state_interval = 60/sync_power_state_interval = 
604800/" etc/ironic/ironic.conf.local # if you opted to install mysql-server, switch the DB connection from sqlite to mysql sed -i "s/#connection = .*/connection = mysql\+pymysql:\/\/root:MYSQL_ROOT_PWD@localhost\/ironic/" etc/ironic/ironic.conf.local # use JSON RPC to avoid installing rabbitmq locally sed -i "s/#rpc_transport = oslo/rpc_transport = json-rpc/" etc/ironic/ironic.conf.local Step 3: Start the Services -------------------------- From within the python virtualenv, run the following command to prepare the database before you start the ironic services:: # initialize the database for ironic ironic-dbsync --config-file etc/ironic/ironic.conf.local create_schema Next, open two new terminals for this section, and run each of the examples here in a separate terminal. In this way, the services will *not* be run as daemons; you can observe their output and stop them with Ctrl-C at any time. #. Start the API service in debug mode and watch its output:: cd ~/ironic . .tox/venv/bin/activate ironic-api -d --config-file etc/ironic/ironic.conf.local #. Start the Conductor service in debug mode and watch its output:: cd ~/ironic . .tox/venv/bin/activate ironic-conductor -d --config-file etc/ironic/ironic.conf.local Step 4: Interact with the running services ------------------------------------------ You should now be able to interact with ironic via the python client, which is present in the python virtualenv, and observe both services' debug outputs in the other two windows. This is a good way to test new features or play with the functionality without necessarily starting DevStack. 
To get started, export the following variables to point the client at the local instance of ironic and disable the authentication:: export OS_AUTH_TYPE=none export OS_ENDPOINT=http://127.0.0.1:6385 Then list the available commands and resources:: # get a list of available commands openstack help baremetal # get the list of drivers currently supported by the available conductor(s) baremetal driver list # get a list of nodes (should be empty at this point) baremetal node list Here is an example walkthrough of creating a node:: MAC="aa:bb:cc:dd:ee:ff" # replace with the MAC of a data port on your node IPMI_ADDR="1.2.3.4" # replace with a real IP of the node BMC IPMI_USER="admin" # replace with the BMC's user name IPMI_PASS="pass" # replace with the BMC's password # enroll the node with the fake hardware type and IPMI-based power and # management interfaces. Note that driver info may be added at node # creation time with "--driver-info" NODE=$(baremetal node create \ --driver fake-hardware \ --management-interface ipmitool \ --power-interface ipmitool \ --driver-info ipmi_address=$IPMI_ADDR \ --driver-info ipmi_username=$IPMI_USER \ -f value -c uuid) # driver info may also be added or updated later on baremetal node set $NODE --driver-info ipmi_password=$IPMI_PASS # add a network port baremetal port create $MAC --node $NODE # view the information for the node baremetal node show $NODE # request that the node's driver validate the supplied information baremetal node validate $NODE # you have now enrolled a node sufficiently to be able to control # its power state from ironic! baremetal node power on $NODE If you make some code changes and want to test their effects, simply stop the services with Ctrl-C and restart them. 
Step 5: Fixing your test environment ------------------------------------ If you are testing changes that add or remove python entrypoints, or making significant changes to ironic's python modules, or simply keep the virtualenv around for a long time, your development environment may reach an inconsistent state. It may help to delete cached ".pyc" files, update dependencies, reinstall ironic, or even recreate the virtualenv. The following commands may help with that, but are not an exhaustive troubleshooting guide:: # clear cached pyc files cd ~/ironic/ironic find ./ -name '*.pyc' | xargs rm # reinstall ironic modules cd ~/ironic . .tox/venv/bin/activate pip uninstall ironic pip install -e . # install and upgrade ironic and all python dependencies cd ~/ironic . .tox/venv/bin/activate pip install -U -e . .. _`deploy_devstack`: Deploying Ironic with DevStack ============================== DevStack may be configured to deploy Ironic, setup Nova to use the Ironic driver and provide hardware resources (network, baremetal compute nodes) using a combination of OpenVSwitch and libvirt. It is highly recommended to deploy on an expendable virtual machine and not on your personal work station. Deploying Ironic with DevStack requires a machine running Ubuntu 16.04 (or later) or Fedora 24 (or later). Make sure your machine is fully up to date and has the latest packages installed before beginning this process. The ironic-tempest-plugin is necessary if you want to run integration tests, the section `Ironic with ironic-tempest-plugin`_ tells the extra steps you need to enable it in DevStack. .. seealso:: https://docs.openstack.org/devstack/latest/ .. note:: The devstack "demo" tenant is now granted the "baremetal_observer" role and thereby has read-only access to ironic's API. This is sufficient for all the examples below. Should you want to create or modify bare metal resources directly (ie. 
through ironic rather than through nova) you will need to use the devstack "admin" tenant. Devstack will no longer create the user 'stack' with the desired permissions, but does provide a script to perform the task:: git clone https://opendev.org/openstack/devstack.git devstack sudo ./devstack/tools/create-stack-user.sh .. note:: In case you receive an error "Could not determine host ip address. See local.conf for suggestions on setting HOST_IP", you need to manually add the main ip of your machine to the localrc file under devstack/ using the HOST_IP variable, e.g. HOST_IP=YOURIP This could happen when running devstack on virtual machines. Switch to the stack user and clone DevStack:: sudo su - stack git clone https://opendev.org/openstack/devstack.git devstack Ironic ------ Create devstack/local.conf with minimal settings required to enable Ironic. An example local.conf that enables the ``direct`` :doc:`deploy interface ` and uses the ``ipmi`` hardware type by default:: cd devstack cat >local.conf <` and uses the ``ipmi`` hardware type by default:: cd devstack cat >local.conf <`_ to control the power state of the virtual baremetal nodes. .. note:: When running QEMU as non-root user (e.g. ``qemu`` on Fedora or ``libvirt-qemu`` on Ubuntu), make sure ``IRONIC_VM_LOG_DIR`` points to a directory where QEMU will be able to write. You can verify this with, for example:: # on Fedora sudo -u qemu touch $HOME/ironic-bm-logs/test.log # on Ubuntu sudo -u libvirt-qemu touch $HOME/ironic-bm-logs/test.log .. note:: To check out an in-progress patch for testing, you can add a Git ref to the ``enable_plugin`` line. For instance:: enable_plugin ironic https://opendev.org/openstack/ironic refs/changes/46/295946/15 For a patch in review, you can find the ref to use by clicking the "Download" button in Gerrit. 
Because devstack creates multiple networks
On the Ironic side, you should see an Ironic node associated with this Nova instance. It should be powered on and in a 'wait call-back' provisioning state:: baremetal node list +--------------------------------------+--------+--------------------------------------+-------------+--------------------+-------------+ | UUID | Name | Instance UUID | Power State | Provisioning State | Maintenance | +--------------------------------------+--------+--------------------------------------+-------------+--------------------+-------------+ | 9e592cbe-e492-4e4f-bf8f-4c9e0ad1868f | node-0 | None | power off | None | False | | ec0c6384-cc3a-4edf-b7db-abde1998be96 | node-1 | None | power off | None | False | | 4099e31c-576c-48f8-b460-75e1b14e497f | node-2 | a2c7f812-e386-4a22-b393-fe1802abd56e | power on | wait call-back | False | +--------------------------------------+--------+--------------------------------------+-------------+--------------------+-------------+ At this point, Ironic conductor has called to libvirt (via virtualbmc) to power on a virtual machine, which will PXE + TFTP boot from the conductor node and progress through the Ironic provisioning workflow. 
One libvirt domain should be active now:: sudo virsh list --all Id Name State ---------------------------------------------------- 2 node-2 running - node-0 shut off - node-1 shut off This provisioning process may take some time depending on the performance of the host system, but Ironic should eventually show the node as having an 'active' provisioning state:: baremetal node list +--------------------------------------+--------+--------------------------------------+-------------+--------------------+-------------+ | UUID | Name | Instance UUID | Power State | Provisioning State | Maintenance | +--------------------------------------+--------+--------------------------------------+-------------+--------------------+-------------+ | 9e592cbe-e492-4e4f-bf8f-4c9e0ad1868f | node-0 | None | power off | None | False | | ec0c6384-cc3a-4edf-b7db-abde1998be96 | node-1 | None | power off | None | False | | 4099e31c-576c-48f8-b460-75e1b14e497f | node-2 | a2c7f812-e386-4a22-b393-fe1802abd56e | power on | active | False | +--------------------------------------+--------+--------------------------------------+-------------+--------------------+-------------+ This should also be reflected in the Nova instance state, which at this point should be ACTIVE, Running and an associated private IP:: openstack server list --long +----------+---------+--------+------------+-------------+---------------+------------+----------+-------------------+------+------------+ | ID | Name | Status | Task State | Power State | Networks | Image Name | Image ID | Availability Zone | Host | Properties | +----------+---------+--------+------------+-------------+---------------+------------+----------+-------------------+------+------------+ | a2c7f812 | testing | ACTIVE | none | Running | private=10.1. 
| cirros-0.3 | 44d4092a | nova | | | | -e386-4a | | | | | 0.4, fd7d:1f3 | .5-x86_64- | -51ac-47 | | | | | 22-b393- | | | | | c:4bf1:0:f816 | disk | 51-9c50- | | | | | fe1802ab | | | | | :3eff:f39d:6d | | fd6e2050 | | | | | d56e | | | | | 94 | | faa1 | | | | +----------+---------+--------+------------+-------------+---------------+------------+----------+-------------------+------+------------+ The server should now be accessible via SSH:: ssh cirros@10.1.0.4 $ Running Tempest tests ===================== After :ref:`Deploying Ironic with DevStack ` with the ironic-tempest-plugin enabled, one might want to run integration tests against the running cloud. The Tempest project is the project that offers an integration test suite for OpenStack. First, navigate to Tempest directory:: cd /opt/stack/tempest To run all tests from the `Ironic plugin `_, execute the following command:: tox -e all -- ironic To limit the amount of tests that you would like to run, you can use a regex. For instance, to limit the run to a single test file, the following command can be used:: tox -e all -- ironic_tempest_plugin.tests.scenario.test_baremetal_basic_ops Debugging Tempest tests ----------------------- It is sometimes useful to step through the test code, line by line, especially when the error output is vague. This can be done by running the tests in debug mode and using a debugger such as `pdb `_. For example, after editing the *test_baremetal_basic_ops* file and setting up the pdb traces you can invoke the ``run_tempest.sh`` script in the Tempest directory with the following parameters:: ./run_tempest.sh -N -d ironic_tempest_plugin.tests.scenario.test_baremetal_basic_ops * The *-N* parameter tells the script to run the tests in the local environment (without a virtualenv) so it can find the Ironic tempest plugin. * The *-d* parameter enables the debug mode, allowing it to be used with pdb. For more information about the supported parameters see:: ./run_tempest.sh --help .. 
note:: Always be careful when running debuggers in time sensitive code, they may cause timeout errors that weren't there before. OSProfiler Tracing in Ironic ============================ OSProfiler is an OpenStack cross-project profiling library. It is being used among OpenStack projects to look at performance issues and detect bottlenecks. For details on how OSProfiler works and how to use it in ironic, please refer to `OSProfiler Support Documentation `_. Building developer documentation ================================ If you would like to build the documentation locally, eg. to test your documentation changes before uploading them for review, run these commands to build the documentation set: - On your local machine:: # activate your development virtualenv . .tox/venv/bin/activate # build the docs tox -edocs #Now use your browser to open the top-level index.html located at: ironic/doc/build/html/index.html - On a remote machine:: # Go to the directory that contains the docs cd ~/ironic/doc/source/ # Build the docs tox -edocs # Change directory to the newly built HTML files cd ~/ironic/doc/build/html/ # Create a server using python on port 8000 python -m SimpleHTTPServer 8000 #Now use your browser to open the top-level index.html located at: http://your_ip:8000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/drivers.rst0000664000175000017500000001245200000000000021733 0ustar00zuulzuul00000000000000.. _pluggable_drivers: ================= Pluggable Drivers ================= Ironic supports a pluggable driver model. This allows contributors to easily add new drivers, and operators to use third-party drivers or write their own. A driver is built at runtime from a *hardware type* and *hardware interfaces*. See :doc:`/install/enabling-drivers` for a detailed explanation of these concepts. 
Hardware types and interfaces are loaded by the ``ironic-conductor`` service during initialization from the setuptools entrypoints ``ironic.hardware.types`` and ``ironic.hardware.interfaces.<type>`` where ``<type>`` is an interface type (for example, ``deploy``). Only hardware types listed in the configuration option ``enabled_hardware_types`` and interfaces listed in configuration options ``enabled_<type>_interfaces`` are loaded. A complete list of hardware types available on the system may be found by enumerating this entrypoint by running the following python script:: #!/usr/bin/env python import pkg_resources as pkg print([p.name for p in pkg.iter_entry_points("ironic.hardware.types") if not p.name.startswith("fake")]) A list of drivers enabled in a running Ironic service may be found by issuing the following command against that API end point:: baremetal driver list Writing a hardware type ----------------------- A hardware type is a Python class, inheriting :py:class:`ironic.drivers.hardware_type.AbstractHardwareType` and listed in the setuptools entry point ``ironic.hardware.types``. Most of the real world hardware types inherit :py:class:`ironic.drivers.generic.GenericHardware` instead. This helper class provides useful implementations for interfaces that are usually the same for all hardware types, such as ``deploy``. The minimum required interfaces are: * :doc:`boot </admin/interfaces/boot>` that specifies how to boot ramdisks and instances on the hardware. A generic ``pxe`` implementation is provided by the ``GenericHardware`` base class. * :doc:`deploy </admin/interfaces/deploy>` that orchestrates the deployment. A few common implementations are provided by the ``GenericHardware`` base class. As of the Rocky release, a deploy interface should decorate its deploy method to indicate that it is a deploy step. Conventionally, the deploy method uses a priority of 100. .. code-block:: python @ironic.drivers.base.deploy_step(priority=100) def deploy(self, task): ..
note:: Most of the hardware types should not override this interface. * `power` implements power actions for the hardware. These common implementations may be used, if supported by the hardware: * :py:class:`ironic.drivers.modules.ipmitool.IPMIPower` * :py:class:`ironic.drivers.modules.redfish.power.RedfishPower` Otherwise, you need to write your own implementation by subclassing :py:class:`ironic.drivers.base.PowerInterface` and providing missing methods. .. note:: Power actions in Ironic are blocking - methods of a power interface should not return until the power action is finished or errors out. * `management` implements additional out-of-band management actions, such as setting a boot device. A few common implementations exist and may be used, if supported by the hardware: * :py:class:`ironic.drivers.modules.ipmitool.IPMIManagement` * :py:class:`ironic.drivers.modules.redfish.management.RedfishManagement` Some hardware types, such as ``snmp`` do not support out-of-band management. They use the fake implementation in :py:class:`ironic.drivers.modules.fake.FakeManagement` instead. Otherwise, you need to write your own implementation by subclassing :py:class:`ironic.drivers.base.ManagementInterface` and providing missing methods. Combine the interfaces in a hardware type by populating the lists of supported interfaces. These lists are prioritized, with the most preferred implementation first. For example: .. code-block:: python class MyHardware(generic.GenericHardware): @property def supported_management_interfaces(self): """List of supported management interfaces.""" return [MyManagement, ipmitool.IPMIManagement] @property def supported_power_interfaces(self): """List of supported power interfaces.""" return [MyPower, ipmitool.IPMIPower] .. note:: In this example, all interfaces, except for ``management`` and ``power`` are taken from the ``GenericHardware`` base class. 
Finally, give the new hardware type and new interfaces human-friendly names and create entry points for them in the ``setup.cfg`` file:: ironic.hardware.types = my-hardware = ironic.drivers.my_hardware:MyHardware ironic.hardware.interfaces.power = my-power = ironic.drivers.modules.my_hardware:MyPower ironic.hardware.interfaces.management = my-management = ironic.drivers.modules.my_hardware:MyManagement Deploy and clean steps ---------------------- Significant parts of the bare metal functionality is implemented via :doc:`deploy steps ` or :doc:`clean steps `. See :doc:`deploy-steps` for information on how to write them. Supported Drivers ----------------- For a list of supported drivers (those that are continuously tested on every upstream commit) please consult the :doc:`drivers page `. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/faq.rst0000664000175000017500000001263200000000000021024 0ustar00zuulzuul00000000000000.. _faq: ========================================== Developer FAQ (frequently asked questions) ========================================== Here are some answers to frequently-asked questions from IRC and elsewhere. .. contents:: :local: :depth: 2 How do I... =========== ...create a migration script template? -------------------------------------- Using the ``ironic-dbsync revision`` command, e.g:: $ cd ironic $ tox -evenv -- ironic-dbsync revision -m \"create foo table\" It will create an empty alembic migration. For more information see the `alembic documentation`_. .. _`alembic documentation`: http://alembic.zzzcomputing.com/en/latest/tutorial.html#create-a-migration-script .. _faq_release_note: ...know if a release note is needed for my change? -------------------------------------------------- `Reno documentation`_ contains a description of what can be added to each section of a release note. 
If, after reading this, you're still unsure about whether to add a release note for your change or not, keep in mind that it is intended to contain information for deployers, so changes to unit tests or documentation are unlikely to require one. ...create a new release note? ----------------------------- By running ``reno`` command via tox, e.g:: $ tox -e venv -- reno new version-foo venv create: /home/foo/ironic/.tox/venv venv installdeps: -r/home/foo/ironic/test-requirements.txt venv develop-inst: /home/foo/ironic venv runtests: PYTHONHASHSEED='0' venv runtests: commands[0] | reno new version-foo Created new notes file in releasenotes/notes/version-foo-ecb3875dc1cbf6d9.yaml venv: commands succeeded congratulations :) $ git status On branch test Untracked files: (use "git add ..." to include in what will be committed) releasenotes/notes/version-foo-ecb3875dc1cbf6d9.yaml Then edit the result file. Note that: - we prefer to use present tense in release notes. For example, a release note should say "Adds support for feature foo", not "Added support for feature foo". (We use 'adds' instead of 'add' because grammatically, it is "ironic adds support", not "ironic add support".) - any variant of English spelling (American, British, Canadian, Australian...) is acceptable. The release note itself should be consistent and not have different spelling variants of the same word. For more information see the `reno documentation`_. .. _`reno documentation`: https://docs.openstack.org/reno/latest/user/usage.html ...update a release note? ------------------------- If this is a release note that pertains to something that was fixed on master or an intermediary release (during a development cycle, that hasn't been branched yet), you can go ahead and update it by submitting a patch. If it is the release note of an ironic release that has branched, `it can be updated `_ but we will only allow it in extenuating circumstances. 
(It can be updated by *only* updating the file in that branch. DO NOT update the file in master and cherry-pick it. If you do, `see how the mess was cleaned up `_.) ...get a decision on something? ------------------------------- You have an issue and would like a decision to be made. First, make sure that the issue hasn't already been addressed, by looking at documentation, stories, specifications, or asking. Information and links can be found on the `Ironic wiki`_ page. There are several ways to solicit comments and opinions: * bringing it up at the `weekly Ironic meeting`_ * bringing it up on IRC_ * bringing it up on the `mailing list`_ (add "[Ironic]" to the Subject of the email) If there are enough core folks at the weekly meeting, after discussing an issue, voting could happen and a decision could be made. The problem with IRC or the weekly meeting is that feedback will only come from the people that are actually present. To inform (and solicit feedback from) more people about an issue, the preferred process is: #. bring it up on the mailing list #. after some period of time has elapsed (and depending on the thread activity), someone should propose a solution via gerrit. (E.g. the person that started the thread if no one else steps up.) The proposal should be made in the git repository that is associated with the issue. (For instance, this decision process was proposed as a documentation patch to the ironic repository.) #. In the email thread, don't forget to provide a link to the proposed patch! #. The discussion then moves to the proposed patch. If this is a big decision, we could declare that some percentage of the cores should vote on it before landing it. (This process was suggested in an email thread about `process for making decisions`_.) .. _Ironic wiki: https://wiki.openstack.org/wiki/Ironic .. _weekly Ironic meeting: https://wiki.openstack.org/wiki/Meetings/Ironic .. _IRC: https://wiki.openstack.org/wiki/Ironic#IRC .. 
_mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss .. _process for making decisions: http://lists.openstack.org/pipermail/openstack-dev/2016-May/095460.html ...add support for GMRs to new executables and extending the GMR? ----------------------------------------------------------------- For more information, see the :oslo.reports-doc:`oslo.reports documentation ` page. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/governance.rst0000664000175000017500000000265400000000000022407 0ustar00zuulzuul00000000000000=========================== Ironic Governance Structure =========================== The ironic project manages a number of repositories that contribute to our mission. The full list of repositories that ironic manages is available in the `governance site`_. .. _`governance site`: https://governance.openstack.org/reference/projects/ironic.html What belongs in ironic governance? ================================== For a repository to be part of the Ironic project: * It must comply with the TC's `rules for a new project `_. * It must not be intended for use with only a single vendor's hardware. A library that implements a standard to manage hardware from multiple vendors (such as IPMI or redfish) is okay. * It must align with Ironic's `mission statement `_. Lack of contributor diversity is a chicken-egg problem, and as such a repository where only a single company is contributing is okay, with the hope that other companies will contribute after joining the ironic project. Repositories that are no longer maintained should be pruned from governance regularly. Proposing a new project to ironic governance ============================================ Bring the proposal to the ironic `weekly meeting `_ to discuss with the team. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/index.rst0000664000175000017500000001131200000000000021356 0ustar00zuulzuul00000000000000Developer's Guide ================= Getting Started --------------- If you are new to ironic, this section contains information that should help you get started as a developer working on the project or contributing to the project. .. toctree:: :maxdepth: 1 Bare Metal Community Developer Contribution Guide Bugs Reporting and Triaging Guide Setting Up Your Development Environment Priorities Specifications Frequently Asked Questions Contributor Vision OpenStack Vision The following pages describe the architecture of the Bare Metal service and may be helpful to anyone working on or with the service, but are written primarily for developers. .. toctree:: :maxdepth: 1 Ironic System Architecture Developing New Notifications OSProfiler Tracing Rolling Upgrades Role Based Access Control Testing These pages contain information for PTLs, cross-project liaisons, and core reviewers. .. toctree:: :maxdepth: 1 Releasing Ironic Projects Ironic Governance Structure .. toctree:: :hidden: states Writing Drivers --------------- Ironic's community includes many hardware vendors who contribute drivers that enable more advanced functionality when Ironic is used in conjunction with that hardware. To do this, the Ironic developer community is committed to standardizing on a `Python Driver API `_ that meets the common needs of all hardware vendors, and evolving this API without breaking backwards compatibility. However, it is sometimes necessary for driver authors to implement functionality - and expose it through the REST API - that can not be done through any existing API. To facilitate that, we also provide the means for API calls to be "passed through" ironic and directly to the driver. Some guidelines on how to implement this are provided below. 
Driver authors are strongly encouraged to talk with the developer community about any implementation using this functionality. .. toctree:: :maxdepth: 1 Driver Overview Writing "vendor_passthru" methods Creating new BIOS interfaces Third party continuous integration testing Writing Deploy or Clean Steps Testing Network Integration --------------------------- In order to test the integration between the Bare Metal and Networking services, support has been added to `devstack `_ to mimic an external physical switch. Here we include a recommended configuration for devstack to bring up this environment. .. toctree:: :maxdepth: 1 Configuring Devstack for multitenant network testing Testing Boot-from-Volume ------------------------ Starting with the Pike release, it is also possible to use DevStack for testing booting from Cinder volumes with VMs. .. toctree:: :maxdepth: 1 Configuring Devstack for boot-from-volume testing Full Ironic Server Python API Reference --------------------------------------- .. toctree:: :maxdepth: 1 api/modules Understanding the Ironic's CI ----------------------------- It's important to understand the role of each job in the CI, how to add new jobs and how to debug failures that may arise. To facilitate that, we have created the documentation below. .. toctree:: :maxdepth: 1 Job roles in the CI How to add a new job? How to debug failures in CI jobs Our policy for stable branches ------------------------------ Stable branches that are on `Extended Maintenance`_ and haven't received backports in a while, can be tagged as ``Unmaintained``, after discussions within the ironic community. If such a decision is taken, an email will be sent to the OpenStack mailing list. What does ``Unmaintained`` mean? The branch still exists, but the ironic upstream community will not actively backport patches from maintained branches. Fixes can still be merged, though, if pushed into review by operators or other downstream developers. 
It also means that branchless projects (e.g.: ironic-tempest-plugin), may not have configurations that are compatible with those branches. As of 09 March 2020, the list of ``Unmaintained`` branches includes: * Ocata (Last commit - Jun 28, 2019) * Pike (Last commit - Oct 2, 2019) .. _Extended Maintenance: https://docs.openstack.org/project-team-guide/stable-branches.html#maintenance-phases ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/ironic-boot-from-volume.rst0000664000175000017500000001064400000000000024750 0ustar00zuulzuul00000000000000===================================== Ironic Boot-from-Volume with DevStack ===================================== This guide shows how to setup DevStack for enabling boot-from-volume feature, which has been supported from the Pike release. This scenario shows how to setup DevStack to enable nodes to boot from volumes managed by cinder with VMs as baremetal servers. DevStack Configuration ====================== The following is ``local.conf`` that will setup DevStack with 3 VMs that are registered in ironic. A volume connector with IQN is created for each node. These connectors can be used to connect volumes created by cinder. The detailed description for DevStack is at :ref:`deploy_devstack`. :: [[local|localrc]] enable_plugin ironic https://opendev.org/openstack/ironic IRONIC_STORAGE_INTERFACE=cinder # Credentials ADMIN_PASSWORD=password DATABASE_PASSWORD=password RABBIT_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password SWIFT_HASH=password SWIFT_TEMPURL_KEY=password # Set glance's default limit to be baremetal image friendly GLANCE_LIMIT_IMAGE_SIZE_TOTAL=5000 # Enable Neutron which is required by Ironic and disable nova-network. 
disable_service n-net disable_service n-novnc enable_service q-svc enable_service q-agt enable_service q-dhcp enable_service q-l3 enable_service q-meta enable_service neutron # Enable Swift for the direct deploy interface. enable_service s-proxy enable_service s-object enable_service s-container enable_service s-account # Disable Horizon disable_service horizon # Disable Heat disable_service heat h-api h-api-cfn h-api-cw h-eng # Swift temp URL's are required for the direct deploy interface. SWIFT_ENABLE_TEMPURLS=True # Create 3 virtual machines to pose as Ironic's baremetal nodes. IRONIC_VM_COUNT=3 IRONIC_BAREMETAL_BASIC_OPS=True DEFAULT_INSTANCE_TYPE=baremetal # Enable additional hardware types, if needed. #IRONIC_ENABLED_HARDWARE_TYPES=ipmi,fake-hardware # Don't forget that many hardware types require enabling of additional # interfaces, most often power and management: #IRONIC_ENABLED_MANAGEMENT_INTERFACES=ipmitool,fake #IRONIC_ENABLED_POWER_INTERFACES=ipmitool,fake #IRONIC_DEFAULT_DEPLOY_INTERFACE=direct # Change this to alter the default driver for nodes created by devstack. # This driver should be in the enabled list above. IRONIC_DEPLOY_DRIVER=ipmi # The parameters below represent the minimum possible values to create # functional nodes. IRONIC_VM_SPECS_RAM=1280 IRONIC_VM_SPECS_DISK=10 # Size of the ephemeral partition in GB. Use 0 for no ephemeral partition. IRONIC_VM_EPHEMERAL_DISK=0 # To build your own IPA ramdisk from source, set this to True IRONIC_BUILD_DEPLOY_RAMDISK=False VIRT_DRIVER=ironic # By default, DevStack creates a 10.0.0.0/24 network for instances. # If this overlaps with the hosts network, you may adjust with the # following. NETWORK_GATEWAY=10.1.0.1 FIXED_RANGE=10.1.0.0/24 FIXED_NETWORK_SIZE=256 # Log all output to files LOGFILE=$HOME/devstack.log LOGDIR=$HOME/logs IRONIC_VM_LOG_DIR=$HOME/ironic-bm-logs After the environment is built, you can create a volume with cinder and request an instance with the volume to nova:: . 
~/devstack/openrc # query the image id of the default cirros image image=$(openstack image show $DEFAULT_IMAGE_NAME -f value -c id) # create keypair ssh-keygen openstack keypair create --public-key ~/.ssh/id_rsa.pub default # create volume volume=$(openstack volume create --image $image --size 1 my-volume -f value -c id) # spawn instance openstack server create --flavor baremetal --volume $volume --key-name default testing You can also run an integration test that an instance is booted from a remote volume with tempest in the environment:: cd /opt/stack/tempest tox -e all-plugin -- ironic_tempest_plugin.tests.scenario.test_baremetal_boot_from_volume Please note that the storage interface will only indicate errors based upon the state of the node and the configuration present. As such a node does not exclusively have to boot via a remote volume, and as such `validate` actions upon nodes may be slightly misleading. If an appropriate `volume target` is defined, no error should be returned for the boot interface. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/ironic-multitenant-networking.rst0000664000175000017500000000761700000000000026276 0ustar00zuulzuul00000000000000========================================== Ironic multitenant networking and DevStack ========================================== This guide will walk you through using OpenStack Ironic/Neutron with the ML2 ``networking-generic-switch`` plugin. Using VMs as baremetal servers ============================== This scenario shows how to setup Devstack to use Ironic/Neutron integration with VMs as baremetal servers and ML2 ``networking-generic-switch`` that interacts with OVS. DevStack Configuration ---------------------- The following is ``local.conf`` that will setup Devstack with 3 VMs that are registered in ironic. ``networking-generic-switch`` driver will be installed and configured in Neutron. 
:: [[local|localrc]] # Configure ironic from ironic devstack plugin. enable_plugin ironic https://opendev.org/openstack/ironic # Install networking-generic-switch Neutron ML2 driver that interacts with OVS enable_plugin networking-generic-switch https://opendev.org/openstack/networking-generic-switch # Add link local info when registering Ironic node IRONIC_USE_LINK_LOCAL=True IRONIC_ENABLED_NETWORK_INTERFACES=flat,neutron IRONIC_NETWORK_INTERFACE=neutron #Networking configuration OVS_PHYSICAL_BRIDGE=brbm PHYSICAL_NETWORK=mynetwork IRONIC_PROVISION_NETWORK_NAME=ironic-provision IRONIC_PROVISION_SUBNET_PREFIX=10.0.5.0/24 IRONIC_PROVISION_SUBNET_GATEWAY=10.0.5.1 Q_PLUGIN=ml2 ENABLE_TENANT_VLANS=True Q_ML2_TENANT_NETWORK_TYPE=vlan TENANT_VLAN_RANGE=100:150 # Credentials ADMIN_PASSWORD=password RABBIT_PASSWORD=password DATABASE_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password SWIFT_HASH=password SWIFT_TEMPURL_KEY=password # Enable Ironic API and Ironic Conductor enable_service ironic enable_service ir-api enable_service ir-cond # Disable nova novnc service, ironic does not support it anyway. disable_service n-novnc # Enable Swift for the direct deploy interface. enable_service s-proxy enable_service s-object enable_service s-container enable_service s-account # Disable Horizon disable_service horizon # Disable Cinder disable_service cinder c-sch c-api c-vol # Disable Tempest disable_service tempest # Set glance's default limit to be baremetal image friendly GLANCE_LIMIT_IMAGE_SIZE_TOTAL=5000 # Swift temp URL's are required for the direct deploy interface. SWIFT_ENABLE_TEMPURLS=True # Create 3 virtual machines to pose as Ironic's baremetal nodes. IRONIC_VM_COUNT=3 IRONIC_BAREMETAL_BASIC_OPS=True # Enable additional hardware types, if needed. 
#IRONIC_ENABLED_HARDWARE_TYPES=ipmi,fake-hardware # Don't forget that many hardware types require enabling of additional # interfaces, most often power and management: #IRONIC_ENABLED_MANAGEMENT_INTERFACES=ipmitool,fake #IRONIC_ENABLED_POWER_INTERFACES=ipmitool,fake #IRONIC_DEFAULT_DEPLOY_INTERFACE=direct # Change this to alter the default driver for nodes created by devstack. # This driver should be in the enabled list above. IRONIC_DEPLOY_DRIVER=ipmi # The parameters below represent the minimum possible values to create # functional nodes. IRONIC_VM_SPECS_RAM=1024 IRONIC_VM_SPECS_DISK=10 # Size of the ephemeral partition in GB. Use 0 for no ephemeral partition. IRONIC_VM_EPHEMERAL_DISK=0 # To build your own IPA ramdisk from source, set this to True IRONIC_BUILD_DEPLOY_RAMDISK=False VIRT_DRIVER=ironic # By default, DevStack creates a 10.0.0.0/24 network for instances. # If this overlaps with the hosts network, you may adjust with the # following. NETWORK_GATEWAY=10.1.0.1 FIXED_RANGE=10.1.0.0/24 FIXED_NETWORK_SIZE=256 # Log all output to files LOGFILE=$HOME/devstack.log LOGDIR=$HOME/logs IRONIC_VM_LOG_DIR=$HOME/ironic-bm-logs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/jobs-description.rst0000664000175000017500000001434300000000000023534 0ustar00zuulzuul00000000000000.. _jobs-description: ================ Jobs description ================ The description of each jobs that runs in the CI when you submit a patch for `openstack/ironic` is visible in :ref:`table_jobs_description`. .. _table_jobs_description: .. list-table:: Table. 
OpenStack Ironic CI jobs description :widths: 53 47 :header-rows: 1 * - Job name - Description * - ironic-tox-unit-with-driver-libs - Runs Ironic unit tests with the driver dependencies installed under Python3 * - ironic-tempest-functional-python3 - Deploys Ironic in standalone mode and runs tempest functional tests that matches the regex `ironic_tempest_plugin.tests.api` under Python3 * - ironic-grenade - Deploys Ironic in a DevStack and runs upgrade for all enabled services. * - ironic-standalone - Deploys Ironic in standalone mode and runs tempest tests that match the regex `ironic_standalone`. * - ironic-standalone-redfish - Deploys Ironic in standalone mode and runs tempest tests that match the regex `ironic_standalone` using the redfish driver. * - ironic-tempest-partition-bios-redfish-pxe - Deploys Ironic in DevStack, configured to use dib ramdisk partition image with `pxe` boot and `redfish` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario`, also deploys 1 virtual baremetal. * - ironic-tempest-partition-uefi-redfish-vmedia - Deploys Ironic in DevStack, configured to use dib ramdisk partition image with `vmedia` boot and `redfish` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario`, also deploys 1 virtual baremetal. * - ironic-tempest-wholedisk-bios-snmp-pxe - Deploys Ironic in DevStack, configured to use a pre-built dib ramdisk wholedisk image that is downloaded from a Swift temporary url, `pxe` boot and `snmp` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario` and deploys 1 virtual baremetal. * - ironic-tempest-partition-bios-ipmi-pxe - Deploys Ironic in DevStack, configured to use dib ramdisk, a partition image, `pxe` boot in legacy mode and `ipmi` hardware type. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario` and deploy 1 virtual baremetal. 
* - ironic-tempest-partition-uefi-ipmi-pxe - Deploys Ironic in DevStack, configured to use dib ramdisk, a partition image, `pxe` boot in UEFI mode and `ipmi` hardware type. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario`, also deploys 1 virtual baremetal. * - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode - Deploys Ironic in a multinode DevStack, configured to use a pre-build tinyipa ramdisk wholedisk image that is downloaded from a Swift temporary url, `pxe` boot and `ipmi` driver. Runs tempest tests that match the regex `(ironic_tempest_plugin.tests.scenario|test_schedule_to_all_nodes)` and deploys 7 virtual baremetal. * - ironic-tempest-bios-ipmi-direct-tinyipa - Deploys Ironic in DevStack, configured to use a pre-build tinyipa ramdisk wholedisk image that is downloaded from a Swift temporary url, `pxe` boot and `ipmi` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario` and deploys 1 virtual baremetal. * - ironic-tempest-bfv - Deploys Ironic in DevStack with cinder enabled, so it can deploy baremetal using boot from volume. Runs tempest tests that match the regex `baremetal_boot_from_volume` and deploys 3 virtual baremetal nodes using boot from volume. * - ironic-tempest-ipa-partition-uefi-pxe-grub2 - Deploys Ironic in DevStack, configured to use pxe with uefi and grub2 and `ipmi` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario` and deploys 1 virtual baremetal. * - metalsmith-integration-glance-localboot-centos8-uefi - Tests the integration between Ironic and Metalsmith using Glance as image source and CentOS8 with local boot and uefi. * - ironic-tox-bandit - Runs bandit security tests in a tox environment to find known issues in the Ironic code. 
* - ironic-inspector-tempest - Deploys Ironic and Ironic Inspector in DevStack, configured to use a pre-build tinyipa ramdisk wholedisk image that is downloaded from a Swift temporary url, `pxe` boot and `ipmi` driver. Runs tempest tests that match the regex `InspectorBasicTest` and deploys 1 virtual baremetal. * - ironic-inspector-tempest-managed-non-standalone - Deploys Ironic and Ironic Inspector in DevStack, configured to use a pre-build tinyipa ramdisk wholedisk image that is downloaded from a Swift temporary url, `pxe` boot and `ipmi` driver. Boot is managed by ironic, ironic-inspector runs in non-standalone mode. Runs tempest tests that match the regex `InspectorBasicTest` and deploys 1 virtual baremetal. * - ironic-inspector-tempest-partition-bios-redfish-vmedia - Deploys Ironic and Ironic Inspector in DevStack, configured to use `vmedia` boot and `redfish` driver. Runs tempest tests that match the regex `InspectorBasicTest` and deploys 1 virtual baremetal. * - ironic-tempest-ipa-wholedisk-bios-ipmi-direct-dib - Deploys Ironic in DevStack, configured to use a pre-built dib ramdisk wholedisk image that is downloaded from http url, `pxe` boot and `ipmi` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario` and deploys 1 virtual baremetal. * - bifrost-integration-tinyipa-ubuntu-focal - Tests the integration between Ironic and Bifrost using a tinyipa image. * - bifrost-integration-redfish-vmedia-uefi-centos-8 - Tests the integration between Ironic and Bifrost using redfish vmedia and a dib image based on centos 8. * - ironic-tempest-pxe_ipmitool-postgres - Deploys Ironic in DevStack, configured to use tinyipa ramdisk partition image with `pxe` boot and `ipmi` driver and postgres instead of mysql. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario`, also deploys 1 virtual baremetal. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/notifications.rst0000664000175000017500000001470600000000000023132 0ustar00zuulzuul00000000000000.. _develop-notifications: ============================ Developing New Notifications ============================ Ironic notifications are events intended for consumption by external services. Notifications are sent to these services over a message bus by :oslo.messaging-doc:`oslo.messaging's Notifier class `. For more information about configuring notifications and available notifications, see :ref:`deploy-notifications`. Ironic also has a set of base classes that assist in clearly defining the notification itself, the payload, and the other fields not auto-generated by oslo (level, event_type and publisher_id). Below describes how to use these base classes to add a new notification to ironic. Adding a new notification to ironic =================================== To add a new notification to ironic, a new versioned notification class should be created by subclassing the NotificationBase class to define the notification itself and the NotificationPayloadBase class to define which fields the new notification will contain inside its payload. You may also define a schema to allow the payload to be automatically populated by the fields of an ironic object. 
Here's an example:: # The ironic object whose fields you want to use in your schema @base.IronicObjectRegistry.register class ExampleObject(base.IronicObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(), 'a_useful_field': fields.StringField(), 'not_useful_field': fields.StringField() } # A class for your new notification @base.IronicObjectRegistry.register class ExampleNotification(notification.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('ExampleNotifPayload') } # A class for your notification's payload @base.IronicObjectRegistry.register class ExampleNotifPayload(notification.NotificationPayloadBase): # Schemas are optional. They just allow you to reuse other objects' # fields by passing in that object and calling populate_schema with # a kwarg set to the other object. SCHEMA = { 'a_useful_field': ('example_obj', 'a_useful_field') } # Version 1.0: Initial version VERSION = '1.0' fields = { 'a_useful_field': fields.StringField(), 'an_extra_field': fields.StringField(nullable=True) } Note that both the payload and notification classes are :oslo.versionedobjects-doc:`oslo versioned objects <>`. Modifications to these require a version bump so that consumers of notifications know when the notifications have changed. SCHEMA defines how to populate the payload fields. It's an optional attribute that subclasses may use to easily populate notifications with data from other objects. It is a dictionary where every key value pair has the following format:: : (, ) The ```` is the name where the data will be stored in the payload object; this field has to be defined as a field of the payload. The ```` shall refer to name of the parameter passed as kwarg to the payload's ``populate_schema()`` call and this object will be used as the source of the data. The ```` shall be a valid field of the passed argument. 
The SCHEMA needs to be applied with the ``populate_schema()`` call before the notification can be emitted. The value of the ``payload.<payload_field>`` field will be set by the ``<source_object>.<source_field>`` field. The ``<source_object>`` will not be part of the payload object internal or external representation. Payload fields that are not set by the SCHEMA can be filled in the same way as in any versioned object. Then, to create a payload, you would do something like the following. Note that if you choose to define a schema in the SCHEMA class variable, you must populate the schema by calling ``populate_schema(example_obj=my_example_obj)`` before emitting the notification is allowed:: my_example_obj = ExampleObject(id=1, a_useful_field='important', not_useful_field='blah') # an_extra_field is optional since it's not a part of the SCHEMA and is a # nullable field in the class fields my_notify_payload = ExampleNotifPayload(an_extra_field='hello') # populate the schema with the ExampleObject fields my_notify_payload.populate_schema(example_obj=my_example_obj) You then create the notification with the oslo required fields (event_type, publisher_id, and level, all sender fields needed by oslo that are defined in the ironic notification base classes) and emit it:: notify = ExampleNotification( event_type=notification.EventType(object='example_obj', action='do_something', status=fields.NotificationStatus.START), publisher=notification.NotificationPublisher( service='ironic-conductor', host='hostname01'), level=fields.NotificationLevel.DEBUG, payload=my_notify_payload) notify.emit(context) When specifying the event_type, ``object`` will specify the object being acted on, ``action`` will be a string describing what action is being performed on that object, and ``status`` will be one of "start", "end", "error", or "success". "start" and "end" are used to indicate when actions that are not immediate begin and succeed. "success" is used to indicate when actions that are immediate succeed. 
"error" is used to indicate when any type of action fails, regardless of whether it's immediate or not. As a result of specifying these parameters, event_type will be formatted as ``baremetal...`` on the message bus. This example will send the following notification over the message bus:: { "priority": "debug", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"ExampleNotifyPayload", "ironic_object.version":"1.0", "ironic_object.data":{ "a_useful_field":"important", "an_extra_field":"hello" } }, "event_type":"baremetal.example_obj.do_something.start", "publisher_id":"ironic-conductor.hostname01" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/osprofiler-support.rst0000664000175000017500000000675500000000000024164 0ustar00zuulzuul00000000000000.. _OSProfiler-support: ================ About OSProfiler ================ OSProfiler is an OpenStack cross-project profiling library. Its API provides different ways to add a new trace point. Trace points contain two messages (start and stop). Messages like below are sent to a collector:: { "name": -(start|stop), "base_id": , "parent_id": , "trace_id": , "info": } The fields are defined as follows: ``base_id`` - that is same for all trace points that belong to one trace. This is used to simplify the process of retrieving all trace points (related to one trace) from the collector. ``parent_id`` - of parent trace point. ``trace_id`` - of current trace point. ``info`` - the dictionary that contains user information passed when calling profiler start() & stop() methods. The profiler uses ceilometer as a centralized collector. Two other alternatives for ceilometer are pure MongoDB driver and Elasticsearch. A notifier is setup to send notifications to ceilometer using oslo.messaging and ceilometer API is used to retrieve all messages related to one trace. 
OSProfiler has entry point that allows the user to retrieve information about traces and present it in HTML/JSON using CLI. For more details see :osprofiler-doc:`OSProfiler – Cross-project profiling library `. How to Use OSProfiler with Ironic in Devstack ============================================= To use or test OSProfiler in ironic, the user needs to setup Devstack with OSProfiler and ceilometer. In addition to the setup described at :ref:`deploy_devstack`, the user needs to do the following: Add the following to ``localrc`` to enable OSProfiler and ceilometer:: enable_plugin panko https://opendev.org/openstack/panko enable_plugin ceilometer https://opendev.org/openstack/ceilometer enable_plugin osprofiler https://opendev.org/openstack/osprofiler # Enable the following services CEILOMETER_NOTIFICATION_TOPICS=notifications,profiler ENABLED_SERVICES+=,ceilometer-acompute,ceilometer-acentral ENABLED_SERVICES+=,ceilometer-anotification,ceilometer-collector ENABLED_SERVICES+=,ceilometer-alarm-evaluator,ceilometer-alarm-notifier ENABLED_SERVICES+=,ceilometer-api Run stack.sh. Once Devstack environment is setup, edit ``ironic.conf`` to set the following profiler options and restart ironic services:: [profiler] enabled = True hmac_keys = SECRET_KEY # default value used across several OpenStack projects trace_sqlalchemy = True In order to trace ironic using OSProfiler, use openstackclient to run baremetal commands with ``--os-profile SECRET_KEY``. 
For example, the following will cause a <trace-id> to be printed after node list:: $ openstack --os-profile SECRET_KEY baremetal node list Output of the above command will include the following:: Trace ID: <trace-id> Display trace with command: osprofiler trace show --html <trace-id> The trace results can be seen using this command:: $ osprofiler trace show --html <trace-id> The trace results can be saved in a file with ``--out file-name`` option:: $ osprofiler trace show --html <trace-id> --out trace.html The trace results show the time spent in ironic-api, ironic-conductor, and db calls. More detailed db tracing is enabled if ``trace_sqlalchemy`` is set to true. References ========== - :osprofiler-doc:`OSProfiler – Cross-project profiling library ` - :ref:`deploy_devstack` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/rbac-testing.rst0000664000175000017500000001376500000000000022637 0ustar00zuulzuul00000000000000=================================== Role Based Access Control - Testing =================================== .. todo: This entire file is being added in to provide context for reviewers so we can keep in-line comments to the necessary points in the yaml files. It *IS* written with a forward awareness of the later patches, but it is also broad in nature attempting to provide context to aid in review. The Role Based Access control testing is a minor departure from the Ironic standard pattern of entirely python based unit testing. In part this was done for purposes of speed and to keep the declaration of the test context. This also lent itself to be very useful due to the nature of A/B testing which is required to properly migrate the Ironic project from a project scoped universe where an ``admin project`` is utilized as the authenticating factor coupled with two custom roles, ``baremetal_admin``, and ``baremetal_observer``. 
As a contributor looking back after getting over a thousand additional tests in place using this method, it definitely helped the speed at which these were created, and then ported to support additional. How these tests work ==================== These tests execute API calls through the API layer, using the appropriate verb and header, with settings to prevent the ``keystonemiddleware`` from intercepting and replacing the headers we're passing. Ultimately this is a feature, and it helps quite a bit. The second aspect of how this works is we're mocking the conductor RPC ``get_topic_for`` and ``get_random_topic_for`` methods. These calls raise Temporary Unavailable, since trying to execute the entire interaction into the conductor is moderately pointless because all policy enforcement is located within the API layer. At the same time wiring everything up to go from API to conductor code and back would have been a heavier lift. As such, the tests largely look for one of the following error codes. * 200 - Got the item from the API - This is a database driven interaction. * 201 - Created - This is a database driven interaction. These are rare. * 204 - Accepted - This is a database driven interaction. These are rare. * 403 - Forbidden - This tells us the policy worked as expected where access was denied. * 404 - NotFound - This is typically when objects were not found. Before Ironic becomes scope aware, these are generally only in the drivers API endpoint's behavior. In System scope aware Project scoped configuration, i.e. later RBAC tests, this will become the dominant response for project scoped users as responding with a 403 if they could be an owner or lessee would provide insight into the existence of a node. * 503 - Service Unavailable - In the context of our tests, we expect this when a request *has* been successfully authenticated and would have been sent along to the conductor. How to make changes or review these tests? 
========================================== The tests cycle through the various endpoints, and repeating patterns are clearly visible. Typically this means a given endpoint is cycled through with the same basic test using slightly different parameters such as different authentication parameters. When it comes to system scope aware tests supporting node ``owners`` and ``lessee``, these tests will cycle a little more with slightly different attributes as the operation is not general against a shared common node, but different nodes. Some tests will test body contents, or attributes. Some will validate the number of records returned. This is important later with ``owner`` and ``lessee`` having slightly different views of the universe. Some general rules apply: * Admins can do things, at least as far as their scope or rights apply. Remember: owner and lessee admins are closer to System scoped Admin Members. * Members can do some things, but not everything * Readers can always read, but as we get into sensitive data later on such as fields containing infrastructure internal addresses, these values will become hidden and additional tests will examine this. * Third party, or external/other Admins will find nothing but sadness in empty lists, 403, 404, or even 500 errors. What is/will be tested? ======================= The idea is to in essence test as much as possible, however as these tests of Role Based Access Control related capabilities will come in a series of phases, styles vary a little. The first phase is ``"legacy"``. In essence these are partially programmatically generated and then human reviewed and values populated with expected values. The second phase is remarkably similar to ``legacy``. It is the safety net where we execute the ``legacy`` tests with the updated ``oslo.policy`` configuration to help enforce scopes. These tests will intentionally begin to fail in phase three. The third phase is the implementation of System scope awareness for the API. 
In this process, as various portions of the API are made system scope aware, the ``legacy`` tests are marked as ``deprecated`` which signals to the second phase test sequences that they are **expected** to fail. New ``system scoped`` tests are also implemented which are matched up by name to the ``legacy`` tests. The major difference being some header values, and a user with a ``member`` role in the ``system`` scope now has some rights. The fourth phase is the implementation of ``owner`` and ``lessee`` aware project scoping. The testing approach is similar, however it is much more of a "shotgun" approach. We test what we know should work, and what we know should not work, but we do not have redundant testing for each role as ``admin`` users are also ``members``, and since the policy rules are designed around thresholds of access, it just made no sense to run the same test for admin and members, where member was the threshold. These thresholds will vary with the proposed default policy. The fourth phase also tests a third party external admin as a negative test to ensure that we are also denying access to resources appropriately. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/releasing.rst0000664000175000017500000003263100000000000022227 0ustar00zuulzuul00000000000000========================= Releasing Ironic Projects ========================= Since the responsibility for releases will move between people, we document that process here. A full list of projects that ironic manages is available in the `governance site`_. .. _`governance site`: https://governance.openstack.org/reference/projects/ironic.html Who is responsible for releases? ================================ The current PTL is ultimately responsible for making sure code gets released. They may choose to delegate this responsibility to a liaison, which is documented in the `cross-project liaison wiki`_. 
Anyone may submit a release request per the process below, but the PTL or liaison must +1 the request for it to be processed. .. _`cross-project liaison wiki`: https://wiki.openstack.org/wiki/CrossProjectLiaisons#Release_management Release process =============== Releases are managed by the OpenStack release team. The release process is documented in the `Project Team Guide`_. .. _`Project Team Guide`: https://docs.openstack.org/project-team-guide/release-management.html#how-to-release What do we have to release? =========================== The ironic project has a number of deliverables under its governance. The ultimate source of truth for this is `projects.yaml `__ in the governance repository. These deliverables have varying release models, and these are defined in the `deliverables YAML files `__ in the releases repository. In general, ironic deliverables follow the `cycle-with-intermediary `__ release model. Non-client libraries -------------------- The following deliverables are non-client libraries: * ironic-lib * metalsmith * sushy Client libraries ---------------- The following deliverables are client libraries: * python-ironicclient * python-ironic-inspector-client Normal release -------------- The following deliverables are Neutron plugins: * networking-baremetal * networking-generic-switch The following deliverables are Horizon plugins: * ironic-ui The following deliverables are Tempest plugins: * ironic-tempest-plugin The following deliverables are tools: * ironic-python-agent-builder The following deliverables are services, or treated as such: * bifrost * ironic * ironic-inspector * ironic-prometheus-exporter * ironic-python-agent Manual release -------------- The ironic-staging-drivers follows a different procedure, see `Releasing ironic-staging-drivers `__. 
Independent ----------- The following deliverables are released `independently `__: * sushy-tools * tenks * virtualbmc Not released ------------ The following deliverables do not need to be released: * ironic-inspector-specs * ironic-specs Bugfix branches =============== The following projects have ``bugfix/X.Y`` branches in addition to standard openstack ``stable/NAME`` branches: * bifrost * ironic * ironic-inspector * ironic-python-agent They are also released on a regular cadence as opposed to on-demand, namely three times a release cycle (roughly a release every 2 months). One of the releases corresponds to the coordinated OpenStack released and receives a ``stable/NAME`` branch. The other two happen during the cycle and receive a ``bugfix/X.Y`` branch, where ``X.Y`` consists of the major and the minor component of the version (e.g. ``bugfix/8.1`` for 8.1.0). To leave some version space for releases from these branches, releases of these projects from the master branch always increase either the major or the minor version. Currently releases from bugfix branches cannot be automated and must be done by the release team manually. Things to do before releasing ============================= * Review the unreleased release notes, if the project uses them. Make sure they follow our :ref:`standards `, are coherent, and have proper grammar. Combine release notes if necessary (for example, a release note for a feature and another release note to add to that feature may be combined). * For ironic releases only, not ironic-inspector releases: if any new API microversions have been added since the last release, update the REST API version history (``doc/source/contributor/webapi-version-history.rst``) to indicate that they were part of the new release. 
* To support rolling upgrades, add this new release version (and release name if it is a named release) into ``ironic/common/release_mappings.py``: * in ``RELEASE_MAPPING`` make a copy of the ``master`` entry, and rename the first ``master`` entry to the new semver release version. * If this is a named release, add a ``RELEASE_MAPPING`` entry for the named release. Its value should be the same as that of the latest semver one (that you just added above). It is important to do this before a stable/ branch is made (or if `the grenade switch is made `_ to use the latest release from stable as the 'old' release). Otherwise, once it is made, CI (the grenade job that tests new-release -> master) will fail. * Check for any open patches that are close to be merged or release critical. This usually includes important bug fixes and/or features that we'd like to release, including the related documentation. How to propose a release ======================== The steps that lead to a release proposal are mainly manual, while proposing the release itself is almost a 100% automated process, accomplished by following the next steps: * Clone the `openstack/releases `_ repository. This is where deliverables are tracked and all the automation resides. * Under the ``deliverables`` directory you can see yaml files for each deliverable (i.e. subproject) grouped by release cycles. * The ``_independent`` directory contains yaml files for deliverables that are not bound to (official) cycles (e.g. ironic-python-agent-builder). * To check the changes we're about to release we can use the tox environment ``list-unreleased-changes``, with this syntax: .. code-block:: bash tox -e venv -- list-unreleased-changes The ``series`` argument is a release series (i.e. master or train, not stable/ussuri or stable/train). For example, assuming we're in the main directory of the releases repository, to check the changes in the ussuri series for ironic-python-agent type: .. 
code-block:: bash tox -e venv -- list-unreleased-changes ussuri openstack/ironic-python-agent * To update the deliverable file for the new release, we use a scripted process in the form of a tox environment called ``new-release``. To get familiar with it and see all the options, type: .. code-block:: bash tox -e venv -- new-release -h Now, based on the list of changes we found in the precedent step, and the release notes, we need to decide on whether the next version will be major, minor (feature) or patch (bugfix). Note that in this case ``series`` is a code name (train, ussuri), not a branch. That is also valid for the current development branch (master) that takes the code name of the future stable release, for example if the future stable release code name is wallaby, we need to use wallaby as ``series``. The ``--stable-branch argument`` is used only for branching in the end of a cycle, independent projects are not branched this way though. The ``--intermediate-branch`` option is used to create an intermediate bugfix branch following the `new release model for ironic projects `_. To propose the release, use the script to update the deliverable file, then commit the change, and propose it for review. For example, to propose a minor release for ironic in the master branch (current development branch), considering that the code name of the future stable release is wallaby, use: .. code-block:: bash tox -e venv -- new-release -v wallaby ironic feature Remember to use a meaningful topic, usually using the name of the deliverable, the new version and the branch, if applicable. A good commit message title should also include the same, for example "Release ironic 1.2.3 for ussuri" * As an optional step, we can use ``tox -e list-changes`` to double-check the changes before submitting them for review. 
Also ``tox -e validate`` (it might take a while to run based on the number of changes) does some some sanity-checks, but since everything is scripted, there shouldn't be any issue. All the scripts are designed and maintained by the release team; in case of questions or doubts or if any errors should arise, you can reach to them in the IRC channel ``#openstack-release``; all release liaisons should be present there. * After the change is up for review, the PTL or a release liaison will have to approve it before it can get approved by the release team. Then, it will be processed automatically by zuul. Things to do after releasing ============================ When a release is done that results in a stable branch ------------------------------------------------------ When a release is done that results in a stable branch for the project, several changes need to be made. The release automation will push a number of changes that need to be approved. This includes: * In the new stable branch: * a change to point ``.gitreview`` at the branch * a change to update the upper constraints file used by ``tox`` * In the master branch: * updating the release notes RST to include the new branch. The generated RST does not include the version range in the title, so we typically submit a follow-up patch to do that. An example of this patch is `here `__. * update the `templates` in `.zuul.yaml` or `zuul.d/project.yaml`. The update is necessary to use the job for the next release `openstack-python3--jobs`. An example of this patch is `here `__. We need to submit patches for changes in the stable branch to: * update the ironic devstack plugin to point at the branched tarball for IPA. An example of this patch is `here `_. * set appropriate defaults for ``TEMPEST_BAREMETAL_MIN_MICROVERSION`` and ``TEMPEST_BAREMETAL_MAX_MICROVERSION`` in ``devstack/lib/ironic`` to make sure that unsupported API tempest tests are skipped on stable branches. E.g. `patch 495319 `_. 
We need to submit patches for changes on master to: * to support rolling upgrades, since the release was a named release, we need to make these changes. Note that we need to wait until *after* the switch in grenade is made to test the latest release (N) with master (e.g. `for stable/queens `_). Doing these changes sooner -- after the ironic release and before the switch when grenade is testing the prior release (N-1) with master, will cause the tests to fail. (You may want to ask/remind infra/qa team, as to when they will do this switch.) * In ``ironic/common/release_mappings.py``, delete any entries from ``RELEASE_MAPPING`` associated with the oldest named release. Since we support upgrades between adjacent named releases, the master branch will only support upgrades from the most recent named release to master. * remove any DB migration scripts from ``ironic.cmd.dbsync.ONLINE_MIGRATIONS`` and remove the corresponding code from ironic. (These migration scripts are used to migrate from an old release to this latest release; they shouldn't be needed after that.) When a release is done that results in a bugfix branch ------------------------------------------------------ In this case the release management only creates a change to point ``.gitreview`` at the branch, ``tox.ini`` is not modified. After the release: * update the Tempest microversions as explained above. * the CI needs additional configuration, so that Zuul knows which branch to take jobs definitions from. See the following examples: * `ironic 18.1 `_ * `ironic-inspector 10.7 `_ * `ironic-python-agent 8.1 `_ Ironic Tempest plugin ~~~~~~~~~~~~~~~~~~~~~ As **ironic-tempest-plugin** is branchless, we need to submit a patch adding stable jobs to its master branch. `Example for Queens `_. Bifrost ~~~~~~~ Bifrost needs to be updated to install dependencies using the stable branch. `Example for Victoria `_. 
The upper constraints file referenced in ``scripts/install-deps.sh`` needs to be updated to the new release. For all releases ---------------- For all releases, whether or not it results in a stable branch: * update the specs repo to mark any specs completed in the release as implemented. * remove any -2s on patches that were blocked until after the release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/rolling-upgrades.rst0000664000175000017500000006132100000000000023532 0ustar00zuulzuul00000000000000.. _rolling-upgrades-dev: ================ Rolling Upgrades ================ The ironic (ironic-api and ironic-conductor) services support rolling upgrades, starting with a rolling upgrade from the Ocata to the Pike release. This describes the design of rolling upgrades, followed by notes for developing new features or modifying an IronicObject. Design ====== Rolling upgrades between releases --------------------------------- Ironic follows the `release-cycle-with-intermediary release model `_. The releases are `semantic-versioned `_, in the form ... We refer to a ``named release`` of ironic as the release associated with a development cycle like Pike. In addition, ironic follows the `standard deprecation policy `_, which says that the deprecation period must be at least three months and a cycle boundary. This means that there will never be anything that is both deprecated *and* removed between two named releases. Rolling upgrades will be supported between: * named release N to N+1 (starting with N == Ocata) * any named release to its latest revision, containing backported bug fixes. Because those bug fixes can contain improvements to the upgrade process, the operator should patch the system before upgrading between named releases. * most recent named release N (and semver releases newer than N) to master. 
As with the above bullet point, there may be a bug or a feature introduced on a master branch, that we want to remove before publishing a named release. Deprecation policy allows to do this in a 3 month time frame. If the feature was included and removed in intermediate releases, there should be a release note added, with instructions on how to do a rolling upgrade to master from an affected release or release span. This would typically instruct the operator to upgrade to a particular intermediate release, before upgrading to master. Rolling upgrade process ----------------------- Ironic supports rolling upgrades as described in the :doc:`upgrade guide <../admin/upgrade-guide>`. The upgrade process will cause the ironic services to be running the ``FromVer`` and ``ToVer`` releases in this order: 0. Upgrade ironic code and run database schema migrations via the ``ironic-dbsync upgrade`` command. 1. Upgrade code and restart ironic-conductor services, one at a time. 2. Upgrade code and restart ironic-api services, one at a time. 3. Unpin API, RPC and object versions so that the services can now use the latest versions in ``ToVer``. This is done via updating the configuration option described below in `API, RPC and object version pinning`_ and then restarting the services. ironic-conductor services should be restarted first, followed by the ironic-api services. This is to ensure that when new functionality is exposed on the unpinned API service (via API micro version), it is available on the backend. 
+------+---------------------------------+---------------------------------+ | step | ironic-api | ironic-conductor | +======+=================================+=================================+ | 0 | all FromVer | all FromVer | +------+---------------------------------+---------------------------------+ | 1.1 | all FromVer | some FromVer, some ToVer-pinned | +------+---------------------------------+---------------------------------+ | 1.2 | all FromVer | all ToVer-pinned | +------+---------------------------------+---------------------------------+ | 2.1 | some FromVer, some ToVer-pinned | all ToVer-pinned | +------+---------------------------------+---------------------------------+ | 2.2 | all ToVer-pinned | all ToVer-pinned | +------+---------------------------------+---------------------------------+ | 3.1 | all ToVer-pinned | some ToVer-pinned, some ToVer | +------+---------------------------------+---------------------------------+ | 3.2 | all ToVer-pinned | all ToVer | +------+---------------------------------+---------------------------------+ | 3.3 | some ToVer-pinned, some ToVer | all ToVer | +------+---------------------------------+---------------------------------+ | 3.4 | all ToVer | all ToVer | +------+---------------------------------+---------------------------------+ Policy for changes to the DB model ---------------------------------- The policy for changes to the DB model is as follows: * Adding new items to the DB model is supported. * The dropping of columns or tables and corresponding objects' fields is subject to ironic's `deprecation policy `_. But its alembic script has to wait one more deprecation period, otherwise an ``unknown column`` exception will be thrown when ``FromVer`` services access the DB. This is because :command:`ironic-dbsync upgrade` upgrades the DB schema but ``FromVer`` services still contain the dropped field in their SQLAlchemy DB model. * An ``alembic.op.alter_column()`` to rename or resize a column is not allowed. 
Instead, split it into multiple operations, with one operation per release cycle (to maintain compatibility with an old SQLAlchemy model). For example, to rename a column, add the new column in release N, then remove the old column in release N+1. * Some implementations of SQL's ``ALTER TABLE``, such as adding foreign keys in PostgreSQL, may impose table locks and cause downtime. If the change cannot be avoided and the impact is significant (e.g. the table can be frequently accessed and/or store a large dataset), these cases must be mentioned in the release notes. API, RPC and object version pinning ----------------------------------- For the ironic services to be running old and new releases at the same time during a rolling upgrade, the services need to be able to handle different API, RPC and object versions. This versioning is handled via the configuration option: ``[DEFAULT]/pin_release_version``. It is used to pin the API, RPC and IronicObject (e.g., Node, Conductor, Chassis, Port, and Portgroup) versions for all the ironic services. The default value of empty indicates that ironic-api and ironic-conductor will use the latest versions of API, RPC and IronicObjects. Its possible values are releases, named (e.g. ``ocata``) or sem-versioned (e.g. ``7.0``). Internally, in `common/release_mappings.py `_, ironic maintains a mapping that indicates the API, RPC and IronicObject versions associated with each release. This mapping is maintained manually. During a rolling upgrade, the services using the new release will set the configuration option value to be the name (or version) of the old release. This will indicate to the services running the new release, which API, RPC and object versions that they should be compatible with, in order to communicate with the services using the old release. 
Handling API versions --------------------- When the (newer) service is pinned, the maximum API version it supports will be the pinned version -- which the older service supports (as described above at `API, RPC and object version pinning`_). The ironic-api service returns HTTP status code 406 for any requests with API versions that are higher than this maximum version. Handling RPC versions --------------------- `ConductorAPI.__init__() `_ sets the ``version_cap`` variable to the desired (latest or pinned) RPC API version and passes it to the ``RPCClient`` as an initialization parameter. This variable is then used to determine the maximum requested message version that the ``RPCClient`` can send. Each RPC call can customize the request according to this ``version_cap``. The `Ironic RPC versions`_ section below has more details about this. Handling IronicObject versions ------------------------------ Internally, ironic services deal with IronicObjects in their latest versions. Only at these boundaries, when the IronicObject enters or leaves the service, do we deal with object versioning: * getting objects from the database: convert to latest version * saving objects to the database: if pinned, save in pinned version; else save in latest version * serializing objects (to send over RPC): if pinned, send pinned version; else send latest version * deserializing objects (receiving objects from RPC): convert to latest version The ironic-api service also has to handle API requests/responses based on whether or how a feature is supported by the API version and object versions. For example, when the ironic-api service is pinned, it can only allow actions that are available to the object's pinned version, and cannot allow actions that are only available for the latest version of that object. To support this: * All the database tables (SQLAlchemy models) of the IronicObjects have a column named ``version``. The value is the version of the object that is saved in the database. 
* The method ``IronicObject.get_target_version()`` returns the target version. If pinned, the pinned version is returned. Otherwise, the latest version is returned. * The method ``IronicObject.convert_to_version()`` converts the object into the target version. The target version may be a newer or older version than the existing version of the object. The bulk of the work is done in the helper method ``IronicObject._convert_to_version()``. Subclasses that have new versions redefine this to perform the actual conversions. In the following, * The old release is ``FromVer``; it uses version 1.14 of a Node object. * The new release is ``ToVer``. It uses version 1.15 of a Node object -- this has a deprecated ``extra`` field and a new ``meta`` field that replaces ``extra``. * db_obj['meta'] and db_obj['extra'] are the database representations of those node fields. Getting objects from the database (API/conductor <-- DB) :::::::::::::::::::::::::::::::::::::::::::::::::::::::: Both ironic-api and ironic-conductor services read values from the database. These values are converted to IronicObjects via the method ``IronicObject._from_db_object()``. This method always returns the IronicObject in its latest version, even if it was in an older version in the database. This is done regardless of the service being pinned or not. Note that if an object is converted to a later version, that IronicObject will retain any changes (in its ``_changed_fields`` field) resulting from that conversion. This is needed in case the object gets saved later, in the latest version. 
For example, if the node in the database is in version 1.14 and has db_obj['extra'] set: * a ``FromVer`` service will get a Node with node.extra = db_obj['extra'] (and no knowledge of node.meta since it doesn't exist) * a ``ToVer`` service (pinned or unpinned), will get a Node with: * node.meta = db_obj['extra'] * node.extra = None * node._changed_fields = ['meta', 'extra'] Saving objects to the database (API/conductor --> DB) ::::::::::::::::::::::::::::::::::::::::::::::::::::: The version used for saving IronicObjects to the database is determined as follows: * For an unpinned service, the object is saved in its latest version. Since objects are always in their latest version, no conversions are needed. * For a pinned service, the object is saved in its pinned version. Since objects are always in their latest version, the object needs to be converted to the pinned version before being saved. The method ``IronicObject.do_version_changes_for_db()`` handles this logic, returning a dictionary of changed fields and their new values (similar to the existing ``oslo.versionedobjects.VersionedObject.obj_get_changes()``). Since we do not keep track internally, of the database version of an object, the object's ``version`` field will always be part of these changes. The `Rolling upgrade process`_ (at step 3.1) ensures that by the time an object can be saved in its latest version, all services are running the newer release (although some may still be pinned) and can handle the latest object versions. An interesting situation can occur when the services are as described in step 3.1. It is possible for an IronicObject to be saved in a newer version and subsequently get saved in an older version. For example, a ``ToVer`` unpinned conductor might save a node in version 1.5. A subsequent request may cause a ``ToVer`` pinned conductor to replace and save the same node in version 1.4! 
Sending objects via RPC (API/conductor -> RPC) :::::::::::::::::::::::::::::::::::::::::::::: When a service makes an RPC request, any IronicObjects that are sent as part of that request are serialized into entities or primitives via ``IronicObjectSerializer.serialize_entity()``. The version used for objects being serialized is as follows: * For an unpinned service, the object is serialized to its latest version. Since objects are always in their latest version, no conversions are needed. * For a pinned service, the object is serialized to its pinned version. Since objects are always in their latest version, the object is converted to the pinned version before being serialized. The converted object includes changes that resulted from the conversion; this is needed so that the service at the other end of the RPC request has the necessary information if that object will be saved to the database. Receiving objects via RPC (API/conductor <- RPC) :::::::::::::::::::::::::::::::::::::::::::::::: When a service receives an RPC request, any entities that are part of the request need to be deserialized (via ``oslo.versionedobjects.VersionedObjectSerializer.deserialize_entity()``). For entities that represent IronicObjects, we want the deserialization process (via ``IronicObjectSerializer._process_object()``) to result in IronicObjects that are in their latest version, regardless of the version they were sent in and regardless of whether the receiving service is pinned or not. Again, any objects that are converted will retain the changes that resulted from the conversion, useful if that object is later saved to the database. For example, a ``FromVer`` ironic-api could issue an ``update_node()`` RPC request with a node in version 1.4, where node.extra was changed (so node._changed_fields = ['extra']). This node will be serialized in version 1.4. The receiving ``ToVer`` pinned ironic-conductor deserializes it and converts it to version 1.5. 
The resulting node will have node.meta set (to the changed value from node.extra in v1.14), node.extra = None, and node._changed_fields = ['meta', 'extra'].
- ironic-api (client-side) sets a version cap (by passing the version cap to the constructor of oslo_messaging.RPCClient). This "pinning" is in place during a rolling upgrade when the ``[DEFAULT]/pin_release_version`` configuration option is set. - New RPC methods are not available when the service is pinned to the older release version. In this case, the corresponding REST API function should return a server error or implement alternative behaviours. - Methods which change arguments should run ``client.can_send_version()`` to see if the version of the request is compatible with the version cap of the RPC Client. Otherwise the request needs to be created to work with a previous version that is supported. - ironic-conductor (server-side) should tolerate older versions of requests in order to keep working during the rolling upgrade process. The behaviour of ironic-conductor will depend on the input parameters passed from the client-side. - Old methods can be removed only after they are no longer used by a previous named release. Object versions --------------- When subclasses of ``ironic.objects.base.IronicObject`` are modified, the following needs to be considered: - Any change of fields or change in signature of remotable methods needs a bump of the object version. The object versions are also maintained in ``ironic/common/release_mappings.py``. - New objects must be added to ``ironic/common/release_mappings.py``. Also for the first releases they should be excluded from the version check by adding their class names to the ``NEW_MODELS`` list in ``ironic/cmd/dbsync.py``. - The arguments of remotable methods (methods which are remoted to the conductor via RPC) can only be added as optional. They cannot be removed or changed in an incompatible way (to the previous release). - Field types cannot be changed. Instead, create a new field and deprecate the old one. 
- There is a `unit test `_ that generates the hash of an object using its fields and the signatures of its remotable methods. Objects that have a version bump need to be updated in the `expected_object_fingerprints `_ dictionary; otherwise this test will fail. A failed test can also indicate to the developer that their change(s) to an object require a version bump. - When new version objects communicate with old version objects and when reading or writing to the database, ``ironic.objects.base.IronicObject._convert_to_version()`` will be called to convert objects to the target version. Objects should implement their own ._convert_to_version() to remove or alter fields which were added or changed after the target version:: def _convert_to_version(self, target_version, remove_unavailable_fields=True): """Convert to the target version. Subclasses should redefine this method, to do the conversion of the object to the target version. Convert the object to the target version. The target version may be the same, older, or newer than the version of the object. This is used for DB interactions as well as for serialization/deserialization. The remove_unavailable_fields flag is used to distinguish these two cases: 1) For serialization/deserialization, we need to remove the unavailable fields, because the service receiving the object may not know about these fields. remove_unavailable_fields is set to True in this case. 2) For DB interactions, we need to set the unavailable fields to their appropriate values so that these fields are saved in the DB. (If they are not set, the VersionedObject magic will not know to save/update them to the DB.) remove_unavailable_fields is set to False in this case. :param target_version: the desired version of the object :param remove_unavailable_fields: True to remove fields that are unavailable in the target version; set this to True when (de)serializing. 
False to set the unavailable fields to appropriate values; set this to False for DB interactions. This method must handle: * converting from an older version to a newer version * converting from a newer version to an older version * making sure, when converting, that you take into consideration other object fields that may have been affected by a field (value) only available in a newer version. For example, if field 'new' is only available in Node version 1.5 and Node.affected = Node.new+3, when converting to 1.4 (an older version), you may need to change the value of Node.affected too. Online data migrations ---------------------- The ``ironic-dbsync online_data_migrations`` command will perform online data migrations. Keep in mind the `Policy for changes to the DB model`_. Future incompatible changes in SQLAlchemy models, like removing or renaming columns and tables can break rolling upgrades (when ironic services are run with different release versions simultaneously). It is forbidden to remove these database resources when they may still be used by the previous named release. When `creating new Alembic migrations `_ which modify existing models, make sure that any new columns default to NULL. Test the migration out on a non-empty database to make sure that any new constraints don't cause the database to be locked out for normal operations. You can find an overview on what DDL operations may cause downtime in https://dev.mysql.com/doc/refman/5.7/en/innodb-create-index-overview.html. (You should also check older, widely deployed InnoDB versions for issues.) In the case of PostgreSQL, adding a foreign key may lock a whole table for writes. Make sure to add a release note if there are any downtime-related concerns. Backfilling default values, and migrating data between columns or between tables must be implemented inside an online migration script. 
A script is a database API method (added to ``ironic/db/api.py`` and ``ironic/db/sqlalchemy/api.py``) which takes two arguments: - context: an admin context - max_count: this is used to limit the query. It is the maximum number of objects to migrate; >= 0. If zero, all the objects will be migrated. It returns a two-tuple: - the total number of objects that need to be migrated, at the start of the method, and - the number of migrated objects. In this method, the version column can be used to select and update old objects. The method name should be added to the list of ``ONLINE_MIGRATIONS`` in ``ironic/cmd/dbsync.py``. The method should be removed in the next named release after this one. After online data migrations are completed and the SQLAlchemy models no longer contain old fields, old columns can be removed from the database. This takes at least 3 releases, since we have to wait until the previous named release no longer contains references to the old schema. Before removing any resources from the database by modifying the schema, make sure that your implementation checks that all objects in the affected tables have been migrated. This check can be implemented using the version column. "ironic-dbsync upgrade" command ------------------------------- The ``ironic-dbsync upgrade`` command first checks that the versions of the objects are compatible with the (new) release of ironic, before it will make any DB schema changes. If one or more objects are not compatible, the upgrade will not be performed. This check is done by comparing the objects' ``version`` field in the database with the expected (or supported) versions of these objects. The supported versions are the versions specified in ``ironic.common.release_mappings.RELEASE_MAPPING``. The newly created tables cannot pass this check and thus have to be excluded by adding their object class names (e.g. ``Node``) to ``ironic.cmd.dbsync.NEW_MODELS``. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/states.rst0000664000175000017500000000020500000000000021551 0ustar00zuulzuul00000000000000====================== Ironic's State Machine ====================== The content has been migrated, please see :doc:`/user/states`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/third-party-ci.rst0000664000175000017500000000333700000000000023117 0ustar00zuulzuul00000000000000.. _third-party-ci: ================================== Third Party Continuous Integration ================================== .. NOTE:: This document is a work-in-progress. Unfilled sections will be worked in follow-up patchsets. This version is to get a basic outline and index done so that we can then build on it. (krtaylor) This document provides tips and guidelines for third-party driver developers setting up their continuous integration test systems. CI Architecture Overview ======================== Requirements Cookbook ===================== Sizing ------ Infrastructure -------------- This section describes what changes you'll need to make to a your CI system to add an ironic job. jenkins changes ############### nodepool changes ################ neutron changes ############### pre-test hook ############# cleanup hook ############ Ironic ------ Hardware Pool Management ======================== Problem ------- If you are using actual hardware as target machines for your CI testing then the problem of two jobs trying to use the name target arises. If you have one target machine and a maximum number of one jobs running on your ironic pipeline at a time, then you won't run into this problem. However, one target may not handle the load of ironic's daily patch submissions. 
Solutions --------- Zuul v3 ####### Molten Iron ########### `molteniron `_ is a tool that allows you to reserve hardware from a pool at the last minute to use in your job. Once finished testing, you can unreserve the hardware making it available for the next test job. Tips and Tricks =============== Optimize Run Time ----------------- Image Server ############ Other References ---------------- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/vendor-passthru.rst0000664000175000017500000001442600000000000023424 0ustar00zuulzuul00000000000000.. _vendor-passthru: ============== Vendor Methods ============== This document is a quick tutorial on writing vendor specific methods to a driver. The first thing to note is that the Ironic API supports two vendor endpoints: A driver vendor passthru and a node vendor passthru. * The ``VendorInterface`` allows hardware types to expose a custom top-level functionality which is not specific to a Node. For example, let's say the driver `ipmi` exposed a method called `authentication_types` that would return what are the authentication types supported. It could be accessed via the Ironic API like: :: GET http://
<address>:<port>/v1/drivers/ipmi/vendor_passthru/authentication_types
:/v1/nodes//vendor_passthru/send_raw Writing Vendor Methods ====================== Writing a custom vendor method in Ironic should be simple. The first thing to do is write a class inheriting from the `VendorInterface`_ class: .. code-block:: python class ExampleVendor(VendorInterface) def get_properties(self): return {} def validate(self, task, **kwargs): pass The `get_properties` is a method that all driver interfaces have, it should return a dictionary of : telling in the description whether that property is required or optional so the node can be manageable by that driver. For example, a required property for a `ipmi` driver would be `ipmi_address` which is the IP address or hostname of the node. We are returning an empty dictionary in our example to make it simpler. The `validate` method is responsible for validating the parameters passed to the vendor methods. Ironic will not introspect into what is passed to the drivers, it's up to the developers writing the vendor method to validate that data. Let's extend the `ExampleVendor` class to support two methods, the `authentication_types` which will be exposed on the driver vendor passthru endpoint; And the `send_raw` method that will be exposed on the node vendor passthru endpoint: .. code-block:: python class ExampleVendor(VendorInterface) def get_properties(self): return {} def validate(self, task, method, **kwargs): if method == 'send_raw': if 'raw_bytes' not in kwargs: raise MissingParameterValue() @base.driver_passthru(['GET'], async_call=False) def authentication_types(self, context, **kwargs): return {"types": ["NONE", "MD5", "MD2"]} @base.passthru(['POST']) def send_raw(self, task, **kwargs): raw_bytes = kwargs.get('raw_bytes') ... That's it! Writing a node or driver vendor passthru method is pretty much the same, the only difference is how you decorate the methods and the first parameter of the method (ignoring self). 
A method decorated with the `@passthru` decorator should expect a Task object as first parameter and a method decorated with the `@driver_passthru` decorator should expect a Context object as first parameter. Both decorators accept these parameters: * http_methods: A list of what the HTTP methods supported by that vendor function. To know what HTTP method that function was invoked with, a `http_method` parameter will be present in the `kwargs`. Supported HTTP methods are *POST*, *PUT*, *GET* and *PATCH*. * method: By default the method name is the name of the python function, if you want to use a different name this parameter is where this name can be set. For example: .. code-block:: python @passthru(['PUT'], method="alternative_name") def name(self, task, **kwargs): ... * description: A string containing a nice description about what that method is supposed to do. Defaults to "" (empty string). .. _VendorInterface: ../api/ironic.drivers.base.html#ironic.drivers.base.VendorInterface * async_call: A boolean value to determine whether this method should run asynchronously or synchronously. Defaults to True (Asynchronously). .. note:: This parameter was previously called "async". The node vendor passthru decorator (`@passthru`) also accepts the following parameter: * require_exclusive_lock: A boolean value determining whether this method should require an exclusive lock on a node between validate() and the beginning of method execution. For synchronous methods, the lock on the node would also be kept for the duration of method execution. Defaults to True. .. WARNING:: Please avoid having a synchronous method for slow/long-running operations **or** if the method does talk to a BMC; BMCs are flaky and very easy to break. .. WARNING:: Each asynchronous request consumes a worker thread in the ``ironic-conductor`` process. This can lead to starvation of the thread pool, resulting in a denial of service. 
Give the new vendor interface implementation a human-friendly name and create an entry point for it in the ``setup.cfg``:: ironic.hardware.interfaces.vendor = example = ironic.drivers.modules.example:ExampleVendor Finally, add it to the list of supported vendor interfaces for relevant hardware types, for example: .. code-block:: python class ExampleHardware(generic.GenericHardware): ... @property def supported_vendor_interfaces(self): return [example.ExampleVendor] Backwards Compatibility ======================= There is no requirement that changes to a vendor method be backwards compatible. However, for your users' sakes, we highly recommend that you do so. If you are changing the exceptions being raised, you might want to ensure that the same HTTP code is being returned to the user. For non-backwards compatibility, please make sure you add a release note that indicates this. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/vision-reflection.rst0000664000175000017500000000434100000000000023712 0ustar00zuulzuul00000000000000.. _vision_reflection: ================================================= Comparison to the 2018 OpenStack Technical Vision ================================================= In late-2018, the OpenStack Technical composed a `technical vision `_ of what OpenStack clouds should look like. While every component differs, and "cloudy" interactions change dramatically the closer to physical hardware one gets, there are a few areas where Ironic could use some improvement. This list is largely for the purposes of help wanted. It is also important to note that Ironic as a project has a `vision document `_ for itself. 
The Pillars of Cloud - Self Service =================================== * Ironic's mechanisms and tooling are low level infrastructure mechanisms and as such there has never been a huge emphasis or need on making Ironic be capable of offering direct multi-tenant interaction. Most users interact with the bare metal managed by Ironic via Nova, which abstracts away many of these issues. Eventually, we should offer direct multi-tenancy which is not oriented towards admin-only. Design Goals - Built-in Reliability and Durability ================================================== * Ironic presently considers in-flight operations as failed upon the restart of a controller that was previously performing a task, because we do not know the current status of the task upon re-start. In some cases, this makes sense, but potentially requires administrative intervention in the worst of cases. In a perfect universe, Ironic "conductors" would validate their perception, in case tasks actually finished. Design Goals - Graphical User Interface ======================================= * While a graphical interface was developed for Horizon in the form of `ironic-ui `_, currently ironic-ui receives only minimal housekeeping. As Ironic has evolved, ironic-ui is stuck on version `1.34` and knows nothing of our evolution since. Ironic ultimately needs a contributor with sufficient time to pick up ``ironic-ui`` or to completely replace it as a functional and customizable user interface. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/vision.rst0000664000175000017500000000631700000000000021567 0ustar00zuulzuul00000000000000.. 
_vision: ================== Contributor Vision ================== Background ========== During the Rocky Project Teams Gathering (Feburary/March 2018), The contributors in the room at that time took a few minutes to write out each contributor's vision of where they see ironic in five years time. After everyone had a chance to spend a few minutes writing, we went around the room and gave every contributor the chance to read their vision and allow other contributors to ask questions to better understand what each individual contributor wrote. While we were doing that, we also took time to capture the common themes. This entire exercise did result in some laughs and a common set of words, and truly helped to ensure that the entire team proceeded to use the same "words" to describe various aspects as the sessions progressed during the week. We also agreed that we should write a shared vision, to have something to reference and remind us of where we want to go as a community. Rocky Vision: For 2022-2023 =========================== Common Themes ------------- Below is an entirely unscientific summary of common themes that arose during the discussion among fourteen contributors. * Contributors picked a time between 2020, and 2023. * 4 Contributors foresee ironic being the leading Open Source baremetal deployment technology * 2 Contributors foresee ironic reaching feature parity with Nova. * 2 Contributors foresee users moving all workloads "to the cloud" * 1 Contributor foresees Kubernetes and Container integration being the major focus of Bare Metal as a Service further down the road. * 2 Contributors foresee greater composible hardware being more common. * 1 Contributor foresees ironic growing into or supporting CMDBs. * 2 Contributors foresee that features are more micro-service oriented. * 2 Contributors foresee that ironic supported all of the possible baremetal management needs * 1 Contributor foresees standalone use being more common. 
* 2 Contributors foresee the ironic developer community growing
* ``GET /v1/nodes/{node_ident}/history/`` * ``GET /v1/nodes/{node_ident}/history/{event_uuid}`` 1.77 (Xena, 18.2) ---------------------- Add a fields selector to the the Drivers list: * ``GET /v1/drivers?fields=`` Also add a fields selector to the the Driver detail: * ``GET /v1/drivers/{driver_name}?fields=`` 1.76 (Xena, 18.2) ---------------------- Add endpoints for changing boot mode and secure boot state of node asynchronously: * ``PUT /v1/nodes/{node_ident}/states/boot_mode`` * ``PUT /v1/nodes/{node_ident}/states/secure_boot`` 1.75 (Xena, 18.1) ---------------------- Add ``boot_mode`` and ``secure_boot`` to node object and expose their state at: * ``/v1/nodes/{node_ident}/states`` 1.74 (Xena, 18.0) ---------------------- Add support for BIOS registry fields which include details about the BIOS setting. Included in the ``/v1/nodes/{node_ident}/bios/{setting}`` response. Add a new selector to include the fields in the BIOS settings list: * ``/v1/nodes/{node_ident}/bios/?detail=`` Also add a fields selector to the the BIOS settings list: * ``/v1/nodes/{node_ident}/bios/?fields=`` 1.73 (Xena, 18.0) ---------------------- Add a new ``deploy`` verb as an alias to ``active`` and ``undeploy`` verb as an alias to ``deleted``. 1.72 (Wallaby, 17.0) ---------------------- Add support for ``agent_status`` and ``agent_status_message`` to /v1/heartbeat. These fields are used for external installation tools, such as Anaconda, to report back status. 1.71 (Wallaby, 17.0) ---------------------- Signifier of the API supporting keystone ``system`` scoped roles and access controls. This is an informational flag for clients to be aware of the server's capability. 1.70 (Wallaby, 17.0) ---------------------- Add support for ``disable_ramdisk`` parameter to provisioning endpoint ``/v1/nodes/{node_ident}/states/provision``. 1.69 (Wallaby, 16.2) ---------------------- Add support for ``deploy_steps`` parameter to provisioning endpoint ``/v1/nodes/{node_ident}/states/provision``. 
Available and optional when target is 'active' or 'rebuild'. 1.68 (Victoria, 16.0) ----------------------- Added the ``agent_verify_ca`` parameter to the ramdisk heartbeat API. 1.67 (Victoria, 15.1) ----------------------- Add support for the mutually exclusive ``port_uuid`` and ``portgroup_uuid`` fields by having the node vif_attach API accept those values within ``vif_info``. If one is specified, then Ironic will attempt to attach a VIF to the relative port or portgroup. 1.66 (Victoria, 15.1) ----------------------- Add ``network_data`` field to the node object, that will be used by stand-alone ironic to pass L3 network configuration information to ramdisk. 1.65 (Ussuri, 15.0) --------------------- Added ``lessee`` field to the node object. The field should match the ``project_id`` of the intended lessee. If an allocation has an owner, then the allocation process will only match the allocation with a node that has the same ``owner`` or ``lessee``. 1.64 (Ussuri, 15.0) --------------------- Added the ``network_type`` to the port objects ``local_link_connection`` field. The ``network_type`` can be set to either ``managed`` or ``unmanaged``. When the type is ``unmanaged`` other fields are not required. Use ``unmanaged`` when the neutron ``network_interface`` is required, but the network is in fact a flat network where no actual switch management is done. 1.63 (Ussuri, 15.0) --------------------- Added the following new endpoints for indicator management: * ``GET /v1/nodes//management/indicators`` to list all available indicators names for each of the hardware component. Currently known components are: ``chassis``, ``system``, ``disk``, ``power`` and ``nic``. * ``GET /v1/nodes//management/indicators//`` to retrieve all indicators and their states for the hardware component. * ``PUT /v1/nodes//management/indicators//`` change state of the desired indicators of the component. 
1.62 (Ussuri, 15.0) --------------------- This version of the API is to signify capability of an ironic deployment to support the ``agent token`` functionality with the ``ironic-python-agent``. 1.61 (Ussuri, 14.0) --------------------- Added ``retired`` field to the node object to mark nodes for retirement. If set, this flag will move nodes to ``manageable`` upon automatic cleaning. ``manageable`` nodes which have this flag set cannot be moved to available. Also added ``retired_reason`` to specify the retirement reason. 1.60 (Ussuri, 14.0) --------------------- Added ``owner`` field to the allocation object. The field should match the ``project_id`` of the intended owner. If the ``owner`` field is set, the allocation process will only match the allocation with a node that has the same ``owner`` field set. 1.59 (Ussuri, 14.0) --------------------- Added the ability to specify a ``vendor_data`` dictionary field in the ``configdrive`` parameter submitted with the deployment of a node. The value is a dictionary which is served as ``vendor_data2.json`` in the config drive. 1.58 (Train, 12.2.0) -------------------- Added the ability to backfill allocations for already deployed nodes by creating an allocation with ``node`` set. 1.57 (Train, 12.2.0) -------------------- Added the following new endpoint for allocation: * ``PATCH /v1/allocations/`` that allows updating ``name`` and ``extra`` fields for an existing allocation. 1.56 (Stein, 12.1.0) -------------------- Added the ability for the ``configdrive`` parameter submitted with the deployment of a node, to include a ``meta_data``, ``network_data`` and ``user_data`` dictionary fields. Ironic will now use the supplied data to create a configuration drive for the user. Prior uses of the ``configdrive`` field are unaffected. 1.55 (Stein, 12.1.0) -------------------- Added the following new endpoints for deploy templates: * ``GET /v1/deploy_templates`` to list all deploy templates. 
* ``GET /v1/deploy_templates/`` to retrieve details of a deploy template. * ``POST /v1/deploy_templates`` to create a deploy template. * ``PATCH /v1/deploy_templates/`` to update a deploy template. * ``DELETE /v1/deploy_templates/`` to delete a deploy template. 1.54 (Stein, 12.1.0) -------------------- Added new endpoints for external ``events``: * POST /v1/events for creating events. (This endpoint is only intended for internal consumption.) 1.53 (Stein, 12.1.0) -------------------- Added ``is_smartnic`` field to the port object to enable Smart NIC port creation in addition to local link connection attributes ``port_id`` and ``hostname``. 1.52 (Stein, 12.1.0) -------------------- Added allocation API, allowing reserving a node for deployment based on resource class and traits. The new endpoints are: * ``POST /v1/allocations`` to request an allocation. * ``GET /v1/allocations`` to list all allocations. * ``GET /v1/allocations/`` to retrieve the allocation details. * ``GET /v1/nodes//allocation`` to retrieve an allocation associated with the node. * ``DELETE /v1/allocations/`` to remove the allocation. * ``DELETE /v1/nodes//allocation`` to remove an allocation associated with the node. Also added a new field ``allocation_uuid`` to the node resource. 1.51 (Stein, 12.1.0) -------------------- Added ``description`` field to the node object to enable operators to store any information relates to the node. The field is limited to 4096 characters. 1.50 (Stein, 12.1.0) -------------------- Added ``owner`` field to the node object to enable operators to store information in relation to the owner of a node. The field is up to 255 characters and MAY be used in a later point in time to allow designation and deligation of permissions. 1.49 (Stein, 12.0.0) -------------------- Added new endpoints for retrieving conductors information, and added a ``conductor`` field to node object. 
1.48 (Stein, 12.0.0) -------------------- Added ``protected`` field to the node object to allow protecting deployed nodes from undeploying, rebuilding or deletion. Also added ``protected_reason`` to specify the reason of making the node protected. 1.47 (Stein, 12.0.0) -------------------- Added ``automated_clean`` field to the node object, enabling cleaning per node. 1.46 (Rocky, 11.1.0) -------------------- Added ``conductor_group`` field to the node and the node response, as well as support to the API to return results by matching the parameter. 1.45 (Rocky, 11.1.0) -------------------- Added ``reset_interfaces`` parameter to node's PATCH request, to specify whether to reset hardware interfaces to their defaults on driver's update. 1.44 (Rocky, 11.1.0) -------------------- Added ``deploy_step`` to the node object, to indicate the current deploy step (if any) being performed on the node. 1.43 (Rocky, 11.0.0) -------------------- Added ``?detail=`` boolean query to the API list endpoints to provide a more RESTful alternative to the existing ``/nodes/detail`` and similar endpoints. 1.42 (Rocky, 11.0.0) -------------------- Added ``fault`` to the node object, to indicate currently detected fault on the node. 1.41 (Rocky, 11.0.0) -------------------- Added support to abort inspection of a node in the ``inspect wait`` state. 1.40 (Rocky, 11.0.0) -------------------- Added BIOS properties as sub resources of nodes: * GET /v1/nodes//bios * GET /v1/nodes//bios/ Added ``bios_interface`` field to the node object to allow getting and setting the interface. 1.39 (Rocky, 11.0.0) -------------------- Added ``inspect wait`` to available provision states. A node is shown as ``inspect wait`` instead of ``inspecting`` during asynchronous inspection. 1.38 (Queens, 10.1.0) --------------------- Added provision_state verbs ``rescue`` and ``unrescue`` along with the following states: ``rescue``, ``rescue failed``, ``rescue wait``, ``rescuing``, ``unrescue failed``, and ``unrescuing``. 
After rescuing a node, it will be left in the ``rescue`` state running a rescue ramdisk, configured with the ``rescue_password``, and listening with ssh on the specified network interfaces. Unrescuing a node will return it to ``active``. Added ``rescue_interface`` to the node object, to allow setting the rescue interface for a dynamic driver. 1.37 (Queens, 10.1.0) --------------------- Adds support for node traits, with the following new endpoints. * GET /v1/nodes//traits lists the traits for a node. * PUT /v1/nodes//traits sets all traits for a node. * PUT /v1/nodes//traits/ adds a trait to a node. * DELETE /v1/nodes//traits removes all traits from a node. * DELETE /v1/nodes//traits/ removes a trait from a node. A node's traits are also included the following node query and list responses: * GET /v1/nodes/ * GET /v1/nodes/detail * GET /v1/nodes?fields=traits Traits cannot be specified on node creation, nor can they be updated via a PATCH request on the node. 1.36 (Queens, 10.0.0) --------------------- Added ``agent_version`` parameter to deploy heartbeat request for version negotiation with Ironic Python Agent features. 1.35 (Queens, 9.2.0) -------------------- Added ability to provide ``configdrive`` when node is updated to ``rebuild`` provision state. 1.34 (Pike, 9.0.0) ------------------ Adds a ``physical_network`` field to the port object. All ports in a portgroup must have the same value in their ``physical_network`` field. 1.33 (Pike, 9.0.0) ------------------ Added ``storage_interface`` field to the node object to allow getting and setting the interface. Added ``default_storage_interface`` and ``enabled_storage_interfaces`` fields to the driver object to show the information. 
1.32 (Pike, 9.0.0) ------------------ Added new endpoints for remote volume configuration: * GET /v1/volume as a root for volume resources * GET /v1/volume/connectors for listing volume connectors * POST /v1/volume/connectors for creating a volume connector * GET /v1/volume/connectors/ for showing a volume connector * PATCH /v1/volume/connectors/ for updating a volume connector * DELETE /v1/volume/connectors/ for deleting a volume connector * GET /v1/volume/targets for listing volume targets * POST /v1/volume/targets for creating a volume target * GET /v1/volume/targets/ for showing a volume target * PATCH /v1/volume/targets/ for updating a volume target * DELETE /v1/volume/targets/ for deleting a volume target Volume resources also can be listed as sub resources of nodes: * GET /v1/nodes//volume * GET /v1/nodes//volume/connectors * GET /v1/nodes//volume/targets 1.31 (Ocata, 7.0.0) ------------------- Added the following fields to the node object, to allow getting and setting interfaces for a dynamic driver: * boot_interface * console_interface * deploy_interface * inspect_interface * management_interface * power_interface * raid_interface * vendor_interface 1.30 (Ocata, 7.0.0) ------------------- Added dynamic driver APIs: * GET /v1/drivers now accepts a ``type`` parameter (optional, one of ``classic`` or ``dynamic``), to limit the result to only classic drivers or dynamic drivers (hardware types). Without this parameter, both classic and dynamic drivers are returned. * GET /v1/drivers now accepts a ``detail`` parameter (optional, one of ``True`` or ``False``), to show all fields for a driver. Defaults to ``False``. * GET /v1/drivers now returns an additional ``type`` field to show if the driver is classic or dynamic. * GET /v1/drivers/ now returns an additional ``type`` field to show if the driver is classic or dynamic. 
* GET /v1/drivers/ now returns additional fields that are null for classic drivers, and set as following for dynamic drivers: * The value of the default__interface is the entrypoint name of the calculated default interface for that type: * default_boot_interface * default_console_interface * default_deploy_interface * default_inspect_interface * default_management_interface * default_network_interface * default_power_interface * default_raid_interface * default_vendor_interface * The value of the enabled__interfaces is a list of entrypoint names of the enabled interfaces for that type: * enabled_boot_interfaces * enabled_console_interfaces * enabled_deploy_interfaces * enabled_inspect_interfaces * enabled_management_interfaces * enabled_network_interfaces * enabled_power_interfaces * enabled_raid_interfaces * enabled_vendor_interfaces 1.29 (Ocata, 7.0.0) ------------------- Add a new management API to support inject NMI, 'PUT /v1/nodes/(node_ident)/management/inject_nmi'. 1.28 (Ocata, 7.0.0) ------------------- Add '/v1/nodes//vifs' endpoint for attach, detach and list of VIFs. 1.27 (Ocata, 7.0.0) ------------------- Add ``soft rebooting`` and ``soft power off`` as possible values for the ``target`` field of the power state change payload, and also add ``timeout`` field to it. 1.26 (Ocata, 7.0.0) ------------------- Add portgroup ``mode`` and ``properties`` fields. 1.25 (Ocata, 7.0.0) ------------------- Add possibility to unset chassis_uuid from a node. 1.24 (Ocata, 7.0.0) ------------------- Added new endpoints '/v1/nodes//portgroups' and '/v1/portgroups//ports'. Added new field ``port.portgroup_uuid``. 1.23 (Ocata, 7.0.0) ------------------- Added '/v1/portgroups/ endpoint. 1.22 (Newton, 6.1.0) -------------------- Added endpoints for deployment ramdisks. 1.21 (Newton, 6.1.0) -------------------- Add node ``resource_class`` field. 1.20 (Newton, 6.1.0) -------------------- Add node ``network_interface`` field. 
1.19 (Newton, 6.1.0) -------------------- Add ``local_link_connection`` and ``pxe_enabled`` fields to the port object. 1.18 (Newton, 6.1.0) -------------------- Add ``internal_info`` readonly field to the port object, that will be used by ironic to store internal port-related information. 1.17 (Newton, 6.0.0) -------------------- Addition of provision_state verb ``adopt`` which allows an operator to move a node from ``manageable`` state to ``active`` state without performing a deployment operation on the node. This is intended for nodes that have already been deployed by external means. 1.16 (Mitaka, 5.0.0) -------------------- Add ability to filter nodes by driver. 1.15 (Mitaka, 5.0.0) -------------------- Add ability to do manual cleaning when a node is in the manageable provision state via PUT v1/nodes//states/provision, target:clean, clean_steps:[...]. 1.14 (Liberty, 4.2.0) --------------------- Make the following endpoints discoverable via Ironic API: * '/v1/nodes//states' * '/v1/drivers//properties' 1.13 (Liberty, 4.2.0) --------------------- Add a new verb ``abort`` to the API used to abort nodes in ``CLEANWAIT`` state. 1.12 (Liberty, 4.2.0) --------------------- This API version adds the following abilities: * Get/set ``node.target_raid_config`` and to get ``node.raid_config``. * Retrieve the logical disk properties for the driver. 1.11 (Liberty, 4.0.0, breaking change) -------------------------------------- Newly registered nodes begin in the ``enroll`` provision state by default, instead of ``available``. To get them to the ``available`` state, the ``manage`` action must first be run to verify basic hardware control. On success the node moves to ``manageable`` provision state. Then the ``provide`` action must be run. Automated cleaning of the node is done and the node is made ``available``. 1.10 (Liberty, 4.0.0) --------------------- Logical node names support all RFC 3986 unreserved characters. 
Previously only valid fully qualified domain names could be used. 1.9 (Liberty, 4.0.0) -------------------- Add ability to filter nodes by provision state. 1.8 (Liberty, 4.0.0) -------------------- Add ability to return a subset of resource fields. 1.7 (Liberty, 4.0.0) -------------------- Add node ``clean_step`` field. 1.6 (Kilo) ---------- Add :ref:`inspection` process: introduce ``inspecting`` and ``inspectfail`` provision states, and ``inspect`` action that can be used when a node is in ``manageable`` provision state. 1.5 (Kilo) ---------- Add logical node names that can be used to address a node in addition to the node UUID. Name is expected to be a valid `fully qualified domain name`_ in this version of API. 1.4 (Kilo) ---------- Add ``manageable`` state and ``manage`` transition, which can be used to move a node to ``manageable`` state from ``available``. The node cannot be deployed in ``manageable`` state. This change is mostly a preparation for future inspection work and introduction of ``enroll`` provision state. 1.3 (Kilo) ---------- Add node ``driver_internal_info`` field. 1.2 (Kilo, breaking change) --------------------------- Renamed NOSTATE (``None`` in Python, ``null`` in JSON) node state to ``available``. This is needed to reduce confusion around ``None`` state, especially when future additions to the state machine land. 1.1 (Kilo) ---------- This was the initial version when API versioning was introduced. Includes the following changes from Kilo release cycle: * Add node ``maintenance_reason`` field and an API endpoint to set/unset the node maintenance mode. * Add sync and async support for vendor passthru methods. * Vendor passthru endpoints support different HTTP methods, not only ``POST``. * Make vendor methods discoverable via the Ironic API. * Add logic to store the config drive passed by Nova. This has been the minimum supported version since versioning was introduced. 
1.0 (Juno) ---------- This version denotes Juno API and was never explicitly supported, as API versioning was not implemented in Juno, and 1.1 became the minimum supported version in Kilo. .. _fully qualified domain name: https://en.wikipedia.org/wiki/Fully_qualified_domain_name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/contributor/webapi.rst0000664000175000017500000000543200000000000021524 0ustar00zuulzuul00000000000000========================= REST API Conceptual Guide ========================= Versioning ========== The ironic REST API supports two types of versioning: - "major versions", which have dedicated urls. - "microversions", which can be requested through the use of the ``X-OpenStack-Ironic-API-Version`` header. There is only one major version supported currently, "v1". As such, most URLs in this documentation are written with the "/v1/" prefix. Starting with the Kilo release, ironic supports microversions. In this context, a version is defined as a string of 2 integers separated by a dot: **X.Y**. Here ``X`` is a major version, always equal to ``1``, and ``Y`` is a minor version. Server minor version is increased every time the API behavior is changed (note `Exceptions from Versioning`_). .. note:: :nova-doc:`Nova versioning documentation ` has a nice guide for developers on when to bump an API version. The server indicates its minimum and maximum supported API versions in the ``X-OpenStack-Ironic-API-Minimum-Version`` and ``X-OpenStack-Ironic-API-Maximum-Version`` headers respectively, returned with every response. Client may request a specific API version by providing ``X-OpenStack-Ironic-API-Version`` header with request. The requested microversion determines both the allowable requests and the response format for all requests. A resource may be represented differently based on the requested microversion. 
If no version is requested by the client, the minimum supported version will be assumed. In this way, a client is only exposed to those API features that are supported in the requested (explicitly or implicitly) API version (again note `Exceptions from Versioning`_, they are not covered by this rule). We recommend clients that require a stable API to always request a specific version of API that they have been tested against. .. note:: A special value ``latest`` can be requested instead a numerical microversion, which always requests the newest supported API version from the server. REST API Versions History ------------------------- .. toctree:: :maxdepth: 1 API Version History Exceptions from Versioning -------------------------- The following API-visible things are not covered by the API versioning: * Current node state is always exposed as it is, even if not supported by the requested API version, with exception of ``available`` state, which is returned in version 1.1 as ``None`` (in Python) or ``null`` (in JSON). * Data within free-form JSON attributes: ``properties``, ``driver_info``, ``instance_info``, ``driver_internal_info`` fields on a node object; ``extra`` fields on all objects. * Addition of new drivers. * All vendor passthru methods. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8906667 ironic-20.1.0/doc/source/images/0000775000175000017500000000000000000000000016412 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/images/conceptual_architecture.png0000664000175000017500000013765600000000000024041 0ustar00zuulzuul00000000000000PNG  IHDR[AsRGBgAMA a pHYs+CIDATx^|U'd'72d*ZE(ԪEت:JZwu2D =Sا-v4=ɪ?# IH@H`\#11Wˤzmw18 "5`}_Gƞ+o[f}@b+U2K_z}yg12{/ʕSeϾ y)r#Oݷ_rrrƕ"Oɖbq$XXM,_V}Yb%}Ͽ8qqR>lM5Wn!CfO/smi٫u7I w ?LGSE4j`n]N @@6LZ&_̤Khl# x$cF~4ɯ9$2E!<"P?Vy?R}v={ɃZ{EIINUucHOY.-9  bd#ѯx [Y-'2KǗU5+❂4}YVrUe{|}~@0~{Yr}%$<|w3_Ni&guTI,6m+^Jߵ[TR@()a9nɡリe{i:1WǷn,%ya+9aS@/#ľhaHr:;FYYc,;>#G{iOfv]XGgV.\Wbiu_FY8^]5Hڵ+lɮl,rԏRޏ+Cb%Zڵi)/=iNs?}.C㞐^0^@+`dKKJ\Rzt 5GbwI~( *e֯JequetcA}J%D~Y[\O {M~\e̓,OڔkFKm؅hɜ?JFΩs$!>^f}@^ys<55;䞇8S6/Ͻl+,EΤi B@B(`dˏQ$&]j˿+~O0iX=ޙ"K 'JޏFՓZZGb]yt9Y]ztnҘֱz|i~]"]}8:gi ߲Vd|޾AW.&ָK5s"mj['(˹UX]^:G Ҭ<$55E{MdW[qҨA=] ^sB=8ҁN#OV{J\׮طtM;UY2VX|3EEk-}Fn_t41->e͂x.;jha)4[և\Ew do],cuI߳5OEͱa[v>i?[x+9W˹_̖ 5hļ[-̚|4 |Z᧜n1q 2gxb$y;7s̰A7Ng/z@ 8*_Jޠr/y):i ɊF tֲE/&8ۇRTNK+1|3GorrՓ&|``S?or# + u9R ..l۸WsZY)v1<QYzHA@ɓ~O6Nڛ  opv 3`< fJ> y<|t5WdӐpz68avPN}WG7nhql]Yf%=REGY| cyTYvG_0.u\=IN*ZlBi(=ect@@jFE]ŷm]f$;n1_z?lt´&:hU/ŪYͬt.+҇?\d;(zz78䭧ŲTW쀒-r>YwEy}KzY l E m6*9TaDn݅kK|\^ K>r8D{@ ٲv3nmmP=2OS   n'['.++ K}2:@@!e  sXFX,#, pV͙   @HB-L   $[Qv   0#  @T lEe4  Zd+ԏ  Q)@ag   jP S?  DVTA#  @HB-L   $[Qv,KK˰@$[C+i-l )ak@ H@|ɕR4a! ٲol vuILLLۥ T  @1-.@@bJe\".HZSRҪlT ɖcD@ 4l^&N@c٨U5@  x Erdrpo:0-M< 8Wd˹ @3]$PM(uA Cb}cE@ :H#ΌjK>{{XD@d$[\ d˟$\@"Aa@@@W   R%   @5  @d+T  lq   ! *U"  $[\   @HBJ      R%   @5  @d+T  lq   ! *U"  $[\   @HBJ      R%   @5  @d+T  lq   ! 
*U"  $[\   @HBJ      R%   @5  @d+T  x   @p '!(!xf;ürI @IV۷֭[&Gd+<δ ppq9HeذaRn]_*Y͞=[ 9%)/b@眀`5녵KÆ &Gd+<δ p_,l޼Yk_P h-G#X~Y7zh1W3f/]TF%Ç1cƘƎ+͚5Ν;o}+FСC%''|33gmڴ'x¼Gi+Voٱc\{%K.]k׮裏pN93r@   @8ƍ1b MZ,x=<ܹs=qqqW_}su_SNCyliժÞ3gFOEy㱒!ϼyh³fO?֭>|ᇞkƴm%t+93m-k4im޽syyc%r+y3='M[ `f+Ԕ@!.YYY'T鬔wJ䦛n4pРAjmϥRtϖ5j駟.L:U7o.V&K,+9KvdΜ9r}ٵw}!mf͚b%ib%vfVKgmV켊 W@ɖAo@h۶Ix߁O>-[JrrqUCjժ^Jf5MtB̚%3ɛ~Y3S2p@iݺ,_\ԩch^իW5fѣts*^TA"Q&@eg ʕ+-R=mJ~䣏>2 O4;=wb-?oYnٸq/,/[f+77לVf͒/\?KV=Y @Ǝ+͚5Ν;o}BkO<~Ϟ=Í̟?_?{EgôB2Bz6ȈK@yGo>{-@6g/q e +@g vlI AaK_$'4n9lm 3XF̸k@KvS]ٗY5=5, H?zD.^ݱ),ҷHZ%>F@ l9$}ܳ}T=>$S) ;HWFD@톧٭`Lڿsj\!Ύ|72@ 0Uqèaƌҿ-E\ÇƗ[t@*[{ ԨSD-@$[7̙#^ziԌ7q7o^4 F@-e|#YVӥ۽i*8@H&Xbt99: &w>wkǦ/]UY8@$- 6l ͚5xtu˖-0=EOsjK"4C@t-yLM8}}1eDe{ޙPҝ !D@$[&@; TX2(svKg*55p H, nisړ~: 3@J @p@Mޭ{K=b~mNj @H%I= Н n(+caZ:/   $ ZvVd.jm [غt@y$[΋=F(@\|TYOl[_l݁PgS*VNA@t-@Qv=els=]@-@lQC@brqл3@XVIJb>1|)((pɖBF@*"PAsؽM/܁ZxfddHLLԬY5l0ٲeK ՔxY+8{|3 'f͚IϞ=_6 :T͛#8pX1vXc=&[޽{˚5k|Ǿ{Ҿ}{iӦ[.39rtYtM v ٲC @XyZ;%:E).9s&ꪫ$33S^|Eիu]&Izwdܹ2|߫WO?]}MթS'%֭3eƌO˂ ̱{zHMf&M$~L>ݜI&{$|#,@ x$[&@ UwPo)SɓeժU&[L'u&7tԩSeҲeKIHHAɄ k1HitךE8=Og4 ͷ͛ˢEdɒ%ҪU+ĵkN̙#wɻk?0zQ)@awzSy_[N [! +Ə/?7Zj{ӦMRN5 Hǎ>'u]WkΫRp͛7Kbb䘯;Ot &e˗/7hտ3@pɖ3D/.?)Z\}.فoa:r "3K `٢K/CY/))P-zə 0@3o63i7n4t`/^VC@&?vA%nTm۶5ZJ}.On9]t1j92Ů]JdʕrK>}'};SeEg@1sh*..)9x]VZhjB[vĺD?D֢]}f+)K/zmFs6XkGjz#wlllCUr @ AU~} JrrLsU?EgիgR@;l! 郝~&[dD->>|ؗlo8i1[5e&_ٸqciԨܹ'u.Oygi-4 >{?K/仩<җi G@!2`(RGz)_D[t&A[N΂=fViIwa2IKgɴhm҆&V/_͌/2bv4  ZR{}YWʕ}-z]23W*U27dk믋-%'zUYOn?iC38loСC!z@@H lEH^,\Mלzt!_:i]v 2U4Q&2k]r[ݩJީNj,>bɒ?mx.5cG@|@_9cn3XtOO|Wz_lv!zGh߾}f_2 V;zJynSN1qyɒGK/ 9ta|Fz|9oӧ,\pW)vK|tYǓ{j1ޗ\ߡ|#v_P⌺I'ݔA֬Y#\uU2ݥ\}N.tajĈon6,}o|?`wg8z錫n?(Nu3lw:x_=‘m;%o=D7ޡo,D-%--MY6(!kyݼ|ryM%c:hoB׭!fk֮=z֣<د6vmηf|mL8ѼfmnPUk uĚTVc%H5R fJ̱f<kfc"d}2Z*."uaJu٠h3Z5:3e%=E7V,FZZ=No]Uo̲a .˳֭{³f͚ pBCa2yYt_$[anɖ. ai%vohkm۶5޽{hvl.~IcE-AaIx_BqI\r{ iSL1p89 d' ř< ֲZLmڴG [~ uT7u0ѢErE%@; ݛ V];k74,{8 pjGo[\g{0^w@( 96} 0c#{gviN2Q YT {xFvr'G߲qP7 l߾<0aÆˈGU\gQn`Iw20ܑM 8  Dc+uV^9g2BgǏ#  MHl   rv=  Td˦[   l-gǏ#  MHl   rv=  Td˦[   l-gǏ#  MHl   rv=  Td˦c$;;ێ]O t@@-. 
5k&6lxtƵQF0=E@ @ ٥;w.ݡAXvh|)   k袋d̙g4Pڻwh:cF@B&Jjb##n`ޱcmV,Y"mڴ!.Y.]ȷ~+-[tɨ; |W!8#pp3[nbP^=_*#FCU @^^ƕD+ԍ (VdOv-Ǐg+ O-Md٢MRB/σЋ}zcZpJ'b3[yq%0}t8pqɓ'O?Ķ6:t{wӌ3Y:x饗h3c +i׮祗^X J, E]T8п P#<"V7" L`jR dݰbK=|תUߛ6m:u*ؠAر|g2qDΚU*v^*U7o,c;<%Y3VM_hSRR|~u@?ȧ~jo+֌$''qjE=|Ќ @$H"N @$l/ӂf=WIIIfh{.Fg5  Xd+T  )20ĝi0 W 끮q۱c1)Ž3rel} Wv)`]Ϸy;Z7ӭԪ!\: CLxsM޽YOiٰVԭ 5KRB'<3]+g*IiV+ұsTǹu6?IkT@<>lu݆ /4֩JpȜH +Gm:JChu`AnLgw6mjjgr.Ow>|ŽW1e{DcLA@ $[PM[ y?W}X _eTtFk yZ5"͚ͬX}>{رҬY3ܹ۾!zϞ=̓w!C &M.u=ޓۛkiRR7ph򤿔Xl1Cy8pYn~sn&={X}>hgOR8.]*FǢ1cJ%Xvifb1XS@>gZ+mڃ&kyi~}zƐ'777hmP}-v큰vσ_~өS'ϡC<[ljcxukYJSIn߾c%qJ۲rp챒(a5)((}9ƍX 9zQFbX966NS\\W_X+vc5}_"@E*s {zKv=jժrÝM!a=ZNI+١? 4H'u]WꅢEuYfɓ2S h5s   6m9BO>Znb?:bt/]wkׯ ҄?33Xeeen-3Ff͒K.$a 0@exr7-"7ntSIjEgު$1EљgyƌZx>kF4s   6m9F74}g:.+f塽{6\kNz!wuT\Yz%Ҷm[.3%/x]v5;pf}XRZֽTfN4iӥstS/3`:5No}k9@p 0h\0 o0Jq>TX2`E٤Wh:۸[EKIZ'Y6M-EY.5ꫯߥ7R1׊"'2r=e' p99z;V>ӭodCzڟ+2Hn}#{ $WXU%AMZU0*R?x󠬰vO?ԨQ<8<%R1Xi30h}_:3' CҤX Ͷn MDuoWڇFo}{qg)A?.R1@0 :$T,:/يθ{Ԛh}ٲD܈69qaF&!qu77nDո @$[ =;3Zdn+~"g  )@U&x-]:HqpڟA1@@$[6SapSX?7 L@@H6∣ ŝ[wFQ! @$H"ﰶu{w;>-VOჅU~NvyڮtvU%>.F=ٝQysTn~ @ $[=P)vS`^߰k,Wfwd>:({sVwTʩt+8\K$UB6# $[\;MٺWÙCO䐭q5JK8~$Y2WI6MM( Zd+2$yZ2#e[/I!OdvG(/>~ok^Wkoٝ#k\.2c~!jLy.#ֿ!7_Zyvn]?]-?w|1C2d!b#׃f alE%ѽm%Vݴ.vĞq" يth\1֯ ][,b uQĦRlھI#Gˏ+u?};wtڎksWϐqco0G왇GȜɞ[rcYBbއ]&,~ 8)p[6U?3+Pfo3 %ݢ   2mDn;/%Eކ~o^gjeɪoHuȕhfbm ׃O`ʛ4aV@t/=Wer۲v֍Sl?: N-w5*F,nOҭxE˦_0طK^z/19$@|U}d-JS 9~ok׹Ԩ]ߺ*ںWXK􏖴5S;[H̫G2+*8Q@ZuYAd=+YSJ+ jUMc%z@p X?I0\FY;ߛfg-|mzfjn`6x_ie'IRzblHY}R)77 !7k[4q{&UZ3^OYDf~'׎|ļze[x1{PwJSig"{߃/Eu1)Y]u3 ܣσ_v`ܹrٳo߾n}c|\gC{Aqli3yC|LO*&MYVԚ*]qU?|_7$͟dht4yLbKsM],M_vO]/ p灓"`4h >\x y.skx_Yt^,#θjԧutv^1~KL{9y_oc%EJ޿UF]>!Q[fOdY <.…裏_~cIjL/7 ~ @(XFJݣue. Yu-+Q4hZ,!zt }Vu%66ĪuRŕl&/Ou^iи$%Ո2-*+H :$V+W>l%7vә/M̾ eHBK]V$O^؆Y7DKzgJjСCM3B;qO߸@F2@8@:坱:Y/K3uX X-dž#h彇kĈ tR! 
@4Ѻ+=Xeh7M9E(Mdk@b}֋/=Ze ;ålp@ *K#N@D'Zj:.A%2@ jH6 p-]:زeː {BJ @ lE]0 \M:訽piFr  Z@p&ZVyw t:å-|r $[\ (:5(f%I= @ lE_1 ( ût0܉.3fpEVi@ fG`xw)> Wdk@ _ p+q @t lEw= `KHl/#Fc8R( ^/YQ1-r觗 gTA:W@kVHǿYmMt>uЗh/םsSC6HE/zy{p>o]:?Ќ۟] &o.Fc?a<@.$[v(Q`ҭkF q** +3Z+בMɌ5Ⱥ}O:}`zd,\P%R4 Rsi\OerSyJ˓z7(9\kguHʩR%]Bˊ G.@eEq8$ru$?Ͽߞ;,:)8RTF<c `WuIlL%)fVP DKgv^x&8YƋ2TC2םx\ pzYٜl[֟׊Z/;V;`y7ЄsƌŎښ)֮EwHNAT^]N@ C+m ِi1Fr sh*'H* $5>՜x$?GزL[ q W^-[&q:Gt]GHaKftk[7Jxߤ&LX{DzYu7;zw븇K[x^/$0~^nXȈzÚ՗LV0.)ԄkgVLY^>ٴUrIiҢJf9d!48_w_okG)~gVo_VJ&iދ> [PN;}< ,@j 8:^f2B]BhfK>Kg׋e^=f[2Z3Q=hR~[Lz:=byH|}6Xl+*5;n<%zڴi2qLruIqRJ(Z4Il @wWsuX$[,.\!6|K`y7ǨT)FԬ[[tk VƪJ Վm+-I\),Y{8rܿS7TM"IfyD},>ZJ8S5֗|y7(u-<0#٪"ILɖl>X|[rނc Tuނ㒭:/M [ɖn}sOY_E[PTNJ\ܤYN趒i=-3CV'gׯgH/#iIۥ pEOCw[zm&Ѫ#5&V5? <ke,-kVb̌.6͹'_6NCߙ[>Y\E']& ӬI=DHJ|u\ҢVvn۠YOլq[VlX_xV y3}]XfR@k)aabh]hw&Z-9NmҺUT6İ~$;gɊ{17vh<Ӻ'SI$~xqm8o-.[ ˶R~e9My~S9S߶])V!LO$$JJc[Ϟ\C>}9W,MIo/@ $[ԧm@RK K!VpM2E&O,V2 -b֭tM&SNK˖-%!!VB8=Og4 {h}͛7Eɒ%KUV&k׮̙3G>˓w}w q &3\$Z'FI7]yѣZj{ӦMRN5]4h ;v>L&N(]w]o^*Uo޼Y%''|wyK5)[|9GB".@@ xgt#R"*5گ_?PgQ^r%B.uh{赢_wyWTP c et̞\<]+pؙGKծm)?͒m +M \σ͊G]tjժU2?vCt _鿝YFÆ %99y:[EtիW,5W*"uV=̖scG@pc M HwiRH-~O:>כ6mPR pW . 
tP5-̙rAg@[ l*t@P>ػtW^!Cӝ>F7,( lq DKa0gtFK̙B9_`BL( 8E$}W0-' "Yl;V.\hcꫥE?OҤIML.]Pw|G}-=YYz>'k{oFkmG);waÆɊ+1" 5-!@ {g^0⿀nϮ;j23\s|C+IRwO?/R}]Yv2e#<"/ܹSk6m4i9f[xMFK5G" 3 P@YR}3Z駟.gy٪Q҄KyfYIa:cٶmItvzZO-SN5;곸!KvdΜ9fLzIɖB@(ϦE-f&?v[tU;fҩS'xC:vh?]5-z>/+''|wy2p@pRNp߿❧@ $[!@J@xgtd'Vc;^~?YwnjѤKg DV 0]gn/,/[Νkft"ɖ"B@*$}6͈+:ȌV:Y'wKaiLSO=%vԫWlѻwoiР$$$出P7С\$k}'|NN/C cp4mR ̞\<]+pؙGKծm)?͒m.m1>!^w{*WkXg40X~xx[ҝ>ƍtӌL9|lR4袋mPnqzVѢ3]RÊޗ\rql+@@{K nAK%66֗hit }Mgtkw}n&^ݻTRDKU4ъ"@lEK' p1x3Z:h aÆ9]r{-/N ܌3@$pi5)$Z6.܃l5@-8@p}+kCG@@(@Ĩg@z_-\ƌczOHfͤgϞ/׆*bb Sرc=nZz-k֬{IM6n-pȑҹsgY6 YdelQ/KfΜ)Iz*̔_|Qz%wuIy;w̟?իO_$Y:u]n:~[f̘!O?,Xw^y衇dڴikҤI͹diM¢>H  kH\ZȔ)Sdɲj*pr-[nrM7IZZL:U,-[4hL0AH錔&L^{[4tfK /|Kk޼,ZH,Y"Z2I\vdΜ9r}I^^ 8vaCZ =ݰbK=|CU7m$u]!UAұcGdĉru#۾}{TbyfILLuy.AԤlM !`  $[Q| ,5lѥ\R,-~IIIfh{Zv^&:U~}Z7hذ$'';OgtyhљzꙥX<\xGm#Z.S`?ˈy\}lQ+I,: f34) $ XDKphM6D@Kd+h@@@ L$[a@@.7E6zO@pɖ(A`޼yfHݝ+dh@Vȉi >|X>Ѥ YdelQ!0tbeŊf@f͚IϞ=CGK{g…W_}hBƎ+ӟI&a4{Ҿ}{|-mG>yȑҹsg6lEgv郐qҥ2j(>|3&* @@}$[)#2O?]}3j}VֲeˤSNfywdܹի}:4m޼-[Gr=,=y/"G޽{ӦM3_&M?POntɞ&K$hHƏ/5k֔)SLyE|]wEYD. Ed-dQ+p5Ȍ3̬&=^{yԩSe9X 2h 0a_NyffFfKN;$hZ>CkѢEdiժIڵk's̑^'md}֭tMW9@ɖ"BPAұcGdĉruי6m$uզOAAA۷=}hVjbi•(9993[n-˗/7jտd}UV Kd^7K@gtɞ>,qp̙f+;;[f͚e]ʧɎ~_-]~o0`Yr3^rlܸQ}a/,_Y/]zXZ)@@; l1* 4%}NzT۶m&ҽ{wuvtJ[t9.ڵtAV\){>}'};SK>C@(㱊;>2,~%{|T::hM3tƫ~%֩3Ru-W{:gsVѢ3]3K )eџ:r (7m:@e g2 /-J}% pEuVtƝQG&B%ZPDKϭR |he1P@@E$[. &CA@@l'@@\$@`2@@ɖ}bAOpf9t@V#@   J-WA!  @H"G@@W l2 @@"-@>  Rd˕aeP   iHG@@@$[ +B@@H lE:   reX  DZd+}@@pɖ+ʠ@@@ $[#  +H\V   يth@@\)@ʰ2(@@V#@   J-WA!  @H"G@@W l2 @@"-@>  Rd˕aeP   i G`ŊcǎRk}GAn@|+*7:Do""2"4@ l95_,k׮-V?nײe Ju `G>/ `Wd+޵jՒk^uAFaZ SO=d+T]ҪU+m&ժU akTvk??ңGIlڴIԩ&NlAұcGdĉrukuֲ|rs&\/ՃRRR|Ɗ㑲oIޗԧmCd+q~7M ~{[zP~RC]zx%u6K&%%~mUZx}f n`-eRKx_Zn_3_) |'zgնm[iӦ}Z `OhLg1|I۷;.rzKyYgjޗ~[ Z 4#eӁm^9)Gs"%?@bl&H8gz͞\<]H۰SGKծԥb},ئp! 
cmU vK XQ+(1Hl׸TY;ltfD4qFW$Z}/ZoY'g~8 ֶGr$ۚݣW ZrfWX'9IWI,= CjC>N(.dK;vr9ʬZKΨIJC#٪`pUIՑe_Hb} 6غ%s$&*I;dˡL)Lڰ 5EmP_ٙ8vid.o?"P˶$Zy[ۙ[>d1t(Py.r[7h9<%k,dgj$o<Hg4њ~l:xD+8-ذIM]( KtFp1f˯ Mzdr8e#_-|:uF!oB*|A `$[z.K@gvgsנM-t*|EhAt+0pvWT % D$0AН挭 n=Z!h& fܣU!B۞C D$])<ЍJwS`?:Hqlsvgd"lw dvUt{w;@v~srE7PW}B?jyOOVNF \;՜-iuMwo_W?y}7B /?OroȽ}.%k27/ fԅ8Jdp}9wοˁ7]V*+V|,WBB m\z 6E"5{(]իH59Տ?lsx |;<}͒!##\v˭r`y{eVq H8/7O{<ڣ2dԵ OIdEY/جCëcɢ$rxsZ8<{2﷎wp۵s[+ Ͽ-lQ&_gy㱗Eg*:U`_$%>Ś٪zZYK{xg=ߑ^g; S1ˤK}=?by\3_&y{p*v@Nv5a_ gs^%GҬCqRGuZ3 ΗG >ؽ迥7ƌ/gΔdž_#c|ز]{+Ryd^[صK^r+ˇ?g;zb! HkǦm QJZٻG0>?Oλl꯾-)//_[BX />M#Ix7cs4ƿdl)FnN{1b+Itϙ+҄lͪtU' -$]vp0Ue7ߖX'_̞+[앚i˟>) WdoR%[oֱ̼YsWяGWJٕq@46*B PkטjZҡgb2T8YF<:j5aך{RҬ_ 9Ǜ0-yy򃕨}9s/=]֭\),2W󷏔-w'O&s~KÆdMݽuLϋV}:TG"@UqۆߌժWKb*ֿƞzу [˸O^K|˛X\6Դ~kߕS-Ƃd`ѻ5y7Mٴy>߱{v얋+( P]?m;I e}Aֽ[$!hY;[|%IS͡wx3s+66~IuִڰYآ^k2Yd6-[+1@ b6k6hnCZu4KoE|8M{-s+v|֡Cr[o?>+ |U+͟V|!5՗/T5o.;~mXZO*l.Dil%v_oo-bx4Q)@U$ o<%ziyZkz{Y7vy4iLRK5c2vq}F^fRRo-_MAyGdrd_vl_rER2nf |fvh+ k֐Jgs6-%MbOLkI׿y+7Գt{IZ5#~̼S'@0NZ3H޲ZI/MxJ*O=UnY|=fKF-WN3|+lC g.I{(reTˬs:_ W|س[^/rGGS25˗:@b o cӬ0S:5[$!j[^I].UkV+=Ȟ%-s䮧,<5^:Y:Xuo>Rɺ_ ~b5k\*7 ɭzMȿ|dZkjʊfrg.8M93bLKެf3Pߣ9͈< ;9bdx)>>sCa.id%U:'+?'n4[7vx~I.\"7yvpײz?p>/lzi-;G Ru#ݏ\tu\|?[A]a}a{hm-{o=֗.KM~t7w*ԑ|nf,jյ?L}y /}RtxݽvfVJs>9/$<\xu2W|1m4>TGEKov݋"k˔.3} O~݀/"L:t祷k jכx8(X9%dΖFKڟyyqSxU/ٚ/###]/ԫ+yMg)c .@H#ӄgJ-1L}ɹr{ .9{$95YV.^._Yd=khh,qyٷdkSfZr3${`<[tiwJuOOi=leSZb+U+{cgIMeJurĺgK8J`mԕvc Nt .  ٪^Y;9RCPήoShe}*σ=;IϔI~^[WJ7!)Vpx"HU!l@C(N ;bluUrE]d+䱩NH6_ZZxJ[:@?L>!*:Ĥ8Ypi!J xpKd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOFQ&#'Fp@pɖ3D/@R$+3&}8WRV!,@g x9/`8?[SIzN rr;D@ɲ{ǡw x{3ZUHM @ lEq:8_^*s!+p`A?5@ $[n"c@R=IkրOUBfgJM $[F.h߭lZOvn;0H I3G Ndu!e@ m:q9e[RVv Tdt;[cR9"q. 
@-.@59CGd5drPrA|=#yf&ow3I,MRAZ>@ bh+_GKծC܊{=xH;q;ሯncٷ+zWds]¥ρRn41ߝePM8"9 #F֋?Iq`t3 s'_zQN!NZLUq2kùL@U1?ζ>xWyxvf}^v<: +Kzͦ"IR)6&8*G-o<8S"%7Rhdq t,# T@".3[mzr-Lxί'lrDe-/)V @XHL# \WHV-fje 7K44ߏ㟼Pŀz@J_ @W&![T6nƣKΦ.@)@̸k@Zwmfn0hP'@"&@1zF.)M06kd`@ @8@PFu @$H"O AR-I @")@I}Fn )̭7F Dd@z@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@"'=SdZC9OVPڢL#@HI vH "X4Mٌ`.@( ي 3D@ɩ&yNjF3U_nɮ퇤AӪvƇ `3- O}RRٙi*kYXؽA*,@* @U' Q#@5f %qiKJFTs! -Hl: Pͪ;Y,,# Gd+8Ԃ `ظJҸEuӓTkš*۠Wt@ ZH5p@V5:vo2,@l9%R@/7}KZ~A Jd+Tԋ 1/>^i@@*pV chuthqq' i׋Cm8"6dȾ݇%9nx.}Gԭ? b݃uTj5HH6`93#"MӨ8:fx@T-_Ynٷ0qۿ'K6W|^ts@" $[FQ$?񯒾 ]A*{3MEe@@$$[\ @|6̍Ѻ˕KpɖG@[{89D@gt)(@$[ B*apcvPImSٶl``@8-. @ tA;v -! e w >΁1*@$[."C@9Ze 9ֹ ~-ǘ"  @H"N   ~-ǘ"  @H"N   ~-ǘ"  @H"N   ~-ǘ"  @H"N   ~-ǘ"  @H"N   ~-ǘ"  @H"N   ~-#q0@@ يx;rI)qȾi@@ ي;fɩ r`_cKG@@P- T,w}? _`s咡ss_I˦ é  z@FUdCW`ApZٴʼn v ٲ{Td| @ -eؓeד#IZJ@ يθ;n՗Mkmw:.{lZyO oXe[-"_ž/wrR1.XZX>ABV#@~ $4_lVKA>[A@`s,dӖuNJƝpզ'ֱ:L[mef8+zl P @XHL#Y7Uι8" &;6ÇrxW0 se~w^dr; W Y_eۗұw?Fmε]C@XB壥j!n%׭wl9 veYʑyI~z|PFid*^+L}fIi2o<6.YhUjr4@o2="園%f6g֫Q10 ĸg00d+ .jE,a( ?|ֱdppAL| mؓd+Lq! ;!rg\ 6ݲZ1X=m讄ǵ@ *H" p/ N9Sf\$=ew؍ZHSsn6tӌ;Ch2cD$h &SEf䪑>doÍcۿݘbs6 @lEI& &÷`׽aTpMZjx.o]t@AF04KPmTek 2l"Gv 266;a wƵQ1UG@@!@U4NA@@ *K#   *   e l%@@@rlS@@@H   @9Hʁ)   @Y$[e }@@@$[@@@@,>   Prq    PVYB|@@(V98@@(Kd,!  
Cdh8M !1i]~ [?8 Vi@jp7I{aHh@ P@8p@I5]GJub  ي:m"ahtJu s4M@(V98p@ZDӰӺMY7UN @$[6 B-j/BN=FuEGd%A\ )RQ FC6Ĥ` Hl S@="Naux0CSƨZdiҲtLՔD+4ԊU cHe'd,-UF^brA7k/PłwW[vN!_d1f #֝ȱ̌76|K֦?Dɖ=B@g{⪴Krʭ 3OLCVP9 @ ٻ%(Im9\*%(|bR{|;o $[F8^ 6 IVAWq2@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ 㱊.f,-Uv|^goxqs g}$&-Ánc=#qE_B݉XCRI|kI@LLcw x63[N}F@!Tu?rd"ɳ-'&ZZ75F[ΥrW%s[=h يH3N@,?e ̭\0j.`RW4lEC# ?)i2>EԉkH\Z @=ZDҥOd?'B@t3 -1 s^+Kd/&B@@tA;vs` l*@vޝN#{90F@HBJ Dۻ -# VM@@-.@@@ $[!@J@@@dk@@VP@@ @@@B @TD@@H@@@l*@~~jD@VP#5k} 6Ll_q'k@@ $[7 ڵK#钕%ƍ h_C%   ي=-H ..Nt6jٲetR5j >\ƌcFOHfͤgϞ/׆*) 8PVXa>vXc=&[޽{˚5k|Ǿ{Ҿ}{iӦ[.39rtYt[   (p+bpM8ƬYd̙ꫯ窫LyW^r]w$wޑs^ZN?tyMׯ_oN:Gbd_~m1c<Ӳ`s޽{塇iӦI&ɇ~(ӧO]ިI&{$,&WDp3 Td+ H*R? -DL"'OUV[n1]֭tM&SNK˖-%!!A $&Lk$R:# ӵ^kOZL̖&a^x[k޼,ZH,Y"Z2I\vdΜ9r}I^^=?<)D@ =ݍ5 Z}T0ĪRb5h"a矗=zU7m$u]!UAұcGdĉruיcdIdپ}{TbyfILLuy.AԤlMnC?pA@t`dތ,/뽾_38pTt Do'DVbD CK4aG~Y*]zx%l.KJJ23TZIǖU=`z&gz.C2`9p|f&mƍflrwcu+77׎$Q'>~lV'Y׵ySdڝa{Wso.Cs_@޽;J.@t+Nr3~nC7 ܶkB{ڶmk6HMM}jz_!ZU{cstbrie]v:ʕ+/>}ȓO>)}5_wy#;AxlVi$)rl)M.]H5ENMIT{(DK{OMh <9,yKIݚMN%|1kcѢ3\3_AVNmY[|YV#?ج{u /D;hWgtFKw Ǹx",VNFZ;M?H<+qpJS\/NTI|Y8*5yкde:7Ӥ[& rמL}4ْ`ֲBMwn"7҂~5k{kYwR,_lo9ݚz|G~ˬzudnkZO:t)&vZיG1wJKAŢ<͖Veֲn56ԖO3XMklivML=3Dm5KO?-ovdK;n8sO>?R7nҳgOOKbH;Aev}p/gy9><(?c?|ɈDo2D(yrYXGnyC.LL}]OPz9d :5Rdŏere%ZȐ?#SNm^K^I^۸[_D$.ӢuotU7q=ϒS_*$MK:K^6IJ uavԭ6|~w+ ҁX7~,sfu|QvF&zj,/$OiND%zNԣG9묳D/R@^hP|w!\4}v&]|4iD.Ӗ~OoT%W_}m|T"eOZ_q`ře԰hэ+ο.A<=W쓄Xiި$nӮ v1Zt#2L7(O)3sʺwY~DA ي3 VNoj׮-'NlQ~믅;`TN*6ɗnۄ ̡ݺunD+ѥ2p@|\%iۢgDk;KNZF VЫWI.HVuOH]J3]DK_Y2dC+ov ѲK4G$H"OۮН>T4iYJG0F*}@.C<=_6m:uzs}$@-{ k"kFDh ي3 @7xC^y5rHywf͚ɑ&dh-[׶Vү_?9s3;;,CKS '0͛voZ(>~ ,@ @8Rt aÊ}W [je6hӦ_ڵ+ѺnݺRJ1bDz s֫h<#`FN@JQ\ ,6$ P@zzI]*ͪ\5\~.PC"` 2‚F Cgh$V@\@OקM|ϋD1L@@ $[ԥn@@Z =G@@P lR@@@ jH6 @@B)@J]F@@ ي3p@@V(u@@Vd+jC@@@ $[ԥn@@Z =G@@P 
lR@@@ jH6 @ ĥrj(?;Lda 8A 6I!P)F9Sd+:Ψ@@lJOMi l9,`t@'*ÏNU}1S8S0jC@@lr:t PsDNM"6"D-'F># &Kj)]G@cl?r X @@ ÒPTOa-Mb+EA$[#@@Y;$w ?Mw&a nhtоg ٲw|  8T{8   ٲw|  8Tdˡ   `o-{LJ!  CH8   ٲw|  8Tdˡ   `o-{LJ!  CH8   ٲw|  8Tdˡ   `o-{LJ!  CH8   ٲw|  8Tdˡ   `o-{LJ!  CH8   ٲw|  8Tdˡ   `o-{LJ!  CH8   ٲw|  8Tdˡ   `o-{LJ!  CH8   ٲw|  8Tdˡ   `o-{LJ!  CH8  h*~ IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/images/deployment_architecture_2.png0000664000175000017500000011255400000000000024273 0ustar00zuulzuul00000000000000PNG  IHDR1 AiCCPICC ProfileH wTSϽ7" %z ;HQIP&vDF)VdTG"cE b PQDE݌k 5ޚYg}׺PtX4X\XffGD=HƳ.d,P&s"7C$ E6<~&S2)212 "įl+ɘ&Y4Pޚ%ᣌ\%g|eTI(L0_&l2E9r9hxgIbטifSb1+MxL 0oE%YmhYh~S=zU&ϞAYl/$ZUm@O ޜl^ ' lsk.+7oʿ9V;?#I3eE妧KD d9i,UQ h A1vjpԁzN6p\W p G@ K0ށiABZyCAP8C@&*CP=#t] 4}a ٰ;GDxJ>,_“@FXDBX$!k"EHqaYbVabJ0՘cVL6f3bձX'?v 6-V``[a;p~\2n5׌ &x*sb|! ߏƿ' Zk! $l$T4QOt"y\b)AI&NI$R$)TIj"]&=&!:dGrY@^O$ _%?P(&OJEBN9J@y@yCR nXZOD}J}/G3ɭk{%Oחw_.'_!JQ@SVF=IEbbbb5Q%O@%!BӥyҸM:e0G7ӓ e%e[(R0`3R46i^)*n*|"fLUo՝mO0j&jajj.ϧwϝ_4갺zj=U45nɚ4ǴhZ ZZ^0Tf%9->ݫ=cXgN].[7A\SwBOK/X/_Q>QG[ `Aaac#*Z;8cq>[&IIMST`ϴ kh&45ǢYYF֠9<|y+ =X_,,S-,Y)YXmĚk]c}džjcΦ浭-v};]N"&1=xtv(}'{'IߝY) Σ -rqr.d._xpUەZM׍vm=+KGǔ ^WWbj>:>>>v}/avO8 FV> 2 u/_$\BCv< 5 ]s.,4&yUx~xw-bEDCĻHGKwFGEGME{EEKX,YFZ ={$vrK .3\rϮ_Yq*©L_wד+]eD]cIIIOAu_䩔)3ѩiB%a+]3='/40CiU@ёL(sYfLH$%Y jgGeQn~5f5wugv5k֮\۹Nw]m mHFˍenQQ`hBBQ-[lllfjۗ"^bO%ܒY}WwvwXbY^Ю]WVa[q`id2JjGէ{׿m>PkAma꺿g_DHGGu;776ƱqoC{P38!9 ҝˁ^r۽Ug9];}}_~imp㭎}]/}.{^=}^?z8hc' O*?f`ϳgC/Oϩ+FFGGόzˌㅿ)ѫ~wgbk?Jި9mdwi獵ޫ?cǑOO?w| x&mf2:Y~ pHYs+iTXtXML:com.adobe.xmp 5 2 1 2@IDATxUWy !(`5 ̴ T-0~f ܷooք`$3D} ^|m X Z iBL"F!ښI Y39>wgzwmV۠IB @bK`Tl-p@ @@s#@ @ 1@̇ @{ @1'yb> @ @9D};!@  @@ cށ@ D= @bNQ|@   @ sw C @Q=C`Ν裏 ĒXZÇ塇X) KO}|)=F֬Y#c>T@r ؼy3Ã]@AQ~ CMU BQ F I}z@MQuM@7K!@>TT7BIQ~u@ %ة4nq1 >]Mk oB (D}x)}ZV^-ǏBn)@Uz_]VTرCԛO h B?'^}Knj`ܻW!>QWdo>AЧ^ SNb^tvvڇǠ|@26Ba|B~pthWe͚5xcO_kQy}ԇI B gk2'8r5~0 WaO @W7񚌾 g5B9IV;wZoibS4ֆx 96l Byg]sm8&a {n&δ5֭['=h 
ZGQ_f~xߨKeg)>ʷKػ)|iM[G@HOC A@0O}_s_juobBu.IBi942'NȔ)SFXGA/P7-*Bzg_?[J P+D}RZ)/q.05ery۵[ c?۶UٳYN B-OM}L`芳0S* Dŧj L_^1SRu,K[[X`x*8f̘a יHHh0a2z![ٵS^4_5B=˨Q짆"ZQ+ Bث:v+{VΉ{i Pes)UO 32 R^VN.9k8*vF Y]g#i@NQ_7ǿ!wK.YY3/ ~}*"// ~nWѮ^?G.7\ow=~9D 9]La_̃o 4OQ_Krygǯ˘K Fvqk^@r,}Y, @A*~On^E֛/?㭯S + @u@ԗҟzbsf1K9AoD[_ j&!@BQ_NC]$蝸[_`-m}v~̩Z}M@@8üg/[?7'/>Y0",td 9@s'@ D}z/,/x8:=e`C]n@qw{)?O @!@M@{yuƛ>78 y߳p<,Gy҂'Cz|Y/\PM8@ǡ =>)xuƛsz靸|֗kiy{I{FN+]C23{ ӈg񏰯7 @H(D}cy鯹V\|?oro}&vj_,zEtuu9bPU1Oҗi6E.xcy@hDz7K_֛x=0=1SӮQõLio7ǏTB2!c] @h)D?K52-:MXsy;lClg?k ֕o{j ĉ>[E^ 1:7KW@^kH_{<\hHE#Mm 8H楿\,}^z絷W]_BqL}4>O}t>7#)KxKW};p>T,V (H(xu^'Vgzw/^ryƼ2Xf@[t;O!NȻ,>xt-p><9)#m3t :zr<̵=}풱zP1{-fAU@|U 2T^+~W^?և|#V 3nHa/#&śuMXW^ӟ._ }>/g[";{a>l*ӯM0ӾP $@EmY#N_Yr'_N La_4F[̧ZE2U8_ xm˼ꖌObt}Ք5AUm8'!@@R+yL1]䋨r&No=#[tS~><å*5 r* ,2}X ͼ |sB~Q|zi< 󐱽/[hCFڛj5}Ky(iv| H߼N[olDE>߿_~̜~̔;_22llքIv`3TchIsUaǦg%k:#WP.t;kVgpi̓R3T`aCť bJ ^jKG.1W/ZO39t}"$652 mR73Mڻ`Y3HO5x֛݆f1oBm͸ D;*l!@D0//yMcYik>ukp*Ⳬ%NȚAی(7FCЭWڂe(=nFOJ@ '9/K5x zS{ETm71"]U R7_M|{"hjG5\⫗ցv{o6;Nꆟ53ᘩp/f3<;}eJ!8%G U~]1c*k#-mK#fjR[V/)<7{Roܽ k Ր^3WG̔-"e7WmHOڇT!7 N6cH !mF}M67ߔӧO?]K5xecCS= +WqUބ jWRcCL]\Nu~3W_rYWwǏwv z>^)7`_WwLװ3FBQ!PMЗ!@RJ UǣF /P.b}'yݿ lx֣u P}= @H7)MAE{vy;)]vL8׏7><v[,vhNgVqqEIˇ^kF+QTV u{:1fo-(v@@S^=:xUC`ԃ1AxFp9֫ 6"{6VtlhNY1~7G$i43h2zYF̈D!=*җ{ xoo-fd13UL4j %2s:A  zR;*J*^*,JZ9Q,F`ˉ܊. 
hklɗ>SfJL hJv6 N^nV4!W{uYQ̊yA5w̬"۳!ę4ԜP`@@2F {袋: zիnܧ?S~mLA#c|U]5y##Ċ#iEĮ"?kλ 7v5bݶZ B7j @F u^a8Ϲl $AW93eG>Ɗ嶫V/UE{VEq1{wL@vzDCuۋtn܃JssWoMl8@R)GB?੷5}flޟ] ^Qߕ5y:ԙ5^y[*:rPNaP@@ _TWzԽo4uk.]37# iTݹfi| ͇td{w_YW?ӆu77RwݺbityBv4l ;B T(Eշ3vFwֵa旜Vn TXkH )q46Vq/i3 %)&_jg>SwgeY*\Qm>i= @^vEeةѕ /|ɇJ?Nt,,GϚ3e!27[2TWϘql2˝*=C@(uŽ{Lv黨G~@@<vxtVE c4 d'qo}CIA"(q$$C `w.Mk 0رcӭi(B t$@^yM4IN:t & ꛀǥ[kSNǏ{`{PE u9AZ!ѣ "ISH@AI :}KCzuיi(=3Z-|e/`R J+Eɮ]vńރz/ @u!b!2>׾G}pf6{+m8Z)赅7pWNSp:P{Ϟ=x'9@EYRt̂QYQl+{1FՂ^y7k֬?ixiOLlڴ zM  vAmtdy=fU67lt豥;>O יu`o>cEOG\[w͚<}vZgG1]|yï77GMERLi9 ^jSVvbLV3/Gi~ϴ',v̲ Yy/۳L2.5eݟgl^ϿlмjL_7]=CQ6ڑt=]9#( zmzw!+WdzKа˗ƍԪss&yQoCoڻfeƣݗW}[tٓFw\ڻn69<상lףSuƏ~8o\on>OGǽ=ywb7w9l1oË&]ӦU\uvvÇaz^S9sB!@>Kݣ9֑1"^0n_/l[BbFτo^wȗ*YfYڵk%;ذa{,\зr)#iQ֢!.a4FkI[<&>4֫&'!3&\Y)qW,6{LNx$vb@kz5GٳeղsNw:[orܹP>Z7B6ӦM'NݻЇT@`$_ޅ[Or3k4\velhH| ځy{ խvtÉǠaכ7 c4!;f AWohh>)q㭧СCruفA.+QFɸq .ѣGѺN;N²U)Sd߾}~zb?6"Dx#d@%4\E/?q8nH=gϞ~| |ߕ/| Oʅ^XTR%:BL5ost}y٭~'A@ DS z 9s=̏>Phj֛c.t.Ǐ+sε!5:s֭[=)iN{ T@ԧil'*uD݆u;{¶!t`~tm&'O_ڵzg̘! E`tZOA|A`O#?Rta6}@2U@!.y)뚿nxPv" @wzߑR  ,BpF+Z>]r 7#]kRe^a=T@h D}KS) (k>#w__ZM wrʴ5B4D} ^#y׋+bGUNj:^\ݻEgAǎ$.eC3ߒ[l_|7;vرa(@ D}P)@ ôD}z {!d:6T؟>}) j'9!@*,Y"@| D}H@PJgToٲEtZ #55AH4͛7#42D}{ r~O9f_'AfTd Zx@-$o C?Ry">}̟?_h9 D}x @_^-ZOUX@UFb PW9 L&c V}dTDRNQC>h”@@Q]DHvKF!FDžj"  (xWȧZxٻW\=~ ׅfϞmCqNj@b ӕ4#PkK—n{x,\@da T{}c `wC \}r<'=W!I3 4u'zsm%cTۣ^{@hiDo)_ꌚe%`@sj@ xW~rUΘNyeqD-XB9ܹse2k֬P@I#OZ@ø*'M$8v 6C @HtF@H7 ͛?|paz@ @7K 𛚶'QH#< pB&8A!OLW$@g7,lS+,#55AO>@ 1Jps eaF{PW؟:uJ446B(KQ_ !@ T:t=}d28|')B~Pywu]{=/xP\9P~zʗV?~|Y}zz!V`Vҧn@'{#ňAsd_SeNڵ o};  -CS]w+Pv>"|1'~ʹk#}x h=<,"}BcZl.N'cϒ >-=M;!3S?#篼"?b|~i*T8p@?.+WSCQ% '0vXٱcT{n @ ie$x鍯&j¾!l\Ĕ>H?*5axb[>OМ4Lkiy @ԧi1Gc '>'}Ό6aZ?!hF*w-:a_!X`XwCCU_?,a6olΪׁ*I@O}z6@ W2&U ~ҤIx+0 OxVC@MR@ #}gg -EQ1 P͓#G^@ r(. 
$zkIvwM׻n+1Rmmm#e| d2k }r"l.A(KQ_ !f81sN9tЈE91o믿.'O,LC9>d 6_,G굈ٳgŋ\> JQ*n*@:H>l߾]zZO|Bz.i*֥uD,L~y쵣F!)C8P@R8 06ǷB@28Aoș3gS?d40ߊo|f+_\tE2fA'Uڵc?ud4V@'O|@K@EѨW_KM717>oK/ {P"M6oc!*@a~0:.*pNs=rQ1mжhŽtm{?G`ժUmg9~8X D1" @ lH\x~V>+WArw6imE>+V,nV<#< JVҧn$[-P5hGE {wsmSi MWZ$@'WiZLyUkQa㺒gݻWǍg1%\b\ǡ+w!fj0 D}@`)i'P*<$콂~2a+Xg^_D@'iJQמnD1W<^ k<رcZZD`JDt#@t s_~Yt8Їd…jĞ={ !7<Hu[(轱rJ<Ц@VZIh@-`d6F& D_Ot8ĒfL,-h@ 67[:Kxgy{+[n]ji->萮.y/LwP$>]K - {]U^w]<Ѻu"kާI Vݷa@ 7A|=AϽ(Ç7omfGQ>E.O|e&M|#6^O6@@ :X|*:}%H'GZJ~{U8p}`+۷: - @aP@"ZGWN@@=*ϟoKC"  GDD@ h}cMOo B@GB<_~@)a{}S]u@ @SMb@O {ca5"?}Zjk@Li5%Pts̩Y#. eɒ%h"d25\A@U_vrGN>*`|%{Yq.R?~L0n?ZuPik(Ack.D} NQ_OdϾ '' ,ZO>Y*=GME<>Y})!@MJ:fB ܹsryD~믿^>я5QO?M }D޿:%8Q l@r0P69}IK H hpws\Q؎\tE6ǽ H$4ic^THۋq@<੏G?a%RO`$ {kR=>L^j͛#o/B&v`] >|Xv)ﷵ!E[Vk䫯껱*IgYl̚5~?hNs~ҤIF!r `  'B~ڵ6\Cc]o{+z<_^|2n8+pouSN[ni=,-}l!)Huƴ{q*М7PgmJ/~ ;VőcNJK.$z ˖-;u@ؗ!@.p9i4fʕV;|ژxoa={6 /b~̘1 Uիz.}H %OhҬ S/+֑YE5ϟR?t*5.bf gfYpaUQv WGO;vo\HQ#L iY5K׭&7]ebk 2pƍ an*w a_7:.@j SnܹsmbBJoU4S:Uz H @d,7:}>2]!C[ /^,6lhj.^"AFO}5:K$ɓ'[/$A *M&<"5:(3=اi)!z\;O=L>zbg<džN)ٳ'66cht ^iؠ%A(GQ_ K`޽`ĶE.`k׮%&ҁXw%C 0Rp 4Ggggz L& &@ F'M4  qW  } ~PaI=c̆ %>HD}ոzf,ii$r}bA)qqh,f6B("^gϞm=:>O!@ nNBq1H̆O>@@VXaD؇M = Aj&!hDw/k%ÇΝ;e֌Ǐ~H"V` pa:ed֬YEyz %6r~ wk$ G-ge+uuбL\/ T!8*Uȯ]bteQMi{N߶mׯ[n%Lh}Q{d<,VB]'N(8\tqaMS^{:[&E˻ |&/{v/x6I HQ?K,T- Pq*l½S 4T$gYreꅽ a'ޟ~iuc*Ub0FA~CIcad޼yơ]@{BPD?*G%W^}~C&Uﰊwݪ]=*p'NhU̧!EۯN&e+ q[.ʆ,d'@^zY5?˗/5k… kL(G ^wUݻ׆QN\~G(3+'}Y`~GDQ֦ -U  W:EƍPbuH:^i׮]mq*F]y]`l 胑}p {4n/x6IoQ?w\*WM: h|}gg:t?u#{1֥q{GB^E NmZR5C@^'}RηzkfZD}{"২{LvQ d@tqƉJ@34B7o i&y衇xdfn:uoFoWpUV`caSOИV 9W$4K@:SW)ot}P!}=:Ygv;pvZ?5QJTAE:@ h Ƹ ( {}Ё з*3t[R X:x1鬘o$E+[-T@Xm_r7IKߪt$W'g&~#AO*U\ӊ+6}O |ui[ٳgU8}+Oo()(MiֶUxY&a.S[qz׭[cC,{ ~~5_l9w`Mc^gv҇ b#~T1O&L_J.NEHS @ >P} Lz$J+|Ϙ @&w}ٺm۶" lݺB)a4 H0. ~,ԳR ӏ&bٲev= ^Rd2M4;s~*rD+nj;5/1W9z@ڵk*U ҤgQ?އ;R:x[現[R͆7O ,aǎamܸcJ^stD}hVI#Rtɒ%V%+? 
˖-;@q&a{;ᄏ&::SogtjT׏b h1 q3PرQ[:y}TSOŭ !Ǐ3jABWWTG@Soj'EhL/_jzY:"l!^V}5fVl$iT 3UU޿NT~R, jiO]8+\?sl%7_?>~84lB= ^S1B Z:7轤Q6HW+UUqCɞ={-~ʩL@?c^Em6L̻6p8SKDW9EgА 'vvvvʡCv 6e!kշNía@Gŋ˂ (<ވw^;㚆_zJ檥Z?~UƎ7DcI.>n8 7ee-շSRI|6WqO?pBStC~\OQ_?31~|*g֋T7]شig*E\̋8?}:IŽRT_wu6TDn(ݪyYf*McrpL8`~Qꀰ}%8M41aވ Nk7*TO W:CY&*n$3$eWѮ G^n{ kDǏrHMf`7I$E`ʕVDj`#ޏё℮ |2SNmUN2%QjK^z%;@GۢmRFLζ:D}u>M~|(I#04'"oz' ~hF;L[ԫ_}Gz]ro9ݪ8qPC‡\^I.E]TIJӪ~lUX/>MǏrf̘a$^ &믿d5R {?~GBcZӎ8c=c^D}{ &׏Ri^6 cC{66 4"'C@*P .@IDAT h\'iD]A%}c ` J TPa:~WA `~ WJ(^QeD3fE[;%&e2;ےN}v`JKR\ 5QFic]pE7mkܼ wTꝟ7o8qŠ}=dHAmN'#X~|(Ǐ2@vaٹsg x:uG.0lٲB|= P@ +@-BLE@? ?_v]FSϚ&O* sbm69uꔬ_^nT2эH=+WSH]e1S]k G9~M]}ɒ%V̫@sX?ʦdPQ*Ҷl½uQ a)+~N#X~|(Ǐ2E6-S1<͖#իWsjq~j[ctnN_` B%BL;v*U1:x.\F"@'>yˣG9~( АB&%u׹7n r8VDE)cS|Q+N :/X^~eJ3#Gaܹ6TB=$I@;;;СCM;.ʊVFF|-w,>rNsYn>wApmׁ֋/ 6W %C0x!@ hhjP?<~H_jEUaO@4 gڴi3Y2OU'Џ< , z5< I熞>}:>7@H;v}p޳gOH5R   RE@UڵkWm޽{eCC iT  P N%Nj Hci,Ocχf~#A@8pаuIOOOD, $L,}Zhʶ^P0㉍[ )x+]X ҷ#\swV~* S4r$:뭸2erGmG* 3eg*;$]c{tgdsae-譃;6-ii @@1?bF;z]vd q9y5rMwI`tgf̡wo4 @@_OF]rzUwdd@g<_F$Umiب W>T@ԧi, @J@\g6Q/j8Nkyس՟%]C3__0ӼU/3ov+bz4@@W=}tWr  @,2Kdp(6Z n+l9\I2}ޗ1/zF6&AmX~Srp[BJ[Lͼbl T6mLkM?V(?a-nxeӏ&<#<P12g8@'95&>H @ "OPgҔ,ZHv^@ NA$;yd9yFR+[Qg#l&ߒяA#(K?˻Zmx*P!n:驐 @ <xcMM #3fȁdҤIuލVY7.H te kҏ#X!h8[eY|9sWGv.vunFJ}LnG! =cmEl@&}wԩ.$7 D}S8̙#z\25(,"-M1)fˋ_fɮJez[%ݺN_UM0ׯ~L}r~pzĤ03CZb+VyE=V{. |WJG>[o$]݈Vٳ;v͛ k2M@bСCO[qsΰ͠>>u]N j*6ѹsʆ "EeFeI}KMLYAnoX^'YZoiQW-Y>1*{@Iq7@ ̊nA''NiӦڵk婧_CX#7(KƟn5 lOAu{K`?_oϘ+= u/p:g=zO /o #Wz})"zߓA|Jy?tMlfxza>̌Q]T79m;2**;}iOIT;ﴞ{R]g+#sx3&(DrC;zEtuƳ\vvt{31C_]ڶ7̞ⶎhİ ؞!dkҷP&\Gk>v-P q.ؖH}xb5k P>ŝO! &$kgőTM=XK&̢0SH㉠7KW{V2fut xwn3fjFRIlPf+ cAEug̔3C,xC`7gC+ pEgŸ|᭪dMa8LX'_uR?C6jƍbc̆@<X 0m'+<Z$+SLײR9:edu9+ԷwKf+ 35 qL3ҡgHt;`o%|-SP!n:1T @hfq-$3f5u6 S?J;~SPo-[dɜ>}J!@ ouꄀ9s[o+Wz @j'9!+VȼyD  @ OfҪp+ivmrw b}GJUpU)3%6ӋxX}ĉ2m4YvЯN:U!@H<)l/Ν;UՄ}C!x–?ctQE{O/^,;vZ~|kD&p2p$c#7 ,@ԋi&ѣUCtj. 
Ė>]i&{ާm >w?x`av :ulذ!vm`@!@L?)={twwXzxaYd8p@&MTbN@Go>L`QT1:>:}%zǏo7Y{}@RBO}J:fƟ4;c ٽ{;_jos|&?џctQT%cO>-7n,+1UO]D*J1E/,#J?F N_` иyN;vl5J $)-Jz=$OJ&Hdt&~SN@C @ YOZtKBs"$ @ 6@ctǏ7v1WAzF p L-D}$!F S4D}:S 4E)/!m6Yl @ $~h@u@N*7p1Ki>Gz꩔>S hQ[>)'ЈD3f̐ΆCT\R:&uK,[oU/^VƧixWX _~˖-|r(x H{y`Ba,8L&S<M@ȍ?AnBG30a3g#A/*ήZ[*rN z-Hxk<}w=ehTBZOHG K`Ŋ2oLG"G^hr"(_ P3[4w\ٹsgj54n8};Lԗ!ą@#1mիWm&wygiC*[ځx諒Htk-5~"N{$b׿n7ëj&!=~zm1LjLAJ_?E?CCЂJD} KԻvL_ڵK3C@9:&CZp!r8 =zGdӦMQ9u:ꋡ *DoYf cLjtfBE}:8o:bhǎn ֧: N@q%5G@߼[{ѣGo%9Q  }b9{l;Zc`5 5KrT+7G:=u]WO]E}ޤKoII%oa9QMP. sKq,SH@E }n_^UWϾiJ*5Fѱ/>񐟬;Q5)#OY\L;vL.b,Gu$k, <:P\57X*tRϻϳ>k'=Pb^k&n}ڬf r=ZHQBZnܴ>?*8`ÈO@SPq/R(Ul]zKq~ uzu넺9sPߵ g>L2ڧn@ԧiq8&lذ`o>[>Yv=~zj$z]ro9ݪ\&*Ľ u=aCew #!#^#15@1c޽ IC D}HA ʧPOיn\*OJ=.[@/Q5!@ h,}e GO}x (^&I"F@JO}U< @@ GX@ Up @'~a! @@WI@ D>} @JQ_'!@ }B @*D}U< @@ GX@ Up @'~a! @@WI@ D>}6mT w  d@'iE <#裏VmÇjNB <m&%Y#S;&cǎ lkk?sm&+VHȱ-ݶ=zMQzojҭۯ C@Fמ@+ ̚5K.\(s_~)[na7ah:B^?O/>)tT85SNɌ32}t+Txj idݢ'ջyNٟ˂o^=~E2ҙ-xp/ʅ^(\p>B@ MOSo4itww[o1GxbJϟgoaK8Ÿ~]sq KyR'j*A;wu?~=tPu'Ϝ1n?1N->˘1c zc|a: iHwA`8${k׮u։zI'[oPO= %@M|)Pפzޓ'bUGEwR\2&A@pǖ!(ٳ΄㦸 „ONBWA+˹oŚ@P}|xm.)-{ Do()(teGꫯ}% .6:;d\r[d{>~Ld· iO PD}mcNݑ޾l[R1>܉PMRyxozr ѸoȻ] + ]GxafD} 0DQ?Ăp^=Ĝ+'MmI>Vur?4FE~Ǝ(7MKh\i!v$qS< T%@٪x8N'Rt"msL yUԫ^ }ӱqlꗇAk׾*2xiZ+_#A@xᱦpybiR~ڥ: b;Is}Ov?Wrզ{7$@Hoэ4''1⺳RS"{\TG=91{'?ܚB^m~/ʿ9/O?Mj߲n)k2aܯzuǟ~R>rײsk䢱;@PSQ8a<ڧ*.=wm9ڠ69wfK䊫ʻ>_\?sX O=r[*׿1wGX^8ke^H <|иפ$wc|z]=,sTm&~'}Ư1s[}w[w- KQ.oj jx_rTS*tg2z1?T;O}݊6$74T1If JK%/Ez^QVՉ6zoL ?V{gUqoWG d @ǥ"l^0|>Y&!'Ia>Ê`qswog/?h/>(w/T:A$@ 5L$H /_~ӽGRͬso6>s:Bmmmy3h @ǧ 'TkJ>B SM {헧d;EE@[_ۍ /qȩ"3ېQNO'_M5O,φJ"@/*W z;}^'*b[[DFЛps|?L%׷~J/ EIFKEo1VBfKoNWqVݶllW7a7x>mio=cz蛲6¾]B@J Sqlz\ TDZUѰ篝Η㴼ۖWPsfЧKo11by6cc >@ 8A… <1ca:9\80 _zRF?ɘ3˨_ț?a6ĺQeԯL؄-?|Tl' ć>>}zKss)-X+}w[w<.[+MXț?\M&m,{2'Fk22by3w;_1^ENka? EQii@AoMqUMmUrǽy"g ..c~?eW7h("Ai OC/'NiK56䦔ۆlRM̙3r1N6]r @i. 
G86[3or1v[k 4<)0 ȥ;fn[ =>eNsB 2U@P:+ƅl~!sl)zO @>P$tK-XeBh쨈ϛ yk~o3K~siy_Q }˥l%ñM`snI pV |) 3ϡH2kW)ZH8]28𢴽ʒM[Q_a.wsJJ+ GQ[JB ^lt1Q`QF]UR敎FL@Xd$Xya-1v[(쫠׏[LE}0(iuvn AHD}B qi+s;YٕWx.yW$@h D}kS>@Qk%OY;oec=#븹3}w[wܳ?\v!$>ݚFYJ+Tx-;^%m91XٕWxFQMSsc鶮dmHox#mwSvU:nl(t"6@i%Ok'ݱRX3}w[w}w1-^z +ǯqݜwo" L Fߤf!ys>UWtW#Kh=. 1/<gKM~ce+ zqǫב+]cuxkp_120EeȥaҀ2ynAڮ*dLIO}TN}6#VuBu4[@SC\ \=xLlϔ{bH6}KyO9_Շ5暬|uyWaO.GwȲ=r6LȲۏȧ7U/-Tg;LWo͍y 옺UejSvW:% @S_?3rNFYtKִ90/ʍ%y6;+|<ϵˍ e0ۻ iwˍqLf eHK.-=澏tNG+3&5_7ӈ;3=Q|8~෇QZ:79+W;M2knK>  2uB S[5pX9c.WϬsiXvpghvyaw.06ͫg xޝV\G|^f:AqkʫtY*˺;k>{~/K}5#Ҧa߱^kod=?7<s\zn^G wٖ᭯|o_67nvr͟g@a & KpE!/W+LͷYdwVn3 /Pg"*"jRÔH9:vbˊk%\u̔~Jϝ̚w4ytu J 9!=_Usd2 dIפyP]:֘DI(~[\=JMζ6o@e(ɥo#GSj<$@`Rz9hXF߼MveZ\\_-ie17K0oU7˻_oWp-d_+LFQ%p\YzγߵO}w[ ~׈ҷx-}O3P?]z92pQ(V"r*|&^$Ǒ̐Bλf-kW+KYIN^ B^ 9duP8e}?H'Ccň!H,켿sfHYsNnzj##R*X, fO* @DB}DLJWre[ťV+ߍdxw@>vǙnRw.i׷աzW.y\[k!Zv|?r{dKߺs[zߍzۅr{{:F!OG% ?i,/$;$%؇i{cb(211!~Ȉ'% @w~WjMAbe#]A罖BW&ɅKp"u|ߒw~{[~Rk۷yګʽ;vSol[~iӪC8CMyKw֥ \1ZiyԐ:R#̫op/ -F%K]L?L-+ C'37ޕz >=N_s,]7ZΟ\Ο6˽~l[F?qujg)9+x}NEgi{PďM/jҖ  @]@ʔ.`R:Şi,u+m{˝>ꐍzGN!@ivRjv7ޏ?{x>8Si.Rj=]卯/xrO vzD'@-5wY]r}]}fV@omL۽ ;m}|ieXג @ti--YZi5ۺ=e7neg:moN;$Guԋ Y3٣G;駇'һ[s?s^=ugWڽ :mw/e?ަ\Ep ` ߹jX ɤ46v.7:Zols0@R ԧL`@6kX|z?LuhBEn[ɗb/@! 
@B}D^ʙW#v[i3]ib;%b ԤT#1rl*$/"gͳRv~/2B.@`p\)">2 }\-IN/  D GĤt ({ZX3RZ^|x=lY7A]rRcɍۨ|}ֳyQ 0羼J?5/,2K+3x!?77 da]8ڜ~UPv znHmE?0yzo7e:!`C̩"@_ [.kOJ"}}͇w?Nz23SO]fB]""2uUfj+n~Dr]ˈᩩoچCz@P)h7~O'`Vnrr4NŢn&P̜pt>9گRI ~t^^Gw!>{ tCP ULE 1J܈z&Mu]LLL gF-iDO^D?wZkk&^zO@_PV1[i rYT 0]K :bkkqQ3s]ϓCWkF>itrCAP5 E k:FG굴@5YN;;}4UP^*SFJr4Rvs'V@gSug}Y@NP?p]?̀l4K+f[pT> r{ee-Wvp./?l7 td7Ny dKPl@K_Pvm#Ҫ!i/EB9IL7F*R[G[ROɣd ;dwvF'_j> }ոyu;:^׵혈2BT]~nUk_Cy{Zhe2_F5J}oM[.Я5y2ʛ3{?}rn]k6J?~!g;Co=NJBU~әoycKj\]j{rR^¡|𧽺ƒ\Coߔk/Ю!\mTݶYa-kڲ4+@!;F /[ +.nlM9e~\d8/2?|E?i9= n\)YX(5]D9FGRZr#r#K4,~ˇm gNp-[o޺B{-۟@ gA`F*G.My!;2?Qъ xMq&05» h>UDÍj(c;Ҳ)5h<{[->!}I9,0o!^,|Z[ymw<@!@F?p+kh{8Q {r{*37\\>ߚAeG[Crkm 750궞~dQ;!k}wT]j\?x"17eyXhy ^<䈼i> ϖj ~p4r~!|$=\/>=5p7F5ZCpW=-exm ]Ig2~,C*L?gJ~bZrŒ}ٻn;!G2<yU6ޖhw5ćAF_/L@@&AswO>O?$#7kR/Fȹ=+-[aXk/Y&;:'꺞^᥯ޞ/IT yaLڑ %z Rwo )xW}ԏ]wЪmky~&y/pjޟ՘{ZZԂ>M] @'uڃdTWy$ˮ2+|Xf {[ O Z&|.]i{O/-:MNNJZ^G7dխ=5m>*K˴L^2T %+\N.lMRYwt^ vs R\IVPK[o䓣OIK@ zR[=G_@Mk=7!ܜa> 5$F ~4*z4IDATէ}5١ё}}䗟nc?,JC?PLy= x'`fנMYfYE@\BDUg# ?^GoMv,1Uky 0¼v!zW:Ziia^C|#7}nAF`ߪpk¹YXj ajYc@.&@Ge@t`5g:c" SytF䷾*gF-ki3,[ϳ؂u^Cl!J{M0kpo[KkO۲PoJۮعٱZէ<@!@H X0z4"?׏e~zJ&˲~uTJ<†4o{0{mT^}8kge˝KK w+= 򶏕ZҺ0rcezن @ ?bq6}L&WdyTVGeHŅY)OLɵqSW9诺nmu?"AƜn-]wTǻ+6[ҖDp{pel]lC@nRgt [:Ҫ_ޖ5ܔ]H+}pT!YJeBNrqX1)M]/Qa) 5nnn;ؕJEmɢDMnLhy}(y[0\yMf,tUSx=<󶅯 @h ;;;틑Z#u}nWDv*9Y;Q5''E9yYO2? 
ƅ5 suڌtG#6׺Ԍm@@/1W"`5ȇ-Zi^˺.$ GbuF-[OzאΓ^+mJ@@ r|&Vjpק[V:zoEh𶧆r v 6ƦՄ#6*u¬# @LB}LMJExXT--[i-[CK av ܭA>  @BPa@g}]ou0[HmapVX=  @Zi' -8-!]m2  >Mmں2 Nkapm(@@ _<"  I{*@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  U~`jZIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/images/logical_architecture.png0000664000175000017500000011230500000000000023276 0ustar00zuulzuul00000000000000PNG  IHDRMsRGBgAMA a pHYs+ZIDATx^o]}'G3ƥLIlj.[MGjXāJ$p)v@P\yQ}Q21!~a3(ȃ[.2v@IGkἸQ2Z{u9ph|Zϟom|Ⱦ/ @ @ z @ @!ޅ@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a^ :x;v>mnqdBɓ +ݺx o*Fʌc e#w* @B|~I=y~~}_ e{Yhغ}Ζ6UX= Y_WC?`v;- lkyYms6~z.ҟtBَ,nv0]㭳>Vob?LhMe}fun9L @ B``!~v}gl3g'[_og7綘7?_7&vV~}rO_5F*#kA}g sfȞx"riOv_s nWIԧo>`Km ΅Cl)9ŇuyVUp @Mn%/etxO=^8+(GmrfA~.7[.7'koIs S(=hgՖF׌ _v8Xlct6]K4 @ V+o$7WO]Wߚ=Ȯxjt0SYئJ @@#[Ϟt>yJ|g}zn|,'  @6n%~to:Z-3YZ @ ЖC|[%@ @ l;}Z'@ @m mj @4( 7) @ Цߦ  @ @@B|"@ @m mj @4( 7) @ Цߦ  @ @@B|"@ @m mj @4( 7) @ Цߦ  @ @@B|"@ @m mj @4( 7) @ Цߦ  @ @@B|"@ @m mj @4( 7) @ Цߦ  @ @@B|"@ @m mj @4( 7) @ Цߦ  @ @@B|"@ @m mj @4( 7) @ Цߦ  @ @@B|S!E @}U99 o|#$@ @ XoXnUe| @ @``B|-5M @ -kpͱ- W @ B*0tM @ @@B|ũ44G @  D NIS @ P1 M4C @ F LU @ У_Ͱf5t @A@EN%@ @].4 @@@G 4N!@ @-q }%  @ @!~ Bt c؂̡ @ Р_3XJ9 @K cS J @ @@ 7o<}'^XG篆sӟGjZ @ 5Ia;1Fv @)]J8&J1p @D į(Bӡ7O?m//~Ĥ|v~_ZXdsצ! 
@ @t_p<~!u+_u%7=v @K@G!>7'{Yw}${(^g&}-AMviu{w @) t L|Ų> ي}ë'ڇnugy}Ȟ~ow 46u4 @, gBO»zȶ2}ǧ!+J/S~gs2J|WS>M @ s~Wn/Uo/gB};o2O?_́mCss< @J| wɏT/vV܋必?v/slwtZ'@ @mF?pkE~c  @ (WKos%gT @.0?0;W8 @* Vy  @Mw/ۿPA @Fֽ^~@ @/6۽XM @͗F @ @@, iKUy+G @ @OAxtKf @ @@ hK]u;g @ @ Ax!%ð @ Ж`B%²9K- @ @IAxKbMH @ɇxa%l۳2 @$L9 @ @ E!ӛ;Qw @ 0CMEO%ȧdE` @!pFU| _sk/#@ @!$rWc @ Ю߮uad] @ @`B|E7i @* @ @_vq-U @ R[o%@ @GuA @ -zZ&  @ @ B!lZmy @HH@oXUV| @ @ R!lz]ٮR#@ @Ⱦ5f;WmFe @p.7 98}{sYwMT;bno9lSlj{vn49ƦjTj|ަy%d /+b?;!JAh 'w+:(»? ;#j w . wX!Cl] Љ@ N и' N<f @`X~VM͆ on)1jbDZV. @ _A* @V@[o uO B?b*GUnM\@OOC~~J`[!~[1 @ @uK§T-c%@@uI+!+i @ @B|M@ @ @CDlOpMWsuC @ uO @:;  @+ t> @H@Z7 @ @_W @&?q qŨ ?) @" @ @0  @ @ @ @0  @ @ @ ^B8u@8pT0 - fG,=߷'ƴ(u_q~Q.jZw̮9T⫹9HM'ng,؟h'npwJ\ey^a'fJsșg/U?ٙ* ķʫq/kh@i;z6uF1>r*tᔭ]+'I=·Y9…-u}l8ZGsF\@w @xpa "8~'|grlJSs[h½ƅ٪t bN-o럞wlg+.3v}VPnw>Ock{)cjkҭ3G|lعpBjwnOvmq=L~??ƪ3@ΧGg3'2 /$|}N½Y;Ymv[=[緍wv}pjzѳob:م1LyYg&lxb^~EvS s>yr~uc.yuiT+9pb}Z? 痶 yPl/xv`w[݄dz>:; Ź-1]xbvqŭWu5r;(,YuѢ8\çcۊ?Y1Evǧ[g*<1ydX׍y1 ue :_`gRGDm{,m_~4l;k| IWK~[}fN j5-;7fok|R[aȅD~Z-ֿS_0q,o1cئ%@`BX+o @`L;:ּE~w'Cwb~fVWXOC_;=.{h%o.Awbo=n;+͓CQ0ڹL0얉wKԪ6![eyP7"X1:c(s9q 㪷 @ ϶g_xXlK?zna,+ mY_>^ o,ϖk?;ާ;o}ѕˬ:L3}V~:97{$ïygݘak<' 0t!~6? @`"pU=6h~elupol]wguh [/2s?tm^1=N/Lv6=s K]uϽשբ>nuư l׍O3 䵗Ǡω?w'#(2?nUV-' `%>"" @.yۥl' ohNr+  @ -g~6U=w~K~Vɓ#@  !|0 [s @ bǵ8TM @ @@B|E7< @ @@D>bb,|Gwl $N*)s}ar/} @ 5 M8j:C>jqosbV ! /=ۛ9t! WT1 9bNc:BۍOxk|}C- @&-5SEBȖE)\{ Z%oha~ @n)"AdqvXj%2%o6r _E9 @Y!~ &ֆp[E֏Gxo^|z @n³PPZ]/\!\qj(IvFNp|knۜ媖TeSa @-!~P`zqҮvS)8WE*W2koTٞyw]7aTDZoT @`?'4?ye^~Of1AJ@Ϥj^f~nj&no%[UA50C S}[߯  @c}s $i]G!: @`lnoH۶fbj, 6{R?F @@[ ˉK[/fUfnE] @:хxpA @]FF`o}uO1: @F]TΩԣ+z NNl @FvcW\CP& @ oQ'hB8k֩ x3% WZ2Z @@vir, @%0v )M6uk[ڳ2 @@wl @4)0$ @ @@V86M+q ?hx|`xL'?}<f?6  @#tY-M@C'tЏ.b8쥘X÷?xx %0o%udzpZ6cǗVӜQ @HQ`!>b3\ 7L~p%a)pϤ[<6WcN럙=yJuďb87W?q<D.NuB_mzo/~)_ _艝~~;|-%_?ݟΩJxr6CC!|gdӐ'σpiP7}7 i{XܣG?/f};_)~6ق ܯ~ṛ[Wm9hs  @XYuӏI y?b5P';dW^ gA>W'}7oo,;w3A/UZ>;ɓ簦o\Zq}O?d[i @ ZA)^]_X~z$ЁcU΂΂4OyUt#p}䃳;]̭.0h]?~lbX/V]  ]Ow%X? 
ojNVIPl?Oo;}eI' @\|w~''/>-M{}5:9.]G!j?ܟ?ܹoeo-]}4%V \=,<{ ɏO~U<R46[~zscm_7s|+`%_hZJ|Ӣ#МߜZr ͛ݢ]IF@I!IMmhV`P酠f/ @ @@\Z⻻7om4[v_ 1pg=+|:+qh @`Zň?6:{B}' @O!O}} 0*VQᬘ79{CdVOG@O͔#VCК*_N{9'G @Ws3&WؽYh7ܳᡧ'Nxxd:3&- )_+WnχoM~8_FEt% w% d+gLGw?`ojcRxݹ':_)oCso,of|ߺ6`xBV@4F#c_ }^ g?Vϯ϶go,p8,nXY;1kbX#[Rm^)v eGf]/wͳS/LͮKggIׁ  @E!+o}q'|b@?6 5?kiC \N`_$`\6K0pܡᥥc9tN~L`tSW۹h0|x3$Ws#Y0߄ٳ(^`'@F( ķPv(+V?/<0zxb8Lۿν4\VI[c* i>] ['ڕp1[YW].[1`)互.G/, y"(L}ūG rw߄?gzlp]o1 wmۦǼ?m: 4'p7i}i;Bok5hzxgZ^<7'7IK`s; PEXq/Ӱ ;X|S|v @Q *ǰowx5[Yσ|^f}xzsFqd4Wasm_=hk!qe+_<#hw9b~1+lW90ESV}tؑpjr @ *wdb+_dY5w}|}?<\ fI &?~2^sWxeﴛ'} ewtU^sT2SY`y ; =g➅B<c#@b8p#mPuƓ?Ԫϯg: '˞<@}>'vohWNVFfox|k!^uYeW) Ͻx_{.k`U@Lߔ>c9_q:N#0(+ sg+>m~>>Bn5E |>b77?qsx͏?+f>"~C!@,l+}xcﻳW~nV[ªpQ&YP  @B|WDavEs>{Xk[S L?SJ|L @ : n)+{~7^b/2{⻺b=Uxzr6aXFb@C ~*Ic%6' @l]" @ @ !>B& @d k;Zeݝu=_$@╖ @N~3 hU@oW @ @9xYHx6Y5e;fuufD @x3'@/7k @D`!~+b}\Pɾ?{= @Q`!>Fpc"@`7.=>ə @@V2]g5q4~Ծ @|Ĝk@"@ @`nd_c Scfs(V{OnpܾJƫz8X+n|1!]VɃyMdG(g){ M \ B}$(9UZ|UZwI5 @I@SuG@w1 @ bՌ_ @( H @@sB|sZ\@@G @F!~# @ @?  @)8'*fݬ'mPk @tN]@BG* X+c\B]YV"@ 0z!~Ԍ @ A * @ $ Ļ$# &B @>BKc h @l]" ɗ @() ėrX|u1a yҰ&d6 @7 c̟hKmj@}!z{@% @ s< @CRɑCIM @B #>R( @- -jYYO @ fF5j~T6Y @.cp @t, w |y+G @ 0!~uNn|r%3` @:;@vv^&@ @`<BxjL$d @Ν>1ttk7 + Dr)i @Q'!_C3 @ @}!}c=l]" @(' ėsuԪZi;cX*a @ ķ\"nȫtm^oSc @@@e|:z] j @-æ[n' 0'ɦ~z^HM 0j!W U`J+ ĪT=o&H 0H!j8D-.˾(C4>hb  Ѝq *B| l)|Iv֓$f @ χP;EË,񮪾yUĹ  @`B|ER #Xo=IDJ б1Vw|-ўk`G k @Q&!WF4[OR @# >\k;O=a̽|,8fH (pF<7z`idot!~>Ƹ5 {H?fu8{:S$@ bG\C/U޷>o0| @c_V5J\ߴj$׼>zDaS_~O`S7h[J|'P]@`fШ0R 0KA+|-}M((:B|= Ѕ߅>Tp3pl1̵V .)7Ub#w]_CV滾ߟ  @@[Vd>pV_FбC} u^X @f^XXOֈnCa3 @ чx+l/M.yW6.6]+]>6.mE Y`!/u[7m|9bc} uCf́ ؅UQmGx>$0?k\ @) .wӛrC;goE|5C{ʯsJC$01?> yp/CjN @8FȱT[QT+g @CMJb C75G @Fq=Kۜ*ɩ @ >űLتZoW^y9 @`;AxLow1l{4m;^Ռ[57g @llG7q&lCmeH~>^޵l @@ CbdK @EAxE&@7fgslQ+ @ hޏ{e< @` Xr߯  @M xJS!мB|4d󧏇õ;߻u~8_ }G @ bB|[N -2ܞ? @ 1pZcǗV[7V~y?-jgߞ?g @! 
U WֲGoL{&ݚ䱹w]tT^8l5ug['|}>:ٺ_+קAn/ſ$8>o49Mm(p­\Z|x-lcOi@c-ސ1N @B Ӌ^W~rj,O|w%vdw}|p4|;~Yiux<+V?&_>?Jj|.ax7NEu'_Yp{̓W/O gJyvVM/ھ7g__ c_B}O0"U?6o?9*t},ԿD4I;j|N, 6yn%~qE8`G540ijbe|cF Z>N5+oʯ/Ue\B_*8~Çj5tdyo7S*n܁HN wrtL#-L˃*oH "P^ $@z)|7N?= @I_^>zx8ܞ D!`Kw?eޞUlkY޼ƶg3X/ t} =w>z!@o>⮏HM@4:<9{P|ql#{|VU7m_K8vzYu{{;g++x xJ.S6Fmj Y`A|!ayl`?O[>y|WM޼xha._DH[n뗢wb_ ze:N]׋a}C- 03mU1YUcYȝ+vf"zŭC&/2}==HlewTR7;t+U{szGq/;UD @ ^!i~h~[<.ۗg}}lZ \9w~wy˵~ˍQ/ (]C>یN-Z,,D`Ʉ4B}> Bm,ntW`˹b|D?<$M87,lI6U`!~R=: WbxnSvkW·g+ON?L,[æ~zś7C0  x#b3mT&0˾ϯlU}}//x=?R14]qf4Zk7N`F?ʚ d)yhniM @@d$ C !VsW+ ]]KCӨXuڴj{n5gjV[mq6Ti5gU|fKM7tk[k d>_ @,F@ E!>Ū3f MA>͓1J|EY?$20#GVp%@ u|52 c໯:H uݳege7yի٪k~c+_oi4jMv^cwHUJ|3nx~nxTz:_"@_ު#c?^q qŨD# oWmVo9oWG @ |*vI`lImH[AczXƤ8-ƯFou~{ rj $[8+1mMM^jKe[ V)-hT@oSc-MPDU}P _"R @@ZɆxa" MҨQ:{uXCg 5C@LTB NdC|FE`y`#([bS/;geG @@zIxA" N}Oэ1ӛl_w>J7z!@!>Ut&0l)|W>- (]?'@`ɇ.j|c[_zy՘)kijI W ?ҙ9xL|xc$@ A>*S[2hvDѬgJ CW |Jlc`83sOӰV -0+ ڕu >nb]pi.]"eV盺R w|[n?s7MNs^\O<`<0\\(so2śl[A~2CCvϦ=<ٹO<8w0 SٶlK?BIо;TsJ8 ] W/09G^W'o  W]DDz7 ypH:hul]t/03 \LqC/nn1Wc[4oߝi}=׮OCb{i/Pӽ6= |5RbLlۿ0}Cnon :?wឝd{WW&ATv?} ?m$RLϋXWbWA̳ݘ : ]|{Z&@K>_~=<-m~|fԽ@-]-~vKY^Zpǭ ~G_ 륫׌~f *0?кش:_v)7?qsxfA~nƗ_ z'qqG=soݬT eǭ{#GEq.3| 0^71L?f7[*ϝ>Q{uye輁__rMu',!dJupD>vy!۹7<&p䡑óOk)̺;gi=h2߂?=O?nK i U`4!>/ _2j@jM!W5aOOӚM1C̣МZ}ޏS_ WO>8dJ{fcw|QM#v<ةr_}zFfoX̌F^wl 4 ??+UM K0W/>!_᎛><@ri%`>2}~gXܗw}r} *SO>/a % :{g*XӜgNG~u6o\\~#pFa<|c3ݻwy7 @&}DZUOϝx =]|^i0cyμv%\,ݕU1Տq$%0WJP]}rIul\~ǣ5FRU;~m+OfwK썀l+}1+?DW`—q= z;k>m£+oe--}C5b_7fѯc!0w -~k]wJ{帟WrowC?k]} K~c|z\elh;=7McS-_U~_y[G @ !~JeUz,LibȷT6U\u+}SXngd=uor)U<mqn~æ tpY'A´؝Zi k5!-ן@lX:3Gf!ο|pJ!Т7:Z4`- 5Yyߢ%@`"PFM!t|쳩hwG`M/ϨG“IO|ofOоzxgss';dYGi_ٛwt fxnietҏy%XAlV#m~j [ x m`EyS^Ny[ȡ(Pf=UÇ>6 ]wdG|~4p價 1ٖGS1y;gIkRX?v mn;IE@4|40QPWSg8Hmn/xHdu{;O.NN~x^xaq;p~vJdzбl|nol9~q9F( B@oU>lLS$$PVR;<_<.;qswnw?鹭{W7,/Y msdmWDoVa$@{%߳_Yr_ɶ{_Onpkϝ椝^/^xerxRlϟx?_?v!i XO~FOF֭+uVd,+ jdbE>̏k&>o(Pv=b~sӘc8쥱L< $' 'W2&@@uxH]I@Vz㭐[##C- -j  jՔ zp5Lg @I!IMm @gM/j3  й7U:'!$$fH}]- W@p_VGm+xD Pf;-ƿn>+Ŋp!b{O?wD(z'@`xʊ{7 )a @:;'! 
%7w # C arN"@`* Ļ U /Gv1 {+&p穧' [!yV{v}XiLv^g*׆{⻽F`!~-ǎ^J`/&ؗqc yՖ*Z4XʦqB|U!w{6B6Z@}PI(ߏ)W*|ޖpukPIWݑ{[B *B|5 Z@%7^T/ħ:9-v\[5!ķ @U!'cx_FӺ!6EZK=ۼC|:u:z@ ]ORJpo2W~E:bJz5Y5hW.#^aW )|B)}?nʼQ @$`%~L6׉@}]l.NXu~f{{ԽU:n^sVYO1V}sb^Ū}(NʼQ}WQ^ 0L+ìY- 5*U/2atȡJ|Eu 6բߖlz5ЌV]@0)_cUB|^}wՂҕt{M4c~ |c |Zp_mmo#6US$뭞D=?g +xW`}e/lO/+U!fPMdg +㨳Y[ q X.FUC@x݋g{/ v _ `{ڲ_U.ު(Ķ8*B|5D- į.؃˶{pMbP_-xmWT_Bvuv4.lR[_ '+>ζ-FI^o!?4m]j,`%~j|0)o^=UZ%ܶrzYʥ_Ǝ"Ї߇>˓5 寁6%!šm6S(Œq!\S~j(}}Q~{Ρy}ڷuF!>@?S$m]cnW___/B_{o_I5 k֙]>_w'c1UHB`Xl7zj6|rպͳ˼f&@?Cjrl6Bk(xʒo,aeǬz"I, ď =U䘙cscZMl^L7bV2i:!b . 8M<Խ:~+)o}Ig @] h:XXu/Afmi ƹ'z5xGR1'Vr\V7 % } =nP&kU|UGBb'02aBpcZM7}- u_{F, @ v!> ߎbTYpW,~%A_#01ϽM-pF5nOELLe.. me]7b+ _*ﵴ6j#r T+7q |nŽK7jbkLȽS% U!#)So2s, @`WvzWCV+S~yoUX:uS?wTϺ{޹n  0F!~UOl|7T(MB|{lL@=k4-`;}Ӣ#0<#) x@W@_ OfOnd\m s){vS{Q׮G(' ėsr@tf; 듭|Gg/0)ߥ۽mݛ c'0>}y} gl*{7o do a}ͅ{;vzfW @66T@-ܕx/wj|`V܇10*~7?]OB0~w @@!c-0Y^}W?>;gqEJX^Z+~ߝvWt$P_*|}C- @ esO<|c'0h7A`d'_X^g1u!j YJkn `S @ A X 2E+ Ļ* 0b2~ @ ѕĀ Џm c  0 2 @! &AzV9 Еߕ~ Ua(P xQ_) Y|Fy~*|"@9 HJf @!@ F?b* 0H!~e5)l(}-AJ@JZ?Y*|= @!DM'jٮk{?]Zv1 @`H^*|%4 @D@w! @`dVGVp%@% Ü֕w]#P @!g @{a) @5!5Z  @ .㪇 @UԜӹ-ݐsƹ^w% @@wB|wz"@@ouVϝ>۸uL ( Ļ"JnuV @!g @7: Z @+xFRV)v\jG%_ @@B|z @@gwF# ZP]^K\@OcUfγYϘZ>j  @!Gt, x6αXZ K% Оߞ  ЛUuLhU@oWm XE˯_lg KMx @G[aL\VGnR9*|*2N @BfΈL@ ݮ Jh)B ЌߌVzLS9js}JJ?J`"6  Еt% ۛ9#bA5ZV'  @ z!> ܨo{ Vc @qJDy`ZaхP7 9 @@"B|"2>tϰ n팜 k뼓8k Gs* @ q!>fڱw0#lV=͆ ýcc_E\M @"#-a#0UΫ VZ?wDhFN?f!Xi.+y @8qݬ3pcwUkhE ЖߖvH)4d.j>2 @!!!Hͤ/s@ylW>Hkd @@~X\,HpJ @`Bl]W]ƙ] XJZ? @4ȾQSSOm=WzX~{{)CzU^ PC̳T- -jub xSRfeRo31N?*# HJfUAd& Cl^M͈ PU@*<$`uK%@ @@@M @! ďfI@"V)a @z{-<5A I@$Az@% @ !>"" ?! @ ! Em @A bj* yRD@B|z @Zs @J9 @, \ 0n㮿 @s<HavNhhƚ!@+ t>05A?""  @- ďfO@V#(! @)a 0N-̈́ еߵ @ @@E!" 
PE*|5 @Bk-a}uf&  @#GVp%@}2a=ٝyRhz @(% ėbr zp6 @Ⱦ0Qs$@`8wzjɼqѭϩzªS>_?wDUHPX-av]eMSoUԆuNk`ճ%c6 @Vs< TYeoj@Mm>S [^wFl @ e+)W D`~)1ۥh|}]/{\|34" @m!ma @c$jblX~L˟DX}Jdн  @܌ .D@jMu>ϹO .S3#!@ VƸ P@Mc_Vd>(SEbq@ku gŘR ilU49- @e2J!@|ʗɨN,sϨ g @ !>B 9/תЮZs2{PEs |Niy(+#+D& GV!0bV*_) Y"@6 |ʪ>jןת.w?*#sޟႲ@Ep7g @GkCsx_-?y#-_}vdz<ݘ n|2I7z!PE@~CO ,s6*fc~L6W--jq< Xs6hJ@oJR;F( /P|RB?N|Z#@ ۊ9PjԶٶge N@w} `8jq794- 7-=H[57g/ _##$@a êhU@˯_ٳʲRǼ9K- @MB&!'@ 1#7Il[_@oXUG NsO90"@ .J 4\=YX?wew|^Mv᧳O|p~~}N\1͆6Ԙkc4 @@|B||51"8|cy]v+퇎'yI~<;ھO@&FD` /cÉ3~{/?ޯ7}`ϼM~4޹ wk: @@B|Z#@`;-™oVaOKsᅅm o; @hW.N@jUx[gu3=ݶOnpe+ xZ5Ϻmw}ZȷӻE`M!`%S _lt?`-S#@H@Z7$'"@ G@4UG3k=~u!j 0$!~H4 @? Z @4% 7% @ в2  @ @@SB|S!0=Uܗk/߿~ִZFKHQ@OjL Y_ < MO>}uWƒdKi~>|Íg_t_ PI@$C\4#>}c+8qÁ/:L"@` NsȌLpL-p~믄'U<ς?nc4Vmt++ _;ig?+V2ܣG?/f};Jm~W}_4ݪj;yEs'h @@!c/.O)4ObK|(V~b?uɱ^^h@[vތ4?9iG“]>;ɓ|u07e?ӭwihJ!R⹧m~]Gg'AJxS!;O.N~^xarN^U,k7?= "@& C z^o̷׿j߷f+fA>'gy~~wl#ޙ\۳l$_,q%|{eྞkק!~WnqW_z1|=o"{ r@B-sC6?f+Gݧ~e/ӟeS8Y>O{v~w2W&ATv?%H(AdO2B}Snm/gvC~'s7w>r'׬7{ oLkn/V_5t @ J!>ʲ nqssWl|Krqq'TJl5t)'n=,|pwc;ڴt/pF}z$@ F[UnUOy?õWi}m'?W|_+pKNtg? @ H@P1M@]uϿ  @cPes$@ @A(I_ f+e @) )W  @ @`TBm^ B/ @88`p~Jw @RS @ @hі  @K;>0<3w|{}?pED* GZ"Ч;ގ'?ׯ~~h 7gD @c!cp @y*yjVC;ܕA V.i @`Bn s/9>{lð޿ь@';93#x @[x%.nztӏy̑.㸧bt @@B|: @ ?)HY@OzNem7÷NG{a;?rxxhӖt&n @@!S @@Sy0ݸq D-Eܣ Wk j@B|:# AJc\c-ۺ_Vq㮏э[@w͞-߯ 4) 7- x]Z#@gg?z-h\@oT @`_!Ak-)} )UX  @mmO A^oL7X] 0XAҲmV @` @A ͖{Z#'bq?K/ x6Sco4 PGs.F 0 /^}# @@B|#@ @l4 @ еߵ @ @@E!" @ @@B|#@ @pN#@ 0TI| g] @] @ @ !>B&/y @   @ H  @ @ H } ;}"//} @{~_) Y"@ @{x @HD@OPI>lS_NVD_Uy @ @c!cp @ UVθ  PN*|9'G[@'@@B|B2Tl! oP= =@ @7Ⱦ? 0fs}s'@`hcVq%@@kSSX@CB|C!@XʏM@NrM`* Ļ @¼1!otc"л{ c6mlY6hMm}ocWMm+iۧEu^˺U6˚5h [8 @ @R>b @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! 
@ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)ej}|IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/images/states.svg0000664000175000017500000011642400000000000020446 0ustar00zuulzuul00000000000000 Ironic states enroll enroll verifying verifying enroll->verifying manage (via API) verifying->enroll fail manageable manageable verifying->manageable done cleaning cleaning manageable->cleaning provide (via API) manageable->cleaning clean (via API) inspecting inspecting manageable->inspecting inspect (via API) adopting adopting manageable->adopting adopt (via API) cleaning->manageable manage available available cleaning->available done clean failed clean failed cleaning->clean failed fail clean wait clean wait cleaning->clean wait wait inspecting->manageable done inspect failed inspect failed inspecting->inspect failed fail inspect wait inspect wait inspecting->inspect wait wait active active adopting->active done adopt failed adopt failed adopting->adopt failed fail available->manageable manage (via API) deploying deploying available->deploying active (via API) deploying->active done deploy failed deploy failed deploying->deploy failed fail wait call-back wait call-back deploying->wait call-back wait active->deploying rebuild (via API) deleting deleting active->deleting deleted (via API) rescuing rescuing active->rescuing rescue (via API) deleting->cleaning clean error error deleting->error error rescue rescue rescuing->rescue done rescue wait rescue wait rescuing->rescue wait wait rescue failed rescue failed rescuing->rescue failed fail error->deploying rebuild (via API) error->deleting deleted (via API) rescue->deleting deleted (via API) rescue->rescuing rescue (via API) unrescuing unrescuing rescue->unrescuing unrescue (via API) unrescuing->active done unrescue failed unrescue 
failed unrescuing->unrescue failed fail deploy failed->deploying rebuild (via API) deploy failed->deploying active (via API) deploy failed->deleting deleted (via API) wait call-back->deploying resume wait call-back->deleting deleted (via API) wait call-back->deploy failed fail clean failed->manageable manage (via API) clean wait->cleaning resume clean wait->clean failed fail clean wait->clean failed abort (via API) inspect failed->manageable manage (via API) inspect failed->inspecting inspect (via API) inspect wait->manageable done inspect wait->inspect failed fail inspect wait->inspect failed abort (via API) adopt failed->manageable manage (via API) adopt failed->adopting adopt (via API) rescue wait->deleting deleted (via API) rescue wait->rescuing resume rescue wait->rescue failed fail rescue wait->rescue failed abort (via API) rescue failed->deleting deleted (via API) rescue failed->rescuing rescue (via API) rescue failed->unrescuing unrescue (via API) unrescue failed->deleting deleted (via API) unrescue failed->rescuing rescue (via API) unrescue failed->unrescuing unrescue (via API) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/index.rst0000664000175000017500000000501000000000000017002 0ustar00zuulzuul00000000000000================================== Welcome to Ironic's documentation! ================================== Introduction ============ Ironic is an OpenStack project which provisions bare metal (as opposed to virtual) machines. It may be used independently or as part of an OpenStack Cloud, and integrates with the OpenStack Identity (keystone), Compute (nova), Network (neutron), Image (glance), and Object (swift) services. The Bare Metal service manages hardware through both common (eg. PXE and IPMI) and vendor-specific remote management protocols. 
It provides the cloud operator with a unified interface to a heterogeneous fleet of servers while also providing the Compute service with an interface that allows physical servers to be managed as though they were virtual machines. This documentation is continually updated and may not represent the state of the project at any specific prior release. To access documentation for a previous release of ironic, append the OpenStack release name to the URL; for example, the ``ocata`` release is available at https://docs.openstack.org/ironic/ocata/. Found a bug in one of our projects? Please see :doc:`/contributor/bugs`. Would like to engage with the community? See :doc:`/contributor/community`. Installation Guide ================== .. toctree:: :maxdepth: 2 install/index install/standalone admin/upgrade-guide User Guide ========== .. toctree:: :maxdepth: 3 user/index Administrator Guide =================== .. toctree:: :maxdepth: 3 admin/drivers .. toctree:: :maxdepth: 2 admin/index Configuration Guide =================== .. toctree:: :maxdepth: 2 configuration/index Bare Metal API References ========================= Ironic's REST API has changed since its first release, and continues to evolve to meet the changing needs of the community. Here we provide a conceptual guide as well as more detailed reference documentation. .. toctree:: :maxdepth: 1 API Concept Guide API Reference (latest) API Version History Command References ================== Here are references for commands not elsewhere documented. .. toctree:: :maxdepth: 2 cli/index Contributor Guide ================= .. toctree:: :maxdepth: 2 contributor/index Release Notes ============= `Release Notes `_ .. 
only:: html Indices and tables ================== * :ref:`genindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8946667 ironic-20.1.0/doc/source/install/0000775000175000017500000000000000000000000016613 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/advanced.rst0000664000175000017500000000053100000000000021111 0ustar00zuulzuul00000000000000.. _advanced: Advanced features ================= .. include:: include/local-boot-partition-images.inc .. include:: include/root-device-hints.inc .. include:: include/kernel-boot-parameters.inc .. include:: include/boot-mode.inc .. include:: include/disk-label.inc .. include:: include/notifications.inc .. include:: include/console.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/configdrive.rst0000664000175000017500000001233600000000000021651 0ustar00zuulzuul00000000000000.. _configdrive: Enabling the configuration drive (configdrive) ============================================== The Bare Metal service supports exposing a configuration drive image to the instances. The configuration drive is used to store instance-specific metadata and is present to the instance as a disk partition labeled ``config-2``. The configuration drive has a maximum size of 64MB. One use case for using the configuration drive is to expose a networking configuration when you do not use DHCP to assign IP addresses to instances. The configuration drive is usually used in conjunction with the Compute service, but the Bare Metal service also offers a standalone way of using it. The following sections will describe both methods. 
When used with Compute service ------------------------------ To enable the configuration drive for a specific request, pass ``--config-drive true`` parameter to the :command:`nova boot` command, for example:: nova boot --config-drive true --flavor baremetal --image test-image instance-1 It's also possible to enable the configuration drive automatically on all instances by configuring the ``OpenStack Compute service`` to always create a configuration drive by setting the following option in the ``/etc/nova/nova.conf`` file, for example:: [DEFAULT] ... force_config_drive=True In some cases, you may wish to pass a user customized script when deploying an instance. To do this, pass ``--user-data /path/to/file`` to the :command:`nova boot` command. When used standalone -------------------- When used without the Compute service, the operator needs to create a configuration drive and provide the file or HTTP URL to the Bare Metal service. See :ref:`deploy-configdrive` for details. Configuration drive storage in an object store ---------------------------------------------- Under normal circumstances, the configuration drive can be stored in the Bare Metal service when the size is less than 64KB. Optionally, if the size is larger than 64KB there is support to store it in a swift endpoint. Both swift and radosgw use swift-style APIs. The following option in ``/etc/ironic/ironic.conf`` enables swift as an object store backend to store config drive. This uses the Identity service to establish a session between the Bare Metal service and the Object Storage service. :: [deploy] ... configdrive_use_object_store = True Use the following options in ``/etc/ironic/ironic.conf`` to enable radosgw. Credentials in the swift section are needed because radosgw will not use the Identity service and relies on radosgw's username and password authentication instead. :: [deploy] ... configdrive_use_object_store = True [swift] ... 
username = USERNAME password = PASSWORD auth_url = http://RADOSGW_IP:8000/auth/v1 If the :ref:`direct-deploy` is being used, edit ``/etc/glance/glance-api.conf`` to store the instance images in respective object store (radosgw or swift) as well:: [glance_store] ... swift_store_user = USERNAME swift_store_key = PASSWORD swift_store_auth_address = http://RADOSGW_OR_SWIFT_IP:PORT/auth/v1 Accessing the configuration drive data -------------------------------------- When the configuration drive is enabled, the Bare Metal service will create a partition on the instance disk and write the configuration drive image onto it. The configuration drive must be mounted before use. This is performed automatically by many tools, such as cloud-init and cloudbase-init. To mount it manually on a Linux distribution that supports accessing devices by labels, simply run the following:: mkdir -p /mnt/config mount /dev/disk/by-label/config-2 /mnt/config If the guest OS doesn't support accessing devices by labels, you can use other tools such as ``blkid`` to identify which device corresponds to the configuration drive and mount it, for example:: CONFIG_DEV=$(blkid -t LABEL="config-2" -odevice) mkdir -p /mnt/config mount $CONFIG_DEV /mnt/config Cloud-init integration ---------------------- The configuration drive can be especially useful when used with `cloud-init `_, but in order to use it we should follow some rules: * ``Cloud-init`` data should be organized in the `expected format`_. * Since the Bare Metal service uses a disk partition as the configuration drive, it will only work with `cloud-init version >= 0.7.5 `_. 
* ``Cloud-init`` has a collection of data source modules, so when building the image with `disk-image-builder`_ we have to define ``DIB_CLOUD_INIT_DATASOURCES`` environment variable and set the appropriate sources to enable the configuration drive, for example:: DIB_CLOUD_INIT_DATASOURCES="ConfigDrive, OpenStack" disk-image-create -o fedora-cloud-image fedora baremetal For more information see `how to configure cloud-init data sources `_. .. _`expected format`: https://docs.openstack.org/nova/latest/user/vendordata.html .. _disk-image-builder: https://docs.openstack.org/diskimage-builder/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/configure-cleaning.rst0000664000175000017500000000212000000000000023077 0ustar00zuulzuul00000000000000.. _configure-cleaning: Configure the Bare Metal service for cleaning ============================================= .. note:: If you configured the Bare Metal service to do :ref:`automated_cleaning` (which is enabled by default), you will need to set the ``cleaning_network`` configuration option. #. Note the network UUID (the `id` field) of the network you created in :ref:`configure-networking` or another network you created for cleaning: .. code-block:: console $ openstack network list #. Configure the cleaning network UUID via the ``cleaning_network`` option in the Bare Metal service configuration file (``/etc/ironic/ironic.conf``). In the following, replace ``NETWORK_UUID`` with the UUID you noted in the previous step: .. code-block:: ini [neutron] cleaning_network = NETWORK_UUID #. Restart the Bare Metal service's ironic-conductor: .. 
code-block:: console Fedora/RHEL8/CentOS8/SUSE: sudo systemctl restart openstack-ironic-conductor Ubuntu: sudo service ironic-conductor restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/configure-compute.rst0000664000175000017500000001277100000000000023010 0ustar00zuulzuul00000000000000Configure the Compute service to use the Bare Metal service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Compute service needs to be configured to use the Bare Metal service's driver. The configuration file for the Compute service is typically located at ``/etc/nova/nova.conf``. .. note:: As of the Newton release, it is possible to have multiple nova-compute services running the ironic virtual driver (in nova) to provide redundancy. Bare metal nodes are mapped to the services via a hash ring. If a service goes down, the available bare metal nodes are remapped to different services. Once active, a node will stay mapped to the same nova-compute even when it goes down. The node is unable to be managed through the Compute API until the service responsible returns to an active state. The following configuration file must be modified on the Compute service's controller nodes and compute nodes. #. Change these configuration options in the Compute service configuration file (for example, ``/etc/nova/nova.conf``): .. code-block:: ini [default] # Defines which driver to use for controlling virtualization. # Enable the ironic virt driver for this compute instance. compute_driver=ironic.IronicDriver # Amount of memory in MB to reserve for the host so that it is always # available to host processes. # It is impossible to reserve any memory on bare metal nodes, so set # this to zero. reserved_host_memory_mb=0 [filter_scheduler] # Enables querying of individual hosts for instance information. # Not possible for bare metal nodes, so set it to False. 
track_instance_changes=False [scheduler] # This value controls how often (in seconds) the scheduler should # attempt to discover new hosts that have been added to cells. # If negative (the default), no automatic discovery will occur. # As each bare metal node is represented by a separate host, it has # to be discovered before the Compute service can deploy on it. # The value here has to be carefully chosen based on a compromise # between the enrollment speed and the load on the Compute scheduler. # The recommended value of 2 minutes matches how often the Compute # service polls the Bare Metal service for node information. discover_hosts_in_cells_interval=120 .. note:: The alternative to setting the ``discover_hosts_in_cells_interval`` option is to run the following command on any Compute controller node after each node is enrolled:: nova-manage cell_v2 discover_hosts --by-service #. Consider enabling the following option on controller nodes: .. code-block:: ini [filter_scheduler] # Enabling this option is beneficial as it reduces re-scheduling events # for ironic nodes when scheduling is based on resource classes, # especially for mixed hypervisor case with host_subset_size = 1. # However enabling it will also make packing of VMs on hypervisors # less dense even when scheduling weights are completely disabled. #shuffle_best_same_weighed_hosts = false #. Carefully consider the following option: .. code-block:: ini [compute] # This option will cause nova-compute to set itself to a disabled state # if a certain number of consecutive build failures occur. This will # prevent the scheduler from continuing to send builds to a compute # service that is consistently failing. In the case of bare metal # provisioning, however, a compute service is rarely the cause of build # failures. Furthermore, bare metal nodes, managed by a disabled # compute service, will be remapped to a different one. 
That may cause # the second compute service to also be disabled, and so on, until no # compute services are active. # If this is not the desired behavior, consider increasing this value or # setting it to 0 to disable this behavior completely. #consecutive_build_service_disable_threshold = 10 #. Change these configuration options in the ``ironic`` section. Replace: - ``IRONIC_PASSWORD`` with the password you chose for the ``ironic`` user in the Identity Service - ``IRONIC_NODE`` with the hostname or IP address of the ironic-api node - ``IDENTITY_IP`` with the IP of the Identity server .. code-block:: ini [ironic] # Ironic authentication type auth_type=password # Keystone API endpoint auth_url=http://IDENTITY_IP:5000/v3 # Ironic keystone project name project_name=service # Ironic keystone admin name username=ironic # Ironic keystone admin password password=IRONIC_PASSWORD # Ironic keystone project domain # or set project_domain_id project_domain_name=Default # Ironic keystone user domain # or set user_domain_id user_domain_name=Default #. On the Compute service's controller nodes, restart the ``nova-scheduler`` process: .. code-block:: console Fedora/RHEL8/CentOS8/SUSE: sudo systemctl restart openstack-nova-scheduler Ubuntu: sudo service nova-scheduler restart #. On the Compute service's compute nodes, restart the ``nova-compute`` process: .. code-block:: console Fedora/RHEL8/CentOS8/SUSE: sudo systemctl restart openstack-nova-compute Ubuntu: sudo service nova-compute restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/configure-glance-images.rst0000664000175000017500000000614000000000000024021 0ustar00zuulzuul00000000000000.. _image-requirements: Add images to the Image service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Build or download the user images as described in :doc:`/user/creating-images`. #. 
Add the user images to the Image service Load all the images created in the below steps into the Image service, and note the image UUIDs in the Image service for each one as it is generated. - For *whole disk images* just upload the image: .. code-block:: console $ openstack image create my-whole-disk-image --public \ --disk-format qcow2 --container-format bare \ --file my-whole-disk-image.qcow2 .. warning:: The kernel/ramdisk pair must not be set for whole disk images, otherwise they'll be mistaken for partition images. - For *partition images* to be used only with *local boot* (the default) the ``img_type`` property must be set: .. code-block:: console $ openstack image create my-image --public \ --disk-format qcow2 --container-format bare \ --property img_type=partition --file my-image.qcow2 - For *partition images* to be used with both *local* and *network* boot: Add the kernel and ramdisk images to the Image service: .. code-block:: console $ openstack image create my-kernel --public \ --disk-format aki --container-format aki --file my-image.vmlinuz Store the image uuid obtained from the above step as ``MY_VMLINUZ_UUID``. .. code-block:: console $ openstack image create my-image.initrd --public \ --disk-format ari --container-format ari --file my-image.initrd Store the image UUID obtained from the above step as ``MY_INITRD_UUID``. Add the *my-image* to the Image service which is going to be the OS that the user is going to run. Also associate the above created images with this OS image. These two operations can be done by executing the following command: .. code-block:: console $ openstack image create my-image --public \ --disk-format qcow2 --container-format bare --property \ kernel_id=$MY_VMLINUZ_UUID --property \ ramdisk_id=$MY_INITRD_UUID --file my-image.qcow2 #. Build or download the deploy images The deploy images are used initially for preparing the server (creating disk partitions) before the actual OS can be deployed. 
There are several methods to build or download deploy images, please read the :ref:`deploy-ramdisk` section. #. Add the deploy images to the Image service Add the deployment kernel and ramdisk images to the Image service: .. code-block:: console $ openstack image create deploy-vmlinuz --public \ --disk-format aki --container-format aki \ --file ironic-python-agent.vmlinuz Store the image UUID obtained from the above step as ``DEPLOY_VMLINUZ_UUID``. .. code-block:: console $ openstack image create deploy-initrd --public \ --disk-format ari --container-format ari \ --file ironic-python-agent.initramfs Store the image UUID obtained from the above step as ``DEPLOY_INITRD_UUID``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/configure-glance-swift.rst0000664000175000017500000000577300000000000023723 0ustar00zuulzuul00000000000000.. _image-store: Configure the Image service for temporary URLs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some drivers of the Baremetal service (in particular, any drivers using :ref:`direct-deploy` or :ref:`ansible-deploy` interfaces, and some virtual media drivers) require target user images to be available over clean HTTP(S) URL with no authentication involved (neither username/password-based, nor token-based). When using the Baremetal service integrated in OpenStack, this can be achieved by specific configuration of the Image service and Object Storage service as described below. #. Configure the Image service to have object storage as a backend for storing images. For more details, please refer to the Image service configuration guide. .. note:: When using Ceph+RadosGW for Object Storage service, images stored in Image service must be available over Object Storage service as well. #. Enable TempURLs for the Object Storage account used by the Image service for storing images in the Object Storage service. #. Check if TempURLs are enabled: .. 
code-block:: shell # executed under credentials of the user used by Image service # to access Object Storage service $ openstack object store account show +------------+---------------------------------------+ | Field | Value | +------------+---------------------------------------+ | Account | AUTH_bc39f1d9dcf9486899088007789ae643 | | Bytes | 536661727 | | Containers | 1 | | Objects | 19 | | properties | Temp-Url-Key='secret' | +------------+---------------------------------------+ #. If property ``Temp-Url-Key`` is set, note its value. #. If property ``Temp-Url-Key`` is not set, you have to configure it (``secret`` is used in the example below for the value): .. code-block:: shell $ openstack object store account set --property Temp-Url-Key=secret #. Optionally, configure the ironic-conductor service. The default configuration assumes that: #. the Object Storage service is implemented by :swift-doc:`swift <>`, #. the Object Storage service URL is available from the service catalog, #. the project, used by the Image service to access the Object Storage, is the same as the project, used by the Bare Metal service to access it, #. the container, used by the Image service, is called ``glance``. If any of these assumptions do not hold, you may want to change your configuration file (typically located at ``/etc/ironic/ironic.conf``), for example: .. code-block:: ini [glance] swift_endpoint_url = http://openstack/swift swift_account = AUTH_bc39f1d9dcf9486899088007789ae643 swift_container = glance swift_temp_url_key = secret #. (Re)start the ironic-conductor service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/configure-identity.rst0000664000175000017500000000672700000000000023171 0ustar00zuulzuul00000000000000Configure the Identity service for the Bare Metal service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. 
Create the Bare Metal service user (for example, ``ironic``). The service uses this to authenticate with the Identity service. Use the ``service`` tenant and give the user the ``admin`` role: .. code-block:: console $ openstack user create --password IRONIC_PASSWORD \ --email ironic@example.com ironic $ openstack role add --project service --user ironic admin #. You must register the Bare Metal service with the Identity service so that other OpenStack services can locate it. To register the service: .. code-block:: console $ openstack service create --name ironic --description \ "Ironic baremetal provisioning service" baremetal #. Use the ``id`` property that is returned from the Identity service when registering the service (above), to create the endpoint, and replace ``IRONIC_NODE`` with your Bare Metal service's API node: .. code-block:: console $ openstack endpoint create --region RegionOne \ baremetal admin http://$IRONIC_NODE:6385 $ openstack endpoint create --region RegionOne \ baremetal public http://$IRONIC_NODE:6385 $ openstack endpoint create --region RegionOne \ baremetal internal http://$IRONIC_NODE:6385 #. You may delegate limited privileges related to the Bare Metal service to your Users by creating Roles with the OpenStack Identity service. By default, the Bare Metal service expects the "baremetal_admin" and "baremetal_observer" Roles to exist, in addition to the default "admin" Role. There is no negative consequence if you choose not to create these Roles. They can be created with the following commands: .. code-block:: console $ openstack role create baremetal_admin $ openstack role create baremetal_observer If you choose to customize the names of Roles used with the Bare Metal service, do so by changing the "is_member", "is_observer", and "is_admin" policy settings in ``/etc/ironic/policy.yaml``. 
More complete documentation on managing Users and Roles within your OpenStack deployment are outside the scope of this document, but may be found :keystone-doc:`here `. #. You can further restrict access to the Bare Metal service by creating a separate "baremetal" Project, so that Bare Metal resources (Nodes, Ports, etc) are only accessible to members of this Project: .. code-block:: console $ openstack project create baremetal At this point, you may grant read-only access to the Bare Metal service API without granting any other access by issuing the following commands: .. code-block:: console $ openstack user create \ --domain default --project-domain default --project baremetal \ --password PASSWORD USERNAME $ openstack role add \ --user-domain default --project-domain default --project baremetal \ --user USERNAME baremetal_observer #. Further documentation is available elsewhere for the ``openstack`` :python-openstackclient-doc:`command-line client ` and the :keystone-doc:`Identity ` service. A :doc:`policy.yaml.sample ` file, which enumerates the service's default policies, is provided for your convenience with the Bare Metal Service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/configure-integration.rst0000664000175000017500000000062500000000000023652 0ustar00zuulzuul00000000000000========================================= Integration with other OpenStack services ========================================= .. 
toctree:: :maxdepth: 1 configure-identity configure-compute configure-networking configure-ipv6-networking configure-glance-swift enabling-https configure-cleaning configure-tenant-networks.rst configure-glance-images configure-nova-flavors ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/configure-ipmi.rst0000664000175000017500000000770600000000000022274 0ustar00zuulzuul00000000000000Configuring IPMI support ------------------------ Installing ipmitool command ~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable one of the drivers that use IPMI_ protocol for power and management actions (for example, ``ipmi``), the ``ipmitool`` command must be present on the service node(s) where ``ironic-conductor`` is running. On most distros, it is provided as part of the ``ipmitool`` package. Source code is available at http://ipmitool.sourceforge.net/. .. warning:: Certain distros, notably Mac OS X and SLES, install ``openipmi`` instead of ``ipmitool`` by default. This driver is not compatible with ``openipmi`` as it relies on error handling options not provided by this tool. Please refer to the :doc:`/admin/drivers/ipmitool` for information on how to configure and use IPMItool-based drivers. Configuring hardware ~~~~~~~~~~~~~~~~~~~~ IPMI is a relatively old protocol and may require additional set up on the hardware side that the Bare Metal service cannot do automatically: #. Make sure IPMI is enabled and the account you use have the permissions to change power and boot devices. By default the adminstrator rights are expected, you can change it: see :ref:`ipmi-priv-level`. #. Make sure the cipher suites are configured for maximum security. Suite 17 is recommended, 3 can be used if it's not available. Cipher suite 0 **must** be disabled as it provides unauthenticated access to the BMC. .. seealso:: :ref:`ipmi-cipher-suites` #. 
Make sure the boot mode correspond to the expected boot mode on the node (see :ref:`boot_mode_support`). Some hardware is able to change the boot mode to the requested by Ironic, some does not. Validation and troubleshooting ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Check that you can connect to, and authenticate with, the IPMI controller in your bare metal server by running ``ipmitool``:: ipmitool -I lanplus -H -U -P chassis power status where ```` is the IP of the IPMI controller you want to access. This is not the bare metal node's main IP. The IPMI controller should have its own unique IP. If the above command doesn't return the power status of the bare metal server, check that - ``ipmitool`` is installed and is available via the ``$PATH`` environment variable. - The IPMI controller on your bare metal server is turned on. - The IPMI controller credentials and IP address passed in the command are correct. - The conductor node has a route to the IPMI controller. This can be checked by just pinging the IPMI controller IP from the conductor node. IPMI configuration ~~~~~~~~~~~~~~~~~~ If there are slow or unresponsive BMCs in the environment, the ``min_command_interval`` configuration option in the ``[ipmi]`` section may need to be raised. The default is fairly conservative, as setting this timeout too low can cause older BMCs to crash and require a hard-reset. .. _ipmi-sensor-data: Collecting sensor data ~~~~~~~~~~~~~~~~~~~~~~ Bare Metal service supports sending IPMI sensor data to Telemetry with certain hardware types, such as ``ipmi``, ``ilo`` and ``irmc``. By default, support for sending IPMI sensor data to Telemetry is disabled. If you want to enable it, you should make the following two changes in ``ironic.conf``: .. code-block:: ini [conductor] send_sensor_data = true [oslo_messaging_notifications] driver = messagingv2 If you want to customize the sensor types which will be sent to Telemetry, change the ``send_sensor_data_types`` option. 
For example, the below settings will send information about temperature, fan, voltage from sensors to the Telemetry service: .. code-block:: ini send_sensor_data_types=Temperature,Fan,Voltage Supported sensor types are defined by the Telemetry service, currently these are ``Temperature``, ``Fan``, ``Voltage``, ``Current``. Special value ``All`` (the default) designates all supported sensor types. .. _IPMI: https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/configure-ipv6-networking.rst0000664000175000017500000001375200000000000024405 0ustar00zuulzuul00000000000000Configuring services for bare metal provisioning using IPv6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Use of IPv6 addressing for baremetal provisioning requires additional configuration. This page covers the IPv6 specifics only. Please refer to :doc:`/install/configure-tenant-networks` and :doc:`/install/configure-networking` for general networking configuration. Configure ironic PXE driver for provisioning using IPv6 addressing ================================================================== The PXE drivers operate in such a way that they are able to utilize both IPv4 and IPv6 addresses based upon the deployment's operating state and configuration. Internally, the drivers attempt to prepare configuration options for both formats, which allows ports which are IPv6 only to automatically receieve boot parameters. As a result of this, it is critical that the ``[DEFAULT]my_ipv6`` configuration parameter is set to the conductor's IPv6 address. This option is unique per conductor, and due to the nature of automatic address assignment, it cannot be "guessed" by the software. 
When creating the Baremetal Service network(s) and subnet(s) in the Networking Service, subnets should have ``ipv6-address-mode`` set to ``dhcpv6-stateless`` and ``ip-version`` set to ``6``.
Note:: Support for multiple address reservations requires dnsmasq version 2.81 or later. Some distributions may backport this feature to earlier dnsmasq version as part of the packaging, check the distributions release notes. If a different (not dnsmasq) DHCPv6 server backend is used with the Networking service, use of multiple address reservations might not work. Using the ``flat`` network interface ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Due to the "identity-association" challenges with DHCPv6 provisioning using the ``flat`` network interface is not recommended. When ironic operates with the ``flat`` network interface the server instance port is used for provisioning and other operations. Ironic will not use multiple address reservations in this scenario. Because of this **it will not work in most cases**. Using the ``neutron`` network interface ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When using the ``neutron`` network interface the Baremetal Service will allocate multiple IPv6 addresses (4 addresses per port by default) on the service networks used for provisioning, cleaning, rescue and introspection. The number of addresses allocated can be controlled via the ``[neutron]/dhcpv6_stateful_address_count`` option in the Bare Metal Service's configuration file (``/etc/ironic/ironic.conf``). Using multiple address reservations ensures that the DHCPv6 server can lease addresses to each step. To enable IPv6 provisioning on neutron *flat* provider networks with no switch management, the ``local_link_connection`` field of baremetal ports must be set to ``{'network_type': 'unmanaged'}``. 
The following example shows how to set the local_link_connection for operation on unmanaged networks:: baremetal port set \ --local-link-connection network_type=unmanaged The use of multiple IPv6 addresses must also be enabled in the Networking Service's dhcp agent configuration (``/etc/neutron/dhcp_agent.ini``) by setting the option ``[DEFAULT]/dnsmasq_enable_addr6_list`` to ``True`` (default ``False`` in Ussuri release). .. Note:: Support for multiple IPv6 address reservations in the dnsmasq backend was added to the Networking Service Ussuri release. It was also backported to the stable Train release. Creating networks and subnets in the Networking Service ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When creating the ironic service network(s) and subnet(s) in the Networking Service, subnets should have ``ipv6-address-mode`` set to ``dhcpv6-stateful`` and ``ip-version`` set to ``6``. Depending on whether a router in the Networking Service is providing RA's (Router Advertisements) or not, the ``ipv6-ra-mode`` for the subnet(s) should be set to either ``dhcpv6-stateful`` or be left unset. .. Note:: If ``ipv6-ra-mode`` is left unset, an external router on the network is expected to provide RA's with the appropriate flags set for managed addressing and other configuration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/configure-networking.rst0000664000175000017500000001227400000000000023521 0ustar00zuulzuul00000000000000.. _configure-networking: Configure the Networking service for bare metal provisioning ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You need to configure Networking so that the bare metal server can communicate with the Networking service for DHCP, PXE boot and other requirements. This section covers configuring Networking for a single flat network for bare metal provisioning. 
It is recommended to use the baremetal ML2 mechanism driver and L2 agent for proper integration with the Networking service. Documentation regarding installation and configuration of the baremetal mechanism driver and L2 agent is available :networking-baremetal-doc:`here `. For use with :neutron-doc:`routed networks ` the baremetal ML2 components are required. .. Note:: When the baremetal ML2 components are *not* used, ports in the Networking service will have status: ``DOWN``, and binding_vif_type: ``binding_failed``. This was always the status for Bare Metal service ``flat`` network interface ports prior to the introduction of the baremetal ML2 integration. For a non-routed network, bare metal servers can still be deployed and are functional, despite this port binding state in the Networking service. You will also need to provide Bare Metal service with the MAC address(es) of each node that it is provisioning; Bare Metal service in turn will pass this information to Networking service for DHCP and PXE boot configuration. An example of this is shown in the :ref:`enrollment` section. #. Install the networking-baremetal ML2 mechanism driver and L2 agent in the Networking service. #. Edit ``/etc/neutron/plugins/ml2/ml2_conf.ini`` and modify these: .. code-block:: ini [ml2] type_drivers = flat tenant_network_types = flat mechanism_drivers = openvswitch,baremetal [ml2_type_flat] flat_networks = physnet1 [securitygroup] firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver enable_security_group = True [ovs] bridge_mappings = physnet1:br-eth2 # Replace eth2 with the interface on the neutron node which you # are using to connect to the bare metal server #. Restart the ``neutron-server`` service, to load the new configuration. #. Create and edit ``/etc/neutron/plugins/ml2/ironic_neutron_agent.ini`` and add the required configuration. For example: .. 
code-block:: ini [ironic] project_domain_name = Default project_name = service user_domain_name = Default password = password username = ironic auth_url = http://identity-server.example.com/identity auth_type = password region_name = RegionOne #. Make sure the ``ironic-neutron-agent`` service is started. #. If neutron-openvswitch-agent runs with ``ovs_neutron_plugin.ini`` as the input config-file, edit ``ovs_neutron_plugin.ini`` to configure the bridge mappings by adding the [ovs] section described in the previous step, and restart the neutron-openvswitch-agent. #. Add the integration bridge to Open vSwitch: .. code-block:: console $ ovs-vsctl add-br br-int #. Create the br-eth2 network bridge to handle communication between the OpenStack services (and the Bare Metal services) and the bare metal nodes using eth2. Replace eth2 with the interface on the network node which you are using to connect to the Bare Metal service: .. code-block:: console $ ovs-vsctl add-br br-eth2 $ ovs-vsctl add-port br-eth2 eth2 #. Restart the Open vSwitch agent: .. code-block:: console # service neutron-plugin-openvswitch-agent restart #. On restarting the Networking service Open vSwitch agent, the veth pair between the bridges br-int and br-eth2 is automatically created. Your Open vSwitch bridges should look something like this after following the above steps: .. code-block:: console $ ovs-vsctl show Bridge br-int fail_mode: secure Port "int-br-eth2" Interface "int-br-eth2" type: patch options: {peer="phy-br-eth2"} Port br-int Interface br-int type: internal Bridge "br-eth2" Port "phy-br-eth2" Interface "phy-br-eth2" type: patch options: {peer="int-br-eth2"} Port "eth2" Interface "eth2" Port "br-eth2" Interface "br-eth2" type: internal ovs_version: "2.3.0" #. Create the flat network on which you are going to launch the instances: .. 
code-block:: console $ openstack network create --project $TENANT_ID sharednet1 --share \ --provider-network-type flat --provider-physical-network physnet1 #. Create the subnet on the newly created network: .. code-block:: console $ openstack subnet create $SUBNET_NAME --network sharednet1 \ --subnet-range $NETWORK_CIDR --ip-version 4 --gateway $GATEWAY_IP \ --allocation-pool start=$START_IP,end=$END_IP --dhcp ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/configure-nova-flavors.rst0000664000175000017500000001112500000000000023741 0ustar00zuulzuul00000000000000.. _flavor-creation: Create flavors for use with the Bare Metal service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You'll need to create a special bare metal flavor in the Compute service. The flavor is mapped to the bare metal node through the node's ``resource_class`` field (available starting with Bare Metal API version 1.21). A flavor can request *exactly one* instance of a bare metal resource class. Note that when creating the flavor, it's useful to add the ``RAM_MB`` and ``CPU`` properties as a convenience to users, although they are not used for scheduling. The ``DISK_GB`` property is also not used for scheduling, but is still used to determine the root partition size. #. Change these to match your hardware: .. code-block:: console $ RAM_MB=1024 $ CPU=2 $ DISK_GB=100 #. Create the bare metal flavor by executing the following command: .. code-block:: console $ openstack flavor create --ram $RAM_MB --vcpus $CPU --disk $DISK_GB \ my-baremetal-flavor .. note:: You can add ``--id `` to specify an ID for the flavor. See the :python-openstackclient-doc:`docs on this command ` for other options that may be specified. After creation, associate each flavor with one custom resource class. 
The name of a custom resource class that corresponds to a node's resource class (in the Bare Metal service) is: * the bare metal node's resource class all upper-cased * prefixed with ``CUSTOM_`` * all punctuation replaced with an underscore For example, if the resource class is named ``baremetal-small``, associate the flavor with this custom resource class via: .. code-block:: console $ openstack flavor set --property resources:CUSTOM_BAREMETAL_SMALL=1 my-baremetal-flavor Another set of flavor properties must be used to disable scheduling based on standard properties for a bare metal flavor: .. code-block:: console $ openstack flavor set --property resources:VCPU=0 my-baremetal-flavor $ openstack flavor set --property resources:MEMORY_MB=0 my-baremetal-flavor $ openstack flavor set --property resources:DISK_GB=0 my-baremetal-flavor Example ------- If you want to define a class of nodes called ``baremetal.with-GPU``, start with tagging some nodes with it: .. code-block:: console $ baremetal node set --resource-class baremetal.with-GPU .. warning:: It is possible to **add** a resource class to ``active`` nodes, but it is not possible to **replace** an existing resource class on them. Then you can update your flavor to request the resource class instead of the standard properties: .. code-block:: console $ openstack flavor set --property resources:CUSTOM_BAREMETAL_WITH_GPU=1 my-baremetal-flavor $ openstack flavor set --property resources:VCPU=0 my-baremetal-flavor $ openstack flavor set --property resources:MEMORY_MB=0 my-baremetal-flavor $ openstack flavor set --property resources:DISK_GB=0 my-baremetal-flavor Note how ``baremetal.with-GPU`` in the node's ``resource_class`` field becomes ``CUSTOM_BAREMETAL_WITH_GPU`` in the flavor's properties. .. _scheduling-traits: Scheduling based on traits -------------------------- Starting with the Queens release, the Compute service supports scheduling based on qualitative attributes using traits. 
Starting with Bare Metal REST API version 1.37, it is possible to assign a list of traits to each bare metal node. Traits assigned to a bare metal node will be assigned to the corresponding resource provider in the Compute service placement API. When creating a flavor in the Compute service, required traits may be specified via flavor properties. The Compute service will then schedule instances only to bare metal nodes with all of the required traits. Traits can be either standard or custom. Standard traits are listed in the `os_traits library `_. Custom traits must meet the following requirements: * prefixed with ``CUSTOM_`` * contain only upper case characters A to Z, digits 0 to 9, or underscores * no longer than 255 characters in length A bare metal node can have a maximum of 50 traits. Example ^^^^^^^ To add the standard trait ``HW_CPU_X86_VMX`` and a custom trait ``CUSTOM_TRAIT1`` to a node: .. code-block:: console $ baremetal node add trait CUSTOM_TRAIT1 HW_CPU_X86_VMX Then, update the flavor to require these traits: .. code-block:: console $ openstack flavor set --property trait:CUSTOM_TRAIT1=required my-baremetal-flavor $ openstack flavor set --property trait:HW_CPU_X86_VMX=required my-baremetal-flavor ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/configure-pxe.rst0000664000175000017500000004401300000000000022122 0ustar00zuulzuul00000000000000Configuring PXE and iPXE ======================== DHCP server setup ----------------- A DHCP server is required by PXE/iPXE client. You need to follow steps below. #. Set the ``[dhcp]/dhcp_provider`` to ``neutron`` in the Bare Metal Service's configuration file (``/etc/ironic/ironic.conf``): .. note:: Refer :doc:`/install/configure-tenant-networks` for details. 
Refer :doc:`/install/configure-networking` for details about the two preceding steps.
In order to avoid this, TFTPd provides an option to limit the packet size so that they do not get fragmented.
Update the bare metal node with ``boot_mode:uefi`` capability in node's properties field. See :ref:`boot_mode_support` for details. #. Make sure that bare metal node is configured to boot in UEFI boot mode and boot device is set to network/pxe. .. note:: Some drivers, e.g. ``ilo``, ``irmc`` and ``redfish``, support automatic setting of the boot mode during deployment. This step is not required for them. Please check :doc:`../admin/drivers` for information on whether your driver requires manual UEFI configuration. Legacy BIOS - Syslinux setup ---------------------------- In order to deploy instances with PXE on bare metal using Legacy BIOS boot mode, perform these additional steps on the ironic conductor node. #. Install the syslinux package with the PXE boot images: Ubuntu (16.04LTS and later):: sudo apt-get install syslinux-common pxelinux RHEL8/CentOS8/Fedora:: sudo dnf install syslinux-tftpboot SUSE:: sudo zypper install syslinux #. Copy the PXE image to ``/tftpboot``. The PXE image might be found at [1]_: Ubuntu (16.04LTS and later):: sudo cp /usr/lib/PXELINUX/pxelinux.0 /tftpboot RHEL8/CentOS8/SUSE:: sudo cp /usr/share/syslinux/pxelinux.0 /tftpboot #. If whole disk images need to be deployed via PXE-netboot, copy the chain.c32 image to ``/tftpboot`` to support it: Ubuntu (16.04LTS and later):: sudo cp /usr/lib/syslinux/modules/bios/chain.c32 /tftpboot Fedora:: sudo cp /boot/extlinux/chain.c32 /tftpboot RHEL8/CentOS8/SUSE:: sudo cp /usr/share/syslinux/chain.c32 /tftpboot/ #. If the version of syslinux is **greater than** 4 we also need to make sure that we copy the library modules into the ``/tftpboot`` directory [2]_ [1]_. For example, for Ubuntu run:: sudo cp /usr/lib/syslinux/modules/*/ldlinux.* /tftpboot #. Update the bare metal node with ``boot_mode:bios`` capability in node's properties field. See :ref:`boot_mode_support` for details. #. Make sure that bare metal node is configured to boot in Legacy BIOS boot mode and boot device is set to network/pxe. .. 
[1] On **Fedora/RHEL** the ``syslinux-tftpboot`` package already installs the library modules and PXE image at ``/tftpboot``. If the TFTP server is configured to listen to a different directory you should copy the contents of ``/tftpboot`` to the configured directory .. [2] http://www.syslinux.org/wiki/index.php/Library_modules iPXE setup ---------- If you will be using iPXE to boot instead of PXE, iPXE needs to be set up on the Bare Metal service node(s) where ``ironic-conductor`` is running. #. Make sure these directories exist and can be written to by the user the ``ironic-conductor`` is running as. For example:: sudo mkdir -p /tftpboot sudo mkdir -p /httpboot sudo chown -R ironic /tftpboot sudo chown -R ironic /httpboot #. Create a map file in the tftp boot directory (``/tftpboot``):: echo 'r ^([^/]) /tftpboot/\1' > /tftpboot/map-file echo 'r ^(/tftpboot/) /tftpboot/\2' >> /tftpboot/map-file .. _HTTP server: #. Set up TFTP and HTTP servers. These servers should be running and configured to use the local /tftpboot and /httpboot directories respectively, as their root directories. (Setting up these servers is outside the scope of this install guide.) These root directories need to be mounted locally to the ``ironic-conductor`` services, so that the services can access them. The Bare Metal service's configuration file (/etc/ironic/ironic.conf) should be edited accordingly to specify the TFTP and HTTP root directories and server addresses. For example: .. code-block:: ini [pxe] # Ironic compute node's tftp root path. (string value) tftp_root=/tftpboot # IP address of Ironic compute node's tftp server. (string # value) tftp_server=192.168.0.2 [deploy] # Ironic compute node's http root path. (string value) http_root=/httpboot # Ironic compute node's HTTP server URL. Example: # http://192.1.2.3:8080 (string value) http_url=http://192.168.0.2:8080 See also: :ref:`l3-external-ip`. #. 
Setting the iPXE parameters noted in the code block above to no value, in other words setting a line to something like ``ipxe_bootfile_name=`` will result in ironic falling back to the default values of the non-iPXE PXE settings. This is for backward compatibility.
As time moved on, iPXE functionality was moved to its own ``ipxe`` boot interface. If you want to emulate that same behavior, set the following in the configuration file (/etc/ironic/ironic.conf):
Note that when SSL is enabled on the Object Storage service you have to ensure that iPXE firmware on the nodes can indeed boot from generated temporary URLs that use HTTPS protocol. #. Restart the ``ironic-conductor`` process: Fedora/RHEL8/CentOS8/SUSE:: sudo systemctl restart openstack-ironic-conductor Ubuntu:: sudo service ironic-conductor restart PXE multi-architecture setup ---------------------------- It is possible to deploy servers of different architecture by one conductor. To use this feature, architecture-specific boot and template files must be configured using the configuration options ``[pxe]pxe_bootfile_name_by_arch`` and ``[pxe]pxe_config_template_by_arch`` respectively, in the Bare Metal service's configuration file (/etc/ironic/ironic.conf). These two options are dictionary values; the key is the architecture and the value is the boot (or config template) file. A node's ``cpu_arch`` property is used as the key to get the appropriate boot file and template file. If the node's ``cpu_arch`` is not in the dictionary, the configuration options (in [pxe] group) ``pxe_bootfile_name``, ``pxe_config_template``, ``uefi_pxe_bootfile_name`` and ``uefi_pxe_config_template`` will be used instead. In the following example, since 'x86' and 'x86_64' keys are not in the ``pxe_bootfile_name_by_arch`` or ``pxe_config_template_by_arch`` options, x86 and x86_64 nodes will be deployed by 'pxelinux.0' or 'bootx64.efi', depending on the node's ``boot_mode`` capability ('bios' or 'uefi'). However, aarch64 nodes will be deployed by 'grubaa64.efi', and ppc64 nodes by 'bootppc64':: [pxe] # Bootfile DHCP parameter. (string value) pxe_bootfile_name=pxelinux.0 # On ironic-conductor node, template file for PXE # configuration. (string value) pxe_config_template = $pybasedir/drivers/modules/pxe_config.template # Bootfile DHCP parameter for UEFI boot mode. 
For example, grubaa64.efi shipped with CentOS7 does not support ``linuxefi`` and ``initrdefi`` commands, you'll need to switch to use ``linux`` and ``initrd`` command instead. .. note:: A ``[pxe]ipxe_bootfile_name_by_arch`` setting is available for multi-arch iPXE based deployment, and defaults to the same behavior as the comparable ``[pxe]pxe_bootfile_by_arch`` setting for standard PXE.
For example, the following configuration sets the overall timeout to 60 minutes, allowing two retries after 20 minutes: .. code-block:: ini [conductor] deploy_callback_timeout = 3600 [pxe] boot_retry_timeout = 1200 PXE artifacts ------------- Ironic features the capability to load PXE artifacts into the conductor startup, minimizing the need for external installation and configuration management tooling from having to do additional work to facilitate. While this is an advanced feature, and destination file names must match existing bootloader configured filenames. For example, if using iPXE and GRUB across interfaces, you may desire a configuration similar to this example. .. code-block:: ini [pxe] loader_file_paths = ipxe.efi:/usr/share/ipxe/ipxe-snponly-x86_64.efi,undionly.kpxe:/usr/share/ipxe/undionly.kpxe,bootx64.efi,/boot/efi/EFI/boot/grubx64.efi,bootx64.efi:/boot/efi/EFI/boot/BOOTX64.EFI If you choose to use relative paths as part of your destination, those paths will be created using configuration parameter ``[pxe]dir_permission`` where as actual files copied are set with the configuration parameter ``[pxe]file_permission``. Absolute destination paths are not supported and will result in ironic failing to start up as it is a misconfiguration of the deployment. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/configure-tenant-networks.rst0000664000175000017500000001531100000000000024470 0ustar00zuulzuul00000000000000.. _configure-tenant-networks: Configure tenant networks ========================= Below is an example flow of how to set up the Bare Metal service so that node provisioning will happen in a multi-tenant environment (which means using the ``neutron`` network interface as stated above): #. 
Network interfaces can be enabled on ironic-conductor by adding them to the ``enabled_network_interfaces`` configuration option under the ``default`` section of the configuration file:: [DEFAULT] ... enabled_network_interfaces=noop,flat,neutron Keep in mind that, ideally, all ironic-conductors should have the same list of enabled network interfaces, but it may not be the case during ironic-conductor upgrades. This may cause problems if one of the ironic-conductors dies and some node that is taken over is mapped to an ironic-conductor that does not support the node's network interface. Any actions that involve calling the node's driver will fail until that network interface is installed and enabled on that ironic-conductor. #. It is recommended to set the default network interface via the ``default_network_interface`` configuration option under the ``default`` section of the configuration file:: [DEFAULT] ... default_network_interface=neutron This default value will be used for all nodes that don't have a network interface explicitly specified in the creation request. If this configuration option is not set, the default network interface is determined by looking at the ``[dhcp]dhcp_provider`` configuration option value. If it is ``neutron``, then ``flat`` network interface becomes the default, otherwise ``noop`` is the default. #. Define a provider network in the Networking service, which we shall refer to as the "provisioning" network. Using the ``neutron`` network interface requires that ``provisioning_network`` and ``cleaning_network`` configuration options are set to valid identifiers (UUID or name) of networks in the Networking service. If these options are not set correctly, cleaning or provisioning will fail to start. There are two ways to set these values: - Under the ``neutron`` section of ironic configuration file: .. 
code-block:: ini [neutron] cleaning_network = $CLEAN_UUID_OR_NAME provisioning_network = $PROVISION_UUID_OR_NAME - Under ``provisioning_network`` and ``cleaning_network`` keys of the node's ``driver_info`` field as ``driver_info['provisioning_network']`` and ``driver_info['cleaning_network']`` respectively. .. note:: If these ``provisioning_network`` and ``cleaning_network`` values are not specified in node's `driver_info` then ironic falls back to the configuration in the ``neutron`` section. Please refer to :doc:`configure-cleaning` for more information about cleaning. .. warning:: Please make sure that the Bare Metal service has exclusive access to the provisioning and cleaning networks. Spawning instances by non-admin users in these networks and getting access to the Bare Metal service's control plane is a security risk. For this reason, the provisioning and cleaning networks should be configured as non-shared networks in the ``admin`` tenant. .. note:: When using the ``flat`` network interface, bare metal instances are normally spawned onto the "provisioning" network. This is not supported with the ``neutron`` interface and the deployment will fail. Please ensure a different network is chosen in the Networking service when a bare metal instance is booted from the Compute service. .. note:: The "provisioning" and "cleaning" networks may be the same network or distinct networks. 
To ensure that communication between the Bare Metal service and the deploy ramdisk works, it is important to ensure that security groups are disabled for these networks, *or* that the default security groups allow: * DHCP * TFTP * egress port used for the Bare Metal service (6385 by default) * ingress port used for ironic-python-agent (9999 by default) * if using :ref:`direct-deploy`, the egress port used for the Object Storage service or the local HTTP server (typically 80 or 443) * if using iPXE, the egress port used for the HTTP server running on the ironic-conductor nodes (typically 80). #. This step is optional and applicable only if you want to use security groups during provisioning and/or cleaning of the nodes. If not specified, default security groups are used. #. Define security groups in the Networking service, to be used for provisioning and/or cleaning networks. #. Add the list of these security group UUIDs under the ``neutron`` section of ironic-conductor's configuration file as shown below:: [neutron] ... cleaning_network=$CLEAN_UUID_OR_NAME cleaning_network_security_groups=[$LIST_OF_CLEAN_SECURITY_GROUPS] provisioning_network=$PROVISION_UUID_OR_NAME provisioning_network_security_groups=[$LIST_OF_PROVISION_SECURITY_GROUPS] Multiple security groups may be applied to a given network, hence, they are specified as a list. The same security group(s) could be used for both provisioning and cleaning networks. .. warning:: If security groups are configured as described above, do not set the "port_security_enabled" flag to False for the corresponding Networking service's network or port. This will cause the deploy to fail. For example: if ``provisioning_network_security_groups`` configuration option is used, ensure that "port_security_enabled" flag for the provisioning network is set to True. This flag is set to True by default; make sure not to override it by manually setting it to False. #. 
Install and configure a compatible ML2 mechanism driver which supports bare metal provisioning for your switch. See :neutron-doc:`ML2 plugin configuration manual ` for details. #. Restart the ironic-conductor and ironic-api services after the modifications: - Fedora/RHEL8/CentOS8:: sudo systemctl restart openstack-ironic-api sudo systemctl restart openstack-ironic-conductor - Ubuntu:: sudo service ironic-api restart sudo service ironic-conductor restart #. Make sure that the ironic-conductor is reachable over the provisioning network by trying to download a file from a TFTP server on it, from some non-control-plane server in that network:: tftp $TFTP_IP -c get $FILENAME where FILENAME is the file located at the TFTP server. See :ref:`multitenancy` for required node configuration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/creating-images.rst0000664000175000017500000000024500000000000022405 0ustar00zuulzuul00000000000000Create user images for the Bare Metal service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The content has been migrated, please see :doc:`/user/creating-images`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/deploy-ramdisk.rst0000664000175000017500000000316400000000000022275 0ustar00zuulzuul00000000000000.. _deploy-ramdisk: Building or downloading a deploy ramdisk image ============================================== Ironic depends on having an image with the :ironic-python-agent-doc:`ironic-python-agent (IPA) <>` service running on it for controlling and deploying bare metal nodes. Two kinds of images are published on every commit from every branch of :ironic-python-agent-doc:`ironic-python-agent (IPA) <>` * DIB_ images are suitable for production usage and can be downloaded from https://tarballs.openstack.org/ironic-python-agent/dib/files/. 
* For Train and older use CentOS 7 images. * For Ussuri and newer use CentOS 8 images. .. warning:: CentOS 7 master images are no longer updated and must not be used. .. warning:: The published images will not work for dhcp-less deployments since the simple-init_ element is not present. Check the DIB_ documentation to see how to build the image. * TinyIPA_ images are suitable for CI and testing environments and can be downloaded from https://tarballs.openstack.org/ironic-python-agent/tinyipa/files/. Building from source -------------------- Check the ironic-python-agent-builder_ project for information on how to build ironic-python-agent ramdisks. .. _DIB: https://docs.openstack.org/ironic-python-agent-builder/latest/admin/dib.html .. _TinyIPA: https://docs.openstack.org/ironic-python-agent-builder/latest/admin/tinyipa.html .. _ironic-python-agent-builder: https://docs.openstack.org/ironic-python-agent-builder/latest/ .. _simple-init: https://docs.openstack.org/diskimage-builder/latest/elements/simple-init/README.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/enabling-drivers.rst0000664000175000017500000002411000000000000022576 0ustar00zuulzuul00000000000000Enabling drivers and hardware types =================================== Introduction ------------ The Bare Metal service delegates actual hardware management to **drivers**. *Drivers*, also called *hardware types*, consist of *hardware interfaces*: sets of functionality dealing with some aspect of bare metal provisioning in a vendor-specific way. There are generic **hardware types** (eg. ``redfish`` and ``ipmi``), and vendor-specific ones (eg. ``ilo`` and ``irmc``). .. note:: Starting with the Rocky release, the terminologies *driver*, *dynamic driver*, and *hardware type* have the same meaning in the scope of Bare Metal service. .. 
_enable-hardware-types: Enabling hardware types ----------------------- Hardware types are enabled in the configuration file of the **ironic-conductor** service by setting the ``enabled_hardware_types`` configuration option, for example: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish Due to the driver's dynamic nature, they also require configuring enabled hardware interfaces. .. note:: All available hardware types and interfaces are listed in setup.cfg_ file in the source code tree. .. _enable-hardware-interfaces: Enabling hardware interfaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are several types of hardware interfaces: bios manages configuration of the BIOS settings of a bare metal node. This interface is vendor-specific and can be enabled via the ``enabled_bios_interfaces`` option: .. code-block:: ini [DEFAULT] enabled_hardware_types = enabled_bios_interfaces = See :doc:`/admin/bios` for details. boot manages booting of both the deploy ramdisk and the user instances on the bare metal node. See :doc:`/admin/interfaces/boot` for details. Boot interface implementations are often vendor specific, and can be enabled via the ``enabled_boot_interfaces`` option: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,ilo enabled_boot_interfaces = pxe,ilo-virtual-media Boot interfaces with ``pxe`` in their name require :doc:`configure-pxe`. There are also a few hardware-specific boot interfaces - see :doc:`/admin/drivers` for their required configuration. console manages access to the serial console of a bare metal node. See :doc:`/admin/console` for details. deploy defines how the image gets transferred to the target disk. See :doc:`/admin/interfaces/deploy` for an explanation of the difference between supported deploy interfaces. The deploy interfaces can be enabled as follows: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish enabled_deploy_interfaces = direct,ramdisk .. 
note:: The ``direct`` deploy interface requires the Object Storage service or an HTTP service inspect implements fetching hardware information from nodes. Can be implemented out-of-band (via contacting the node's BMC) or in-band (via booting a ramdisk on a node). The latter implementation is called ``inspector`` and uses a separate service called :ironic-inspector-doc:`ironic-inspector <>`. Example: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,ilo,irmc enabled_inspect_interfaces = ilo,irmc,inspector See :doc:`/admin/inspection` for more details. management provides additional hardware management actions, like getting or setting boot devices. This interface is usually vendor-specific, and its name often matches the name of the hardware type (with ``ipmitool`` being a notable exception). For example: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish,ilo,irmc enabled_management_interfaces = ipmitool,redfish,ilo,irmc Using ``ipmitool`` requires :doc:`configure-ipmi`. See :doc:`/admin/drivers` for the required configuration of each driver. network connects/disconnects bare metal nodes to/from virtual networks. See :doc:`configure-tenant-networks` for more details. power runs power actions on nodes. Similar to the management interface, it is usually vendor-specific, and its name often matches the name of the hardware type (with ``ipmitool`` being again an exception). For example: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish,ilo,irmc enabled_power_interfaces = ipmitool,redfish,ilo,irmc Using ``ipmitool`` requires :doc:`configure-ipmi`. See :doc:`/admin/drivers` for the required configuration of each driver. raid manages building and tearing down RAID on nodes. Similar to inspection, it can be implemented either out-of-band or in-band (via ``agent`` implementation). See :doc:`/admin/raid` for details. For example: .. 
code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish,ilo,irmc enabled_raid_interfaces = agent,no-raid storage manages the interaction with a remote storage subsystem, such as the Block Storage service, and helps facilitate booting from a remote volume. This interface ensures that volume target and connector information is updated during the lifetime of a deployed instance. See :doc:`/admin/boot-from-volume` for more details. This interface defaults to a ``noop`` driver as it is considered an "opt-in" interface which requires additional configuration by the operator to be usable. For example: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,irmc enabled_storage_interfaces = cinder,noop vendor is a place for vendor extensions to be exposed in API. See :doc:`/contributor/vendor-passthru` for details. .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish,ilo,irmc enabled_vendor_interfaces = ipmitool,no-vendor Here is a complete configuration example, enabling two generic protocols, IPMI and Redfish, with a few additional features: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish enabled_boot_interfaces = pxe enabled_console_interfaces = ipmitool-socat,no-console enabled_deploy_interfaces = direct enabled_inspect_interfaces = inspector enabled_management_interfaces = ipmitool,redfish enabled_network_interfaces = flat,neutron enabled_power_interfaces = ipmitool,redfish enabled_raid_interfaces = agent enabled_storage_interfaces = cinder,noop enabled_vendor_interfaces = ipmitool,no-vendor Note that some interfaces have implementations named ``no-`` where ```` is the interface type. These implementations do nothing and return errors when used from API. Hardware interfaces in multi-conductor environments ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When enabling hardware types and their interfaces, make sure that for every enabled hardware type, the whole set of enabled interfaces matches for all conductors. 
However, different conductors can have different hardware types enabled. For example, you can have two conductors with the following configuration respectively: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi enabled_deploy_interfaces = direct enabled_power_interfaces = ipmitool enabled_management_interfaces = ipmitool .. code-block:: ini [DEFAULT] enabled_hardware_types = redfish enabled_deploy_interfaces = ansible enabled_power_interfaces = redfish enabled_management_interfaces = redfish But you cannot have two conductors with the following configuration respectively: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish enabled_deploy_interfaces = direct enabled_power_interfaces = ipmitool,redfish enabled_management_interfaces = ipmitool,redfish .. code-block:: ini [DEFAULT] enabled_hardware_types = redfish enabled_deploy_interfaces = ansible enabled_power_interfaces = redfish enabled_management_interfaces = redfish This is because the ``redfish`` hardware type will have different enabled *deploy* interfaces on these conductors. It would have been fine, if the second conductor had ``enabled_deploy_interfaces = direct`` instead of ``ansible``. This situation is not detected by the Bare Metal service, but it can cause inconsistent behavior in the API, when node functionality will depend on which conductor it gets assigned to. .. note:: We don't treat this as an error, because such *temporary* inconsistency is inevitable during a rolling upgrade or a configuration update. Configuring interface defaults ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When an operator does not provide an explicit value for one of the interfaces (when creating a node or updating its driver), the default value is calculated as described in :ref:`hardware_interfaces_defaults`. It is also possible to override the defaults for any interfaces by setting one of the options named ``default__interface``, where ```` is the interface name. For example: .. 
code-block:: ini [DEFAULT] default_deploy_interface = direct default_network_interface = neutron This configuration forces the default *deploy* interface to be ``direct`` and the default *network* interface to be ``neutron`` for all hardware types. The defaults are calculated and set on a node when creating it or updating its hardware type. Thus, changing these configuration options has no effect on existing nodes. .. warning:: The default interface implementation must be configured the same way across all conductors in the cloud, except maybe for a short period of time during an upgrade or configuration update. Otherwise the default implementation will depend on which conductor handles which node, and this mapping is not predictable or even persistent. .. warning:: These options should be used with care. If a hardware type does not support the provided default implementation, its users will have to always provide an explicit value for this interface when creating a node. .. _setup.cfg: https://opendev.org/openstack/ironic/src/branch/master/setup.cfg ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/enabling-https.rst0000664000175000017500000000630200000000000022265 0ustar00zuulzuul00000000000000.. _enabling-https: Enabling HTTPS -------------- .. _EnableHTTPSinSwift: Enabling HTTPS in Swift ======================= The drivers using virtual media use swift for storing boot images and node configuration information (contains sensitive information for Ironic conductor to provision bare metal hardware). By default, HTTPS is not enabled in swift. HTTPS is required to encrypt all communication between swift and Ironic conductor and swift and bare metal (via virtual media). It can be enabled in one of the following ways: * `Using an SSL termination proxy `_ * :swift-doc:`Using native SSL support in swift ` (recommended only for testing purpose by swift). .. 
_EnableHTTPSinGlance: Enabling HTTPS in Image service =============================== Ironic drivers usually use Image service during node provisioning. By default, image service does not use HTTPS, but it is required for secure communication. It can be enabled by making the following changes to ``/etc/glance/glance-api.conf``: #. :glance-doc:`Configuring SSL support ` #. Restart the glance-api service:: Fedora/RHEL8/CentOS8/SUSE: sudo systemctl restart openstack-glance-api Debian/Ubuntu: sudo service glance-api restart See the :glance-doc:`Glance <>` documentation, for more details on the Image service. Enabling HTTPS communication between Image service and Object storage ===================================================================== This section describes the steps needed to enable secure HTTPS communication between Image service and Object storage when Object storage is used as the Backend. To enable secure HTTPS communication between Image service and Object storage follow these steps: #. :ref:`EnableHTTPSinSwift` #. :glance-doc:`Configure Swift Storage Backend ` #. :ref:`EnableHTTPSinGlance` Enabling HTTPS communication between Image service and Bare Metal service ========================================================================= This section describes the steps needed to enable secure HTTPS communication between Image service and Bare Metal service. To enable secure HTTPS communication between Bare Metal service and Image service follow these steps: #. Edit ``/etc/ironic/ironic.conf``:: [glance] ... glance_cafile=/path/to/certfile .. note:: 'glance_cafile' is an optional path to a CA certificate bundle to be used to validate the SSL certificate served by Image service. #. If not using the keystone service catalog for the Image service API endpoint discovery, also edit the ``endpoint_override`` option to point to HTTPS URL of image service (replace ```` with hostname[:port][path] of the Image service endpoint):: [glance] ... 
endpoint_override = https://<host> #. Restart ironic-conductor service::
code-block:: console baremetal driver list +---------------------+-----------------------+ | Supported driver(s) | Active host(s) | +---------------------+-----------------------+ | ipmi | localhost.localdomain | +---------------------+-----------------------+ The specific driver to use should be picked based on actual hardware capabilities and expected features. See :doc:`/admin/drivers` for more hints on that. Each driver has a list of *driver properties* that need to be specified via the node's ``driver_info`` field, in order for the driver to operate on node. This list consists of the properties of the hardware interfaces that the driver uses. These driver properties are available with the ``driver property list`` command: .. code-block:: console $ baremetal driver property list ipmi +----------------------+-------------------------------------------------------------------------------------------------------------+ | Property | Description | +----------------------+-------------------------------------------------------------------------------------------------------------+ | ipmi_address | IP address or hostname of the node. Required. | | ipmi_password | password. Optional. | | ipmi_username | username; default is NULL user. Optional. | | ... | ... | | deploy_kernel | UUID (from Glance) of the deployment kernel. Required. | | deploy_ramdisk | UUID (from Glance) of the ramdisk that is mounted at boot time. Required. | +----------------------+-------------------------------------------------------------------------------------------------------------+ The properties marked as required must be supplied either during node creation or shortly after. Some properties may only be required for certain features. Note on API versions -------------------- Starting with API version 1.11, the Bare Metal service added a new initial provision state of ``enroll`` to its state machine. When this or later API version is used, new nodes get this state instead of ``available``. 
Existing automation tooling that use an API version lower than 1.11 are not affected, since the initial provision state is still ``available``. However, using API version 1.11 or above may break existing automation tooling with respect to node creation. The default API version used by (the most recent) python-ironicclient is 1.9, but it may change in the future and should not be relied on. In the examples below we will use version 1.11 of the Bare metal API. This gives us the following advantages: * Explicit power credentials validation before leaving the ``enroll`` state. * Running node cleaning before entering the ``available`` state. * Not exposing half-configured nodes to the scheduler. To set the API version for all commands, you can set the environment variable ``IRONIC_API_VERSION``. For the OpenStackClient baremetal plugin, set the ``OS_BAREMETAL_API_VERSION`` variable to the same value. For example: .. code-block:: console $ export IRONIC_API_VERSION=1.11 $ export OS_BAREMETAL_API_VERSION=1.11 Enrollment process ------------------ Creating a node ~~~~~~~~~~~~~~~ This section describes the main steps to enroll a node and make it available for provisioning. Some steps are shown separately for illustration purposes, and may be combined if desired. #. Create a node in the Bare Metal service with the ``node create`` command. At a minimum, you must specify the driver name (for example, ``ipmi``). This command returns the node UUID along with other information about the node. The node's provision state will be ``enroll``: .. 
code-block:: console $ export OS_BAREMETAL_API_VERSION=1.11 $ baremetal node create --driver ipmi +--------------+--------------------------------------+ | Property | Value | +--------------+--------------------------------------+ | uuid | dfc6189f-ad83-4261-9bda-b27258eb1987 | | driver_info | {} | | extra | {} | | driver | ipmi | | chassis_uuid | | | properties | {} | | name | None | +--------------+--------------------------------------+ $ baremetal node show dfc6189f-ad83-4261-9bda-b27258eb1987 +------------------------+--------------------------------------+ | Property | Value | +------------------------+--------------------------------------+ | target_power_state | None | | extra | {} | | last_error | None | | maintenance_reason | None | | provision_state | enroll | | uuid | dfc6189f-ad83-4261-9bda-b27258eb1987 | | console_enabled | False | | target_provision_state | None | | provision_updated_at | None | | maintenance | False | | power_state | None | | driver | ipmi | | properties | {} | | instance_uuid | None | | name | None | | driver_info | {} | | ... | ... | +------------------------+--------------------------------------+ A node may also be referred to by a logical name as well as its UUID. A name can be assigned to the node during its creation by adding the ``-n`` option to the ``node create`` command or by updating an existing node with the ``node set`` command. See `Logical Names`_ for examples. #. Starting with API version 1.31 (and ``python-ironicclient`` 1.13), you can pick which hardware interface to use with nodes that use hardware types. Each interface is represented by a node field called ``_interface`` where ```` in the interface type, e.g. ``boot``. See :doc:`enabling-drivers` for details on hardware interfaces. An interface can be set either separately: .. code-block:: console $ baremetal node set $NODE_UUID --deploy-interface direct --raid-interface agent or set during node creation: .. 
code-block:: console $ baremetal node create --driver ipmi \ --deploy-interface direct \ --raid-interface agent If no value is provided for some interfaces, `Defaults for hardware interfaces`_ are used instead. #. Update the node ``driver_info`` with the required driver properties, so that the Bare Metal service can manage the node: .. code-block:: console $ baremetal node set $NODE_UUID \ --driver-info ipmi_username=$USER \ --driver-info ipmi_password=$PASS \ --driver-info ipmi_address=$ADDRESS .. note:: If IPMI is running on a port other than 623 (the default). The port must be added to ``driver_info`` by specifying the ``ipmi_port`` value. Example: .. code-block:: console $ baremetal node set $NODE_UUID --driver-info ipmi_port=$PORT_NUMBER You may also specify all ``driver_info`` parameters during node creation by passing the **--driver-info** option multiple times: .. code-block:: console $ baremetal node create --driver ipmi \ --driver-info ipmi_username=$USER \ --driver-info ipmi_password=$PASS \ --driver-info ipmi_address=$ADDRESS See `Choosing a driver`_ above for details on driver properties. #. Specify a deploy kernel and ramdisk compatible with the node's driver, for example: .. code-block:: console $ baremetal node set $NODE_UUID \ --driver-info deploy_kernel=$DEPLOY_VMLINUZ_UUID \ --driver-info deploy_ramdisk=$DEPLOY_INITRD_UUID See :doc:`configure-glance-images` for details. #. Optionally you can specify the provisioning and/or cleaning network UUID or name in the node's ``driver_info``. The ``neutron`` network interface requires both ``provisioning_network`` and ``cleaning_network``, while the ``flat`` network interface requires the ``cleaning_network`` to be set either in the configuration or on the nodes. For example: .. code-block:: console $ baremetal node set $NODE_UUID \ --driver-info cleaning_network=$CLEAN_UUID_OR_NAME \ --driver-info provisioning_network=$PROVISION_UUID_OR_NAME See :doc:`configure-tenant-networks` for details. #. 
You must also inform the Bare Metal service of the network interface cards which are part of the node by creating a port with each NIC's MAC address. These MAC addresses are passed to the Networking service during instance provisioning and used to configure the network appropriately: .. code-block:: console $ baremetal port create $MAC_ADDRESS --node $NODE_UUID .. note:: When it is time to remove the node from the Bare Metal service, the command used to remove the port is ``baremetal port delete ``. When doing so, it is important to ensure that the baremetal node is not in ``maintenance`` as guarding logic to prevent orphaning Neutron Virtual Interfaces (VIFs) will be overridden. .. _enrollment-scheduling: Adding scheduling information ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Assign a *resource class* to the node. A *resource class* should represent a class of hardware in your data center, that corresponds to a Compute flavor. For example, let's split hardware into these three groups: #. nodes with a lot of RAM and powerful CPU for computational tasks, #. nodes with powerful GPU for OpenCL computing, #. smaller nodes for development and testing. We can define three resource classes to reflect these hardware groups, named ``large-cpu``, ``large-gpu`` and ``small`` respectively. Then, for each node in each of the hardware groups, we'll set their ``resource_class`` appropriately via: .. code-block:: console $ baremetal node set $NODE_UUID --resource-class $CLASS_NAME The ``--resource-class`` argument can also be used when creating a node: .. code-block:: console $ baremetal node create --driver $DRIVER --resource-class $CLASS_NAME To use resource classes for scheduling you need to update your flavors as described in :doc:`configure-nova-flavors`. .. note:: This is not required for standalone deployments, only for those using the Compute service for provisioning bare metal instances. #. Update the node's properties to match the actual hardware of the node: .. 
code-block:: console $ baremetal node set $NODE_UUID \ --property cpus=$CPU_COUNT \ --property memory_mb=$RAM_MB \ --property local_gb=$DISK_GB As above, these can also be specified at node creation by passing the **--property** option to ``node create`` multiple times: .. code-block:: console $ baremetal node create --driver ipmi \ --driver-info ipmi_username=$USER \ --driver-info ipmi_password=$PASS \ --driver-info ipmi_address=$ADDRESS \ --property cpus=$CPU_COUNT \ --property memory_mb=$RAM_MB \ --property local_gb=$DISK_GB These values can also be discovered during `Hardware Inspection`_. .. warning:: The value provided for the ``local_gb`` property must match the size of the root device you're going to deploy on. By default **ironic-python-agent** picks the smallest disk which is not smaller than 4 GiB. If you override this logic by using root device hints (see :ref:`root-device-hints`), the ``local_gb`` value should match the size of picked target disk. #. If you wish to perform more advanced scheduling of the instances based on hardware capabilities, you may add metadata to each node that will be exposed to the Compute scheduler (see: :nova-doc:`ComputeCapabilitiesFilter `). A full explanation of this is outside of the scope of this document. It can be done through the special ``capabilities`` member of node properties: .. code-block:: console $ baremetal node set $NODE_UUID \ --property capabilities=key1:val1,key2:val2 Some capabilities can also be discovered during `Hardware Inspection`_. #. If you wish to perform advanced scheduling of instances based on qualitative attributes of bare metal nodes, you may add traits to each bare metal node that will be exposed to the Compute scheduler (see: :ref:`scheduling-traits` for a more in-depth discussion of traits in the Bare Metal service). For example, to add the standard trait ``HW_CPU_X86_VMX`` and a custom trait ``CUSTOM_TRAIT1`` to a node: .. 
code-block:: console $ baremetal node add trait $NODE_UUID \ CUSTOM_TRAIT1 HW_CPU_X86_VMX Validating node information ~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. To check if Bare Metal service has the minimum information necessary for a node's driver to be functional, you may ``validate`` it: .. code-block:: console $ baremetal node validate $NODE_UUID +------------+--------+--------+ | Interface | Result | Reason | +------------+--------+--------+ | boot | True | | | console | True | | | deploy | True | | | inspect | True | | | management | True | | | network | True | | | power | True | | | raid | True | | | storage | True | | +------------+--------+--------+ If the node fails validation, each driver interface will return information as to why it failed: .. code-block:: console $ baremetal node validate $NODE_UUID +------------+--------+-------------------------------------------------------------------------------------------------------------------------------------+ | Interface | Result | Reason | +------------+--------+-------------------------------------------------------------------------------------------------------------------------------------+ | boot | True | | | console | None | not supported | | deploy | False | Cannot validate iSCSI deploy. Some parameters were missing in node's instance_info. Missing are: ['root_gb', 'image_source'] | | inspect | True | | | management | False | Missing the following IPMI credentials in node's driver_info: ['ipmi_address']. | | network | True | | | power | False | Missing the following IPMI credentials in node's driver_info: ['ipmi_address']. | | raid | None | not supported | | storage | True | | +------------+--------+-------------------------------------------------------------------------------------------------------------------------------------+ When using the Compute Service with the Bare Metal service, it is safe to ignore the deploy interface's validation error due to lack of image information. 
You may continue the enrollment process. This information will be set by the Compute Service just before deploying, when an instance is requested: .. code-block:: console $ baremetal node validate $NODE_UUID +------------+--------+------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Interface | Result | Reason | +------------+--------+------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | boot | False | Cannot validate image information for node because one or more parameters are missing from its instance_info. Missing are: ['ramdisk', 'kernel', 'image_source'] | | console | True | | | deploy | False | Cannot validate image information for node because one or more parameters are missing from its instance_info. Missing are: ['ramdisk', 'kernel', 'image_source'] | | inspect | True | | | management | True | | | network | True | | | power | True | | | raid | None | not supported | | storage | True | | +------------+--------+------------------------------------------------------------------------------------------------------------------------------------------------------------------+ Making node available for deployment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In order for nodes to be available for deploying workloads on them, nodes must be in the ``available`` provision state. To do this, nodes created with API version 1.11 and above must be moved from the ``enroll`` state to the ``manageable`` state and then to the ``available`` state. This section can be safely skipped, if API version 1.10 or earlier is used (which is the case by default). After creating a node and before moving it from its initial provision state of ``enroll``, basic power and port information needs to be configured on the node. 
The Bare Metal service needs this information because it verifies that it is capable of controlling the node when transitioning the node from ``enroll`` to ``manageable`` state. To move a node from ``enroll`` to ``manageable`` provision state: .. code-block:: console $ baremetal node manage $NODE_UUID $ baremetal node show $NODE_UUID +------------------------+--------------------------------------------------------------------+ | Property | Value | +------------------------+--------------------------------------------------------------------+ | ... | ... | | provision_state | manageable | <- verify correct state | uuid | 0eb013bb-1e4b-4f4c-94b5-2e7468242611 | | ... | ... | +------------------------+--------------------------------------------------------------------+ .. note:: Since it is an asynchronous call, the response for ``baremetal node manage`` will not indicate whether the transition succeeded or not. You can check the status of the operation via ``baremetal node show``. If it was successful, ``provision_state`` will be in the desired state. If it failed, there will be information in the node's ``last_error``. When a node is moved from the ``manageable`` to ``available`` provision state, the node will go through automated cleaning if configured to do so (see :ref:`configure-cleaning`). To move a node from ``manageable`` to ``available`` provision state: .. code-block:: console $ baremetal node provide $NODE_UUID $ baremetal node show $NODE_UUID +------------------------+--------------------------------------------------------------------+ | Property | Value | +------------------------+--------------------------------------------------------------------+ | ... | ... | | provision_state | available | < - verify correct state | uuid | 0eb013bb-1e4b-4f4c-94b5-2e7468242611 | | ... | ... 
| +------------------------+--------------------------------------------------------------------+ For more details on the Bare Metal service's state machine, see the :doc:`/user/states` documentation. Mapping nodes to Compute cells ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the Compute service is used for scheduling, and the ``discover_hosts_in_cells_interval`` was not set as described in :doc:`configure-compute`, then log into any controller node and run the following command to map the new node(s) to Compute cells:: nova-manage cell_v2 discover_hosts Logical names ------------- A node may also be referred to by a logical name as well as its UUID. Names can be assigned either during its creation by adding the ``-n`` option to the ``node create`` command or by updating an existing node with the ``node set`` command. Node names must be unique, and conform to: - rfc952_ - rfc1123_ - wiki_hostname_ The node is named 'example' in the following examples: .. code-block:: console $ baremetal node create --driver ipmi --name example or .. code-block:: console $ baremetal node set $NODE_UUID --name example Once assigned a logical name, a node can then be referred to by name or UUID interchangeably: .. code-block:: console $ baremetal node create --driver ipmi --name example +--------------+--------------------------------------+ | Property | Value | +--------------+--------------------------------------+ | uuid | 71e01002-8662-434d-aafd-f068f69bb85e | | driver_info | {} | | extra | {} | | driver | ipmi | | chassis_uuid | | | properties | {} | | name | example | +--------------+--------------------------------------+ $ baremetal node show example +------------------------+--------------------------------------+ | Property | Value | +------------------------+--------------------------------------+ | target_power_state | None | | extra | {} | | last_error | None | | updated_at | 2015-04-24T16:23:46+00:00 | | ... | ... 
| | instance_info | {} | +------------------------+--------------------------------------+ .. _rfc952: https://tools.ietf.org/html/rfc952 .. _rfc1123: https://tools.ietf.org/html/rfc1123 .. _wiki_hostname: https://en.wikipedia.org/wiki/Hostname .. _hardware_interfaces_defaults: Defaults for hardware interfaces -------------------------------- For *hardware types*, users can request one of enabled implementations when creating or updating a node as explained in `Creating a node`_. When no value is provided for a certain interface when creating a node, or changing a node's hardware type, the default value is used. You can use the driver details command to list the current enabled and default interfaces for a hardware type (for your deployment): .. code-block:: console $ baremetal driver show ipmi +-------------------------------+----------------+ | Field | Value | +-------------------------------+----------------+ | default_boot_interface | pxe | | default_console_interface | no-console | | default_deploy_interface | direct | | default_inspect_interface | no-inspect | | default_management_interface | ipmitool | | default_network_interface | flat | | default_power_interface | ipmitool | | default_raid_interface | no-raid | | default_vendor_interface | no-vendor | | enabled_boot_interfaces | pxe | | enabled_console_interfaces | no-console | | enabled_deploy_interfaces | direct | | enabled_inspect_interfaces | no-inspect | | enabled_management_interfaces | ipmitool | | enabled_network_interfaces | flat, noop | | enabled_power_interfaces | ipmitool | | enabled_raid_interfaces | no-raid, agent | | enabled_vendor_interfaces | no-vendor | | hosts | ironic-host-1 | | name | ipmi | | type | dynamic | +-------------------------------+----------------+ The defaults are calculated as follows: #. If the ``default__interface`` configuration option (where ```` is the interface name) is set, its value is used as the default. 
If this implementation is not compatible with the node's hardware type, an error is returned to a user. An explicit value has to be provided for the node's ``<IFACE>_interface`` field in this case. #. Otherwise, the first supported implementation that is enabled by an operator is used as the default. A list of supported implementations is calculated by taking the intersection between the implementations supported by the node's hardware type and implementations enabled by the ``enabled_<IFACE>_interfaces`` option (where ``<IFACE>`` is the interface name). The calculation preserves the order of items, as provided by the hardware type. If the list of supported implementations is not empty, the first one is used. Otherwise, an error is returned to a user. In this case, an explicit value has to be provided for the ``<IFACE>_interface`` field. See :doc:`enabling-drivers` for more details on configuration. Example ~~~~~~~ Consider the following configuration (shortened for simplicity): .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish enabled_console_interfaces = no-console,ipmitool-shellinabox enabled_deploy_interfaces = direct enabled_management_interfaces = ipmitool,redfish enabled_power_interfaces = ipmitool,redfish default_deploy_interface = ansible A new node is created with the ``ipmi`` driver and no interfaces specified: .. code-block:: console $ export OS_BAREMETAL_API_VERSION=1.31 $ baremetal node create --driver ipmi +--------------+--------------------------------------+ | Property | Value | +--------------+--------------------------------------+ | uuid | dfc6189f-ad83-4261-9bda-b27258eb1987 | | driver_info | {} | | extra | {} | | driver | ipmi | | chassis_uuid | | | properties | {} | | name | None | +--------------+--------------------------------------+ Then the defaults for the interfaces that will be used by the node in this example are calculated as follows: deploy An explicit value of ``ansible`` is provided for ``default_deploy_interface``, so it is used.
power No default is configured. The ``ipmi`` hardware type supports only ``ipmitool`` power. The intersection between supported power interfaces and values provided in the ``enabled_power_interfaces`` option has only one item: ``ipmitool``. It is used. console No default is configured. The ``ipmi`` hardware type supports the following console interfaces: ``ipmitool-socat``, ``ipmitool-shellinabox`` and ``no-console`` (in this order). Of these three, only two are enabled: ``no-console`` and ``ipmitool-shellinabox`` (order does not matter). The intersection contains ``ipmitool-shellinabox`` and ``no-console``. The first item is used, and it is ``ipmitool-shellinabox``. management Following the same calculation as *power*, the ``ipmitool`` management interface is used. Hardware Inspection ------------------- The Bare Metal service supports hardware inspection that simplifies enrolling nodes - please see :doc:`/admin/inspection` for details. Tenant Networks and Port Groups ------------------------------- See :doc:`/admin/multitenancy` and :doc:`/admin/portgroups`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/get_started.rst0000664000175000017500000001414100000000000021653 0ustar00zuulzuul00000000000000=========================== Bare Metal service overview =========================== The Bare Metal service, codenamed ``ironic``, is a collection of components that provides support to manage and provision physical machines. Bare Metal service components ----------------------------- The Bare Metal service includes the following components: ironic-api A RESTful API that processes application requests by sending them to the ironic-conductor over `remote procedure call (RPC)`_. Can be run through WSGI_ or as a separate process. ironic-conductor Adds/edits/deletes nodes; powers on/off nodes with IPMI or other vendor-specific protocol; provisions/deploys/cleans bare metal nodes. 
ironic-conductor uses :doc:`drivers </admin/drivers>` to execute operations on hardware. ironic-python-agent A python service which is run in a temporary ramdisk to provide ironic-conductor and ironic-inspector services with remote access, in-band hardware control, and hardware introspection. Additionally, the Bare Metal service has certain external dependencies, which are very similar to other OpenStack services: - A database to store hardware information and state. You can set the database back-end type and location. A simple approach is to use the same database back end as the Compute service. Another approach is to use a separate database back-end to further isolate bare metal resources (and associated metadata) from users. - An :oslo.messaging-doc:`oslo.messaging <>` compatible queue, such as RabbitMQ. It may use the same implementation as that of the Compute service, but that is not a requirement. Used to implement RPC between ironic-api and ironic-conductor. Deployment architecture ----------------------- The Bare Metal RESTful API service is used to enroll hardware that the Bare Metal service will manage. A cloud administrator usually registers it, specifying its attributes such as MAC addresses and IPMI credentials. There can be multiple instances of the API service. The *ironic-conductor* process does the bulk of the work. For security reasons, it is advisable to place it on an isolated host, since it is the only service that requires access to both the data plane and IPMI control plane. There can be multiple instances of the conductor service to support various classes of drivers and also to manage failover. Instances of the conductor service should be on separate nodes. Each conductor can itself run many drivers to operate heterogeneous hardware. This is depicted in the following figure. .. figure:: ../images/deployment_architecture_2.png :alt: Deployment Architecture The API exposes a list of supported drivers and the names of conductor hosts servicing them.
Interaction with OpenStack components ------------------------------------- The Bare Metal service may, depending upon configuration, interact with several other OpenStack services. This includes: - the OpenStack Telemetry module (``ceilometer``) for consuming the IPMI metrics - the OpenStack Identity service (``keystone``) for request authentication and to locate other OpenStack services - the OpenStack Image service (``glance``) from which to retrieve images and image meta-data - the OpenStack Networking service (``neutron``) for DHCP and network configuration - the OpenStack Compute service (``nova``) works with the Bare Metal service and acts as a user-facing API for instance management, while the Bare Metal service provides the admin/operator API for hardware management. The OpenStack Compute service also provides scheduling facilities (matching flavors <-> images <-> hardware), tenant quotas, IP assignment, and other services which the Bare Metal service does not, in and of itself, provide. - the OpenStack Object Storage (``swift``) provides temporary storage for the configdrive, user images, deployment logs and inspection data. Logical architecture -------------------- The diagram below shows the logical architecture. It shows the basic components that form the Bare Metal service, the relation of the Bare Metal service with other OpenStack services and the logical flow of a boot instance request resulting in the provisioning of a physical server. .. figure:: ../images/logical_architecture.png :alt: Logical Architecture A user's request to boot an instance is passed to the Compute service via the Compute API and the Compute Scheduler. The Compute service uses the *ironic virt driver* to hand over this request to the Bare Metal service, where the request passes from the Bare Metal API, to the Conductor, to a Driver to successfully provision a physical server for the user. 
Just as the Compute service talks to various OpenStack services like Image, Network, Object Store etc to provision a virtual machine instance, here the Bare Metal service talks to the same OpenStack services for image, network and other resource needs to provision a bare metal instance. See :ref:`understanding-deployment` for a more detailed breakdown of a typical deployment process. Associated projects ------------------- Optionally, one may wish to utilize the following associated projects for additional functionality: :python-ironicclient-doc:`python-ironicclient <>` A command-line interface (CLI) and python bindings for interacting with the Bare Metal service. :ironic-ui-doc:`ironic-ui <>` Horizon dashboard, providing graphical interface (GUI) for the Bare Metal API. :ironic-inspector-doc:`ironic-inspector <>` An associated service which performs in-band hardware introspection by PXE booting unregistered hardware into the ironic-python-agent ramdisk. diskimage-builder_ A related project to help facilitate the creation of ramdisks and machine images, such as those running the ironic-python-agent. :bifrost-doc:`bifrost <>` A set of Ansible playbooks that automates the task of deploying a base image onto a set of known hardware using ironic in a standalone mode. .. _remote procedure call (RPC): https://en.wikipedia.org/wiki/Remote_procedure_call .. _WSGI: https://en.wikipedia.org/wiki/Web_Server_Gateway_Interface .. _diskimage-builder: https://docs.openstack.org/diskimage-builder/latest/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8986666 ironic-20.1.0/doc/source/install/include/0000775000175000017500000000000000000000000020236 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/include/boot-mode.inc0000664000175000017500000000747000000000000022626 0ustar00zuulzuul00000000000000.. 
_boot_mode_support: Boot mode support ----------------- Some of the bare metal hardware types (namely, ``redfish``, ``ilo`` and generic ``ipmi``) support setting boot mode (Legacy BIOS or UEFI). .. note:: Setting boot mode support in generic ``ipmi`` driver is coupled with setting boot device. That makes boot mode support in the ``ipmi`` driver incomplete. .. note:: In this chapter we will distinguish *ironic node* from *bare metal node*. The difference is that *ironic node* refers to a logical node, as it is configured in ironic, while *bare metal node* indicates the hardware machine that ironic is managing. The following rules apply in order when ironic manages node boot mode: * If the hardware type (or bare metal node) does not implement reading current boot mode of the bare metal node, then ironic assumes that boot mode is not set on the bare metal node * If boot mode is not set on ironic node and bare metal node boot mode is unknown (not set, can't be read etc.), ironic node boot mode is set to the value of the `[deploy]/default_boot_mode` option * If boot mode is set on a bare metal node, but is not set on ironic node, bare metal node boot mode is set on ironic node * If boot mode is set on ironic node, but is not set on the bare metal node, ironic node boot mode is attempted to be set on the bare metal node (failure to set boot mode on the bare metal node will not fail ironic node deployment) * If different boot modes appear to be set on the ironic node and on the bare metal node, ironic node boot mode is attempted to be set on the bare metal node (failure to set boot mode on the bare metal node will fail ironic node deployment) .. warning:: If a bare metal node does not support setting boot mode, then the operator needs to make sure that boot mode configuration is consistent between ironic node and the bare metal node.
The boot modes can be configured in the Bare Metal service in the following way: * Only one boot mode (either ``uefi`` or ``bios``) can be configured for the node. * If the operator wants a node to boot always in ``uefi`` mode or ``bios`` mode, then they may use ``capabilities`` parameter within ``properties`` field of a bare metal node. The operator must manually set the appropriate boot mode on the bare metal node. To configure a node in ``uefi`` mode, then set ``capabilities`` as below:: openstack baremetal node set <node-uuid> --property capabilities='boot_mode:uefi' Conversely, to configure a node in ``bios`` mode, then set the ``capabilities`` as below:: openstack baremetal node set <node-uuid> --property capabilities='boot_mode:bios' .. note:: The Ironic project changed the default boot mode setting for nodes from ``bios`` to ``uefi`` during the Yoga development cycle. Nodes having ``boot_mode`` set to ``uefi`` may be requested by adding an ``extra_spec`` to the Compute service flavor:: nova flavor-key ironic-test-3 set capabilities:boot_mode="uefi" nova boot --flavor ironic-test-3 --image test-image instance-1 If ``capabilities`` is used in ``extra_spec`` as above, nova scheduler (``ComputeCapabilitiesFilter``) will match only bare metal nodes which have the ``boot_mode`` set appropriately in ``properties/capabilities``. It will filter out the rest of the nodes. The above facility for matching in the Compute service can be used in heterogeneous environments where there is a mix of ``uefi`` and ``bios`` machines, and the operator wants to provide a choice to the user regarding boot modes. If the flavor doesn't contain ``boot_mode`` and ``boot_mode`` is configured for bare metal nodes, then nova scheduler will consider all nodes and user may get either ``bios`` or ``uefi`` machine. Some hardware supports setting secure boot mode, see :ref:`secure-boot` for details.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/include/common-configure.inc0000664000175000017500000000122100000000000024174 0ustar00zuulzuul00000000000000The Bare Metal service is configured via its configuration file. This file is typically located at ``/etc/ironic/ironic.conf``. Although some configuration options are mentioned here, it is recommended that you review all the :doc:`/configuration/sample-config` so that the Bare Metal service is configured for your needs. It is possible to set up an ironic-api and an ironic-conductor services on the same host or different hosts. Users also can add new ironic-conductor hosts to deal with an increasing number of bare metal nodes. But the additional ironic-conductor services should be at the same version as that of existing ironic-conductor services. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/include/common-prerequisites.inc0000664000175000017500000000220200000000000025117 0ustar00zuulzuul00000000000000Install and configure prerequisites ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Bare Metal service is a collection of components that provides support to manage and provision physical machines. You can configure these components to run on separate nodes or the same node. In this guide, the components run on one node, typically the Compute Service's compute node. It assumes that the Identity, Image, Compute, and Networking services have already been set up. Set up the database for Bare Metal ---------------------------------- The Bare Metal service stores information in a database. This guide uses the MySQL database that is used by other OpenStack services. #. In MySQL, create an ``ironic`` database that is accessible by the ``ironic`` user. Replace ``IRONIC_DBPASSWORD`` with a suitable password: .. 
code-block:: console # mysql -u root -p mysql> CREATE DATABASE ironic CHARACTER SET utf8; mysql> GRANT ALL PRIVILEGES ON ironic.* TO 'ironic'@'localhost' \ IDENTIFIED BY 'IRONIC_DBPASSWORD'; mysql> GRANT ALL PRIVILEGES ON ironic.* TO 'ironic'@'%' \ IDENTIFIED BY 'IRONIC_DBPASSWORD'; ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/include/configure-ironic-api-mod_wsgi.inc0000664000175000017500000000576600000000000026566 0ustar00zuulzuul00000000000000Configuring ironic-api behind mod_wsgi -------------------------------------- Bare Metal service comes with an example file for configuring the ``ironic-api`` service to run behind Apache with mod_wsgi. .. note:: This is optional, the ironic APIs can be run using independent scripts that provide HTTP servers. But it is generally considered more performant and flexible to run them using a generic HTTP server that supports WSGI (such as Apache or nginx). #. Install the apache service: Fedora/RHEL8/CentOS8:: sudo dnf install httpd Debian/Ubuntu:: apt-get install apache2 SUSE:: zypper install apache2 #. Download the ``etc/apache2/ironic`` file from the `Ironic project tree `_ and copy it to the apache sites: Fedora/RHEL8/CentOS8:: sudo cp etc/apache2/ironic /etc/httpd/conf.d/ironic.conf Debian/Ubuntu:: sudo cp etc/apache2/ironic /etc/apache2/sites-available/ironic.conf SUSE:: sudo cp etc/apache2/ironic /etc/apache2/vhosts.d/ironic.conf #. Edit the recently copied ``/ironic.conf``: #. Modify the ``WSGIDaemonProcess``, ``APACHE_RUN_USER`` and ``APACHE_RUN_GROUP`` directives to set the user and group values to an appropriate user on your server. #. Modify the ``WSGIScriptAlias`` directive to point to the automatically generated ``ironic-api-wsgi`` script that is located in `IRONIC_BIN` directory. #. Modify the ``Directory`` directive to set the path to the Ironic API code. #. 
Modify the ``ErrorLog`` and ``CustomLog`` to redirect the logs to the right directory (on Red Hat systems this is usually under /var/log/httpd). #. Stop and disable the ironic-api service. If ironic-api service is started, the port will be occupied. Apache will fail to start: Fedora/RHEL8/CentOS8/SUSE:: sudo systemctl stop openstack-ironic-api sudo systemctl disable openstack-ironic-api Debian/Ubuntu:: sudo service ironic-api stop sudo service ironic-api disable #. Enable the apache ``ironic`` site and reload: Fedora/RHEL8/CentOS8:: sudo systemctl reload httpd Debian/Ubuntu:: sudo a2ensite ironic sudo service apache2 reload SUSE:: sudo systemctl reload apache2 .. note:: The file ``ironic-api-wsgi`` is automatically generated by pbr and is available in `IRONIC_BIN` directory. It should not be modified. Configure another WSGI container -------------------------------- A slightly different approach has to be used for WSGI containers that cannot use ``ironic-api-wsgi``. For example, for *gunicorn*: .. code-block:: console gunicorn -b 0.0.0.0:6385 'ironic.api.wsgi:initialize_wsgi_app(argv=[])' If you want to pass a configuration file, use: .. code-block:: console gunicorn -b 0.0.0.0:6385 \ 'ironic.api.wsgi:initialize_wsgi_app(argv=["ironic-api", "--config-file=/path/to/_ironic.conf"])'
code-block:: ini [database] # The SQLAlchemy connection string used to connect to the # database (string value) connection=mysql+pymysql://ironic:IRONIC_DBPASSWORD@DB_IP/ironic?charset=utf8 #. Configure the ironic-api service to use the RabbitMQ message broker by setting the following option. Replace ``RPC_*`` with appropriate address details and credentials of RabbitMQ server: .. code-block:: ini [DEFAULT] # A URL representing the messaging driver to use and its full # configuration. (string value) transport_url = rabbit://RPC_USER:RPC_PASSWORD@RPC_HOST:RPC_PORT/ Alternatively, you can use JSON RPC for interactions between ironic-conductor and ironic-api. Enable it in the configuration and provide the keystone credentials to use for authentication: .. code-block:: ini [DEFAULT] rpc_transport = json-rpc [json_rpc] # Authentication type to load (string value) auth_type = password # Authentication URL (string value) auth_url=https://IDENTITY_IP:5000/ # Username (string value) username=ironic # User's password (string value) password=IRONIC_PASSWORD # Project name to scope to (string value) project_name=service # Domain ID containing project (string value) project_domain_id=default # User's domain id (string value) user_domain_id=default If you use port other than the default 8089 for JSON RPC, you have to configure it, for example: .. code-block:: ini [json_rpc] port = 9999 #. Configure the ironic-api service to use these credentials with the Identity service. Replace ``PUBLIC_IDENTITY_IP`` with the public IP of the Identity server, ``PRIVATE_IDENTITY_IP`` with the private IP of the Identity server and replace ``IRONIC_PASSWORD`` with the password you chose for the ``ironic`` user in the Identity service: .. code-block:: ini [DEFAULT] # Authentication strategy used by ironic-api: one of # "keystone" or "noauth". "noauth" should not be used in a # production environment because all authentication will be # disabled. 
(string value) auth_strategy=keystone [keystone_authtoken] # Authentication type to load (string value) auth_type=password # Complete public Identity API endpoint (string value) www_authenticate_uri=http://PUBLIC_IDENTITY_IP:5000 # Complete admin Identity API endpoint. (string value) auth_url=http://PRIVATE_IDENTITY_IP:5000 # Service username. (string value) username=ironic # Service account password. (string value) password=IRONIC_PASSWORD # Service tenant name. (string value) project_name=service # Domain name containing project (string value) project_domain_name=Default # User's domain name (string value) user_domain_name=Default #. Create the Bare Metal service database tables: .. code-block:: bash $ ironic-dbsync --config-file /etc/ironic/ironic.conf create_schema #. Restart the ironic-api service: Fedora/RHEL8/CentOS8/SUSE:: sudo systemctl restart openstack-ironic-api Ubuntu:: sudo service ironic-api restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/include/configure-ironic-conductor.inc0000664000175000017500000001736400000000000026204 0ustar00zuulzuul00000000000000Configuring ironic-conductor service ------------------------------------ #. Replace ``HOST_IP`` with IP of the conductor host. .. code-block:: ini [DEFAULT] # IP address of this host. If unset, will determine the IP # programmatically. If unable to do so, will use "127.0.0.1". # (string value) my_ip=HOST_IP .. note:: If a conductor host has multiple IPs, ``my_ip`` should be set to the IP which is on the same network as the bare metal nodes. #. Configure the location of the database. Ironic-conductor should use the same configuration as ironic-api. Replace ``IRONIC_DBPASSWORD`` with the password of your ``ironic`` user, and replace DB_IP with the IP address where the DB server is located: .. code-block:: ini [database] # The SQLAlchemy connection string to use to connect to the # database. 
(string value) connection=mysql+pymysql://ironic:IRONIC_DBPASSWORD@DB_IP/ironic?charset=utf8 #. Configure the ironic-conductor service to use the RabbitMQ message broker by setting the following option. Ironic-conductor should use the same configuration as ironic-api. Replace ``RPC_*`` with appropriate address details and credentials of RabbitMQ server: .. code-block:: ini [DEFAULT] # A URL representing the messaging driver to use and its full # configuration. (string value) transport_url = rabbit://RPC_USER:RPC_PASSWORD@RPC_HOST:RPC_PORT/ Alternatively, you can use JSON RPC for interactions between ironic-conductor and ironic-api. Enable it in the configuration and provide the keystone credentials to use for authenticating incoming requests (can be the same as for the API): .. code-block:: ini [DEFAULT] rpc_transport = json-rpc [keystone_authtoken] # Authentication type to load (string value) auth_type=password # Complete public Identity API endpoint (string value) www_authenticate_uri=http://PUBLIC_IDENTITY_IP:5000 # Complete admin Identity API endpoint. (string value) auth_url=http://PRIVATE_IDENTITY_IP:5000 # Service username. (string value) username=ironic # Service account password. (string value) password=IRONIC_PASSWORD # Service tenant name. (string value) project_name=service # Domain name containing project (string value) project_domain_name=Default # User's domain name (string value) user_domain_name=Default You can optionally change the host and the port the JSON RPC service will bind to, for example: .. code-block:: ini [json_rpc] host_ip = 192.168.0.10 port = 9999 .. warning:: Hostnames of ironic-conductor machines must be resolvable by ironic-api services when JSON RPC is used. #. Configure credentials for accessing other OpenStack services. In order to communicate with other OpenStack services, the Bare Metal service needs to use service users to authenticate to the OpenStack Identity service when making requests to other services. 
These users' credentials have to be configured in each configuration file section related to the corresponding service: * ``[neutron]`` - to access the OpenStack Networking service * ``[glance]`` - to access the OpenStack Image service * ``[swift]`` - to access the OpenStack Object Storage service * ``[cinder]`` - to access the OpenStack Block Storage service * ``[inspector]`` - to access the OpenStack Bare Metal Introspection service * ``[service_catalog]`` - a special section holding credentials the Bare Metal service will use to discover its own API URL endpoint as registered in the OpenStack Identity service catalog. For simplicity, you can use the same service user for all services. For backward compatibility, this should be the same user configured in the ``[keystone_authtoken]`` section for the ironic-api service (see "Configuring ironic-api service"). However, this is not necessary, and you can create and configure separate service users for each service. Under the hood, Bare Metal service uses ``keystoneauth`` library together with ``Authentication plugin``, ``Session`` and ``Adapter`` concepts provided by it to instantiate service clients. Please refer to `Keystoneauth documentation`_ for supported plugins, their available options as well as Session- and Adapter-related options for authentication, connection and endpoint discovery respectively. In the example below, authentication information for user to access the OpenStack Networking service is configured to use: * Networking service is deployed in the Identity service region named ``RegionTwo``, with only its ``public`` endpoint interface registered in the service catalog. 
endpoint_override = <NEUTRON_API_ENDPOINT> (Replace ``<NEUTRON_API_ENDPOINT>`` with the actual address of a specific Networking service endpoint.)
Configure enabled drivers and hardware types as described in :doc:`/install/enabling-drivers`. A. If you enabled any driver that uses :ref:`direct-deploy`, Swift backend for the Image service must be installed and configured, see :ref:`image-store`. Ceph Object Gateway (RADOS Gateway) is also supported as the Image service's backend, see :ref:`radosgw support`. #. Configure the network for ironic-conductor service to perform node cleaning, see :ref:`cleaning` from the admin guide. #. Restart the ironic-conductor service: Fedora/RHEL7/CentOS7/SUSE:: sudo systemctl restart openstack-ironic-conductor Ubuntu:: sudo service ironic-conductor restart .. _Keystoneauth documentation: https://docs.openstack.org/keystoneauth/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/include/configure-ironic-singleprocess.inc0000664000175000017500000000263600000000000027060 0ustar00zuulzuul00000000000000Configuring single-process ironic --------------------------------- As an alternative to starting separate API and conductor instances, you can start ``ironic`` services that combine an API and a conductor in the same process. This may be particularly beneficial in environments with limited resources and low number of nodes to handle. .. note:: This feature is available starting with the Yoga release series. #. Start with setting up the environment as described in both `Configuring ironic-api service`_ and `Configuring ironic-conductor service`_, but do not start any services. Merge configuration options into a single configuration file. .. note:: Any RPC settings will only take effect if you have more than one combined service started or if you have additional conductors. If you don't plan to have more than one conductor, you can disable the RPC completely: .. code-block:: ini [DEFAULT] rpc_transport = none #. 
Stop existing services if they are already started: Fedora/RHEL/CentOS/SUSE:: sudo systemctl stop openstack-ironic-api sudo systemctl stop openstack-ironic-conductor Ubuntu:: sudo service ironic-api stop sudo service ironic-conductor stop #. Start or restart the ironic service: Fedora/RHEL8/CentOS8/SUSE:: sudo systemctl restart openstack-ironic Ubuntu:: sudo service ironic restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/include/console.inc0000664000175000017500000000021400000000000022370 0ustar00zuulzuul00000000000000Configuring node web console ---------------------------- See :ref:`console`. .. TODO(dtantsur): move the installation documentation here ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/include/disk-label.inc0000664000175000017500000000475100000000000022747 0ustar00zuulzuul00000000000000.. _choosing_the_disk_label: Choosing the disk label ----------------------- .. note:: The term ``disk label`` is historically used in Ironic and was taken from `parted `_. Apparently everyone seems to have a different word for ``disk label`` - these are all the same thing: disk type, partition table, partition map and so on... Ironic allows operators to choose which disk label they want their bare metal node to be deployed with when Ironic is responsible for partitioning the disk; therefore choosing the disk label does not apply when the image being deployed is a ``whole disk image``. There are some edge cases where someone may want to choose a specific disk label for the images being deployed, including but not limited to: * For machines in ``bios`` boot mode with disks larger than 2 terabytes it's recommended to use a ``gpt`` disk label. That's because a capacity beyond 2 terabytes is not addressable by using the MBR partitioning type. 
But, although GPT claims to be backward compatible with legacy BIOS systems `that's not always the case `_. * Operators may want to force the partitioning to be always MBR (even if the machine is deployed with boot mode ``uefi``) to avoid breakage of applications and tools running on those instances. The disk label can be configured in two ways; when Ironic is used with the Compute service or in standalone mode. The following bullet points and sections will describe both methods: * When no disk label is provided Ironic will configure it according to the boot mode (see :ref:`boot_mode_support`); ``bios`` boot mode will use ``msdos`` and ``uefi`` boot mode will use ``gpt``. * Only one disk label - either ``msdos`` or ``gpt`` - can be configured for the node. When used with Compute service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When Ironic is used with the Compute service the disk label should be set to node's ``properties/capabilities`` field and also to the flavor which will request such capability, for example:: baremetal node set --property capabilities='disk_label:gpt' As for the flavor:: nova flavor-key baremetal set capabilities:disk_label="gpt" When used in standalone mode ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When used without the Compute service, the disk label should be set directly to the node's ``instance_info`` field, as below:: baremetal node set --instance-info capabilities='{"disk_label": "gpt"}' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/include/kernel-boot-parameters.inc0000664000175000017500000000762600000000000025326 0ustar00zuulzuul00000000000000.. _kernel-boot-parameters: Appending kernel parameters to boot instances --------------------------------------------- The Bare Metal service supports passing custom kernel parameters to boot instances to fit users' requirements. The way to append the kernel parameters is depending on how to boot instances. 
The way to append the kernel parameters depends on how the instances are booted.
kernel_parameters = ['quiet', 'splash'] grub_cmd = 'GRUB_CMDLINE_LINUX' old_grub_file = grub_file+'~' os.rename(grub_file, old_grub_file) cmdline_existed = False with open(grub_file, 'w') as writer, \ open(old_grub_file, 'r') as reader: for line in reader: key = line.split('=')[0] if key == grub_cmd: #If there is already some value: if line.strip()[-1] == '"': line = line.strip()[:-1] + ' ' + ' '.join(kernel_parameters) + '"' cmdline_existed = True writer.write(line) if not cmdline_existed: line = grub_cmd + '=' + '"' + ' '.join(kernel_parameters) + '"' writer.write(line) os.remove(old_grub_file) os.system('update-grub') os.system('reboot') Console ~~~~~~~ In order to change default console configuration in the Bare Metal service configuration file (``[pxe]`` section in ``/etc/ironic/ironic.conf``), include the serial port terminal and serial speed. Serial speed must be the same as the serial configuration in the BIOS settings, so that the operating system boot process can be seen in the serial console or web console. Following examples represent possible parameters for serial and web console respectively. * Node serial console. The console parameter ``console=ttyS0,115200n8`` uses ``ttyS0`` for console output at ``115200bps, 8bit, non-parity``, e.g.:: [pxe] # Additional append parameters for baremetal PXE boot. kernel_append_params = nofb nomodeset vga=normal console=ttyS0,115200n8 * For node web console configuration is similar with the addition of ``ttyX`` parameter, see example:: [pxe] # Additional append parameters for baremetal PXE boot. kernel_append_params = nofb nomodeset vga=normal console=tty0 console=ttyS0,115200n8 For detailed information on how to add consoles see the reference documents `kernel params`_ and `serial console`_. In case of local boot the Bare Metal service is not able to control kernel boot parameters. To configure console locally, follow 'Local boot' section above. .. 
_`kernel params`: https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html .. _`serial console`: https://www.kernel.org/doc/html/latest/admin-guide/serial-console.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/include/local-boot-partition-images.inc0000664000175000017500000000406300000000000026241 0ustar00zuulzuul00000000000000.. _local-boot-partition-images: Local boot with partition images -------------------------------- The Bare Metal service supports local boot with partition images, meaning that after the deployment the node's subsequent reboots won't happen via PXE or Virtual Media. Instead, it will boot from a local boot loader installed on the disk. .. note:: Whole disk images, on the contrary, support only local boot, and use it by default. It's important to note that in order for this to work the image being deployed with Bare Metal service **must** contain ``grub2`` installed within it. Enabling the local boot is different when Bare Metal service is used with Compute service and without it. The following sections will describe both methods. .. _ironic-python-agent: https://docs.openstack.org/ironic-python-agent/latest/ Enabling local boot with Compute service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable local boot we need to set a capability on the bare metal node, for example:: baremetal node set --property capabilities="boot_option:local" Nodes having ``boot_option`` set to ``local`` may be requested by adding an ``extra_spec`` to the Compute service flavor, for example:: nova flavor-key baremetal set capabilities:boot_option="local" .. note:: If the node is configured to use ``UEFI``, Bare Metal service will create an ``EFI partition`` on the disk and switch the partition table format to ``gpt``. The ``EFI partition`` will be used later by the boot loader (which is installed from the deploy ramdisk). .. 
_local-boot-without-compute: Enabling local boot without Compute ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since adding ``capabilities`` to the node's properties is only used by the nova scheduler to perform more advanced scheduling of instances, we need a way to enable local boot when Compute is not present. To do that we can simply specify the capability via the ``instance_info`` attribute of the node, for example:: baremetal node set --instance-info capabilities='{"boot_option": "local"}' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/include/notifications.inc0000664000175000017500000000144400000000000023605 0ustar00zuulzuul00000000000000Notifications ------------- The Bare Metal service supports the emission of notifications, which are messages sent on a message broker (like RabbitMQ or anything else supported by the `oslo messaging library `_) that indicate various events which occur, such as when a node changes power states. These can be consumed by an external service reading from the message bus. For example, `Searchlight `_ is an OpenStack service that uses notifications to index (and make searchable) resources from the Bare Metal service. Notifications are disabled by default. For a complete list of available notifications and instructions for how to enable them, see the :doc:`/admin/notifications`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/include/root-device-hints.inc0000664000175000017500000000740000000000000024275 0ustar00zuulzuul00000000000000.. _root-device-hints: Specifying the disk for deployment (root device hints) ------------------------------------------------------ The Bare Metal service supports passing hints to the deploy ramdisk about which disk it should pick for the deployment. 
The list of supported hints is: * model (STRING): device identifier * vendor (STRING): device vendor * serial (STRING): disk serial number * size (INT): size of the device in GiB .. note:: A node's 'local_gb' property is often set to a value 1 GiB less than the actual disk size to account for partitioning (this is how DevStack, TripleO and Ironic Inspector work, to name a few). However, in this case ``size`` should be the actual size. For example, for a 128 GiB disk ``local_gb`` will be 127, but size hint will be 128. * wwn (STRING): unique storage identifier * wwn_with_extension (STRING): unique storage identifier with the vendor extension appended * wwn_vendor_extension (STRING): unique vendor storage identifier * rotational (BOOLEAN): whether it's a rotational device or not. This hint makes it easier to distinguish HDDs (rotational) and SSDs (not rotational) when choosing which disk Ironic should deploy the image onto. * hctl (STRING): the SCSI address (Host, Channel, Target and Lun), e.g '1:0:0:0' * by_path (STRING): the alternate device name corresponding to a particular PCI or iSCSI path, e.g /dev/disk/by-path/pci-0000:00 * name (STRING): the device name, e.g /dev/md0 .. warning:: The root device hint name should only be used for devices with constant names (e.g RAID volumes). For SATA, SCSI and IDE disk controllers this hint is not recommended because the order in which the device nodes are added in Linux is arbitrary, resulting in devices like /dev/sda and /dev/sdb `switching around at boot time `_. To associate one or more hints with a node, update the node's properties with a ``root_device`` key, for example:: baremetal node set --property root_device='{"wwn": "0x4000cca77fc4dba1"}' That will guarantee that Bare Metal service will pick the disk device that has the ``wwn`` equal to the specified wwn value, or fail the deployment if it can not be found. .. 
* ``<in>`` substring * For collections: * ``<all-in>`` all elements contained in collection * ``<or>`` find one of these Examples are: * Finding a disk larger or equal to 60 GiB and non-rotational (SSD):: baremetal node set --property root_device='{"size": ">= 60", "rotational": false}' * Finding a disk whose vendor is ``samsung`` or ``winsys``:: baremetal node set --property root_device='{"vendor": "<or> samsung <or> winsys"}'
toctree:: :maxdepth: 2 get_started.rst refarch/index install.rst deploy-ramdisk.rst configure-integration.rst setup-drivers.rst enrollment.rst standalone.rst configdrive.rst advanced.rst troubleshooting.rst next-steps.rst .. toctree:: :hidden: creating-images.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/install-obs.rst0000664000175000017500000000232000000000000021571 0ustar00zuulzuul00000000000000.. _install-obs: ============================================================ Install and configure for openSUSE and SUSE Linux Enterprise ============================================================ This section describes how to install and configure the Bare Metal service for openSUSE Leap 42.2 and SUSE Linux Enterprise Server 12 SP2. .. note:: Installation of the Bare Metal service on openSUSE and SUSE Linux Enterprise Server is not officially supported. Nevertheless, installation should be possible. .. include:: include/common-prerequisites.inc Install and configure components ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Install from packages .. code-block:: console # zypper install openstack-ironic-api openstack-ironic-conductor python3-ironicclient #. Enable services .. code-block:: console # systemctl enable openstack-ironic-api openstack-ironic-conductor # systemctl start openstack-ironic-api openstack-ironic-conductor .. include:: include/common-configure.inc .. include:: include/configure-ironic-api.inc .. include:: include/configure-ironic-api-mod_wsgi.inc .. include:: include/configure-ironic-conductor.inc .. include:: include/configure-ironic-singleprocess.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/install-rdo.rst0000664000175000017500000000202600000000000021575 0ustar00zuulzuul00000000000000.. 
_install-rdo: ============================================================= Install and configure for Red Hat Enterprise Linux and CentOS ============================================================= This section describes how to install and configure the Bare Metal service for Red Hat Enterprise Linux 8 and CentOS 8. .. include:: include/common-prerequisites.inc Install and configure components ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Install from packages (using dnf) .. code-block:: console # dnf install openstack-ironic-api openstack-ironic-conductor python3-ironicclient #. Enable services .. code-block:: console # systemctl enable openstack-ironic-api openstack-ironic-conductor # systemctl start openstack-ironic-api openstack-ironic-conductor .. include:: include/common-configure.inc .. include:: include/configure-ironic-api.inc .. include:: include/configure-ironic-api-mod_wsgi.inc .. include:: include/configure-ironic-conductor.inc .. include:: include/configure-ironic-singleprocess.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/install-ubuntu.rst0000664000175000017500000000144100000000000022333 0ustar00zuulzuul00000000000000.. _install-ubuntu: ================================ Install and configure for Ubuntu ================================ This section describes how to install and configure the Bare Metal service for Ubuntu 14.04 (LTS). .. include:: include/common-prerequisites.inc Install and configure components ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Install from packages (using apt-get) .. code-block:: console # apt-get install ironic-api ironic-conductor python3-ironicclient #. Enable services Services are enabled by default on Ubuntu. .. include:: include/common-configure.inc .. include:: include/configure-ironic-api.inc .. include:: include/configure-ironic-api-mod_wsgi.inc .. include:: include/configure-ironic-conductor.inc .. 
include:: include/configure-ironic-singleprocess.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/install.rst0000664000175000017500000000255400000000000021021 0ustar00zuulzuul00000000000000Install and configure the Bare Metal service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Bare Metal service, code-named ironic, manually from packages on one of the three popular families of Linux distributions. Alternatively, you can use one of the numerous projects that install ironic. One of them is provided by the bare metal team: * `Bifrost `_ installs ironic in the standalone mode (without the rest of OpenStack). More installation projects are developed by other OpenStack teams: * `Kolla `_ can install ironic in containers as part of OpenStack. * OpenStack-Ansible has a `role to install ironic `_. * TripleO uses ironic for provisioning bare metal nodes and can also be used `to install ironic `_. .. NOTE(dtantsur): add your favourite installation tool, but please link to the **Ironic guide**, not to the generic page. If a separate Ironic guide does not exist yet, create it first. Contents -------- .. toctree:: :maxdepth: 2 install-rdo.rst install-ubuntu.rst install-obs.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/next-steps.rst0000664000175000017500000000016300000000000021457 0ustar00zuulzuul00000000000000.. _next-steps: ========== Next steps ========== Your OpenStack environment now includes the Bare Metal service. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8986666 ironic-20.1.0/doc/source/install/refarch/0000775000175000017500000000000000000000000020225 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/refarch/common.rst0000664000175000017500000003252500000000000022256 0ustar00zuulzuul00000000000000Common Considerations ===================== This section covers considerations that are equally important to all described architectures. .. contents:: :local: .. _refarch-common-components: Components ---------- As explained in :doc:`../get_started`, the Bare Metal service has three components. * The Bare Metal API service (``ironic-api``) should be deployed in a similar way as the control plane API services. The exact location will depend on the architecture used. * The Bare Metal conductor service (``ironic-conductor``) is where most of the provisioning logic lives. The following considerations are the most important when deciding on the way to deploy it: * The conductor manages a certain proportion of nodes, distributed to it via a hash ring. This includes constantly polling these nodes for their current power state and hardware sensor data (if enabled and supported by hardware, see :ref:`ipmi-sensor-data` for an example). * The conductor needs access to the `management controller`_ of each node it manages. * The conductor co-exists with TFTP (for PXE) and/or HTTP (for iPXE) services that provide the kernel and ramdisk to boot the nodes. The conductor manages them by writing files to their root directories. * If serial console is used, the conductor launches console processes locally. If the ``nova-serialproxy`` service (part of the Compute service) is used, it has to be able to reach the conductors. Otherwise, they have to be directly accessible by the users. 
* There must be mutual connectivity between the conductor and the nodes being deployed or cleaned. See Networking_ for details. * The provisioning ramdisk which runs the ``ironic-python-agent`` service on start up. .. warning:: The ``ironic-python-agent`` service is not intended to be used or executed anywhere other than a provisioning/cleaning/rescue ramdisk. Hardware and drivers -------------------- The Bare Metal service strives to provide the best support possible for a variety of hardware. However, not all hardware is supported equally well. It depends on both the capabilities of hardware itself and the available drivers. This section covers various considerations related to the hardware interfaces. See :doc:`/install/enabling-drivers` for a detailed introduction into hardware types and interfaces before proceeding. Power and management interfaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The minimum set of capabilities that the hardware has to provide and the driver has to support is as follows: #. getting and setting the power state of the machine #. getting and setting the current boot device #. booting an image provided by the Bare Metal service (in the simplest case, support booting using PXE_ and/or iPXE_) .. note:: Strictly speaking, it is possible to make the Bare Metal service provision nodes without some of these capabilities via some manual steps. It is not the recommended way of deployment, and thus it is not covered in this guide. Once you make sure that the hardware supports these capabilities, you need to find a suitable driver. Most of enterprise-grade hardware has support for IPMI_ and thus can utilize :doc:`/admin/drivers/ipmitool`. Some newer hardware also supports :doc:`/admin/drivers/redfish`. Several vendors provide more specific drivers that usually provide additional capabilities. Check :doc:`/admin/drivers` to find the most suitable one. .. 
_refarch-common-boot: Boot interface ~~~~~~~~~~~~~~ The boot interface of a node manages booting of both the deploy ramdisk and the user instances on the bare metal node. The deploy interface orchestrates the deployment and defines how the image gets transferred to the target disk. The main alternatives are to use PXE/iPXE or virtual media - see :doc:`/admin/interfaces/boot` for a detailed explanation. If a virtual media implementation is available for the hardware, it is recommended using it for better scalability and security. Otherwise, it is recommended to use iPXE, when it is supported by target hardware. Hardware specifications ~~~~~~~~~~~~~~~~~~~~~~~ The Bare Metal services does not impose too many restrictions on the characteristics of hardware itself. However, keep in mind that * By default, the Bare Metal service will pick the smallest hard drive that is larger than 4 GiB for deployment. Another hard drive can be used, but it requires setting :ref:`root device hints `. .. note:: This device does not have to match the boot device set in BIOS (or similar firmware). * The machines should have enough RAM to fit the deployment/cleaning ramdisk to run. The minimum varies greatly depending on the way the ramdisk was built. For example, *tinyipa*, the TinyCoreLinux-based ramdisk used in the CI, only needs 400 MiB of RAM, while ramdisks built by *diskimage-builder* may require 3 GiB or more. Image types ----------- The Bare Metal service can deploy two types of images: * *Whole-disk* images that contain a complete partitioning table with all necessary partitions and a bootloader. Such images are the most universal, but may be harder to build. * *Partition images* that contain only the root partition. The Bare Metal service will create the necessary partitions and install a boot loader, if needed. .. warning:: Partition images are only supported with GNU/Linux operating systems. .. 
warning:: If you plan on using local boot, your partition images must contain GRUB2 bootloader tools to enable ironic to set up the bootloader during deploy. Local vs network boot --------------------- The Bare Metal service supports booting user instances either using a local bootloader or using the driver's boot interface (e.g. via PXE_ or iPXE_ protocol in case of the ``pxe`` interface). Network boot cannot be used with certain architectures (for example, when no tenant networks have access to the control plane). Additional considerations are related to the ``pxe`` boot interface, and other boot interfaces based on it: * Local boot makes node's boot process independent of the Bare Metal conductor managing it. Thus, nodes are able to reboot correctly, even if the Bare Metal TFTP or HTTP service is down. * Network boot (and iPXE) must be used when booting nodes from remote volumes, if the driver does not support attaching volumes out-of-band. The default boot option for the cloud can be changed via the Bare Metal service configuration file, for example: .. code-block:: ini [deploy] default_boot_option = local This default can be overridden by setting the ``boot_option`` capability on a node. See :ref:`local-boot-partition-images` for details. .. note:: Currently, local boot is used by default. It's safer to set the ``default_boot_option`` explicitly. .. _refarch-common-networking: Networking ---------- There are several recommended network topologies to be used with the Bare Metal service. They are explained in depth in specific architecture documentation. However, several considerations are common for all of them: * There has to be a *provisioning* network, which is used by nodes during the deployment process. If allowed by the architecture, this network should not be accessible by end users, and should not have access to the internet. * There has to be a *cleaning* network, which is used by nodes during the cleaning process. 
* There should be a *rescuing* network, which is used by nodes during the rescue process. It can be skipped if the rescue process is not supported. .. note:: In the majority of cases, the same network should be used for cleaning, provisioning and rescue for simplicity. Unless noted otherwise, everything in these sections apply to all three networks. * The baremetal nodes must have access to the Bare Metal API while connected to the provisioning/cleaning/rescuing network. .. note:: Only two endpoints need to be exposed there:: GET /v1/lookup POST /v1/heartbeat/[a-z0-9\-]+ You may want to limit access from this network to only these endpoints, and make these endpoint not accessible from other networks. * If the ``pxe`` boot interface (or any boot interface based on it) is used, then the baremetal nodes should have untagged (access mode) connectivity to the provisioning/cleaning/rescuing networks. It allows PXE firmware, which does not support VLANs, to communicate with the services required for provisioning. .. note:: It depends on the *network interface* whether the Bare Metal service will handle it automatically. Check the networking documentation for the specific architecture. Sometimes it may be necessary to disable the spanning tree protocol delay on the switch - see :ref:`troubleshooting-stp`. * The Baremetal nodes need to have access to any services required for provisioning/cleaning/rescue, while connected to the provisioning/cleaning/rescuing network. This may include: * a TFTP server for PXE boot and also an HTTP server when iPXE is enabled * either an HTTP server or the Object Storage service in case of the ``direct`` deploy interface and some virtual media boot interfaces * The Baremetal Conductors need to have access to the booted baremetal nodes during provisioning/cleaning/rescue. A conductor communicates with an internal API, provided by **ironic-python-agent**, to conduct actions on nodes. .. 
_refarch-common-ha: HA and Scalability ------------------ ironic-api ~~~~~~~~~~ The Bare Metal API service is stateless, and thus can be easily scaled horizontally. It is recommended to deploy it as a WSGI application behind e.g. Apache or another WSGI container. .. note:: This service accesses the ironic database for reading entities (e.g. in response to ``GET /v1/nodes`` request) and in rare cases for writing. ironic-conductor ~~~~~~~~~~~~~~~~ High availability ^^^^^^^^^^^^^^^^^ The Bare Metal conductor service utilizes the active/active HA model. Every conductor manages a certain subset of nodes. The nodes are organized in a hash ring that tries to keep the load spread more or less uniformly across the conductors. When a conductor is considered offline, its nodes are taken over by other conductors. As a result of this, you need at least 2 conductor hosts for an HA deployment. Performance ^^^^^^^^^^^ Conductors can be resource intensive, so it is recommended (but not required) to keep all conductors separate from other services in the cloud. The minimum required number of conductors in a deployment depends on several factors: * the performance of the hardware where the conductors will be running, * the speed and reliability of the `management controller`_ of the bare metal nodes (for example, handling slower controllers may require having less nodes per conductor), * the frequency, at which the management controllers are polled by the Bare Metal service (see the ``sync_power_state_interval`` option), * the bare metal driver used for nodes (see `Hardware and drivers`_ above), * the network performance, * the maximum number of bare metal nodes that are provisioned simultaneously (see the ``max_concurrent_builds`` option for the Compute service). We recommend a target of **100** bare metal nodes per conductor for maximum reliability and performance. There is some tolerance for a larger number per conductor. 
However, it was reported [1]_ [2]_ that reliability degrades when handling approximately 300 bare metal nodes per conductor. Disk space ^^^^^^^^^^ Each conductor needs enough free disk space to cache images it uses. Depending on the combination of the deploy interface and the boot option, the space requirements are different: * The deployment kernel and ramdisk are always cached during the deployment. * When ``[agent]image_download_source`` is set to ``http`` and Glance is used, the conductor will download instances images locally to serve them from its HTTP server. Use ``swift`` to publish images using temporary URLs and convert them on the node's side. When ``[agent]image_download_source`` is set to ``local``, it will happen even for HTTP(s) URLs. For standalone case use ``http`` to avoid unnecessary caching of images. In both cases a cached image is converted to raw if ``force_raw_images`` is ``True`` (the default). .. note:: ``image_download_source`` can also be provided in the node's ``driver_info`` or ``instance_info``. See :ref:`image_download_source`. * When network boot is used, the instance image kernel and ramdisk are cached locally while the instance is active. .. note:: All images may be stored for some time after they are no longer needed. This is done to speed up simultaneous deployments of many similar images. The caching can be configured via the ``image_cache_size`` and ``image_cache_ttl`` configuration options in the ``pxe`` group. .. [1] http://lists.openstack.org/pipermail/openstack-dev/2017-June/118033.html .. [2] http://lists.openstack.org/pipermail/openstack-dev/2017-June/118327.html Other services ~~~~~~~~~~~~~~ When integrating with other OpenStack services, more considerations may need to be applied. This is covered in other parts of this guide. .. _PXE: https://en.wikipedia.org/wiki/Preboot_Execution_Environment .. _iPXE: https://en.wikipedia.org/wiki/IPXE .. 
_IPMI: https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface .. _management controller: https://en.wikipedia.org/wiki/Out-of-band_management ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/refarch/index.rst0000664000175000017500000000072700000000000022074 0ustar00zuulzuul00000000000000Reference Deploy Architectures ============================== This section covers the way we recommend the Bare Metal service to be deployed and managed. It is assumed that a reader has already gone through :doc:`/user/index`. It may be also useful to try :ref:`deploy_devstack` first to get better familiar with the concepts used in this guide. .. toctree:: :maxdepth: 2 common Scenarios --------- .. toctree:: :maxdepth: 2 small-cloud-trusted-tenants ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/refarch/small-cloud-trusted-tenants.rst0000664000175000017500000002306400000000000026342 0ustar00zuulzuul00000000000000Small cloud with trusted tenants ================================ Story ----- As an operator I would like to build a small cloud with both virtual and bare metal instances or add bare metal provisioning to my existing small or medium scale single-site OpenStack cloud. The expected number of bare metal machines is less than 100, and the rate of provisioning and unprovisioning is expected to be low. All users of my cloud are trusted by me to not conduct malicious actions towards each other or the cloud infrastructure itself. As a user I would like to occasionally provision bare metal instances through the Compute API by selecting an appropriate Compute flavor. I would like to be able to boot them from images provided by the Image service or from volumes provided by the Volume service. 
Components ---------- This architecture assumes `an OpenStack installation`_ with the following components participating in the bare metal provisioning: * The :nova-doc:`Compute service <>` manages bare metal instances. * The :neutron-doc:`Networking service <>` provides DHCP for bare metal instances. * The :glance-doc:`Image service <>` provides images for bare metal instances. The following services can be optionally used by the Bare Metal service: * The :cinder-doc:`Volume service <>` provides volumes to boot bare metal instances from. * The :ironic-inspector-doc:`Bare Metal Introspection service <>` simplifies enrolling new bare metal machines by conducting in-band introspection. Node roles ---------- An OpenStack installation in this guide has at least these three types of nodes: * A *controller* node hosts the control plane services. * A *compute* node runs the virtual machines and hosts a subset of Compute and Networking components. * A *block storage* node provides persistent storage space for both virtual and bare metal nodes. The *compute* and *block storage* nodes are configured as described in the installation guides of the :nova-doc:`Compute service <>` and the :cinder-doc:`Volume service <>` respectively. The *controller* nodes host the Bare Metal service components. Networking ---------- The networking architecture will highly depend on the exact operating requirements. This guide expects the following existing networks: *control plane*, *storage* and *public*. Additionally, two more networks will be needed specifically for bare metal provisioning: *bare metal* and *management*. .. TODO(dtantsur): describe the storage network? .. TODO(dtantsur): a nice picture to illustrate the layout Control plane network ~~~~~~~~~~~~~~~~~~~~~ The *control plane network* is the network where OpenStack control plane services provide their public API. The Bare Metal API will be served to the operators and to the Compute service through this network. 
Public network ~~~~~~~~~~~~~~ The *public network* is used in a typical OpenStack deployment to create floating IPs for outside access to instances. Its role is the same for a bare metal deployment. .. note:: Since, as explained below, bare metal nodes will be put on a flat provider network, it is also possible to organize direct access to them, without using floating IPs and bypassing the Networking service completely. Bare metal network ~~~~~~~~~~~~~~~~~~ The *Bare metal network* is a dedicated network for bare metal nodes managed by the Bare Metal service. This architecture uses :ref:`flat bare metal networking `, in which both tenant traffic and technical traffic related to the Bare Metal service operation flow through this one network. Specifically, this network will serve as the *provisioning*, *cleaning* and *rescuing* network. It will also be used for introspection via the Bare Metal Introspection service. See :ref:`common networking considerations ` for an in-depth explanation of the networks used by the Bare Metal service. DHCP and boot parameters will be provided on this network by the Networking service's DHCP agents. For booting from volumes this network has to have a route to the *storage network*. Management network ~~~~~~~~~~~~~~~~~~ *Management network* is an independent network on which BMCs of the bare metal nodes are located. The ``ironic-conductor`` process needs access to this network. The tenants of the bare metal nodes must not have access to it. .. note:: The :ref:`direct deploy interface ` and certain :doc:`/admin/drivers` require the *management network* to have access to the Object storage service backend. Controllers ----------- A *controller* hosts the OpenStack control plane services as described in the `control plane design guide`_. While this architecture allows using *controllers* in a non-HA configuration, it is recommended to have at least three of them for HA. See :ref:`refarch-common-ha` for more details. 
Bare Metal services ~~~~~~~~~~~~~~~~~~~ The following components of the Bare Metal service are installed on a *controller* (see :ref:`components of the Bare Metal service `): * The Bare Metal API service either as a WSGI application or the ``ironic-api`` process. Typically, a load balancer, such as HAProxy, spreads the load between the API instances on the *controllers*. The API has to be served on the *control plane network*. Additionally, it has to be exposed to the *bare metal network* for the ramdisk callback API. * The ``ironic-conductor`` process. These processes work in active/active HA mode as explained in :ref:`refarch-common-ha`, thus they can be installed on all *controllers*. Each will handle a subset of bare metal nodes. The ``ironic-conductor`` processes have to have access to the following networks: * *control plane* for interacting with other services * *management* for contacting node's BMCs * *bare metal* for contacting deployment, cleaning or rescue ramdisks * TFTP and HTTP service for booting the nodes. Each ``ironic-conductor`` process has to have a matching TFTP and HTTP service. They should be exposed only to the *bare metal network* and must not be behind a load balancer. * The ``nova-compute`` process (from the Compute service). These processes work in active/active HA mode when dealing with bare metal nodes, thus they can be installed on all *controllers*. Each will handle a subset of bare metal nodes. .. note:: There is no 1-1 mapping between ``ironic-conductor`` and ``nova-compute`` processes, as they communicate only through the Bare Metal API service. * The :networking-baremetal-doc:`networking-baremetal <>` ML2 plugin should be loaded into the Networking service to assist with binding bare metal ports. The :ironic-neutron-agent-doc:`ironic-neutron-agent <>` service should be started as well. * If the Bare Metal introspection is used, its ``ironic-inspector`` process has to be installed on all *controllers*. 
Each such process works as both Bare Metal Introspection API and conductor service. A load balancer should be used to spread the API load between *controllers*. The API has to be served on the *control plane network*. Additionally, it has to be exposed to the *bare metal network* for the ramdisk callback API. .. TODO(dtantsur): a nice picture to illustrate the above Shared services ~~~~~~~~~~~~~~~ A *controller* also hosts two services required for the normal operation of OpenStack: * Database service (MySQL/MariaDB is typically used, but other enterprise-grade database solutions can be used as well). All Bare Metal service components need access to the database service. * Message queue service (RabbitMQ is typically used, but other enterprise-grade message queue brokers can be used as well). Both Bare Metal API (WSGI application or ``ironic-api`` process) and the ``ironic-conductor`` processes need access to the message queue service. The Bare Metal Introspection service does not need it. .. note:: These services are required for all OpenStack services. If you're adding the Bare Metal service to your cloud, you may reuse the existing database and messaging queue services. Bare metal nodes ---------------- Each bare metal node must be capable of booting from network, virtual media or other boot technology supported by the Bare Metal service as explained in :ref:`refarch-common-boot`. Each node must have one NIC on the *bare metal network*, and this NIC (and **only** it) must be configured to be able to boot from network. This is usually done in the *BIOS setup* or a similar firmware configuration utility. There is no need to alter the boot order, as it is managed by the Bare Metal service. Other NICs, if present, will not be managed by OpenStack. The NIC on the *bare metal network* should have untagged connectivity to it, since PXE firmware usually does not support VLANs - see :ref:`refarch-common-networking` for details. 
Storage ------- If your hardware **and** its bare metal :doc:`driver ` support booting from remote volumes, please check the driver documentation for information on how to enable it. It may include routing *management* and/or *bare metal* networks to the *storage network*. In case of the standard :ref:`pxe-boot`, booting from remote volumes is done via iPXE. In that case, the Volume storage backend must support iSCSI_ protocol, and the *bare metal network* has to have a route to the *storage network*. See :doc:`/admin/boot-from-volume` for more details. .. _an OpenStack installation: https://docs.openstack.org/arch-design/use-cases/use-case-general-compute.html .. _control plane design guide: https://docs.openstack.org/arch-design/design-control-plane.html .. _iSCSI: https://en.wikipedia.org/wiki/ISCSI ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/setup-drivers.rst0000664000175000017500000000026200000000000022161 0ustar00zuulzuul00000000000000Set up the drivers for the Bare Metal service ============================================= .. toctree:: :maxdepth: 1 enabling-drivers configure-pxe configure-ipmi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8986666 ironic-20.1.0/doc/source/install/standalone/0000775000175000017500000000000000000000000020743 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/standalone/configure.rst0000664000175000017500000000750100000000000023461 0ustar00zuulzuul00000000000000Configuration ============= This guide covers manual configuration of the Bare Metal service in the standalone mode. Alternatively, Bifrost_ can be used for automatic configuration. .. 
_Bifrost: https://docs.openstack.org/bifrost/latest/ Service settings ---------------- It is possible to use the Bare Metal service without other OpenStack services. You should make the following changes to ``/etc/ironic/ironic.conf``: #. Choose an authentication strategy which supports standalone, one option is ``noauth``: .. code-block:: ini [DEFAULT] auth_strategy=noauth Another option is ``http_basic`` where the credentials are stored in an `Apache htpasswd format`_ file: .. code-block:: ini [DEFAULT] auth_strategy=http_basic http_basic_auth_user_file=/etc/ironic/htpasswd Only the ``bcrypt`` format is supported, and the Apache `htpasswd` utility can be used to populate the file with entries, for example: .. code-block:: shell htpasswd -nbB myName myPassword >> /etc/ironic/htpasswd #. If you want to disable the Networking service, you should have your network pre-configured to serve DHCP and TFTP for machines that you're deploying. To disable it, change the following lines: .. code-block:: ini [dhcp] dhcp_provider=none .. note:: If you disabled the Networking service and the driver that you use is supported by at most one conductor, PXE boot will still work for your nodes without any manual config editing. This is because you know all the DHCP options that will be used for deployment and can set up your DHCP server appropriately. If you have multiple conductors per driver, it would be better to use Networking since it will do all the dynamically changing configurations for you. #. If you want to disable using a messaging broker between conductor and API processes, switch to JSON RPC instead: .. code-block:: ini [DEFAULT] rpc_transport = json-rpc JSON RPC also has its own authentication strategy. If it is not specified then the stategy defaults to ``[DEFAULT]`` ``auth_strategy``. The following will set JSON RPC to ``noauth``: .. 
code-block:: ini [json_rpc] auth_strategy = noauth For ``http_basic`` the conductor server needs a credentials file to validate requests: .. code-block:: ini [json_rpc] auth_strategy = http_basic http_basic_auth_user_file = /etc/ironic/htpasswd-json-rpc The API server also needs client-side credentials to be specified: .. code-block:: ini [json_rpc] auth_type = http_basic username = myName password = myPassword #. Starting with the Yoga release series, you can use a combined API+conductor service and completely disable the RPC. Set .. code-block:: ini [DEFAULT] rpc_transport = none and use the ``ironic`` executable to start the combined service. .. note:: The combined service also works with RPC enabled, which can be useful for some deployments, but may not be advisable for all security models. Using CLI --------- To use the :python-ironicclient-doc:`baremetal CLI `, set up these environment variables. If the ``noauth`` authentication strategy is being used, the value ``none`` must be set for OS_AUTH_TYPE. OS_ENDPOINT is the URL of the ironic-api process. For example: .. code-block:: shell export OS_AUTH_TYPE=none export OS_ENDPOINT=http://localhost:6385/ If the ``http_basic`` authentication strategy is being used, the value ``http_basic`` must be set for OS_AUTH_TYPE. For example: .. code-block:: shell export OS_AUTH_TYPE=http_basic export OS_ENDPOINT=http://localhost:6385/ export OS_USERNAME=myUser export OS_PASSWORD=myPassword .. _`Apache htpasswd format`: https://httpd.apache.org/docs/current/misc/password_encryptions.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/standalone/deploy.rst0000664000175000017500000000012400000000000022766 0ustar00zuulzuul00000000000000Deploying ========= The content has been migrated, please see :doc:`/user/deploy`. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/standalone/enrollment.rst0000664000175000017500000001104700000000000023657 0ustar00zuulzuul00000000000000Enrollment ========== Preparing images ---------------- If you don't use Image service, it's possible to provide images to Bare Metal service via a URL. At the moment, only two types of URLs are acceptable instead of Image service UUIDs: HTTP(S) URLs (for example, "http://my.server.net/images/img") and file URLs (file:///images/img). There are however some limitations for different hardware interfaces: * If you're using :ref:`direct-deploy` with HTTP(s) URLs, you have to provide the Bare Metal service with the a checksum of your instance image. MD5 is used by default for backward compatibility reasons. To compute an MD5 checksum, you can use the following command: .. code-block:: console $ md5sum image.qcow2 ed82def8730f394fb85aef8a208635f6 image.qcow2 Alternatively, use a SHA256 checksum or any other algorithm supported by the Python's hashlib_, e.g.: .. code-block:: console $ sha256sum image.qcow2 9f6c942ad81690a9926ff530629fb69a82db8b8ab267e2cbd59df417c1a28060 image.qcow2 * :ref:`direct-deploy` started supporting ``file://`` images in the Victoria release cycle, before that only HTTP(s) had been supported. .. warning:: File images must be accessible to every conductor! Use a shared file system if you have more than one conductor. The ironic CLI tool will not transfer the file from a local machine to the conductor(s). .. note:: The Bare Metal service tracks content changes for non-Glance images by checking their modification date and time. For example, for HTTP image, if 'Last-Modified' header value from response to a HEAD request to "http://my.server.net/images/deploy.ramdisk" is greater than cached image modification time, Ironic will re-download the content. 
For "file://" images, the file system modification time is used. If the HTTP server does not provide the last modification date and time, the image will be redownloaded every time it is used. .. _hashlib: https://docs.python.org/3/library/hashlib.html Enrolling nodes --------------- #. Create a node in Bare Metal service. At minimum, you must specify the driver name (for example, ``ipmi``). You can also specify all the required driver parameters in one command. This will return the node UUID: .. code-block:: console $ baremetal node create --driver ipmi \ --driver-info ipmi_address=ipmi.server.net \ --driver-info ipmi_username=user \ --driver-info ipmi_password=pass \ --driver-info deploy_kernel=file:///images/deploy.vmlinuz \ --driver-info deploy_ramdisk=http://my.server.net/images/deploy.ramdisk +--------------+--------------------------------------------------------------------------+ | Property | Value | +--------------+--------------------------------------------------------------------------+ | uuid | be94df40-b80a-4f63-b92b-e9368ee8d14c | | driver_info | {u'deploy_ramdisk': u'http://my.server.net/images/deploy.ramdisk', | | | u'deploy_kernel': u'file:///images/deploy.vmlinuz', u'ipmi_address': | | | u'ipmi.server.net', u'ipmi_username': u'user', u'ipmi_password': | | | u'******'} | | extra | {} | | driver | ipmi | | chassis_uuid | | | properties | {} | +--------------+--------------------------------------------------------------------------+ Note that here deploy_kernel and deploy_ramdisk contain links to images instead of Image service UUIDs. #. As in case of Compute service, you can also provide ``capabilities`` to node properties, but they will be used only by Bare Metal service (for example, boot mode). Although you don't need to add properties like ``memory_mb``, ``cpus`` etc. as Bare Metal service will require UUID of a node you're going to deploy. #. 
Then create a port to inform Bare Metal service of the network interface cards which are part of the node by creating a port with each NIC's MAC address. In this case, they're used for naming of PXE configs for a node: .. code-block:: shell baremetal port create $MAC_ADDRESS --node $NODE_UUID ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/standalone.rst0000664000175000017500000000106200000000000021474 0ustar00zuulzuul00000000000000Using Bare Metal service as a standalone service ================================================ This guide explains how to configure and use the Bare Metal service standalone, i.e. without other OpenStack services. In this mode users are interacting with the bare metal API directly, not though OpenStack Compute. .. toctree:: :maxdepth: 3 standalone/configure standalone/enrollment Once the installation is done, please see :doc:`/user/deploy` for information on how to deploy bare metal machines. .. toctree:: :hidden: standalone/deploy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/install/troubleshooting.rst0000664000175000017500000001757400000000000022612 0ustar00zuulzuul00000000000000.. _troubleshooting-install: =============== Troubleshooting =============== Once all the services are running and configured properly, and a node has been enrolled with the Bare Metal service and is in the ``available`` provision state, the Compute service should detect the node as an available resource and expose it to the scheduler. .. note:: There is a delay, and it may take up to a minute (one periodic task cycle) for the Compute service to recognize any changes in the Bare Metal service's resources (both additions and deletions). In addition to watching ``nova-compute`` log files, you can see the available resources by looking at the list of Compute hypervisors. 
The resources reported therein should match the bare metal node properties, and the Compute service flavor. Here is an example set of commands to compare the resources in Compute service and Bare Metal service:: $ baremetal node list +--------------------------------------+---------------+-------------+--------------------+-------------+ | UUID | Instance UUID | Power State | Provisioning State | Maintenance | +--------------------------------------+---------------+-------------+--------------------+-------------+ | 86a2b1bb-8b29-4964-a817-f90031debddb | None | power off | available | False | +--------------------------------------+---------------+-------------+--------------------+-------------+ $ baremetal node show 86a2b1bb-8b29-4964-a817-f90031debddb +------------------------+----------------------------------------------------------------------+ | Property | Value | +------------------------+----------------------------------------------------------------------+ | instance_uuid | None | | properties | {u'memory_mb': u'1024', u'cpu_arch': u'x86_64', u'local_gb': u'10', | | | u'cpus': u'1'} | | maintenance | False | | driver_info | { [SNIP] } | | extra | {} | | last_error | None | | created_at | 2014-11-20T23:57:03+00:00 | | target_provision_state | None | | driver | ipmi | | updated_at | 2014-11-21T00:47:34+00:00 | | instance_info | {} | | chassis_uuid | 7b49bbc5-2eb7-4269-b6ea-3f1a51448a59 | | provision_state | available | | reservation | None | | power_state | power off | | console_enabled | False | | uuid | 86a2b1bb-8b29-4964-a817-f90031debddb | +------------------------+----------------------------------------------------------------------+ $ nova hypervisor-list +--------------------------------------+--------------------------------------+-------+---------+ | ID | Hypervisor hostname | State | Status | +--------------------------------------+--------------------------------------+-------+---------+ | 584cfdc8-9afd-4fbb-82ef-9ff25e1ad3f3 | 
86a2b1bb-8b29-4964-a817-f90031debddb | up | enabled | +--------------------------------------+--------------------------------------+-------+---------+ $ nova hypervisor-show 584cfdc8-9afd-4fbb-82ef-9ff25e1ad3f3 +-------------------------+--------------------------------------+ | Property | Value | +-------------------------+--------------------------------------+ | cpu_info | baremetal cpu | | current_workload | 0 | | disk_available_least | - | | free_disk_gb | 10 | | free_ram_mb | 1024 | | host_ip | [ SNIP ] | | hypervisor_hostname | 86a2b1bb-8b29-4964-a817-f90031debddb | | hypervisor_type | ironic | | hypervisor_version | 1 | | id | 1 | | local_gb | 10 | | local_gb_used | 0 | | memory_mb | 1024 | | memory_mb_used | 0 | | running_vms | 0 | | service_disabled_reason | - | | service_host | my-test-host | | service_id | 6 | | state | up | | status | enabled | | vcpus | 1 | | vcpus_used | 0 | +-------------------------+--------------------------------------+ .. _maintenance_mode: Maintenance mode ---------------- Maintenance mode may be used if you need to take a node out of the resource pool. Putting a node in maintenance mode will prevent Bare Metal service from executing periodic tasks associated with the node. This will also prevent Compute service from placing a tenant instance on the node by not exposing the node to the nova scheduler. Nodes can be placed into maintenance mode with the following command. :: $ baremetal node maintenance set $NODE_UUID A maintenance reason may be included with the optional ``--reason`` command line option. This is a free form text field that will be displayed in the ``maintenance_reason`` section of the ``node show`` command. :: $ baremetal node maintenance set $UUID --reason "Need to add ram." 
$ baremetal node show $UUID +------------------------+--------------------------------------+ | Property | Value | +------------------------+--------------------------------------+ | target_power_state | None | | extra | {} | | last_error | None | | updated_at | 2015-04-27T15:43:58+00:00 | | maintenance_reason | Need to add ram. | | ... | ... | | maintenance | True | | ... | ... | +------------------------+--------------------------------------+ To remove maintenance mode and clear any ``maintenance_reason`` use the following command. :: $ baremetal node maintenance unset $NODE_UUID ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8986666 ironic-20.1.0/doc/source/user/0000775000175000017500000000000000000000000016123 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/user/architecture.rst0000664000175000017500000003256000000000000021345 0ustar00zuulzuul00000000000000================================ Understanding Bare Metal service ================================ .. TODO: this file needs to be cleaned up Why Provision Bare Metal ======================== Here are a few use-cases for bare metal (physical server) provisioning in cloud; there are doubtless many more interesting ones: - High-performance computing clusters - Computing tasks that require access to hardware devices which can't be virtualized - Database hosting (some databases run poorly in a hypervisor) - Single tenant, dedicated hardware for performance, security, dependability and other regulatory requirements - Or, rapidly deploying a cloud infrastructure Conceptual Architecture ======================= The following diagram shows the relationships and how all services come into play during the provisioning of a physical server. (Note that Ceilometer and Swift can be used with Ironic, but are missing from this diagram.) .. 
figure:: ../images/conceptual_architecture.png :alt: ConceptualArchitecture Key Technologies for Bare Metal Hosting ======================================= Preboot Execution Environment (PXE) ----------------------------------- PXE is part of the Wired for Management (WfM) specification developed by Intel and Microsoft. PXE enables a system's BIOS and network interface card (NIC) to bootstrap a computer from the network in place of a disk. Bootstrapping is the process by which a system loads the OS into local memory so that it can be executed by the processor. This capability of allowing a system to boot over a network simplifies server deployment and server management for administrators. Dynamic Host Configuration Protocol (DHCP) ------------------------------------------ DHCP is a standardized networking protocol used on Internet Protocol (IP) networks for dynamically distributing network configuration parameters, such as IP addresses for interfaces and services. Using PXE, the BIOS uses DHCP to obtain an IP address for the network interface and to locate the server that stores the network bootstrap program (NBP). Network Bootstrap Program (NBP) ------------------------------- NBP is equivalent to GRUB (GRand Unified Bootloader) or LILO (LInux LOader) - loaders which are traditionally used in local booting. Like the boot program in a hard drive environment, the NBP is responsible for loading the OS kernel into memory so that the OS can be bootstrapped over a network. Trivial File Transfer Protocol (TFTP) ------------------------------------- TFTP is a simple file transfer protocol that is generally used for automated transfer of configuration or boot files between machines in a local environment. In a PXE environment, TFTP is used to download NBP over the network using information from the DHCP server. 
Intelligent Platform Management Interface (IPMI) ------------------------------------------------ IPMI is a standardized computer system interface used by system administrators for out-of-band management of computer systems and monitoring of their operation. It is a method to manage systems that may be unresponsive or powered off by using only a network connection to the hardware rather than to an operating system. .. _understanding-deployment: Understanding Bare Metal Deployment =================================== What happens when a boot instance request comes in? The below diagram walks through the steps involved during the provisioning of a bare metal instance. These pre-requisites must be met before the deployment process: * Dependent packages to be configured on the Bare Metal service node(s) where ironic-conductor is running like tftp-server, ipmi, syslinux etc for bare metal provisioning. * Nova must be configured to make use of the bare metal service endpoint and compute driver should be configured to use ironic driver on the Nova compute node(s). * Flavors to be created for the available hardware. Nova must know the flavor to boot from. * Images to be made available in Glance. Listed below are some image types required for successful bare metal deployment: - bm-deploy-kernel - bm-deploy-ramdisk - user-image - user-image-vmlinuz - user-image-initrd * Hardware to be enrolled via the bare metal API service. Deploy Process -------------- This describes a typical bare metal node deployment within OpenStack using PXE to boot the ramdisk. Depending on the ironic driver interfaces used, some of the steps might be marginally different, however the majority of them will remain the same. #. A boot instance request comes in via the Nova API, through the message queue to the Nova scheduler. #. Nova scheduler applies filters and finds the eligible hypervisor. 
The nova scheduler also uses the flavor's ``extra_specs``, such as ``cpu_arch``, to match the target physical node. #. Nova compute manager claims the resources of the selected hypervisor. #. Nova compute manager creates (unbound) tenant virtual interfaces (VIFs) in the Networking service according to the network interfaces requested in the nova boot request. A caveat here is, the MACs of the ports are going to be randomly generated, and will be updated when the VIF is attached to some node to correspond to the node network interface card's (or bond's) MAC. #. A spawn task is created by the nova compute which contains all the information such as which image to boot from etc. It invokes the ``driver.spawn`` from the virt layer of Nova compute. During the spawn process, the virt driver does the following: #. Updates the target ironic node with the information about deploy image, instance UUID, requested capabilities and various flavor properties. #. Validates node's power and deploy interfaces, by calling the ironic API. #. Attaches the previously created VIFs to the node. Each neutron port can be attached to any ironic port or port group, with port groups having higher priority than ports. On ironic side, this work is done by the network interface. Attachment here means saving the VIF identifier into ironic port or port group and updating VIF MAC to match the port's or port group's MAC, as described in bullet point 4. #. Generates config drive, if requested. #. Nova's ironic virt driver issues a deploy request via the Ironic API to the Ironic conductor servicing the bare metal node. #. Virtual interfaces are plugged in and Neutron API updates DHCP port to set PXE/TFTP options. In case of using ``neutron`` network interface, ironic creates separate provisioning ports in the Networking service, while in case of ``flat`` network interface, the ports created by nova are used both for provisioning and for deployed instance networking. #. 
The ironic node's boot interface prepares (i)PXE configuration and caches deploy kernel and ramdisk. #. The ironic node's management interface issues commands to enable network boot of a node. #. The ironic node's deploy interface caches the instance image, kernel and ramdisk if needed (it is needed in case of netboot for example). #. The ironic node's power interface instructs the node to power on. #. The node boots the deploy ramdisk. #. Depending on the exact driver used, the deploy ramdisk downloads the image from a URL (:ref:`direct-deploy`) or the conductor uses SSH to execute commands (:ref:`ansible-deploy`). The URL can be generated by Swift API-compatible object stores, for example Swift itself or RadosGW, or provided by a user. The image deployment is done. #. The node's boot interface switches pxe config to refer to instance images (or, in case of local boot, sets boot device to disk), and asks the ramdisk agent to soft power off the node. If the soft power off by the ramdisk agent fails, the bare metal node is powered off via IPMI/BMC call. #. The deploy interface triggers the network interface to remove provisioning ports if they were created, and binds the tenant ports to the node if not already bound. Then the node is powered on. .. note:: There are 2 power cycles during bare metal deployment; the first time the node is powered-on when ramdisk is booted, the second time after the image is deployed. #. The bare metal node's provisioning state is updated to ``active``. Below is the diagram that describes the above process. .. 
graphviz:: digraph "Deployment Steps" { node [shape=box, style=rounded, fontsize=10]; edge [fontsize=10]; /* cylinder shape works only in graphviz 2.39+ */ { rank=same; node [shape=cylinder]; "Nova DB"; "Ironic DB"; } { rank=same; "Nova API"; "Ironic API"; } { rank=same; "Nova Message Queue"; "Ironic Message Queue"; } { rank=same; "Ironic Conductor"; "TFTP Server"; } { rank=same; "Deploy Interface"; "Boot Interface"; "Power Interface"; "Management Interface"; } { rank=same; "Glance"; "Neutron"; } "Bare Metal Nodes" [shape=box3d]; "Nova API" -> "Nova Message Queue" [label=" 1"]; "Nova Message Queue" -> "Nova Conductor" [dir=both]; "Nova Message Queue" -> "Nova Scheduler" [label=" 2"]; "Nova Conductor" -> "Nova DB" [dir=both, label=" 3"]; "Nova Message Queue" -> "Nova Compute" [dir=both]; "Nova Compute" -> "Neutron" [label=" 4"]; "Nova Compute" -> "Nova Ironic Virt Driver" [label=5]; "Nova Ironic Virt Driver" -> "Ironic API" [label=6]; "Ironic API" -> "Ironic Message Queue"; "Ironic Message Queue" -> "Ironic Conductor" [dir=both]; "Ironic API" -> "Ironic DB" [dir=both]; "Ironic Conductor" -> "Ironic DB" [dir=both, label=16]; "Ironic Conductor" -> "Boot Interface" [label="8, 14"]; "Ironic Conductor" -> "Management Interface" [label=" 9"]; "Ironic Conductor" -> "Deploy Interface" [label=10]; "Deploy Interface" -> "Network Interface" [label="7, 15"]; "Ironic Conductor" -> "Power Interface" [label=11]; "Ironic Conductor" -> "Glance"; "Network Interface" -> "Neutron"; "Power Interface" -> "Bare Metal Nodes"; "Management Interface" -> "Bare Metal Nodes"; "TFTP Server" -> "Bare Metal Nodes" [label=12]; "Ironic Conductor" -> "Bare Metal Nodes" [style=dotted, label=13]; "Boot Interface" -> "TFTP Server"; } The following two examples describe what ironic is doing in more detail, leaving out the actions performed by nova and some of the more advanced options. .. 
_direct-deploy-example: Example: PXE Boot and Direct Deploy Process --------------------------------------------- This process is how :ref:`direct-deploy` works. .. seqdiag:: :scale: 75 diagram { Nova; API; Conductor; Neutron; HTTPStore; "TFTP/HTTPd"; Node; activation = none; edge_length = 250; span_height = 1; default_note_color = white; default_fontsize = 14; Nova -> API [label = "Set instance_info\n(image_source,\nroot_gb, etc.)"]; Nova -> API [label = "Validate power and deploy\ninterfaces"]; Nova -> API [label = "Plug VIFs to the node"]; Nova -> API [label = "Set provision_state,\noptionally pass configdrive"]; API -> Conductor [label = "do_node_deploy()"]; Conductor -> Conductor [label = "Validate power and deploy interfaces"]; Conductor -> HTTPStore [label = "Store configdrive if configdrive_use_swift \noption is set"]; Conductor -> Node [label = "POWER OFF"]; Conductor -> Neutron [label = "Attach provisioning network to port(s)"]; Conductor -> Neutron [label = "Update DHCP boot options"]; Conductor -> Conductor [label = "Prepare PXE\nenvironment for\ndeployment"]; Conductor -> Node [label = "Set PXE boot device \nthrough the BMC"]; Conductor -> Conductor [label = "Cache deploy\nand instance\nkernel and ramdisk"]; Conductor -> Node [label = "REBOOT"]; Node -> Neutron [label = "DHCP request"]; Neutron -> Node [label = "next-server = Conductor"]; Node -> Node [label = "Runs agent\nramdisk"]; Node -> API [label = "lookup()"]; API -> Node [label = "Pass UUID"]; Node -> API [label = "Heartbeat (UUID)"]; API -> Conductor [label = "Heartbeat"]; Conductor -> Node [label = "Continue deploy asynchronously: Pass image, disk info"]; Node -> HTTPStore [label = "Downloads image, writes to disk, \nwrites configdrive if present"]; === Heartbeat periodically === Conductor -> Node [label = "Is deploy done?"]; Node -> Conductor [label = "Still working..."]; === ... 
=== Node -> Conductor [label = "Deploy is done"]; Conductor -> Node [label = "Install boot loader, if requested"]; Conductor -> Neutron [label = "Update DHCP boot options"]; Conductor -> Conductor [label = "Prepare PXE\nenvironment for\ninstance image\nif needed"]; Conductor -> Node [label = "Set boot device either to PXE or to disk"]; Conductor -> Node [label = "Collect ramdisk logs"]; Conductor -> Node [label = "POWER OFF"]; Conductor -> Neutron [label = "Detach provisioning network\nfrom port(s)"]; Conductor -> Neutron [label = "Bind tenant port"]; Conductor -> Node [label = "POWER ON"]; Conductor -> Conductor [label = "Mark node as\nACTIVE"]; } (From a `talk`_ and `slides`_) .. _talk: https://www.openstack.org/summit/vancouver-2015/summit-videos/presentation/isn-and-039t-it-ironic-the-bare-metal-cloud .. _slides: http://www.slideshare.net/devananda1/isnt-it-ironic-managing-a-bare-metal-cloud-osl-tes-2015 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/user/creating-images.rst0000664000175000017500000000761500000000000021725 0ustar00zuulzuul00000000000000Creating instance images ======================== Bare Metal provisioning requires two sets of images: the deploy images and the user images. The :ref:`deploy images ` are used by the Bare Metal service to prepare the bare metal server for actual OS deployment. Whereas the user images are installed on the bare metal server to be used by the end user. There are two types of user images: *partition images* contain only the contents of the root partition. Additionally, two more images are used together with them when booting from network: an image with a kernel and with an initramfs. .. warning:: To use partition images with local boot, Grub2 must be installed on them. *whole disk images* contain a complete partition table with one or more partitions. .. 
warning:: The kernel/initramfs pair must not be used with whole disk images, otherwise they'll be mistaken for partition images. Many distributions publish their own cloud images. These are usually whole disk images that are built for legacy boot mode (not UEFI), with Ubuntu being an exception (they publish images that work in both modes). disk-image-builder ------------------ The `disk-image-builder`_ can be used to create user images required for deployment and the actual OS which the user is going to run. - Install diskimage-builder package (use virtualenv, if you don't want to install anything globally): .. code-block:: console # pip install diskimage-builder - Build the image your users will run (Ubuntu image has been taken as an example): - Partition images .. code-block:: console $ disk-image-create ubuntu baremetal dhcp-all-interfaces grub2 -o my-image - Whole disk images .. code-block:: console $ disk-image-create ubuntu vm dhcp-all-interfaces -o my-image … with an EFI partition: .. code-block:: console $ disk-image-create ubuntu vm block-device-efi dhcp-all-interfaces -o my-image The partition image command creates ``my-image.qcow2``, ``my-image.vmlinuz`` and ``my-image.initrd`` files. The ``grub2`` element in the partition image creation command is only needed if local boot will be used to deploy ``my-image.qcow2``, otherwise the images ``my-image.vmlinuz`` and ``my-image.initrd`` will be used for PXE booting after deploying the bare metal with ``my-image.qcow2``. For whole disk images only the main image is used. If you want to use Fedora image, replace ``ubuntu`` with ``fedora`` in the chosen command. .. _disk-image-builder: https://docs.openstack.org/diskimage-builder/latest/ Virtual machine --------------- Virtual machine software can also be used to build user images. 
There are different software options available, qemu-kvm is usually a good choice on linux platform, it supports emulating many devices and even building images for architectures other than the host machine by software emulation. VirtualBox is another good choice for non-linux host. The procedure varies depending on the software used, but the steps for building an image are similar, the user creates a virtual machine, and installs the target system just like what is done for a real hardware. The system can be highly customized like partition layout, drivers or software shipped, etc. Usually libvirt and its management tools are used to make interaction with qemu-kvm easier, for example, to create a virtual machine with ``virt-install``:: $ virt-install --name centos8 --ram 4096 --vcpus=2 -f centos8.qcow2 \ > --cdrom CentOS-8-x86_64-1905-dvd1.iso Graphic frontend like ``virt-manager`` can also be utilized. The disk file can be used as user image after the system is set up and powered off. The path of the disk file varies depending on the software used, usually it's stored in a user-selected part of the local file system. For qemu-kvm or GUI frontend building upon it, it's typically stored at ``/var/lib/libvirt/images``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/user/deploy.rst0000664000175000017500000003252600000000000020161 0ustar00zuulzuul00000000000000Deploying with Bare Metal service ================================= This guide explains how to use Ironic to deploy nodes without any front-end service, such as OpenStack Compute (nova) or Metal3_. .. note:: To simplify this task you can use the metalsmith_ tool which provides a convenient CLI for the most common cases. .. _Metal3: http://metal3.io/ .. _metalsmith: https://docs.openstack.org/metalsmith/latest/ Allocations ----------- Allocation is a way to find and reserve a node suitable for deployment. 
When an allocation is created, the list of available nodes is searched for a node with the given *resource class* and *traits*, similarly to how it is done in :doc:`OpenStack Compute flavors `. Only the resource class is mandatory, for example: .. code-block:: console $ baremetal allocation create --resource-class baremetal --wait +-----------------+--------------------------------------+ | Field | Value | +-----------------+--------------------------------------+ | candidate_nodes | [] | | created_at | 2019-04-03T12:18:26+00:00 | | extra | {} | | last_error | None | | name | None | | node_uuid | 5d946337-b1d9-4b06-8eda-4fb77e994a0d | | resource_class | baremetal | | state | active | | traits | [] | | updated_at | 2019-04-03T12:18:26+00:00 | | uuid | e84f5d60-84f1-4701-a635-10ff90e2f3b0 | +-----------------+--------------------------------------+ .. note:: The allocation processing is fast but nonetheless asynchronous. Use the ``--wait`` argument to wait for the results. If an allocation is successful, it sets the node's ``instance_uuid`` to the allocation UUID. The node's UUID can be retrieved from the allocation's ``node_uuid`` field. An allocation is automatically deleted when the associated node is unprovisioned. If you don't provision the node, you're responsible for deleting the allocation. See the `allocation API reference `_ for more information on how to use allocations. Populating instance information ------------------------------- The node's ``instance_info`` field is a JSON object that contains all information required for deploying an instance on bare metal. It has to be populated before deployment and is automatically cleared on tear down. Image information ~~~~~~~~~~~~~~~~~ You need to specify image information in the node's ``instance_info`` (see :doc:`/user/creating-images`): * ``image_source`` - URL of the whole disk or root partition image, mandatory. The following schemes are supported: ``http://``, ``https://`` and ``file://``. 
Files have to be accessible by the conductor. If the scheme is missing, an Image Service (glance) image UUID is assumed. * ``root_gb`` - size of the root partition, required for partition images. .. note:: Older versions of the Bare Metal service used to require a positive integer for ``root_gb`` even for whole-disk images. You may want to set it for compatibility. * ``image_checksum`` - MD5 checksum of the image specified by ``image_source``, only required for ``http://`` images when using :ref:`direct-deploy`. Other checksum algorithms are supported via the ``image_os_hash_algo`` and ``image_os_hash_value`` fields. They may be used instead of the ``image_checksum`` field. .. warning:: If your operating system is running in FIPS 140-2 mode, MD5 will not be available, and you **must** use SHA256 or another modern algorithm. Starting with the Stein release of ironic-python-agent, ``image_checksum`` can also be a URL to a checksums file, e.g. one generated with: .. code-block:: console $ cd /path/to/http/root $ md5sum *.img > checksums * ``kernel``, ``ramdisk`` - HTTP(s) or file URLs of the kernel and initramfs of the target OS. Must be added **only** for partition images and only if network boot is required. Supports the same schemes as ``image_source``. An example for a partition image with local boot: .. code-block:: shell baremetal node set $NODE_UUID \ --instance-info image_source=http://image.server/my-image.qcow2 \ --instance-info image_checksum=1f9c0e1bad977a954ba40928c1e11f33 \ --instance-info image_type=partition \ --instance-info root_gb=10 With a SHA256 hash: .. code-block:: shell baremetal node set $NODE_UUID \ --instance-info image_source=http://image.server/my-image.qcow2 \ --instance-info image_os_hash_algo=sha256 \ --instance-info image_os_hash_value=a64dd95e0c48e61ed741ff026d8c89ca38a51f3799955097c5123b1705ef13d4 \ --instance-info image_type=partition \ --instance-info root_gb=10 If you use network boot (or Ironic before Yoga), two more fields must be set: .. 
code-block:: shell baremetal node set $NODE_UUID \ --instance-info image_source=http://image.server/my-image.qcow2 \ --instance-info image_checksum=1f9c0e1bad977a954ba40928c1e11f33 \ --instance-info image_type=partition \ --instance-info kernel=http://image.server/my-image.kernel \ --instance-info ramdisk=http://image.server/my-image.initramfs \ --instance-info root_gb=10 With a whole disk image and a checksum URL: .. code-block:: shell baremetal node set $NODE_UUID \ --instance-info image_source=http://image.server/my-image.qcow2 \ --instance-info image_checksum=http://image.server/my-image.qcow2.CHECKSUM .. note:: Certain hardware types and interfaces may require additional or different fields to be provided. See specific guides under :doc:`/admin/drivers`. When using low RAM nodes with ``http://`` images that are not in the RAW format, you may want them cached locally, converted to raw and served from the conductor's HTTP server: .. code-block:: shell baremetal node set $NODE_UUID --instance-info image_download_source=local For software RAID with whole-disk images, the root UUID of the root partition has to be provided so that the bootloader can be correctly installed: .. code-block:: shell baremetal node set $NODE_UUID --instance-info image_rootfs_uuid= Capabilities ~~~~~~~~~~~~ * :ref:`Boot mode ` can be specified per instance: .. code-block:: shell baremetal node set $NODE_UUID \ --instance-info capabilities='{"boot_mode": "uefi"}' Otherwise, the ``boot_mode`` capability from the node's ``properties`` will be used. .. warning:: The two settings must not contradict each other. .. note:: This capability was introduced in the Wallaby release series, previously ironic used a separate ``instance_info/deploy_boot_mode`` field instead. * To override the :ref:`boot option ` used for this instance, set the ``boot_option`` capability: .. 
code-block:: shell baremetal node set $NODE_UUID \ --instance-info capabilities='{"boot_option": "local"}' * Starting with the Ussuri release, you can set :ref:`root device hints ` per instance: .. code-block:: shell baremetal node set $NODE_UUID \ --instance-info root_device='{"wwn": "0x4000cca77fc4dba1"}' This setting overrides any previous setting in ``properties`` and will be removed on undeployment. Overriding a hardware interface ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Non-admins with temporary access to a node, may wish to specify different node interfaces. However, allowing them to set these interface values directly on the node is problematic, as there is no automated way to ensure that the original interface values are restored. In order to temporarily override a hardware interface, simply set the appropriate value in ``instance_info``. For example, if you'd like to override a node's storage interface, run the following: .. code-block:: shell baremetal node set $NODE_UUID --instance-info storage_interface=cinder ``instance_info`` values persist until after a node is cleaned. .. note:: This feature is available starting with the Wallaby release. Attaching virtual interfaces ---------------------------- If using the OpenStack Networking service (neutron), you can attach its ports to a node before deployment as VIFs: .. code-block:: shell baremetal node vif attach $NODE_UUID $PORT_UUID .. warning:: These are **neutron** ports, not **ironic** ports! VIFs are automatically detached on deprovisioning. Deployment ---------- #. Validate that all parameters are correct: .. code-block:: console $ baremetal node validate $NODE_UUID +------------+--------+----------------------------------------------------------------+ | Interface | Result | Reason | +------------+--------+----------------------------------------------------------------+ | boot | True | | | console | False | Missing 'ipmi_terminal_port' parameter in node's driver_info. 
| | deploy | True | | | inspect | True | | | management | True | | | network | True | | | power | True | | | raid | True | | | storage | True | | +------------+--------+----------------------------------------------------------------+ #. Now you can start the deployment, run: .. code-block:: shell baremetal node deploy $NODE_UUID #. Starting with the Wallaby release you can also request custom deploy steps, see :ref:`standalone-deploy-steps` for details. .. _deploy-configdrive: Deploying with a config drive ----------------------------- The configuration drive is a small image used to store instance-specific metadata and is present to the instance as a disk partition labeled ``config-2``. See :doc:`/install/configdrive` for a detailed explanation. A configuration drive can be provided either as a whole ISO 9660 image or as JSON input for building an image. A first-boot service, such as cloud-init_, must be running on the instance image for the configuration to be applied. .. _cloud-init: https://cloudinit.readthedocs.io/en/latest/ Building a config drive on the client side ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For the format of the configuration drive, Bare Metal service expects a ``gzipped`` and ``base64`` encoded ISO 9660 file with a ``config-2`` label. The :python-ironicclient-doc:`baremetal client ` can generate a configuration drive in the `expected format`_. Pass a directory path containing the files that will be injected into it via the ``--config-drive`` parameter of the ``baremetal node deploy`` command, for example: .. code-block:: shell baremetal node deploy $NODE_UUID --config-drive /dir/configdrive_files .. note:: A configuration drive could also be a data block with a VFAT filesystem on it instead of ISO 9660. But it's unlikely that it would be needed since ISO 9660 is widely supported across operating systems. .. 
_expected format: https://docs.openstack.org/nova/latest/user/metadata.html#config-drives Building a config drive on the conductor side ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Starting with the Stein release and `ironicclient` 2.7.0, you can request building a configdrive on the server side by providing a JSON with keys ``meta_data``, ``user_data`` and ``network_data`` (all optional), e.g.: .. code-block:: bash baremetal node deploy $node_identifier \ --config-drive '{"meta_data": {"hostname": "server1.cluster"}}' .. note:: When this feature is used, host name defaults to the node's name or UUID. SSH public keys can be provided as a mapping: .. code-block:: shell baremetal node deploy $NODE_UUID \ --config-drive '{"meta_data": {"public_keys": {"0": "ssh key contents"}}}' If using cloud-init_, its configuration can be supplied as ``user_data``, e.g.: .. code-block:: shell baremetal node deploy $NODE_UUID \ --config-drive '{"user_data": "#cloud-config\n{\"users\": [{\"name\": ...}]}"}' .. warning:: User data is a string, not a JSON! Also note that a prefix, such as ``#cloud-config``, is required, see `user data format `_. Some first-boot services support network configuration in the `OpenStack network data format `_. It can be provided in the ``network_data`` field of the configuration drive. Ramdisk booting --------------- Advanced operators, specifically ones working with ephemeral workloads, may find it more useful to explicitly treat a node as one that would always boot from a Ramdisk. See :doc:`/admin/ramdisk-boot` for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/user/index.rst0000664000175000017500000000224500000000000017767 0ustar00zuulzuul00000000000000============================= Bare Metal Service User Guide ============================= Ironic is an OpenStack project which provisions bare metal (as opposed to virtual) machines. 
It may be used independently or as part of an OpenStack Cloud, and integrates with the OpenStack Identity (keystone), Compute (nova), Network (neutron), Image (glance) and Object (swift) services. When the Bare Metal service is appropriately configured with the Compute and Network services, it is possible to provision both virtual and physical machines through the Compute service's API. However, the set of instance actions is limited, arising from the different characteristics of physical servers and switch hardware. For example, live migration can not be performed on a bare metal instance. The community maintains reference drivers that leverage open-source technologies (eg. PXE and IPMI) to cover a wide range of hardware. Ironic's pluggable driver architecture also allows hardware vendors to write and contribute drivers that may improve performance or add functionality not provided by the community drivers. .. toctree:: :maxdepth: 2 architecture states creating-images deploy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/doc/source/user/states.rst0000664000175000017500000002520400000000000020163 0ustar00zuulzuul00000000000000.. _states: ======================== Bare Metal State Machine ======================== State Machine Diagram ===================== The diagram below shows the provisioning states that an Ironic node goes through during the lifetime of a node. The diagram also depicts the events that transition the node to different states. Stable states are highlighted with a thicker border. All transitions from stable states are initiated by API requests. There are a few other API-initiated-transitions that are possible from non-stable states. The events for these API-initiated transitions are indicated with '(via API)'. Internally, the conductor initiates the other transitions (depicted in gray). .. 
figure:: ../images/states.svg :width: 660px :align: left :alt: Ironic state transitions .. note:: There are aliases for some transitions: * ``deploy`` is an alias for ``active``. * ``undeploy`` is an alias for ``deleted`` Enrollment and Preparation ========================== enroll (stable state) This is the state that all nodes start off in when created using API version 1.11 or newer. When a node is in the ``enroll`` state, the only thing ironic knows about it is that it exists, and ironic cannot take any further action by itself. Once a node has its driver/interfaces and their required information set in ``node.driver_info``, the node can be transitioned to the ``verifying`` state by setting the node's provision state using the ``manage`` verb. See :doc:`/install/enrollment` for information on enrolling nodes. verifying ironic will validate that it can manage the node using the information given in ``node.driver_info`` and with either the driver/hardware type and interfaces it has been assigned. This involves going out and confirming that the credentials work to access whatever node control mechanism they talk to. manageable (stable state) Once ironic has verified that it can manage the node using the driver/interfaces and credentials passed in at node create time, the node will be transitioned to the ``manageable`` state. From ``manageable``, nodes can transition to: * ``manageable`` (through ``cleaning``) by setting the node's provision state using the ``clean`` verb. * ``manageable`` (through ``inspecting``) by setting the node's provision state using the ``inspect`` verb. * ``available`` (through ``cleaning`` if automatic cleaning is enabled) by setting the node's provision state using the ``provide`` verb. * ``active`` (through ``adopting``) by setting the node's provision state using the ``adopt`` verb. 
``manageable`` is the state that a node should be moved into when any updates need to be made to it such as changes to fields in driver_info and updates to networking information on ironic ports assigned to the node. ``manageable`` is also the only stable state that can be transitioned to, from these failure states: * ``adopt failed`` * ``clean failed`` * ``inspect failed`` inspecting ``inspecting`` will utilize node introspection to update hardware-derived node properties to reflect the current state of the hardware. Typically, the node will transition to ``manageable`` if inspection is synchronous, or ``inspect wait`` if asynchronous. The node will transition to ``inspect failed`` if an error occurs. See :doc:`/admin/inspection` for information about inspection. inspect wait This is the provision state used when an asynchronous inspection is in progress. A successfully inspected node shall transition to ``manageable`` state. inspect failed This is the state a node will move into when inspection of the node fails. From here the node can be transitioned to: * ``inspecting`` by setting the node's provision state using the ``inspect`` verb. * ``manageable`` by setting the node's provision state using the ``manage`` verb cleaning Nodes in the ``cleaning`` state are being scrubbed and reprogrammed into a known configuration. When a node is in the ``cleaning`` state it means that the conductor is executing the clean step (for out-of-band clean steps) or preparing the environment (building PXE configuration files, configuring the DHCP, etc) to boot the ramdisk for running in-band clean steps. clean wait Just like the ``cleaning`` state, the nodes in the ``clean wait`` state are being scrubbed and reprogrammed. The difference is that in the ``clean wait`` state the conductor is waiting for the ramdisk to boot or the clean step which is running in-band to finish.
The cleaning process of a node in the ``clean wait`` state can be interrupted by setting the node's provision state using the ``abort`` verb if the task that is running allows it. Deploy and Undeploy =================== available (stable state) After nodes have been successfully preconfigured and cleaned, they are moved into the ``available`` state and are ready to be provisioned. From ``available``, nodes can transition to: * ``active`` (through ``deploying``) by setting the node's provision state using the ``active`` or ``deploy`` verbs. * ``manageable`` by setting the node's provision state using the ``manage`` verb deploying Nodes in ``deploying`` are being prepared to run a workload on them. This consists of running a series of tasks, such as: * Setting appropriate BIOS configurations * Partitioning drives and laying down file systems. * Creating any additional resources (node-specific network config, a config drive partition, etc.) that may be required by additional subsystems. See :doc:`/user/deploy` and :doc:`/admin/node-deployment` for information about deploying nodes. wait call-back Just like the ``deploying`` state, the nodes in ``wait call-back`` are being deployed. The difference is that in ``wait call-back`` the conductor is waiting for the ramdisk to boot or execute parts of the deployment which need to run in-band on the node (for example, installing the bootloader, or writing the image to the disk). The deployment of a node in ``wait call-back`` can be interrupted by setting the node's provision state using the ``deleted`` or ``undeploy`` verbs. deploy failed This is the state a node will move into when a deployment fails, for example a timeout waiting for the ramdisk to PXE boot. From here the node can be transitioned to: * ``active`` (through ``deploying``) by setting the node's provision state using the ``active``, ``deploy`` or ``rebuild`` verbs. 
* ``available`` (through ``deleting`` and ``cleaning``) by setting the node's provision state using the ``deleted`` or ``undeploy`` verbs. active (stable state) Nodes in ``active`` have a workload running on them. ironic may collect out-of-band sensor information (including power state) on a regular basis. Nodes in ``active`` can transition to: * ``available`` (through ``deleting`` and ``cleaning``) by setting the node's provision state using the ``deleted`` or ``undeploy`` verbs. * ``active`` (through ``deploying``) by setting the node's provision state using the ``rebuild`` verb. * ``rescue`` (through ``rescuing``) by setting the node's provision state using the ``rescue`` verb. deleting Nodes in ``deleting`` state are being torn down from running an active workload. In ``deleting``, ironic tears down and removes any configuration and resources it added in ``deploying`` or ``rescuing``. error (stable state) This is the state a node will move into when deleting an active deployment fails. From ``error``, nodes can transition to: * ``available`` (through ``deleting`` and ``cleaning``) by setting the node's provision state using the ``deleted`` or ``undeploy`` verbs. adopting This state allows ironic to take over management of a baremetal node with an existing workload on it. Ordinarily when a baremetal node is enrolled and managed by ironic, it must transition through ``cleaning`` and ``deploying`` to reach ``active`` state. However, those baremetal nodes that have an existing workload on them, do not need to be deployed or cleaned again, so this transition allows these nodes to move directly from ``manageable`` to ``active``. See :doc:`/admin/adoption` for information about this feature. Rescue ====== rescuing Nodes in ``rescuing`` are being prepared to perform rescue operations. This consists of running a series of tasks, such as: * Setting appropriate BIOS configurations. * Creating any additional resources (node-specific network config, etc.) 
that may be required by additional subsystems. See :doc:`/admin/rescue` for information about this feature. rescue wait Just like the ``rescuing`` state, the nodes in ``rescue wait`` are being rescued. The difference is that in ``rescue wait`` the conductor is waiting for the ramdisk to boot or execute parts of the rescue which need to run in-band on the node (for example, setting the password for user named ``rescue``). The rescue operation of a node in ``rescue wait`` can be aborted by setting the node's provision state using the ``abort`` verb. rescue failed This is the state a node will move into when a rescue operation fails, for example a timeout waiting for the ramdisk to PXE boot. From here the node can be transitioned to: * ``rescue`` (through ``rescuing``) by setting the node's provision state using the ``rescue`` verb. * ``active`` (through ``unrescuing``) by setting the node's provision state using the ``unrescue`` verb. * ``available`` (through ``deleting``) by setting the node's provision state using the ``deleted`` verb. rescue (stable state) Nodes in ``rescue`` have a rescue ramdisk running on them. Ironic may collect out-of-band sensor information (including power state) on a regular basis. Nodes in ``rescue`` can transition to: * ``active`` (through ``unrescuing``) by setting the node's provision state using the ``unrescue`` verb. * ``available`` (through ``deleting``) by setting the node's provision state using the ``deleted`` verb. unrescuing Nodes in ``unrescuing`` are being prepared to transition to ``active`` state from ``rescue`` state. This consists of running a series of tasks, such as setting appropriate BIOS configurations such as changing boot device. unrescue failed This is the state a node will move into when an unrescue operation fails. From here the node can be transitioned to: * ``rescue`` (through ``rescuing``) by setting the node's provision state using the ``rescue`` verb. 
* ``active`` (through ``unrescuing``) by setting the node's provision state using the ``unrescue`` verb. * ``available`` (through ``deleting``) by setting the node's provision state using the ``deleted`` verb. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/driver-requirements.txt0000664000175000017500000000115300000000000017655 0ustar00zuulzuul00000000000000# This file lists all python libraries which are utilized by drivers, # but not listed in global-requirements. # It is intended to help package maintainers to discover additional # python projects they should package as optional dependencies for Ironic. # These are available on pypi proliantutils>=2.13.0 pysnmp>=4.3.0,<5.0.0 python-scciclient>=0.8.0 python-dracclient>=5.1.0,<9.0.0 python-xclarityclient>=0.1.6 # Ansible-deploy interface ansible>=2.7 # HUAWEI iBMC hardware type uses the python-ibmcclient library python-ibmcclient>=0.2.2,<0.3.0 # Dell EMC iDRAC sushy OEM extension sushy-oem-idrac>=4.0.0,<5.0.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8546665 ironic-20.1.0/etc/0000775000175000017500000000000000000000000013653 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8986666 ironic-20.1.0/etc/apache2/0000775000175000017500000000000000000000000015156 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/etc/apache2/ironic0000664000175000017500000000257100000000000016371 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is an example Apache2 configuration file for using the # Ironic API through mod_wsgi. This version assumes you are # running devstack to configure the software, and PBR has generated # and installed the ironic-api-wsgi script while installing ironic. Listen 6385 WSGIDaemonProcess ironic user=stack group=stack threads=10 display-name=%{GROUP} WSGIScriptAlias / /usr/local/bin/ironic-api-wsgi SetEnv APACHE_RUN_USER stack SetEnv APACHE_RUN_GROUP stack WSGIProcessGroup ironic ErrorLog /var/log/apache2/ironic_error.log LogLevel info CustomLog /var/log/apache2/ironic_access.log combined WSGIProcessGroup ironic WSGIApplicationGroup %{GLOBAL} AllowOverride All Require all granted ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8986666 ironic-20.1.0/etc/ironic/0000775000175000017500000000000000000000000015136 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/etc/ironic/README-ironic.conf.txt0000664000175000017500000000040400000000000021037 0ustar00zuulzuul00000000000000To generate the sample ironic.conf file, run the following command from the top level of the repo: tox -egenconfig For a pre-generated example of the latest ironic.conf, see: https://docs.openstack.org/ironic/latest/configuration/sample-config.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 
ironic-20.1.0/etc/ironic/README-policy.yaml.txt0000664000175000017500000000040400000000000021070 0ustar00zuulzuul00000000000000To generate the sample policy.yaml file, run the following command from the top level of the repo: tox -egenpolicy For a pre-generated example of the latest policy.yaml, see: https://docs.openstack.org/ironic/latest/configuration/sample-policy.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/etc/ironic/api_audit_map.conf.sample0000664000175000017500000000134300000000000022062 0ustar00zuulzuul00000000000000[DEFAULT] # default target endpoint type # should match the endpoint type defined in service catalog target_endpoint_type = None # possible end path of API requests # path of api requests for CADF target typeURI # Just need to include top resource path to identify class # of resources. Ex: Log audit event for API requests # path containing "nodes" keyword and node uuid. [path_keywords] nodes = node drivers = driver chassis = chassis ports = port states = state power = None provision = None maintenance = None validate = None boot_device = None supported = None console = None vendor_passthru = vendor_passthru # map endpoint type defined in service catalog to CADF typeURI [service_endpoints] baremetal = service/compute/baremetal ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/etc/ironic/rootwrap.conf0000664000175000017500000000165000000000000017664 0ustar00zuulzuul00000000000000# Configuration for ironic-rootwrap # This file should be owned by (and only writable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writable by root ! 
filters_path=/etc/ironic/rootwrap.d,/usr/share/ironic/rootwrap # List of directories to search executables in, in case filters do not # explicitly specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, user0, user1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. # INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8986666 ironic-20.1.0/etc/ironic/rootwrap.d/0000775000175000017500000000000000000000000017235 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/etc/ironic/rootwrap.d/ironic-utils.filters0000664000175000017500000000034600000000000023253 0ustar00zuulzuul00000000000000# ironic-rootwrap command filters for disk manipulation # This file should be owned by (and only-writable by) the root user [Filters] # ironic/common/utils.py mount: CommandFilter, mount, root umount: CommandFilter, umount, root ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.8986666 ironic-20.1.0/ironic/0000775000175000017500000000000000000000000014363 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/__init__.py0000664000175000017500000000000000000000000016462 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.9026668 
ironic-20.1.0/ironic/api/0000775000175000017500000000000000000000000015134 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/__init__.py0000664000175000017500000000115500000000000017247 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan request = pecan.request response = pecan.response del pecan ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/app.py0000664000175000017500000001252200000000000016270 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright © 2012 New Dream Network, LLC (DreamHost) # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from ironic_lib import auth_basic
import keystonemiddleware.audit as audit_middleware
from keystonemiddleware import auth_token
from oslo_config import cfg
import oslo_middleware.cors as cors_middleware
from oslo_middleware import healthcheck
from oslo_middleware import http_proxy_to_wsgi
import osprofiler.web as osprofiler_web
import pecan

from ironic.api import config
from ironic.api.controllers import base
from ironic.api import hooks
from ironic.api import middleware
from ironic.api.middleware import auth_public_routes
from ironic.api.middleware import json_ext
from ironic.common import exception
from ironic.conf import CONF


class IronicCORS(cors_middleware.CORS):
    """Ironic-specific CORS class

    We're adding the Ironic-specific version headers to the list of simple
    headers in order that a request bearing those headers might be accepted by
    the Ironic REST API.
    """
    simple_headers = cors_middleware.CORS.simple_headers + [
        'X-Auth-Token',
        base.Version.max_string,
        base.Version.min_string,
        base.Version.string
    ]


def get_pecan_config():
    """Load the pecan configuration shipped with ironic.api.config.

    :returns: a pecan configuration object read from the config module's
        source file.
    """
    # config.__file__ may point at the byte-compiled .pyc; pecan needs the
    # .py source to evaluate the configuration dictionaries.
    filename = config.__file__.replace('.pyc', '.py')
    return pecan.configuration.conf_from_file(filename)


def setup_app(pecan_config=None, extra_hooks=None):
    """Build the ironic API WSGI application and its middleware stack.

    :param pecan_config: a pecan configuration object; when None, the
        packaged default from :func:`get_pecan_config` is used.
    :param extra_hooks: optional extra pecan hooks appended after the
        standard ones.
    :returns: the fully wrapped WSGI application.
    :raises: exception.InputFileError if audit is enabled but the audit
        map file cannot be loaded.
    """
    # FIX: resolve the default configuration *before* it is dereferenced.
    # Previously pecan_config.app.acl_public_routes was read while building
    # the hook list, so calling setup_app() with the default
    # pecan_config=None raised AttributeError instead of falling back to
    # get_pecan_config().
    if not pecan_config:
        pecan_config = get_pecan_config()

    app_hooks = [hooks.ConfigHook(),
                 hooks.DBHook(),
                 hooks.ContextHook(pecan_config.app.acl_public_routes),
                 hooks.RPCHook(),
                 hooks.NoExceptionTracebackHook(),
                 hooks.PublicUrlHook()]
    if extra_hooks:
        app_hooks.extend(extra_hooks)

    pecan.configuration.set_config(dict(pecan_config), overwrite=True)

    app = pecan.make_app(
        pecan_config.app.root,
        debug=CONF.pecan_debug,
        static_root=pecan_config.app.static_root if CONF.pecan_debug else None,
        force_canonical=getattr(pecan_config.app, 'force_canonical', True),
        hooks=app_hooks,
        wrap_app=middleware.ParsableErrorMiddleware,
        # NOTE(dtantsur): enabling this causes weird issues with nodes named
        # as if they had a known mime extension, e.g. "mynode.1". We do
        # simulate the same behaviour for .json extensions for backward
        # compatibility through JsonExtensionMiddleware.
        guess_content_type_from_ext=False,
    )

    if CONF.audit.enabled:
        try:
            app = audit_middleware.AuditMiddleware(
                app,
                audit_map_file=CONF.audit.audit_map_file,
                ignore_req_list=CONF.audit.ignore_req_list
            )
        except (EnvironmentError, OSError,
                audit_middleware.PycadfAuditApiConfigError) as e:
            raise exception.InputFileError(
                file_name=CONF.audit.audit_map_file,
                reason=e
            )

    auth_middleware = None
    if CONF.auth_strategy == "keystone":
        auth_middleware = auth_token.AuthProtocol(
            app, {"oslo_config_config": cfg.CONF})
    elif CONF.auth_strategy == "http_basic":
        auth_middleware = auth_basic.BasicAuthMiddleware(
            app, cfg.CONF.http_basic_auth_user_file)

    if auth_middleware:
        app = auth_public_routes.AuthPublicRoutes(
            app,
            auth=auth_middleware,
            public_api_routes=pecan_config.app.acl_public_routes)

    if CONF.profiler.enabled:
        app = osprofiler_web.WsgiMiddleware(app)

    # NOTE(pas-ha) this registers oslo_middleware.enable_proxy_headers_parsing
    # option, when disabled (default) this is noop middleware
    app = http_proxy_to_wsgi.HTTPProxyToWSGI(app, CONF)

    # add in the healthcheck middleware if enabled
    # NOTE(jroll) this is after the auth token middleware as we don't want auth
    # in front of this, and WSGI works from the outside in. Requests to
    # /healthcheck will be handled and returned before the auth middleware
    # is reached.
    if CONF.healthcheck.enabled:
        app = healthcheck.Healthcheck(app, CONF)

    # Create a CORS wrapper, and attach ironic-specific defaults that must be
    # included in all CORS responses.
    app = IronicCORS(app, CONF)
    cors_middleware.set_defaults(
        allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'],
        expose_headers=[base.Version.max_string,
                        base.Version.min_string,
                        base.Version.string]
    )

    app = json_ext.JsonExtensionMiddleware(app)
    return app


class VersionSelectorApplication(object):
    """WSGI entry point that delegates every request to the v1 API app."""

    def __init__(self):
        pc = get_pecan_config()
        self.v1 = setup_app(pecan_config=pc)

    def __call__(self, environ, start_response):
        return self.v1(environ, start_response)
# Server Specific Configurations
# See https://pecan.readthedocs.org/en/latest/configuration.html#server-configuration  # noqa
# Bind address and port for the standalone (non-WSGI) API server.
server = {
    'port': '6385',
    'host': '0.0.0.0'
}

# Pecan Application Configurations
# See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration  # noqa
app = {
    # Dotted path of the controller that roots the whole API tree.
    'root': 'ironic.api.controllers.root.RootController',
    'modules': ['ironic.api'],
    'static_root': '%(confdir)s/public',
    'debug': False,
    # Paths that may be reached without authentication.
    'acl_public_routes': [
        '/',
        '/v1',
        # IPA ramdisk methods
        '/v1/lookup',
        '/v1/heartbeat/[a-z0-9\\-]+',
    ],
}

# WSME Configurations
# See https://wsme.readthedocs.org/en/latest/integrate.html#configuration
wsme = {
    'debug': False,
}
import functools from webob import exc from ironic.common.i18n import _ @functools.total_ordering class Version(object): """API Version object.""" string = 'X-OpenStack-Ironic-API-Version' """HTTP Header string carrying the requested version""" min_string = 'X-OpenStack-Ironic-API-Minimum-Version' """HTTP response header""" max_string = 'X-OpenStack-Ironic-API-Maximum-Version' """HTTP response header""" def __init__(self, headers, default_version, latest_version): """Create an API Version object from the supplied headers. :param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :raises: webob.HTTPNotAcceptable """ (self.major, self.minor) = Version.parse_headers( headers, default_version, latest_version) def __repr__(self): return '%s.%s' % (self.major, self.minor) @staticmethod def parse_headers(headers, default_version, latest_version): """Determine the API version requested based on the headers supplied. 
:param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :returns: a tuple of (major, minor) version numbers :raises: webob.HTTPNotAcceptable """ version_str = headers.get(Version.string, default_version) if version_str.lower() == 'latest': parse_str = latest_version else: parse_str = version_str try: version = tuple(int(i) for i in parse_str.split('.')) except ValueError: version = () if len(version) != 2: raise exc.HTTPNotAcceptable(_( "Invalid value for %s header") % Version.string) return version def __gt__(self, other): return (self.major, self.minor) > (other.major, other.minor) def __eq__(self, other): return (self.major, self.minor) == (other.major, other.minor) def __ne__(self, other): return not self.__eq__(other) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/link.py0000664000175000017500000000303200000000000021007 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ironic import api def build_url(resource, resource_args, bookmark=False, base_url=None): if base_url is None: base_url = api.request.public_url template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s' # FIXME(lucasagomes): I'm getting a 404 when doing a GET on # a nested resource that the URL ends with a '/'. 
# https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs template += '%(args)s' if resource_args.startswith('?') else '/%(args)s' return template % {'url': base_url, 'res': resource, 'args': resource_args} def make_link(rel_name, url, resource, resource_args, bookmark=False, type=None): """Build a dict representing a link""" href = build_url(resource, resource_args, bookmark=bookmark, base_url=url) l = { 'href': href, 'rel': rel_name } if type: l['type'] = type return l ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/root.py0000664000175000017500000000363100000000000021042 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from ironic.api.controllers import v1 from ironic.api.controllers import version from ironic.api import method V1 = v1.Controller() def root(): return { 'name': "OpenStack Ironic API", 'description': ("Ironic is an OpenStack project which enables the " "provision and management of baremetal machines."), 'default_version': version.default_version(), 'versions': version.all_versions() } class RootController(object): @method.expose() def index(self, *args): if args: pecan.abort(404) return root() @pecan.expose() def _lookup(self, primary_key, *remainder): """Overrides the default routing behavior. 
It redirects the request to the default version of the ironic API if the version number is not specified in the url. """ # support paths which are missing the first version element if primary_key and primary_key != version.ID_VERSION1: remainder = [primary_key] + list(remainder) # remove any trailing / if remainder and not remainder[-1]: remainder = remainder[:-1] # but ensure /v1 goes to /v1/ if not remainder: remainder = [''] return V1, remainder ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.9026668 ironic-20.1.0/ironic/api/controllers/v1/0000775000175000017500000000000000000000000020030 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/__init__.py0000664000175000017500000002326000000000000022144 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Version 1 of the Ironic API Specification can be found at doc/source/webapi/v1.rst """ from http import client as http_client import pecan from webob import exc from ironic import api from ironic.api.controllers import base from ironic.api.controllers import link from ironic.api.controllers.v1 import allocation from ironic.api.controllers.v1 import chassis from ironic.api.controllers.v1 import conductor from ironic.api.controllers.v1 import deploy_template from ironic.api.controllers.v1 import driver from ironic.api.controllers.v1 import event from ironic.api.controllers.v1 import node from ironic.api.controllers.v1 import port from ironic.api.controllers.v1 import portgroup from ironic.api.controllers.v1 import ramdisk from ironic.api.controllers.v1 import utils from ironic.api.controllers.v1 import versions from ironic.api.controllers.v1 import volume from ironic.api.controllers import version from ironic.api import method from ironic.common.i18n import _ BASE_VERSION = versions.BASE_VERSION def min_version(): return base.Version( {base.Version.string: versions.min_version_string()}, versions.min_version_string(), versions.max_version_string()) def max_version(): return base.Version( {base.Version.string: versions.max_version_string()}, versions.min_version_string(), versions.max_version_string()) def v1(): v1 = { 'id': "v1", 'links': [ link.make_link('self', api.request.public_url, 'v1', '', bookmark=True), link.make_link('describedby', 'https://docs.openstack.org', '/ironic/latest/contributor/', 'webapi.html', bookmark=True, type='text/html') ], 'media_types': { 'base': 'application/json', 'type': 'application/vnd.openstack.ironic.v1+json' }, 'chassis': [ link.make_link('self', api.request.public_url, 'chassis', ''), link.make_link('bookmark', api.request.public_url, 'chassis', '', bookmark=True) ], 'nodes': [ link.make_link('self', api.request.public_url, 'nodes', ''), link.make_link('bookmark', api.request.public_url, 'nodes', '', bookmark=True) ], 
'ports': [ link.make_link('self', api.request.public_url, 'ports', ''), link.make_link('bookmark', api.request.public_url, 'ports', '', bookmark=True) ], 'drivers': [ link.make_link('self', api.request.public_url, 'drivers', ''), link.make_link('bookmark', api.request.public_url, 'drivers', '', bookmark=True) ], 'version': version.default_version() } if utils.allow_portgroups(): v1['portgroups'] = [ link.make_link('self', api.request.public_url, 'portgroups', ''), link.make_link('bookmark', api.request.public_url, 'portgroups', '', bookmark=True) ] if utils.allow_volume(): v1['volume'] = [ link.make_link('self', api.request.public_url, 'volume', ''), link.make_link('bookmark', api.request.public_url, 'volume', '', bookmark=True) ] if utils.allow_ramdisk_endpoints(): v1['lookup'] = [ link.make_link('self', api.request.public_url, 'lookup', ''), link.make_link('bookmark', api.request.public_url, 'lookup', '', bookmark=True) ] v1['heartbeat'] = [ link.make_link('self', api.request.public_url, 'heartbeat', ''), link.make_link('bookmark', api.request.public_url, 'heartbeat', '', bookmark=True) ] if utils.allow_expose_conductors(): v1['conductors'] = [ link.make_link('self', api.request.public_url, 'conductors', ''), link.make_link('bookmark', api.request.public_url, 'conductors', '', bookmark=True) ] if utils.allow_allocations(): v1['allocations'] = [ link.make_link('self', api.request.public_url, 'allocations', ''), link.make_link('bookmark', api.request.public_url, 'allocations', '', bookmark=True) ] if utils.allow_expose_events(): v1['events'] = [ link.make_link('self', api.request.public_url, 'events', ''), link.make_link('bookmark', api.request.public_url, 'events', '', bookmark=True) ] if utils.allow_deploy_templates(): v1['deploy_templates'] = [ link.make_link('self', api.request.public_url, 'deploy_templates', ''), link.make_link('bookmark', api.request.public_url, 'deploy_templates', '', bookmark=True) ] return v1 class Controller(object): """Version 1 API 
controller root.""" _subcontroller_map = { 'nodes': node.NodesController(), 'ports': port.PortsController(), 'portgroups': portgroup.PortgroupsController(), 'chassis': chassis.ChassisController(), 'drivers': driver.DriversController(), 'volume': volume.VolumeController(), 'lookup': ramdisk.LookupController(), 'heartbeat': ramdisk.HeartbeatController(), 'conductors': conductor.ConductorsController(), 'allocations': allocation.AllocationsController(), 'events': event.EventsController(), 'deploy_templates': deploy_template.DeployTemplatesController() } @method.expose() def index(self): # NOTE: The reason why v1() it's being called for every # request is because we need to get the host url from # the request object to make the links. self._add_version_attributes() if api.request.method != "GET": pecan.abort(http_client.METHOD_NOT_ALLOWED) return v1() def _check_version(self, version, headers=None): if headers is None: headers = {} # ensure that major version in the URL matches the header if version.major != BASE_VERSION: raise exc.HTTPNotAcceptable(_( "Mutually exclusive versions requested. Version %(ver)s " "requested but not supported by this service. The supported " "version range is: [%(min)s, %(max)s].") % {'ver': version, 'min': versions.min_version_string(), 'max': versions.max_version_string()}, headers=headers) # ensure the minor version is within the supported range if version < min_version() or version > max_version(): raise exc.HTTPNotAcceptable(_( "Version %(ver)s was requested but the minor version is not " "supported by this service. 
The supported version range is: " "[%(min)s, %(max)s].") % {'ver': version, 'min': versions.min_version_string(), 'max': versions.max_version_string()}, headers=headers) def _add_version_attributes(self): v = base.Version(api.request.headers, versions.min_version_string(), versions.max_version_string()) # Always set the min and max headers api.response.headers[base.Version.min_string] = ( versions.min_version_string()) api.response.headers[base.Version.max_string] = ( versions.max_version_string()) # assert that requested version is supported self._check_version(v, api.response.headers) api.response.headers[base.Version.string] = str(v) api.request.version = v @pecan.expose() def _lookup(self, primary_key, *remainder): self._add_version_attributes() controller = self._subcontroller_map.get(primary_key) if not controller: pecan.abort(http_client.NOT_FOUND) return controller, remainder __all__ = ('Controller',) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/allocation.py0000664000175000017500000006341000000000000022533 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client from ironic_lib import metrics_utils from oslo_config import cfg from oslo_utils import uuidutils import pecan from webob import exc as webob_exc from ironic import api from ironic.api.controllers import link from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import notification_utils as notify from ironic.api.controllers.v1 import utils as api_utils from ironic.api import method from ironic.common import args from ironic.common import exception from ironic.common.i18n import _ from ironic import objects CONF = cfg.CONF METRICS = metrics_utils.get_metrics_logger(__name__) ALLOCATION_SCHEMA = { 'type': 'object', 'properties': { 'candidate_nodes': { 'type': ['array', 'null'], 'items': {'type': 'string'} }, 'extra': {'type': ['object', 'null']}, 'name': {'type': ['string', 'null']}, 'node': {'type': ['string', 'null']}, 'owner': {'type': ['string', 'null']}, 'resource_class': {'type': ['string', 'null'], 'maxLength': 80}, 'traits': { 'type': ['array', 'null'], 'items': api_utils.TRAITS_SCHEMA }, 'uuid': {'type': ['string', 'null']}, }, 'additionalProperties': False, } ALLOCATION_VALIDATOR = args.and_valid( args.schema(ALLOCATION_SCHEMA), args.dict_valid(uuid=args.uuid) ) PATCH_ALLOWED_FIELDS = ['name', 'extra'] def hide_fields_in_newer_versions(allocation): # if requested version is < 1.60, hide owner field if not api_utils.allow_allocation_owner(): allocation.pop('owner', None) def convert_with_links(rpc_allocation, fields=None, sanitize=True): allocation = api_utils.object_to_dict( rpc_allocation, link_resource='allocations', fields=( 'candidate_nodes', 'extra', 'last_error', 'name', 'owner', 'resource_class', 'state', 'traits' ) ) try: api_utils.populate_node_uuid(rpc_allocation, allocation) except exception.NodeNotFound: allocation['node_uuid'] = None if fields is not None: api_utils.check_for_invalid_fields(fields, set(allocation)) if sanitize: allocation_sanitize(allocation, fields) return 
allocation def allocation_sanitize(allocation, fields): hide_fields_in_newer_versions(allocation) api_utils.sanitize_dict(allocation, fields) def list_convert_with_links(rpc_allocations, limit, url, fields=None, **kwargs): return collection.list_convert_with_links( items=[convert_with_links(p, fields=fields, sanitize=False) for p in rpc_allocations], item_name='allocations', limit=limit, url=url, fields=fields, sanitize_func=allocation_sanitize, **kwargs ) class AllocationsController(pecan.rest.RestController): """REST controller for allocations.""" invalid_sort_key_list = ['extra', 'candidate_nodes', 'traits'] @pecan.expose() def _route(self, args, request=None): if not api_utils.allow_allocations(): msg = _("The API version does not allow allocations") if api.request.method == "GET": raise webob_exc.HTTPNotFound(msg) else: raise webob_exc.HTTPMethodNotAllowed(msg) return super(AllocationsController, self)._route(args, request) def _get_allocations_collection(self, node_ident=None, resource_class=None, state=None, owner=None, marker=None, limit=None, sort_key='id', sort_dir='asc', resource_url='allocations', fields=None, parent_node=None): """Return allocations collection. :param node_ident: UUID or name of a node. :param marker: Pagination marker for large data sets. :param limit: Maximum number of resources to return in a single result. :param sort_key: Column to sort results by. Default: id. :param sort_dir: Direction to sort. "asc" or "desc". Default: asc. :param resource_url: Optional, URL to the allocation resource. :param fields: Optional, a list with a specified set of fields of the resource to be returned. :param owner: project_id of owner to filter by :param parent_node: The explicit parent node uuid to return if the controller is being accessed as a sub-resource. i.e. 
/v1/nodes//allocation """ limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) if sort_key in self.invalid_sort_key_list: raise exception.InvalidParameterValue( _("The sort_key value %(key)s is an invalid field for " "sorting") % {'key': sort_key}) # If the user is not allowed to see everything, we need to filter # based upon access rights. cdict = api.request.context.to_policy_values() if cdict.get('system_scope') != 'all' and not parent_node: # The user is a project scoped, and there is not an explicit # parent node which will be returned. if not api_utils.check_policy_true( 'baremetal:allocation:list_all'): # If the user cannot see everything via the policy, # we need to filter the view down to only what they should # be able to see in the database. owner = cdict.get('project_id') else: # Override if any node_ident was submitted in since this # is a subresource query. node_ident = parent_node marker_obj = None if marker: marker_obj = objects.Allocation.get_by_uuid(api.request.context, marker) if node_ident: try: # Check ability to access the associated node or requested # node to filter by. 
rpc_node = api_utils.get_rpc_node(node_ident) api_utils.check_owner_policy('node', 'baremetal:node:get', rpc_node.owner, lessee=rpc_node.lessee, conceal_node=False) node_uuid = rpc_node.uuid except exception.NodeNotFound as exc: exc.code = http_client.BAD_REQUEST raise except exception.NotAuthorized as exc: if not parent_node: exc.code = http_client.BAD_REQUEST raise exception.NotFound() else: node_uuid = None possible_filters = { 'node_uuid': node_uuid, 'resource_class': resource_class, 'state': state, 'owner': owner } filters = {} for key, value in possible_filters.items(): if value is not None: filters[key] = value allocations = objects.Allocation.list(api.request.context, limit=limit, marker=marker_obj, sort_key=sort_key, sort_dir=sort_dir, filters=filters) for allocation in allocations: api_utils.check_owner_policy('allocation', 'baremetal:allocation:get', allocation.owner) return list_convert_with_links(allocations, limit, url=resource_url, fields=fields, sort_key=sort_key, sort_dir=sort_dir) def _check_allowed_allocation_fields(self, fields): """Check if fetching a particular field of an allocation is allowed. Check if the required version is being requested for fields that are only allowed to be fetched in a particular API version. :param fields: list or set of fields to check :raises: NotAcceptable if a field is not allowed """ if fields is None: return if 'owner' in fields and not api_utils.allow_allocation_owner(): raise exception.NotAcceptable() @METRICS.timer('AllocationsController.get_all') @method.expose() @args.validate(node=args.uuid_or_name, resource_class=args.string, state=args.string, marker=args.uuid, limit=args.integer, sort_key=args.string, sort_dir=args.string, fields=args.string_list, owner=args.string) def get_all(self, node=None, resource_class=None, state=None, marker=None, limit=None, sort_key='id', sort_dir='asc', fields=None, owner=None): """Retrieve a list of allocations. .. 
parameters:: ../../api-ref/source/parameters.yaml :node: r_allocation_node :resource_class: req_allocation_resource_class :state: r_allocation_state :marker: marker :limit: limit :sort_key: sort_key :sort_dir: sort_dir :fields: fields :owner: r_owner """ owner = api_utils.check_list_policy('allocation', owner) self._check_allowed_allocation_fields(fields) if owner is not None and not api_utils.allow_allocation_owner(): raise exception.NotAcceptable() return self._get_allocations_collection(node, resource_class, state, owner, marker, limit, sort_key, sort_dir, fields=fields) @METRICS.timer('AllocationsController.get_one') @method.expose() @args.validate(allocation_ident=args.uuid_or_name, fields=args.string_list) def get_one(self, allocation_ident, fields=None): """Retrieve information about the given allocation. .. parameters:: ../../api-ref/source/parameters.yaml :allocation_ident: allocation_ident :fields: fields """ rpc_allocation = api_utils.check_allocation_policy_and_retrieve( 'baremetal:allocation:get', allocation_ident) self._check_allowed_allocation_fields(fields) return convert_with_links(rpc_allocation, fields=fields) def _authorize_create_allocation(self, allocation): try: # PRE-RBAC this rule was logically restricted, it is more-unlocked # post RBAC, but we need to ensure it is not abused. api_utils.check_policy('baremetal:allocation:create') self._check_allowed_allocation_fields(allocation) if (not CONF.oslo_policy.enforce_new_defaults and not allocation.get('owner')): # Even if permitted, we need to go ahead and check if this is # restricted for now until scoped interaction is the default # interaction. api_utils.check_policy('baremetal:allocation:create_pre_rbac') # TODO(TheJulia): This can be removed later once we # move entirely to scope based checking. 
This requires # that if the scope enforcement is not enabled, that # any user can't create an allocation until the deployment # is in a new operating mode *where* owner will be added # automatically if not a privilged user. except exception.HTTPForbidden: cdict = api.request.context.to_policy_values() project = cdict.get('project_id') if (project and allocation.get('owner') and project != allocation.get('owner')): raise if (allocation.get('owner') and not CONF.oslo_policy.enforce_new_defaults): api_utils.check_policy('baremetal:allocation:create_pre_rbac') api_utils.check_policy('baremetal:allocation:create_restricted') self._check_allowed_allocation_fields(allocation) allocation['owner'] = project return allocation @METRICS.timer('AllocationsController.post') @method.expose(status_code=http_client.CREATED) @method.body('allocation') @args.validate(allocation=ALLOCATION_VALIDATOR) def post(self, allocation): """Create a new allocation. .. parameters:: ../../api-ref/source/parameters.yaml :allocation: req_allocation_name """ context = api.request.context cdict = context.to_policy_values() allocation = self._authorize_create_allocation(allocation) if (allocation.get('name') and not api_utils.is_valid_logical_name(allocation['name'])): msg = _("Cannot create allocation with invalid name " "'%(name)s'") % {'name': allocation['name']} raise exception.Invalid(msg) # TODO(TheJulia): We need to likely look at refactoring post # processing for allocations as pep8 says it is a complexity of 19, # although it is not actually that horrible since it is phased out # just modifying/assembling the allocation. Given that, it seems # not great to try for a full method rewrite at the same time as # RBAC work, so the complexity limit is being raised. :( if (CONF.oslo_policy.enforce_new_defaults and cdict.get('system_scope') != 'all'): # if not a system scope originated request, we need to check/apply # an owner - But we can only do this with when new defaults are # enabled. 
project_id = cdict.get('project_id') req_alloc_owner = allocation.get('owner') if req_alloc_owner: if not api_utils.check_policy_true( 'baremetal:allocation:create_restricted'): if req_alloc_owner != project_id: msg = _("Cannot create allocation with an owner " "Project ID value %(req_owner)s not matching " "the requestor Project ID %(project)s. " "Policy baremetal:allocation:create_restricted" " is required for this capability." ) % {'req_owner': req_alloc_owner, 'project': project_id} raise exception.NotAuthorized(msg) # NOTE(TheJulia): IF not restricted, i.e. else above, # their supplied allocation owner is okay, they are allowed # to provide an override by policy. else: # An allocation owner was not supplied, we need to save one. allocation['owner'] = project_id node = None if allocation.get('node'): if api_utils.allow_allocation_backfill(): try: node = api_utils.get_rpc_node(allocation['node']) api_utils.check_owner_policy( 'node', 'baremetal:node:get', node.owner, node.lessee, conceal_node=allocation['node']) except exception.NodeNotFound as exc: exc.code = http_client.BAD_REQUEST raise else: msg = _("Cannot set node when creating an allocation " "in this API version") raise exception.Invalid(msg) if not allocation.get('resource_class'): if node: allocation['resource_class'] = node.resource_class else: msg = _("The resource_class field is mandatory when not " "backfilling") raise exception.Invalid(msg) if allocation.get('candidate_nodes'): # Convert nodes from names to UUIDs and check their validity try: owner = None if not api_utils.check_policy_true( 'baremetal:allocation:create_restricted'): owner = cdict.get('project_id') # Filter the candidate search by the requestor project ID # if any. The result is processes authenticating with system # scope will not be impacted, where as project scoped requests # will need additional authorization. 
converted = api.request.dbapi.check_node_list( allocation['candidate_nodes'], project=owner) except exception.NodeNotFound as exc: exc.code = http_client.BAD_REQUEST raise else: # Make sure we keep the ordering of candidate nodes. allocation['candidate_nodes'] = [ converted[ident] for ident in allocation['candidate_nodes'] ] # NOTE(yuriyz): UUID is mandatory for notifications payload if not allocation.get('uuid'): if node and node.instance_uuid: # When backfilling without UUID requested, assume that the # target instance_uuid is the desired UUID allocation['uuid'] = node.instance_uuid else: allocation['uuid'] = uuidutils.generate_uuid() new_allocation = objects.Allocation(context, **allocation) if node: new_allocation.node_id = node.id topic = api.request.rpcapi.get_topic_for(node) else: topic = api.request.rpcapi.get_random_topic() notify.emit_start_notification(context, new_allocation, 'create') with notify.handle_error_notification(context, new_allocation, 'create'): new_allocation = api.request.rpcapi.create_allocation( context, new_allocation, topic) notify.emit_end_notification(context, new_allocation, 'create') # Set the HTTP Location Header api.response.location = link.build_url('allocations', new_allocation.uuid) return convert_with_links(new_allocation) def _validate_patch(self, patch): fields = api_utils.patch_validate_allowed_fields( patch, PATCH_ALLOWED_FIELDS) self._check_allowed_allocation_fields(fields) @METRICS.timer('AllocationsController.patch') @method.expose() @method.body('patch') @args.validate(allocation_ident=args.string, patch=args.patch) def patch(self, allocation_ident, patch): """Update an existing allocation. .. 
parameters:: ../../api-ref/source/parameters.yaml :allocation_ident: allocation_ident :patch: allocation_patch """ if not api_utils.allow_allocation_update(): raise webob_exc.HTTPMethodNotAllowed(_( "The API version does not allow updating allocations")) context = api.request.context rpc_allocation = api_utils.check_allocation_policy_and_retrieve( 'baremetal:allocation:update', allocation_ident) self._validate_patch(patch) names = api_utils.get_patch_values(patch, '/name') for name in names: if name and not api_utils.is_valid_logical_name(name): msg = _("Cannot update allocation with invalid name " "'%(name)s'") % {'name': name} raise exception.Invalid(msg) allocation_dict = rpc_allocation.as_dict() allocation_dict = api_utils.apply_jsonpatch(rpc_allocation.as_dict(), patch) api_utils.patched_validate_with_schema( allocation_dict, ALLOCATION_SCHEMA, ALLOCATION_VALIDATOR) api_utils.patch_update_changed_fields( allocation_dict, rpc_allocation, fields=objects.Allocation.fields, schema=ALLOCATION_SCHEMA ) notify.emit_start_notification(context, rpc_allocation, 'update') with notify.handle_error_notification(context, rpc_allocation, 'update'): rpc_allocation.save() notify.emit_end_notification(context, rpc_allocation, 'update') return convert_with_links(rpc_allocation) @METRICS.timer('AllocationsController.delete') @method.expose(status_code=http_client.NO_CONTENT) @args.validate(allocation_ident=args.uuid_or_name) def delete(self, allocation_ident): """Delete an allocation. .. 
parameters:: ../../api-ref/source/parameters.yaml :allocation_ident: allocation_ident """ context = api.request.context rpc_allocation = api_utils.check_allocation_policy_and_retrieve( 'baremetal:allocation:delete', allocation_ident) if rpc_allocation.node_id: node_uuid = objects.Node.get_by_id(api.request.context, rpc_allocation.node_id).uuid else: node_uuid = None notify.emit_start_notification(context, rpc_allocation, 'delete', node_uuid=node_uuid) with notify.handle_error_notification(context, rpc_allocation, 'delete', node_uuid=node_uuid): topic = api.request.rpcapi.get_random_topic() api.request.rpcapi.destroy_allocation(context, rpc_allocation, topic) notify.emit_end_notification(context, rpc_allocation, 'delete', node_uuid=node_uuid) class NodeAllocationController(pecan.rest.RestController): """REST controller for allocations.""" invalid_sort_key_list = ['extra', 'candidate_nodes', 'traits'] @pecan.expose() def _route(self, args, request=None): if not api_utils.allow_allocations(): raise webob_exc.HTTPNotFound(_( "The API version does not allow allocations")) return super(NodeAllocationController, self)._route(args, request) def __init__(self, node_ident): super(NodeAllocationController, self).__init__() self.parent_node_ident = node_ident self.inner = AllocationsController() @METRICS.timer('NodeAllocationController.get_all') @method.expose() @args.validate(fields=args.string_list) def get_all(self, fields=None): """Get all allocations. .. 
parameters:: ../../api-ref/source/parameters.yaml :fields: fields """ parent_node = self.parent_node_ident result = self.inner._get_allocations_collection( parent_node, fields=fields, parent_node=parent_node) try: return result['allocations'][0] except IndexError: raise exception.AllocationNotFound( _("Allocation for node %s was not found") % self.parent_node_ident) @METRICS.timer('NodeAllocationController.delete') @method.expose(status_code=http_client.NO_CONTENT) def delete(self): """Delete an allocation.""" context = api.request.context rpc_node = api_utils.get_rpc_node_with_suffix(self.parent_node_ident) # Check the policy, and 404 if not authorized. api_utils.check_owner_policy('node', 'baremetal:node:get', rpc_node.owner, lessee=rpc_node.lessee, conceal_node=self.parent_node_ident) # A project ID is associated, thus we should filter # our search using it. filters = {'node_uuid': rpc_node.uuid} allocations = objects.Allocation.list( api.request.context, filters=filters) try: rpc_allocation = allocations[0] allocation_owner = allocations[0]['owner'] api_utils.check_owner_policy('allocation', 'baremetal:allocation:delete', allocation_owner) except IndexError: raise exception.AllocationNotFound( _("Allocation for node %s was not found") % self.parent_node_ident) notify.emit_start_notification(context, rpc_allocation, 'delete', node_uuid=rpc_node.uuid) with notify.handle_error_notification(context, rpc_allocation, 'delete', node_uuid=rpc_node.uuid): topic = api.request.rpcapi.get_random_topic() api.request.rpcapi.destroy_allocation(context, rpc_allocation, topic) notify.emit_end_notification(context, rpc_allocation, 'delete', node_uuid=rpc_node.uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/bios.py0000664000175000017500000001012400000000000021334 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ironic_lib import metrics_utils from pecan import rest from ironic import api from ironic.api.controllers.v1 import utils as api_utils from ironic.api import method from ironic.common import args from ironic.common import exception from ironic import objects METRICS = metrics_utils.get_metrics_logger(__name__) _DEFAULT_RETURN_FIELDS = ('name', 'value') _DEFAULT_FIELDS_WITH_REGISTRY = ('name', 'value', 'attribute_type', 'allowable_values', 'lower_bound', 'max_length', 'min_length', 'read_only', 'reset_required', 'unique', 'upper_bound') def convert_with_links(rpc_bios, node_uuid, detail=None, fields=None): """Build a dict containing a bios setting value.""" if detail: fields = _DEFAULT_FIELDS_WITH_REGISTRY bios = api_utils.object_to_dict( rpc_bios, include_uuid=False, fields=fields, link_resource='nodes', link_resource_args="%s/bios/%s" % (node_uuid, rpc_bios.name), ) return bios def collection_from_list(node_ident, bios_settings, detail=None, fields=None): bios_list = [] for bios_setting in bios_settings: bios_list.append(convert_with_links(bios_setting, node_ident, detail, fields)) return {'bios': bios_list} class NodeBiosController(rest.RestController): """REST controller for bios.""" def __init__(self, node_ident=None): super(NodeBiosController, self).__init__() self.node_ident = node_ident @METRICS.timer('NodeBiosController.get_all') @method.expose() @args.validate(fields=args.string_list, detail=args.boolean) def 
get_all(self, detail=None, fields=None): """List node bios settings.""" node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:bios:get', self.node_ident) # The BIOS detail and fields query were added in a later # version, check if they are valid based on version allow_query = api_utils.allow_query_bios fields = api_utils.get_request_return_fields(fields, detail, _DEFAULT_RETURN_FIELDS, allow_query, allow_query) settings = objects.BIOSSettingList.get_by_node_id( api.request.context, node.id) return collection_from_list(self.node_ident, settings, detail, fields) @METRICS.timer('NodeBiosController.get_one') @method.expose() @args.validate(setting_name=args.name) def get_one(self, setting_name): """Retrieve information about the given bios setting. :param setting_name: Logical name of the setting to retrieve. """ node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:bios:get', self.node_ident) try: setting = objects.BIOSSetting.get(api.request.context, node.id, setting_name) except exception.BIOSSettingNotFound: raise exception.BIOSSettingNotFound(node=node.uuid, name=setting_name) # Return fields based on version if api_utils.allow_query_bios(): fields = _DEFAULT_FIELDS_WITH_REGISTRY else: fields = _DEFAULT_RETURN_FIELDS return {setting_name: convert_with_links(setting, node.uuid, fields=fields)} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/chassis.py0000664000175000017500000002553400000000000022050 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client from ironic_lib import metrics_utils from oslo_utils import uuidutils from pecan import rest from ironic import api from ironic.api.controllers import link from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import node from ironic.api.controllers.v1 import notification_utils as notify from ironic.api.controllers.v1 import utils as api_utils from ironic.api import method from ironic.common import args from ironic.common import exception from ironic.common.i18n import _ from ironic import objects METRICS = metrics_utils.get_metrics_logger(__name__) CHASSIS_SCHEMA = { 'type': 'object', 'properties': { 'uuid': {'type': ['string', 'null']}, 'extra': {'type': ['object', 'null']}, 'description': {'type': ['string', 'null'], 'maxLength': 255}, }, 'additionalProperties': False, } CHASSIS_VALIDATOR = args.and_valid( args.schema(CHASSIS_SCHEMA), args.dict_valid(uuid=args.uuid) ) DEFAULT_RETURN_FIELDS = ['uuid', 'description'] def convert_with_links(rpc_chassis, fields=None, sanitize=True): chassis = api_utils.object_to_dict( rpc_chassis, fields=('description', 'extra'), link_resource='chassis' ) url = api.request.public_url chassis['nodes'] = [ link.make_link('self', url, 'chassis', rpc_chassis.uuid + "/nodes"), link.make_link('bookmark', url, 'chassis', rpc_chassis.uuid + "/nodes", bookmark=True)], if fields is not None: api_utils.check_for_invalid_fields(fields, chassis) if sanitize: api_utils.sanitize_dict(chassis, fields) return chassis def list_convert_with_links(rpc_chassis_list, limit, url, fields=None, 
def list_convert_with_links(rpc_chassis_list, limit, url, fields=None,
                            **kwargs):
    """Build a paginated collection dict of chassis.

    Sanitization is deferred to the collection helper so the 'next' marker
    is computed from the unsanitized items.
    """
    return collection.list_convert_with_links(
        items=[convert_with_links(ch, fields=fields, sanitize=False)
               for ch in rpc_chassis_list],
        item_name='chassis',
        limit=limit,
        url=url,
        fields=fields,
        sanitize_func=api_utils.sanitize_dict,
        **kwargs
    )


class ChassisController(rest.RestController):
    """REST controller for Chassis."""

    nodes = node.NodesController()
    """Expose nodes as a sub-element of chassis"""

    # Set the flag to indicate that the requests to this resource are
    # coming from a top-level resource
    nodes.from_chassis = True

    _custom_actions = {
        'detail': ['GET'],
    }

    # Fields that cannot be used as sort keys (not simple DB columns).
    invalid_sort_key_list = ['extra']

    def _get_chassis_collection(self, marker, limit, sort_key, sort_dir,
                                resource_url=None, fields=None,
                                detail=None):
        """Shared implementation for get_all and detail listings."""
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)
        marker_obj = None
        if marker:
            # Pagination marker is the UUID of the last chassis of the
            # previous page.
            marker_obj = objects.Chassis.get_by_uuid(api.request.context,
                                                     marker)
        if sort_key in self.invalid_sort_key_list:
            raise exception.InvalidParameterValue(
                _("The sort_key value %(key)s is an invalid field for "
                  "sorting") % {'key': sort_key})
        chassis = objects.Chassis.list(api.request.context, limit,
                                       marker_obj, sort_key=sort_key,
                                       sort_dir=sort_dir)
        # Forward sort/detail parameters so they are reproduced in the
        # 'next' link of the collection.
        parameters = {}
        if detail is not None:
            parameters['detail'] = detail
        return list_convert_with_links(chassis, limit,
                                       url=resource_url,
                                       fields=fields,
                                       sort_key=sort_key,
                                       sort_dir=sort_dir,
                                       **parameters)

    @METRICS.timer('ChassisController.get_all')
    @method.expose()
    @args.validate(marker=args.uuid, limit=args.integer,
                   sort_key=args.string, sort_dir=args.string,
                   fields=args.string_list, detail=args.boolean)
    def get_all(self, marker=None, limit=None, sort_key='id',
                sort_dir='asc', fields=None, detail=None):
        """Retrieve a list of chassis.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result. This value cannot be larger than the value of
                      max_limit in the [api] section of the ironic
                      configuration, or only max_limit resources will be
                      returned.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned.
        """
        api_utils.check_policy('baremetal:chassis:get')
        api_utils.check_allow_specify_fields(fields)
        fields = api_utils.get_request_return_fields(fields, detail,
                                                     DEFAULT_RETURN_FIELDS)
        return self._get_chassis_collection(marker, limit, sort_key,
                                            sort_dir, fields=fields,
                                            detail=detail,
                                            resource_url='chassis')

    @METRICS.timer('ChassisController.detail')
    @method.expose()
    @args.validate(marker=args.uuid, limit=args.integer,
                   sort_key=args.string, sort_dir=args.string)
    def detail(self, marker=None, limit=None, sort_key='id',
               sort_dir='asc'):
        """Retrieve a list of chassis with detail.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result. This value cannot be larger than the value of
                      max_limit in the [api] section of the ironic
                      configuration, or only max_limit resources will be
                      returned.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        api_utils.check_policy('baremetal:chassis:get')
        # /detail should only work against collections
        parent = api.request.path.split('/')[:-1][-1]
        if parent != "chassis":
            raise exception.HTTPNotFound()
        return self._get_chassis_collection(marker, limit, sort_key,
                                            sort_dir,
                                            resource_url='chassis/detail')

    @METRICS.timer('ChassisController.get_one')
    @method.expose()
    @args.validate(chassis_uuid=args.uuid, fields=args.string_list)
    def get_one(self, chassis_uuid, fields=None):
        """Retrieve information about the given chassis.

        :param chassis_uuid: UUID of a chassis.
        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned.
        """
        api_utils.check_policy('baremetal:chassis:get')
        api_utils.check_allow_specify_fields(fields)
        rpc_chassis = objects.Chassis.get_by_uuid(api.request.context,
                                                  chassis_uuid)
        return convert_with_links(rpc_chassis, fields=fields)

    @METRICS.timer('ChassisController.post')
    @method.expose(status_code=http_client.CREATED)
    @method.body('chassis')
    @args.validate(chassis=CHASSIS_VALIDATOR)
    def post(self, chassis):
        """Create a new chassis.

        :param chassis: a chassis within the request body.
        """
        context = api.request.context
        api_utils.check_policy('baremetal:chassis:create')

        # NOTE(yuriyz): UUID is mandatory for notifications payload
        if not chassis.get('uuid'):
            chassis['uuid'] = uuidutils.generate_uuid()

        new_chassis = objects.Chassis(context, **chassis)
        notify.emit_start_notification(context, new_chassis, 'create')
        with notify.handle_error_notification(context, new_chassis,
                                              'create'):
            new_chassis.create()
        notify.emit_end_notification(context, new_chassis, 'create')
        # Set the HTTP Location Header
        api.response.location = link.build_url('chassis', new_chassis.uuid)
        return convert_with_links(new_chassis)

    @METRICS.timer('ChassisController.patch')
    @method.expose()
    @method.body('patch')
    @args.validate(chassis_uuid=args.string, patch=args.patch)
    def patch(self, chassis_uuid, patch):
        """Update an existing chassis.

        :param chassis_uuid: UUID of a chassis.
        :param patch: a json PATCH document to apply to this chassis.
        """
        context = api.request.context
        api_utils.check_policy('baremetal:chassis:update')
        api_utils.patch_validate_allowed_fields(
            patch, CHASSIS_SCHEMA['properties'])

        rpc_chassis = objects.Chassis.get_by_uuid(context, chassis_uuid)
        # Apply the JSON patch in the API, then re-validate the result
        # against the schema before persisting.
        chassis = api_utils.apply_jsonpatch(rpc_chassis.as_dict(), patch)

        api_utils.patched_validate_with_schema(
            chassis, CHASSIS_SCHEMA, CHASSIS_VALIDATOR)

        api_utils.patch_update_changed_fields(
            chassis, rpc_chassis, fields=objects.Chassis.fields,
            schema=CHASSIS_SCHEMA
        )

        notify.emit_start_notification(context, rpc_chassis, 'update')
        with notify.handle_error_notification(context, rpc_chassis,
                                              'update'):
            rpc_chassis.save()
        notify.emit_end_notification(context, rpc_chassis, 'update')
        return convert_with_links(rpc_chassis)

    @METRICS.timer('ChassisController.delete')
    @method.expose(status_code=http_client.NO_CONTENT)
    @args.validate(chassis_uuid=args.uuid)
    def delete(self, chassis_uuid):
        """Delete a chassis.

        :param chassis_uuid: UUID of a chassis.
        """
        context = api.request.context
        api_utils.check_policy('baremetal:chassis:delete')
        rpc_chassis = objects.Chassis.get_by_uuid(context, chassis_uuid)
        notify.emit_start_notification(context, rpc_chassis, 'delete')
        with notify.handle_error_notification(context, rpc_chassis,
                                              'delete'):
            rpc_chassis.destroy()
        notify.emit_end_notification(context, rpc_chassis, 'delete')
def has_next(collection, limit):
    """Return whether collection has more items.

    :param collection: the current page of items.
    :param limit: the paging limit, or a falsy value for "no limit".
    :returns: True when the page is exactly full (so more items may
        follow), False otherwise.
    """
    # Normalized to a real bool: the original returned ``0`` for an empty
    # collection, which broke the documented "whether" contract (0 == False,
    # so this is equality- and truthiness-compatible).
    return bool(len(collection) and len(collection) == limit)


def list_convert_with_links(items, item_name, limit, url, fields=None,
                            sanitize_func=None, key_field='uuid',
                            sanitizer_args=None, **kwargs):
    """Build a collection dict including the next link for paging support.

    :param items: List of unsanitized items to include in the collection
    :param item_name: Name of dict key for items value
    :param limit: Paging limit
    :param url: Base URL for building next link
    :param fields: Optional fields to use for sanitize function
    :param sanitize_func: Optional sanitize function run on each item,
        item changes will be done in-place
    :param key_field: Key name for building next URL
    :param sanitizer_args: Dictionary with additional arguments to be
        passed to the sanitizer.
    :param kwargs: other arguments passed to ``get_next``
    :returns: A dict containing ``item_name`` and ``next`` values
    """
    # Internal invariant checks (programming errors only; stripped
    # under ``python -O``).
    assert url, "BUG: collections require a base URL"
    assert limit is None or isinstance(limit, int), \
        f"BUG: limit must be None or int, got {type(limit)}"
    items_dict = {
        item_name: items
    }
    # Compute the next-page link from the unsanitized items, since the
    # marker field may be stripped by the sanitizer below.
    next_marker = get_next(
        items, limit, url=url, fields=fields, key_field=key_field, **kwargs)
    if next_marker:
        items_dict['next'] = next_marker

    if sanitize_func:
        if sanitizer_args:
            for item in items:
                sanitize_func(item, fields, **sanitizer_args)
        else:
            for item in items:
                sanitize_func(item, fields=fields)

    return items_dict


def get_next(collection, limit, url, key_field='uuid', **kwargs):
    """Return a link to the next subset of the collection.

    :returns: the 'next' URL string, or None when the current page is not
        full (i.e. there is no next page).
    """
    if not has_next(collection, limit):
        return None

    fields = kwargs.pop('fields', None)
    # NOTE(saga): If fields argument is present in kwargs and not None. It
    # is a list so convert it into a comma separated string.
    if fields:
        kwargs['fields'] = ','.join(fields)
    q_args = ''.join('%s=%s&' % (key, kwargs[key]) for key in kwargs)

    last_item = collection[-1]
    # handle items which are either objects or dicts
    if hasattr(last_item, key_field):
        marker = getattr(last_item, key_field)
    else:
        marker = last_item.get(key_field)

    next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % {
        'args': q_args, 'limit': limit,
        'marker': marker}

    return link.make_link('next', api.request.public_url,
                          url, next_args)['href']
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ironic_lib import metrics_utils from oslo_log import log from oslo_utils import timeutils from pecan import rest from ironic import api from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import utils as api_utils from ironic.api import method from ironic.common import args from ironic.common import exception from ironic.common.i18n import _ import ironic.conf from ironic import objects CONF = ironic.conf.CONF LOG = log.getLogger(__name__) METRICS = metrics_utils.get_metrics_logger(__name__) DEFAULT_RETURN_FIELDS = ['hostname', 'conductor_group', 'alive'] def convert_with_links(rpc_conductor, fields=None, sanitize=True): conductor = api_utils.object_to_dict( rpc_conductor, include_uuid=False, fields=('hostname', 'conductor_group', 'drivers'), link_resource='conductors', link_resource_args=rpc_conductor.hostname ) conductor['alive'] = not timeutils.is_older_than( rpc_conductor.updated_at, CONF.conductor.heartbeat_timeout) if fields is not None: api_utils.check_for_invalid_fields(fields, conductor) if sanitize: api_utils.sanitize_dict(conductor, fields) return conductor def list_convert_with_links(rpc_conductors, limit, url=None, fields=None, **kwargs): return collection.list_convert_with_links( items=[convert_with_links(c, fields=fields, sanitize=False) for c in rpc_conductors], item_name='conductors', limit=limit, url=url, fields=fields, key_field='hostname', sanitize_func=api_utils.sanitize_dict, **kwargs ) class ConductorsController(rest.RestController): """REST controller for conductors.""" invalid_sort_key_list = ['alive', 
'drivers'] def _get_conductors_collection(self, marker, limit, sort_key, sort_dir, resource_url='conductors', fields=None, detail=None): limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) if sort_key in self.invalid_sort_key_list: raise exception.InvalidParameterValue( _("The sort_key value %(key)s is an invalid field for " "sorting") % {'key': sort_key}) marker_obj = None if marker: marker_obj = objects.Conductor.get_by_hostname( api.request.context, marker, online=None) conductors = objects.Conductor.list(api.request.context, limit=limit, marker=marker_obj, sort_key=sort_key, sort_dir=sort_dir) parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} if detail is not None: parameters['detail'] = detail return list_convert_with_links(conductors, limit, url=resource_url, fields=fields, **parameters) @METRICS.timer('ConductorsController.get_all') @method.expose() @args.validate(marker=args.name, limit=args.integer, sort_key=args.string, sort_dir=args.string, fields=args.string_list, detail=args.boolean) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', fields=None, detail=None): """Retrieve a list of conductors. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param fields: Optional, a list with a specified set of fields of the resource to be returned. :param detail: Optional, boolean to indicate whether retrieve a list of conductors with detail. 
""" api_utils.check_policy('baremetal:conductor:get') if not api_utils.allow_expose_conductors(): raise exception.NotFound() api_utils.check_allow_specify_fields(fields) api_utils.check_allowed_fields(fields) api_utils.check_allowed_fields([sort_key]) fields = api_utils.get_request_return_fields(fields, detail, DEFAULT_RETURN_FIELDS) return self._get_conductors_collection(marker, limit, sort_key, sort_dir, fields=fields, detail=detail) @METRICS.timer('ConductorsController.get_one') @method.expose() @args.validate(hostname=args.name, fields=args.string_list) def get_one(self, hostname, fields=None): """Retrieve information about the given conductor. :param hostname: hostname of a conductor. :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ api_utils.check_policy('baremetal:conductor:get') if not api_utils.allow_expose_conductors(): raise exception.NotFound() api_utils.check_allow_specify_fields(fields) api_utils.check_allowed_fields(fields) conductor = objects.Conductor.get_by_hostname(api.request.context, hostname, online=None) return convert_with_links(conductor, fields=fields) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/deploy_template.py0000664000175000017500000003203300000000000023572 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections from http import client as http_client from ironic_lib import metrics_utils from oslo_log import log from oslo_utils import strutils from oslo_utils import uuidutils import pecan from pecan import rest from webob import exc as webob_exc from ironic import api from ironic.api.controllers import link from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import notification_utils as notify from ironic.api.controllers.v1 import utils as api_utils from ironic.api import method from ironic.common import args from ironic.common import exception from ironic.common.i18n import _ import ironic.conf from ironic import objects CONF = ironic.conf.CONF LOG = log.getLogger(__name__) METRICS = metrics_utils.get_metrics_logger(__name__) DEFAULT_RETURN_FIELDS = ['uuid', 'name'] TEMPLATE_SCHEMA = { 'type': 'object', 'properties': { 'description': {'type': ['string', 'null'], 'maxLength': 255}, 'extra': {'type': ['object', 'null']}, 'name': api_utils.TRAITS_SCHEMA, 'steps': {'type': 'array', 'items': api_utils.DEPLOY_STEP_SCHEMA, 'minItems': 1}, 'uuid': {'type': ['string', 'null']}, }, 'required': ['steps', 'name'], 'additionalProperties': False, } PATCH_ALLOWED_FIELDS = ['extra', 'name', 'steps', 'description'] STEP_PATCH_ALLOWED_FIELDS = ['args', 'interface', 'priority', 'step'] def duplicate_steps(name, value): """Argument validator to check template for duplicate steps""" # TODO(mgoddard): Determine the consequences of allowing duplicate # steps. # * What if one step has zero priority and another non-zero? # * What if a step that is enabled by default is included in a # template? Do we override the default or add a second invocation? # Check for duplicate steps. Each interface/step combination can be # specified at most once. 
counter = collections.Counter((step['interface'], step['step']) for step in value['steps']) duplicates = {key for key, count in counter.items() if count > 1} if duplicates: duplicates = {"interface: %s, step: %s" % (interface, step) for interface, step in duplicates} err = _("Duplicate deploy steps. A deploy template cannot have " "multiple deploy steps with the same interface and step. " "Duplicates: %s") % "; ".join(duplicates) raise exception.InvalidDeployTemplate(err=err) return value TEMPLATE_VALIDATOR = args.and_valid( args.schema(TEMPLATE_SCHEMA), duplicate_steps, args.dict_valid(uuid=args.uuid) ) def convert_steps(rpc_steps): for step in rpc_steps: yield { 'interface': step['interface'], 'step': step['step'], 'args': step['args'], 'priority': step['priority'], } def convert_with_links(rpc_template, fields=None, sanitize=True): """Add links to the deploy template.""" template = api_utils.object_to_dict( rpc_template, fields=('name', 'extra'), link_resource='deploy_templates', ) template['steps'] = list(convert_steps(rpc_template.steps)) if fields is not None: api_utils.check_for_invalid_fields(fields, template) if sanitize: template_sanitize(template, fields) return template def template_sanitize(template, fields): """Removes sensitive and unrequested data. Will only keep the fields specified in the ``fields`` parameter. 
:param fields: list of fields to preserve, or ``None`` to preserve them all :type fields: list of str """ api_utils.sanitize_dict(template, fields) if template.get('steps'): for step in template['steps']: step_sanitize(step) def step_sanitize(step): if step.get('args'): step['args'] = strutils.mask_dict_password(step['args'], "******") def list_convert_with_links(rpc_templates, limit, fields=None, **kwargs): return collection.list_convert_with_links( items=[convert_with_links(t, fields=fields, sanitize=False) for t in rpc_templates], item_name='deploy_templates', url='deploy_templates', limit=limit, fields=fields, sanitize_func=template_sanitize, **kwargs ) class DeployTemplatesController(rest.RestController): """REST controller for deploy templates.""" invalid_sort_key_list = ['extra', 'steps'] @pecan.expose() def _route(self, args, request=None): if not api_utils.allow_deploy_templates(): msg = _("The API version does not allow deploy templates") if api.request.method == "GET": raise webob_exc.HTTPNotFound(msg) else: raise webob_exc.HTTPMethodNotAllowed(msg) return super(DeployTemplatesController, self)._route(args, request) @METRICS.timer('DeployTemplatesController.get_all') @method.expose() @args.validate(marker=args.name, limit=args.integer, sort_key=args.string, sort_dir=args.string, fields=args.string_list, detail=args.boolean) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', fields=None, detail=None): """Retrieve a list of deploy templates. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param fields: Optional, a list with a specified set of fields of the resource to be returned. 
:param detail: Optional, boolean to indicate whether retrieve a list of deploy templates with detail. """ api_utils.check_policy('baremetal:deploy_template:get') api_utils.check_allowed_fields(fields) api_utils.check_allowed_fields([sort_key]) fields = api_utils.get_request_return_fields(fields, detail, DEFAULT_RETURN_FIELDS) limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) if sort_key in self.invalid_sort_key_list: raise exception.InvalidParameterValue( _("The sort_key value %(key)s is an invalid field for " "sorting") % {'key': sort_key}) marker_obj = None if marker: marker_obj = objects.DeployTemplate.get_by_uuid( api.request.context, marker) templates = objects.DeployTemplate.list( api.request.context, limit=limit, marker=marker_obj, sort_key=sort_key, sort_dir=sort_dir) parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} if detail is not None: parameters['detail'] = detail return list_convert_with_links( templates, limit, fields=fields, **parameters) @METRICS.timer('DeployTemplatesController.get_one') @method.expose() @args.validate(template_ident=args.uuid_or_name, fields=args.string_list) def get_one(self, template_ident, fields=None): """Retrieve information about the given deploy template. :param template_ident: UUID or logical name of a deploy template. :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ api_utils.check_policy('baremetal:deploy_template:get') api_utils.check_allowed_fields(fields) rpc_template = api_utils.get_rpc_deploy_template_with_suffix( template_ident) return convert_with_links(rpc_template, fields=fields) @METRICS.timer('DeployTemplatesController.post') @method.expose(status_code=http_client.CREATED) @method.body('template') @args.validate(template=TEMPLATE_VALIDATOR) def post(self, template): """Create a new deploy template. :param template: a deploy template within the request body. 
""" api_utils.check_policy('baremetal:deploy_template:create') context = api.request.context # NOTE(mgoddard): UUID is mandatory for notifications payload if not template.get('uuid'): template['uuid'] = uuidutils.generate_uuid() new_template = objects.DeployTemplate(context, **template) notify.emit_start_notification(context, new_template, 'create') with notify.handle_error_notification(context, new_template, 'create'): new_template.create() # Set the HTTP Location Header api.response.location = link.build_url('deploy_templates', new_template.uuid) api_template = convert_with_links(new_template) notify.emit_end_notification(context, new_template, 'create') return api_template @METRICS.timer('DeployTemplatesController.patch') @method.expose() @method.body('patch') @args.validate(template_ident=args.uuid_or_name, patch=args.patch) def patch(self, template_ident, patch=None): """Update an existing deploy template. :param template_ident: UUID or logical name of a deploy template. :param patch: a json PATCH document to apply to this deploy template. """ api_utils.check_policy('baremetal:deploy_template:update') api_utils.patch_validate_allowed_fields(patch, PATCH_ALLOWED_FIELDS) context = api.request.context rpc_template = api_utils.get_rpc_deploy_template_with_suffix( template_ident) template = rpc_template.as_dict() # apply the patch template = api_utils.apply_jsonpatch(template, patch) # validate the result with the patch schema for step in template.get('steps', []): api_utils.patched_validate_with_schema( step, api_utils.DEPLOY_STEP_SCHEMA) api_utils.patched_validate_with_schema( template, TEMPLATE_SCHEMA, TEMPLATE_VALIDATOR) api_utils.patch_update_changed_fields( template, rpc_template, fields=objects.DeployTemplate.fields, schema=TEMPLATE_SCHEMA ) # NOTE(mgoddard): There could be issues with concurrent updates of a # template. 
This is particularly true for the complex 'steps' field, # where operations such as modifying a single step could result in # changes being lost, e.g. two requests concurrently appending a step # to the same template could result in only one of the steps being # added, due to the read/modify/write nature of this patch operation. # This issue should not be present for 'simple' string fields, or # complete replacement of the steps (the only operation supported by # the openstack baremetal CLI). It's likely that this is an issue for # other resources, even those modified in the conductor under a lock. # This is due to the fact that the patch operation is always applied in # the API. Ways to avoid this include passing the patch to the # conductor to apply while holding a lock, or a collision detection # & retry mechansim using e.g. the updated_at field. notify.emit_start_notification(context, rpc_template, 'update') with notify.handle_error_notification(context, rpc_template, 'update'): rpc_template.save() api_template = convert_with_links(rpc_template) notify.emit_end_notification(context, rpc_template, 'update') return api_template @METRICS.timer('DeployTemplatesController.delete') @method.expose(status_code=http_client.NO_CONTENT) @args.validate(template_ident=args.uuid_or_name) def delete(self, template_ident): """Delete a deploy template. :param template_ident: UUID or logical name of a deploy template. 
""" api_utils.check_policy('baremetal:deploy_template:delete') context = api.request.context rpc_template = api_utils.get_rpc_deploy_template_with_suffix( template_ident) notify.emit_start_notification(context, rpc_template, 'delete') with notify.handle_error_notification(context, rpc_template, 'delete'): rpc_template.destroy() notify.emit_end_notification(context, rpc_template, 'delete') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/driver.py0000664000175000017500000003633300000000000021705 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client from ironic_lib import metrics_utils from pecan import rest from ironic import api from ironic.api.controllers import link from ironic.api.controllers.v1 import utils as api_utils from ironic.api import method from ironic.common import args from ironic.common import exception from ironic.common.i18n import _ from ironic.drivers import base as driver_base METRICS = metrics_utils.get_metrics_logger(__name__) # Property information for drivers: # key = driver name; # value = dictionary of properties of that driver: # key = property name. # value = description of the property. # NOTE(rloo). This is cached for the lifetime of the API service. 
def hide_fields_in_newer_versions(driver):
    """This method hides fields that were added in newer API versions.

    Certain fields were introduced at certain API versions. These
    fields are only made available when the request's API version
    matches or exceeds the versions when these fields were introduced.
    """
    # Each interface type is gated by its own microversion check; when the
    # request is too old, strip both the default and enabled fields for it.
    gated = (
        (api_utils.allow_storage_interface, 'storage'),
        (api_utils.allow_rescue_interface, 'rescue'),
        (api_utils.allow_bios_interface, 'bios'),
    )
    for is_allowed, iface in gated:
        if is_allowed():
            continue
        driver.pop('default_%s_interface' % iface, None)
        driver.pop('enabled_%s_interfaces' % iface, None)
def driver_sanitize(driver, fields=None):
    """Restrict ``driver`` to the requested fields, validating the request.

    No-op when no explicit field selection was made.
    """
    if fields is None:
        return
    api_utils.sanitize_dict(driver, fields)
    api_utils.check_for_invalid_fields(fields, driver)
def list_convert_with_links(hardware_types, detail=False, fields=None):
    """Convert drivers and hardware types to an API-serializable object.

    :param hardware_types: dict mapping hardware type names to conductor
                           hostnames.
    :param detail: boolean, whether to include detailed info, such as
                   the 'type' field and default/enabled interfaces fields.
    :param fields: list of fields to preserve, or ``None`` to preserve default
    :returns: an API-serializable driver collection object.
    """
    # NOTE(jroll) we return hardware types in all API versions,
    # but restrict type/default/enabled fields to 1.30.
    # This is checked in Driver.convert_with_links(), however also
    # checking here can save us a DB query.
    if api_utils.allow_dynamic_drivers() and detail:
        iface_info = api.request.dbapi.list_hardware_type_interfaces(
            list(hardware_types))
    else:
        iface_info = []

    drivers = [
        convert_with_links(
            htname,
            list(hosts),
            detail=detail,
            # only this hardware type's interface rows
            interface_info=[i for i in iface_info
                            if i['hardware_type'] == htname],
            fields=fields)
        for htname, hosts in hardware_types.items()
    ]
    return {'drivers': drivers}
""" api_utils.check_policy('baremetal:driver:vendor_passthru') if driver_name not in _VENDOR_METHODS: topic = api.request.rpcapi.get_topic_for_driver(driver_name) ret = api.request.rpcapi.get_driver_vendor_passthru_methods( api.request.context, driver_name, topic=topic) _VENDOR_METHODS[driver_name] = ret return _VENDOR_METHODS[driver_name] @METRICS.timer('DriverPassthruController._default') @method.expose() @method.body('data') @args.validate(driver_name=args.string, method=args.string) def _default(self, driver_name, method, data=None): """Call a driver API extension. :param driver_name: name of the driver to call. :param method: name of the method, to be passed to the vendor implementation. :param data: body of data to supply to the specified method. """ api_utils.check_policy('baremetal:driver:vendor_passthru') topic = api.request.rpcapi.get_topic_for_driver(driver_name) resp = api_utils.vendor_passthru(driver_name, method, topic, data=data, driver_passthru=True) api.response.status_code = resp.status_code return resp.obj class DriverRaidController(rest.RestController): _custom_actions = { 'logical_disk_properties': ['GET'] } @METRICS.timer('DriverRaidController.logical_disk_properties') @method.expose() @args.validate(driver_name=args.string) def logical_disk_properties(self, driver_name): """Returns the logical disk properties for the driver. .. parameters:: ../../api-ref/source/parameters.yaml :driver_name: Name of the driver. .. return:: Success: A dictionary containing the properties that can be mentioned Failure: :UnsupportedDriverExtension: If the driver doesn't support RAID configuration. :NotAcceptable: If requested version of the API is less than 1.12. :DriverNotFound: If driver is not loaded on any of the conductors. 
""" api_utils.check_policy( 'baremetal:driver:get_raid_logical_disk_properties') if not api_utils.allow_raid_config(): raise exception.NotAcceptable() if driver_name not in _RAID_PROPERTIES: topic = api.request.rpcapi.get_topic_for_driver(driver_name) try: info = api.request.rpcapi.get_raid_logical_disk_properties( api.request.context, driver_name, topic=topic) except exception.UnsupportedDriverExtension as e: # Change error code as 404 seems appropriate because RAID is a # standard interface and all drivers might not have it. e.code = http_client.NOT_FOUND raise _RAID_PROPERTIES[driver_name] = info return _RAID_PROPERTIES[driver_name] class DriversController(rest.RestController): """REST controller for Drivers.""" vendor_passthru = DriverPassthruController() raid = DriverRaidController() """Expose RAID as a sub-element of drivers""" _custom_actions = { 'properties': ['GET'], } @METRICS.timer('DriversController.get_all') @method.expose() @args.validate(type=args.string, detail=args.boolean, fields=args.string_list) def get_all(self, type=None, detail=None, fields=None): """Retrieve a list of drivers.""" # FIXME(tenbrae): formatting of the auto-generated REST API docs # will break from a single-line doc string. # This is a result of a bug in sphinxcontrib-pecanwsme # https://github.com/dreamhost/sphinxcontrib-pecanwsme/issues/8 if fields and detail: raise exception.InvalidParameterValue( "Can not specify ?detail=True and fields in the same request.") api_utils.check_policy('baremetal:driver:get') api_utils.check_allow_driver_detail(detail) api_utils.check_allow_filter_driver_type(type) _check_allow_driver_fields(fields) if type not in (None, 'classic', 'dynamic'): raise exception.Invalid(_( '"type" filter must be one of "classic" or "dynamic", ' 'if specified.')) if type is None or type == 'dynamic': hw_type_dict = api.request.dbapi.get_active_hardware_type_dict() else: # NOTE(dtantsur): we don't support classic drivers starting with # the Rocky release. 
hw_type_dict = {} return list_convert_with_links(hw_type_dict, detail=detail, fields=fields) @METRICS.timer('DriversController.get_one') @method.expose() @args.validate(driver_name=args.string, fields=args.string_list) def get_one(self, driver_name, fields=None): """Retrieve a single driver.""" # NOTE(russell_h): There is no way to make this more efficient than # retrieving a list of drivers using the current sqlalchemy schema, but # this path must be exposed for Pecan to route any paths we might # choose to expose below it. api_utils.check_policy('baremetal:driver:get') _check_allow_driver_fields(fields) hw_type_dict = api.request.dbapi.get_active_hardware_type_dict() for name, hosts in hw_type_dict.items(): if name == driver_name: return convert_with_links(name, list(hosts), detail=True, fields=fields) raise exception.DriverNotFound(driver_name=driver_name) @METRICS.timer('DriversController.properties') @method.expose() @args.validate(driver_name=args.string) def properties(self, driver_name): """Retrieve property information of the given driver. :param driver_name: name of the driver. :returns: dictionary with : entries. :raises DriverNotFound (HTTP 404): if the driver name is invalid or the driver cannot be loaded. """ api_utils.check_policy('baremetal:driver:get_properties') if driver_name not in _DRIVER_PROPERTIES: topic = api.request.rpcapi.get_topic_for_driver(driver_name) properties = api.request.rpcapi.get_driver_properties( api.request.context, driver_name, topic=topic) _DRIVER_PROPERTIES[driver_name] = properties return _DRIVER_PROPERTIES[driver_name] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/event.py0000664000175000017500000000641500000000000021531 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
def events_valid(name, value):
    """Validator for events.

    Runs each event payload through the validator registered for its
    'event' type in EVENT_VALIDATORS.

    :param name: name of the field being validated, forwarded to the
                 per-event validators.
    :param value: dict holding an 'events' list of event payloads.
    :returns: ``value``, once every event has validated successfully.
    """
    for entry in value['events']:
        EVENT_VALIDATORS[entry['event']](name, entry)
    return value
"string", "title": "L2 interface ID", "examples": [ "eth0" ] }, "l2_mtu": { "$id": "#/definitions/l2_mtu", "title": "L2 interface MTU", "anyOf": [ { "type": "number", "minimum": 1, "maximum": 65535 }, { "type": "null" } ], "examples": [ 1500 ] }, "l2_vif_id": { "$id": "#/definitions/l2_vif_id", "type": "string", "title": "Virtual interface ID", "examples": [ "cd9f6d46-4a3a-43ab-a466-994af9db96fc" ] }, "l2_link": { "$id": "#/definitions/l2_link", "type": "object", "title": "L2 interface configuration settings", "required": [ "ethernet_mac_address", "id", "type" ], "properties": { "id": { "$ref": "#/definitions/l2_id" }, "ethernet_mac_address": { "$ref": "#/definitions/l2_address" }, "mtu": { "$ref": "#/definitions/l2_mtu" }, "type": { "$id": "#/definitions/l2_link/properties/type", "type": "string", "enum": [ "bridge", "dvs", "hw_veb", "hyperv", "ovs", "tap", "vhostuser", "vif", "phy" ], "title": "Interface type", "examples": [ "bridge" ] }, "vif_id": { "$ref": "#/definitions/l2_vif_id" } } }, "l2_bond": { "$id": "#/definitions/l2_bond", "type": "object", "title": "L2 bonding interface configuration settings", "required": [ "ethernet_mac_address", "id", "type", "bond_mode", "bond_links" ], "properties": { "id": { "$ref": "#/definitions/l2_id" }, "ethernet_mac_address": { "$ref": "#/definitions/l2_address" }, "mtu": { "$ref": "#/definitions/l2_mtu" }, "type": { "$id": "#/definitions/l2_bond/properties/type", "type": "string", "enum": [ "bond" ], "title": "Interface type", "examples": [ "bond" ] }, "vif_id": { "$ref": "#/definitions/l2_vif_id" }, "bond_mode": { "$id": "#/definitions/bond/properties/bond_mode", "type": "string", "title": "Port bonding type", "enum": [ "802.1ad", "balance-rr", "active-backup", "balance-xor", "broadcast", "balance-tlb", "balance-alb" ], "examples": [ "802.1ad" ] }, "bond_links": { "$id": "#/definitions/bond/properties/bond_links", "type": "array", "title": "Port bonding links", "items": { "$id": 
"#/definitions/bond/properties/bond_links/items", "type": "string" } } } }, "l2_vlan": { "$id": "#/definitions/l2_vlan", "type": "object", "title": "L2 VLAN interface configuration settings", "required": [ "vlan_mac_address", "id", "type", "vlan_link", "vlan_id" ], "properties": { "id": { "$ref": "#/definitions/l2_id" }, "vlan_mac_address": { "$ref": "#/definitions/l2_address" }, "mtu": { "$ref": "#/definitions/l2_mtu" }, "type": { "$id": "#/definitions/l2_vlan/properties/type", "type": "string", "enum": [ "vlan" ], "title": "VLAN interface type", "examples": [ "vlan" ] }, "vif_id": { "$ref": "#/definitions/l2_vif_id" }, "vlan_id": { "$id": "#/definitions/l2_vlan/properties/vlan_id", "type": "integer", "title": "VLAN ID" }, "vlan_link": { "$id": "#/definitions/l2_vlan/properties/vlan_link", "type": "string", "title": "VLAN link name" } } }, "l3_id": { "$id": "#/definitions/l3_id", "type": "string", "title": "Network name", "examples": [ "network0" ] }, "l3_link": { "$id": "#/definitions/l3_link", "type": "string", "title": "L2 network link to use for L3 interface", "examples": [ "99e88329-f20d-4741-9593-25bf07847b16" ] }, "l3_network_id": { "$id": "#/definitions/l3_network_id", "type": "string", "title": "Network ID", "examples": [ "99e88329-f20d-4741-9593-25bf07847b16" ] }, "l3_ipv4_type": { "$id": "#/definitions/l3_ipv4_type", "type": "string", "enum": [ "ipv4", "ipv4_dhcp" ], "title": "L3 IPv4 network type", "examples": [ "ipv4_dhcp" ] }, "l3_ipv6_type": { "$id": "#/definitions/l3_ipv6_type", "type": "string", "enum": [ "ipv6", "ipv6_dhcp", "ipv6_slaac" ], "title": "L3 IPv6 network type", "examples": [ "ipv6_dhcp" ] }, "l3_ipv4_host": { "$id": "#/definitions/l3_ipv4_host", "type": "string", "pattern": "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$", "title": "L3 IPv4 host address", "examples": [ "192.168.81.99" ] }, "l3_ipv6_host": { "$id": "#/definitions/l3_ipv6_host", "type": "string", "pattern": 
"^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))(/[0-9]{1,2})?$", "title": "L3 IPv6 host address", "examples": [ "2001:db8:3:4::192.168.81.99" ] }, "l3_ipv4_netmask": { "$id": "#/definitions/l3_ipv4_netmask", "type": "string", "pattern": "^(254|252|248|240|224|192|128|0)\\.0\\.0\\.0|255\\.(254|252|248|240|224|192|128|0)\\.0\\.0|255\\.255\\.(254|252|248|240|224|192|128|0)\\.0|255\\.255\\.255\\.(254|252|248|240|224|192|128|0)$", "title": "L3 IPv4 network mask", "examples": [ "255.255.252.0" ] }, "l3_ipv6_netmask": { "$id": "#/definitions/l3_ipv6_netmask", "type": "string", "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7})|(::))$", "title": "L3 IPv6 network mask", "examples": [ "ffff:ffff:ffff:ffff::" ] }, "l3_ipv4_nw": { "$id": "#/definitions/l3_ipv4_nw", "type": "string", "pattern": "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$", "title": "L3 IPv4 network address", "examples": [ "0.0.0.0" ] }, "l3_ipv6_nw": { "$id": "#/definitions/l3_ipv6_nw", "type": "string", 
"pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7})|(::))$", "title": "L3 IPv6 network address", "examples": [ "8000::" ] }, "l3_ipv4_gateway": { "$id": "#/definitions/l3_ipv4_gateway", "type": "string", "pattern": "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$", "title": "L3 IPv4 gateway address", "examples": [ "192.168.200.1" ] }, "l3_ipv6_gateway": { "$id": "#/definitions/l3_ipv6_gateway", "type": "string", "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$", "title": "L3 IPv6 gateway address", "examples": [ "2001:db8:3:4::192.168.81.99" ] }, "l3_ipv4_network_route": { "$id": "#/definitions/l3_ipv4_network_route", "type": "object", "title": "L3 IPv4 routing configuration item", "required": [ "gateway", "netmask", "network" ], "properties": { "network": { "$ref": "#/definitions/l3_ipv4_nw" }, "netmask": { "$ref": "#/definitions/l3_ipv4_netmask" }, "gateway": { "$ref": "#/definitions/l3_ipv4_gateway" }, "services": { "$ref": "#/definitions/ipv4_services" } } }, 
"l3_ipv6_network_route": { "$id": "#/definitions/l3_ipv6_network_route", "type": "object", "title": "L3 IPv6 routing configuration item", "required": [ "gateway", "netmask", "network" ], "properties": { "network": { "$ref": "#/definitions/l3_ipv6_nw" }, "netmask": { "$ref": "#/definitions/l3_ipv6_netmask" }, "gateway": { "$ref": "#/definitions/l3_ipv6_gateway" }, "services": { "$ref": "#/definitions/ipv6_services" } } }, "l3_ipv4_network": { "$id": "#/definitions/l3_ipv4_network", "type": "object", "title": "L3 IPv4 network configuration", "required": [ "id", "link", "network_id", "type" ], "properties": { "id": { "$ref": "#/definitions/l3_id" }, "link": { "$ref": "#/definitions/l3_link" }, "network_id": { "$ref": "#/definitions/l3_network_id" }, "type": { "$ref": "#/definitions/l3_ipv4_type" }, "ip_address": { "$ref": "#/definitions/l3_ipv4_host" }, "netmask": { "$ref": "#/definitions/l3_ipv4_netmask" }, "routes": { "$id": "#/definitions/l3_ipv4_network/routes", "type": "array", "title": "L3 IPv4 network routes", "items": { "$ref": "#/definitions/l3_ipv4_network_route" } } } }, "l3_ipv6_network": { "$id": "#/definitions/l3_ipv6_network", "type": "object", "title": "L3 IPv6 network configuration", "required": [ "id", "link", "network_id", "type" ], "properties": { "id": { "$ref": "#/definitions/l3_id" }, "link": { "$ref": "#/definitions/l3_link" }, "network_id": { "$ref": "#/definitions/l3_network_id" }, "type": { "$ref": "#/definitions/l3_ipv6_type" }, "ip_address": { "$ref": "#/definitions/l3_ipv6_host" }, "netmask": { "$ref": "#/definitions/l3_ipv6_netmask" }, "routes": { "$id": "#/definitions/properties/l3_ipv6_network/routes", "type": "array", "title": "L3 IPv6 network routes", "items": { "$ref": "#/definitions/l3_ipv6_network_route" } } } }, "ipv4_service": { "$id": "#/definitions/ipv4_service", "type": "object", "title": "Service on a IPv4 network", "required": [ "address", "type" ], "properties": { "address": { "$ref": "#/definitions/l3_ipv4_host" }, 
"type": { "$id": "#/definitions/ipv4_service/properties/type", "type": "string", "enum": [ "dns" ], "title": "Service type", "examples": [ "dns" ] } } }, "ipv6_service": { "$id": "#/definitions/ipv6_service", "type": "object", "title": "Service on a IPv6 network", "required": [ "address", "type" ], "properties": { "address": { "$ref": "#/definitions/l3_ipv6_host" }, "type": { "$id": "#/definitions/ipv4_service/properties/type", "type": "string", "enum": [ "dns" ], "title": "Service type", "examples": [ "dns" ] } } }, "ipv4_services": { "$id": "#/definitions/ipv4_services", "type": "array", "title": "Network services on IPv4 network", "items": { "$id": "#/definitions/ipv4_services/items", "$ref": "#/definitions/ipv4_service" } }, "ipv6_services": { "$id": "#/definitions/ipv6_services", "type": "array", "title": "Network services on IPv6 network", "items": { "$id": "#/definitions/ipv6_services/items", "$ref": "#/definitions/ipv6_service" } }, "services": { "$id": "#/definitions/services", "type": "array", "title": "Network services", "items": { "$id": "#/definitions/services/items", "anyOf": [ { "$ref": "#/definitions/ipv4_service" }, { "$ref": "#/definitions/ipv6_service" } ] } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/node.py0000664000175000017500000034754600000000000021352 0ustar00zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime from http import client as http_client import json from ironic_lib import metrics_utils import jsonschema from jsonschema import exceptions as json_schema_exc from oslo_log import log from oslo_utils import strutils from oslo_utils import uuidutils import pecan from pecan import rest from ironic import api from ironic.api.controllers import link from ironic.api.controllers.v1 import allocation from ironic.api.controllers.v1 import bios from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import notification_utils as notify from ironic.api.controllers.v1 import port from ironic.api.controllers.v1 import portgroup from ironic.api.controllers.v1 import utils as api_utils from ironic.api.controllers.v1 import versions from ironic.api.controllers.v1 import volume from ironic.api import method from ironic.common import args from ironic.common import boot_modes from ironic.common import exception from ironic.common.i18n import _ from ironic.common import policy from ironic.common import states as ir_states from ironic.conductor import steps as conductor_steps import ironic.conf from ironic.drivers import base as driver_base from ironic import objects CONF = ironic.conf.CONF LOG = log.getLogger(__name__) _CLEAN_STEPS_SCHEMA = { "$schema": "http://json-schema.org/schema#", "title": "Clean steps schema", "type": "array", # list of clean steps "items": { "type": "object", # args is optional "required": ["interface", "step"], "properties": { "interface": { "description": "driver interface", "enum": list(conductor_steps.CLEANING_INTERFACE_PRIORITY) # interface value must be one of the valid interfaces }, "step": { "description": "name of clean step", "type": "string", "minLength": 1 }, "args": { "description": "additional args", "type": "object", "properties": {} }, }, # interface, step and args are the only expected keys 
"additionalProperties": False } } _DEPLOY_STEPS_SCHEMA = { "$schema": "http://json-schema.org/schema#", "title": "Deploy steps schema", "type": "array", "items": api_utils.DEPLOY_STEP_SCHEMA } METRICS = metrics_utils.get_metrics_logger(__name__) # Vendor information for node's driver: # key = driver name; # value = dictionary of node vendor methods of that driver: # key = method name. # value = dictionary with the metadata of that method. # NOTE(lucasagomes). This is cached for the lifetime of the API # service. If one or more conductor services are restarted with new driver # versions, the API service should be restarted. _VENDOR_METHODS = {} _DEFAULT_RETURN_FIELDS = ['instance_uuid', 'maintenance', 'power_state', 'provision_state', 'uuid', 'name'] # States where calling do_provisioning_action makes sense PROVISION_ACTION_STATES = (ir_states.VERBS['manage'], ir_states.VERBS['provide'], ir_states.VERBS['abort'], ir_states.VERBS['adopt']) _NODES_CONTROLLER_RESERVED_WORDS = None ALLOWED_TARGET_POWER_STATES = (ir_states.POWER_ON, ir_states.POWER_OFF, ir_states.REBOOT, ir_states.SOFT_REBOOT, ir_states.SOFT_POWER_OFF) ALLOWED_TARGET_BOOT_MODES = (boot_modes.LEGACY_BIOS, boot_modes.UEFI) _NODE_DESCRIPTION_MAX_LENGTH = 4096 _NETWORK_DATA_SCHEMA = None def network_data_schema(): global _NETWORK_DATA_SCHEMA if _NETWORK_DATA_SCHEMA is None: with open(CONF.api.network_data_schema) as fl: _NETWORK_DATA_SCHEMA = json.load(fl) return _NETWORK_DATA_SCHEMA def node_schema(): network_data = network_data_schema() return { 'type': 'object', 'properties': { 'automated_clean': {'type': ['string', 'boolean', 'null']}, 'bios_interface': {'type': ['string', 'null']}, 'boot_interface': {'type': ['string', 'null']}, 'boot_mode': {'type': ['string', 'null']}, 'chassis_uuid': {'type': ['string', 'null']}, 'conductor_group': {'type': ['string', 'null']}, 'console_enabled': {'type': ['string', 'boolean', 'null']}, 'console_interface': {'type': ['string', 'null']}, 'deploy_interface': {'type': 
['string', 'null']}, 'description': {'type': ['string', 'null'], 'maxLength': _NODE_DESCRIPTION_MAX_LENGTH}, 'driver': {'type': 'string'}, 'driver_info': {'type': ['object', 'null']}, 'extra': {'type': ['object', 'null']}, 'inspect_interface': {'type': ['string', 'null']}, 'instance_info': {'type': ['object', 'null']}, 'instance_uuid': {'type': ['string', 'null']}, 'lessee': {'type': ['string', 'null']}, 'management_interface': {'type': ['string', 'null']}, 'maintenance': {'type': ['string', 'boolean', 'null']}, 'name': {'type': ['string', 'null']}, 'network_data': {'anyOf': [ {'type': 'null'}, {'type': 'object', 'additionalProperties': False}, network_data ]}, 'network_interface': {'type': ['string', 'null']}, 'owner': {'type': ['string', 'null']}, 'power_interface': {'type': ['string', 'null']}, 'properties': {'type': ['object', 'null']}, 'raid_interface': {'type': ['string', 'null']}, 'rescue_interface': {'type': ['string', 'null']}, 'resource_class': {'type': ['string', 'null'], 'maxLength': 80}, 'retired': {'type': ['string', 'boolean', 'null']}, 'retired_reason': {'type': ['string', 'null']}, 'secure_boot': {'type': ['string', 'boolean', 'null']}, 'storage_interface': {'type': ['string', 'null']}, 'uuid': {'type': ['string', 'null']}, 'vendor_interface': {'type': ['string', 'null']}, }, 'required': ['driver'], 'additionalProperties': False, 'definitions': network_data.get('definitions', {}) } def node_patch_schema(): node_patch = copy.deepcopy(node_schema()) # add schema for patchable fields node_patch['properties']['protected'] = { 'type': ['string', 'boolean', 'null']} node_patch['properties']['protected_reason'] = { 'type': ['string', 'null']} return node_patch NODE_VALIDATE_EXTRA = args.dict_valid( automated_clean=args.boolean, chassis_uuid=args.uuid, console_enabled=args.boolean, instance_uuid=args.uuid, protected=args.boolean, maintenance=args.boolean, retired=args.boolean, uuid=args.uuid, ) _NODE_VALIDATOR = None _NODE_PATCH_VALIDATOR = None def 
node_validator(name, value): global _NODE_VALIDATOR if _NODE_VALIDATOR is None: _NODE_VALIDATOR = args.and_valid( args.schema(node_schema()), NODE_VALIDATE_EXTRA ) return _NODE_VALIDATOR(name, value) def node_patch_validator(name, value): global _NODE_PATCH_VALIDATOR if _NODE_PATCH_VALIDATOR is None: _NODE_PATCH_VALIDATOR = args.and_valid( args.schema(node_patch_schema()), NODE_VALIDATE_EXTRA ) return _NODE_PATCH_VALIDATOR(name, value) PATCH_ALLOWED_FIELDS = [ 'automated_clean', 'bios_interface', 'boot_interface', 'chassis_uuid', 'conductor_group', 'console_interface', 'deploy_interface', 'description', 'driver', 'driver_info', 'extra', 'inspect_interface', 'instance_info', 'instance_uuid', 'lessee', 'maintenance', 'management_interface', 'name', 'network_data', 'network_interface', 'owner', 'power_interface', 'properties', 'protected', 'protected_reason', 'raid_interface', 'rescue_interface', 'resource_class', 'retired', 'retired_reason', 'storage_interface', 'vendor_interface' ] TRAITS_SCHEMA = { 'type': 'object', 'properties': { 'traits': { 'type': 'array', 'items': api_utils.TRAITS_SCHEMA }, }, 'additionalProperties': False, } VIF_VALIDATOR = args.and_valid( args.schema({ 'type': 'object', 'properties': { 'id': {'type': 'string'}, }, 'required': ['id'], 'additionalProperties': True, }), args.dict_valid(id=args.uuid_or_name) ) def get_nodes_controller_reserved_names(): global _NODES_CONTROLLER_RESERVED_WORDS if _NODES_CONTROLLER_RESERVED_WORDS is None: _NODES_CONTROLLER_RESERVED_WORDS = ( api_utils.get_controller_reserved_names(NodesController)) return _NODES_CONTROLLER_RESERVED_WORDS def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. 
""" for field in api_utils.disallowed_fields(): obj.pop(field, None) def reject_fields_in_newer_versions(obj): """When creating an object, reject fields that appear in newer versions.""" for field in api_utils.disallowed_fields(): if field == 'conductor_group': # NOTE(jroll) this is special-cased to "" and not Unset, # because it is used in hash ring calculations empty_value = '' elif field == 'name' and obj.get('name') is None: # NOTE(dtantsur): for some reason we allow specifying name=None # explicitly even in old API versions.. continue else: empty_value = None if obj.get(field, empty_value) != empty_value: LOG.debug('Field %(field)s is not acceptable in version %(ver)s', {'field': field, 'ver': api.request.version}) raise exception.NotAcceptable() def reject_patch_in_newer_versions(patch): for field in api_utils.disallowed_fields(): value = api_utils.get_patch_values(patch, '/%s' % field) if value: LOG.debug('Field %(field)s is not acceptable in version %(ver)s', {'field': field, 'ver': api.request.version}) raise exception.NotAcceptable() def update_state_in_older_versions(obj): """Change provision state names for API backwards compatibility. :param obj: The dict being returned to the API client that is to be updated by this method. """ # if requested version is < 1.2, convert AVAILABLE to the old NOSTATE if (api.request.version.minor < versions.MINOR_2_AVAILABLE_STATE and obj.get('provision_state') == ir_states.AVAILABLE): obj['provision_state'] = ir_states.NOSTATE # if requested version < 1.39, convert INSPECTWAIT to INSPECTING if (not api_utils.allow_inspect_wait_state() and obj.get('provision_state') == ir_states.INSPECTWAIT): obj['provision_state'] = ir_states.INSPECTING def validate_network_data(network_data): """Validates node network_data field. This method validates network data configuration against JSON schema. 
    :param network_data: a network_data field to validate
    :raises: Invalid if network data is not schema-compliant
    """
    try:
        jsonschema.validate(network_data, network_data_schema())
    except json_schema_exc.ValidationError as e:
        # NOTE: Even though e.message is deprecated in general, it is
        # said in jsonschema documentation to use this still.
        msg = _("Invalid network_data: %s ") % e.message
        raise exception.Invalid(msg)


class BootDeviceController(rest.RestController):

    # Non-CRUD verb exposed under this controller: GET .../boot_device/supported
    _custom_actions = {
        'supported': ['GET'],
    }

    def _get_boot_device(self, rpc_node, supported=False):
        """Get the current boot device or a list of supported devices.

        :param rpc_node: RPC Node object.
        :param supported: Boolean value. If true return a list of
            supported boot devices, if false return the current boot
            device. Default: False.
        :returns: The current boot device or a list of the supported
            boot devices.
        """
        topic = api.request.rpcapi.get_topic_for(rpc_node)
        if supported:
            return api.request.rpcapi.get_supported_boot_devices(
                api.request.context, rpc_node.uuid, topic)
        else:
            return api.request.rpcapi.get_boot_device(api.request.context,
                                                      rpc_node.uuid, topic)

    @METRICS.timer('BootDeviceController.put')
    @method.expose(status_code=http_client.NO_CONTENT)
    @args.validate(node_ident=args.uuid_or_name, boot_device=args.string,
                   persistent=args.boolean)
    def put(self, node_ident, boot_device, persistent=False):
        """Set the boot device for a node.

        Set the boot device to use on next reboot of the node.

        :param node_ident: the UUID or logical name of a node.
        :param boot_device: the boot device, one of
            :mod:`ironic.common.boot_devices`.
        :param persistent: Boolean value. True if the boot device will
            persist to all future boots, False if not.
            Default: False.
""" rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:set_boot_device', node_ident) topic = api.request.rpcapi.get_topic_for(rpc_node) api.request.rpcapi.set_boot_device(api.request.context, rpc_node.uuid, boot_device, persistent=persistent, topic=topic) @METRICS.timer('BootDeviceController.get') @method.expose() @args.validate(node_ident=args.uuid_or_name) def get(self, node_ident): """Get the current boot device for a node. :param node_ident: the UUID or logical name of a node. :returns: a json object containing: :boot_device: the boot device, one of :mod:`ironic.common.boot_devices` or None if it is unknown. :persistent: Whether the boot device will persist to all future boots or not, None if it is unknown. """ rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:get_boot_device', node_ident) return self._get_boot_device(rpc_node) @METRICS.timer('BootDeviceController.supported') @method.expose() @args.validate(node_ident=args.uuid_or_name) def supported(self, node_ident): """Get a list of the supported boot devices. :param node_ident: the UUID or logical name of a node. :returns: A json object with the list of supported boot devices. 
""" rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:get_boot_device', node_ident) boot_devices = self._get_boot_device(rpc_node, supported=True) return {'supported_boot_devices': boot_devices} class IndicatorAtComponent(object): def __init__(self, **kwargs): name = kwargs.get('name') component = kwargs.get('component') unique_name = kwargs.get('unique_name') if name and component: self.unique_name = name + '@' + component self.name = name self.component = component elif unique_name: try: index = unique_name.index('@') except ValueError: raise exception.InvalidParameterValue( _('Malformed indicator name "%s"') % unique_name) self.component = unique_name[index + 1:] self.name = unique_name[:index] self.unique_name = unique_name else: raise exception.MissingParameterValue( _('Missing indicator name "%s"')) def indicator_convert_with_links(node_uuid, rpc_component, rpc_name, **rpc_fields): """Add links to the indicator.""" url = api.request.public_url return { 'name': rpc_name, 'component': rpc_component, 'readonly': rpc_fields.get('readonly', True), 'states': rpc_fields.get('states', []), 'links': [ link.make_link( 'self', url, 'nodes', '%s/management/indicators/%s' % ( node_uuid, rpc_name)), link.make_link( 'bookmark', url, 'nodes', '%s/management/indicators/%s' % ( node_uuid, rpc_name), bookmark=True) ] } def indicator_list_from_dict(node_ident, indicators): indicator_list = [] for component, names in indicators.items(): for name, fields in names.items(): indicator_at_component = IndicatorAtComponent( component=component, name=name) indicator = indicator_convert_with_links( node_ident, component, indicator_at_component.unique_name, **fields) indicator_list.append(indicator) return {'indicators': indicator_list} class IndicatorController(rest.RestController): @METRICS.timer('IndicatorController.put') @method.expose(status_code=http_client.NO_CONTENT) @args.validate(node_ident=args.uuid_or_name, indicator=args.string, state=args.string) def 
put(self, node_ident, indicator, state): """Set node hardware component indicator to the desired state. :param node_ident: the UUID or logical name of a node. :param indicator: Indicator ID (as reported by `get_supported_indicators`). :param state: Indicator state, one of mod:`ironic.common.indicator_states`. """ rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:set_indicator_state', node_ident) topic = pecan.request.rpcapi.get_topic_for(rpc_node) indicator_at_component = IndicatorAtComponent(unique_name=indicator) pecan.request.rpcapi.set_indicator_state( pecan.request.context, rpc_node.uuid, indicator_at_component.component, indicator_at_component.name, state, topic=topic) @METRICS.timer('IndicatorController.get_one') @method.expose() @args.validate(node_ident=args.uuid_or_name, indicator=args.string) def get_one(self, node_ident, indicator): """Get node hardware component indicator and its state. :param node_ident: the UUID or logical name of a node. :param indicator: Indicator ID (as reported by `get_supported_indicators`). :returns: a dict with the "state" key and one of mod:`ironic.common.indicator_states` as a value. """ rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:get_indicator_state', node_ident) topic = pecan.request.rpcapi.get_topic_for(rpc_node) indicator_at_component = IndicatorAtComponent(unique_name=indicator) state = pecan.request.rpcapi.get_indicator_state( pecan.request.context, rpc_node.uuid, indicator_at_component.component, indicator_at_component.name, topic=topic) return {'state': state} @METRICS.timer('IndicatorController.get_all') @method.expose() @args.validate(node_ident=args.uuid_or_name) def get_all(self, node_ident, **kwargs): """Get node hardware components and their indicators. :param node_ident: the UUID or logical name of a node. :returns: A json object of hardware components (:mod:`ironic.common.components`) as keys with indicator IDs (from `get_supported_indicators`) as values. 
""" rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:get_indicator_state', node_ident) topic = pecan.request.rpcapi.get_topic_for(rpc_node) indicators = pecan.request.rpcapi.get_supported_indicators( pecan.request.context, rpc_node.uuid, topic=topic) return indicator_list_from_dict( node_ident, indicators) class InjectNmiController(rest.RestController): @METRICS.timer('InjectNmiController.put') @method.expose(status_code=http_client.NO_CONTENT) @args.validate(node_ident=args.uuid_or_name) def put(self, node_ident): """Inject NMI for a node. Inject NMI (Non Maskable Interrupt) for a node immediately. :param node_ident: the UUID or logical name of a node. :raises: NotFound if requested version of the API doesn't support inject nmi. :raises: HTTPForbidden if the policy is not authorized. :raises: NodeNotFound if the node is not found. :raises: NodeLocked if the node is locked by another conductor. :raises: UnsupportedDriverExtension if the node's driver doesn't support management or management.inject_nmi. :raises: InvalidParameterValue when the wrong driver info is specified or an invalid boot device is specified. :raises: MissingParameterValue if missing supplied info. 
""" if not api_utils.allow_inject_nmi(): raise exception.NotFound() rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:inject_nmi', node_ident) topic = api.request.rpcapi.get_topic_for(rpc_node) api.request.rpcapi.inject_nmi(api.request.context, rpc_node.uuid, topic=topic) class NodeManagementController(rest.RestController): boot_device = BootDeviceController() """Expose boot_device as a sub-element of management""" inject_nmi = InjectNmiController() """Expose inject_nmi as a sub-element of management""" indicators = IndicatorController() """Expose indicators as a sub-element of management""" class NodeConsoleController(rest.RestController): @METRICS.timer('NodeConsoleController.get') @method.expose() @args.validate(node_ident=args.uuid_or_name) def get(self, node_ident): """Get connection information about the console. :param node_ident: UUID or logical name of a node. """ rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:get_console', node_ident) topic = api.request.rpcapi.get_topic_for(rpc_node) try: console = api.request.rpcapi.get_console_information( api.request.context, rpc_node.uuid, topic) console_state = True except exception.NodeConsoleNotEnabled: console = None console_state = False return {'console_enabled': console_state, 'console_info': console} @METRICS.timer('NodeConsoleController.put') @method.expose(status_code=http_client.ACCEPTED) @args.validate(node_ident=args.uuid_or_name, enabled=args.boolean) def put(self, node_ident, enabled): """Start and stop the node console. :param node_ident: UUID or logical name of a node. :param enabled: Boolean value; whether to enable or disable the console. 
""" rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:set_console_state', node_ident) topic = api.request.rpcapi.get_topic_for(rpc_node) api.request.rpcapi.set_console_mode(api.request.context, rpc_node.uuid, enabled, topic) # Set the HTTP Location Header url_args = '/'.join([node_ident, 'states', 'console']) api.response.location = link.build_url('nodes', url_args) def node_states_convert(rpc_node): attr_list = ['console_enabled', 'last_error', 'power_state', 'provision_state', 'target_power_state', 'target_provision_state', 'provision_updated_at'] if api_utils.allow_raid_config(): attr_list.extend(['raid_config', 'target_raid_config']) if api.request.version.minor >= versions.MINOR_75_NODE_BOOT_MODE: attr_list.extend(['boot_mode', 'secure_boot']) states = {} for attr in attr_list: states[attr] = getattr(rpc_node, attr) if isinstance(states[attr], datetime.datetime): states[attr] = states[attr].isoformat() update_state_in_older_versions(states) return states class NodeStatesController(rest.RestController): _custom_actions = { 'boot_mode': ['PUT'], 'secure_boot': ['PUT'], 'power': ['PUT'], 'provision': ['PUT'], 'raid': ['PUT'], } console = NodeConsoleController() """Expose console as a sub-element of states""" @METRICS.timer('NodeStatesController.get') @method.expose() @args.validate(node_ident=args.uuid_or_name) def get(self, node_ident): """List the states of the node. :param node_ident: the UUID or logical_name of a node. """ rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:get_states', node_ident) # NOTE(lucasagomes): All these state values come from the # DB. Ironic counts with a periodic task that verify the current # power states of the nodes and update the DB accordingly. 
return node_states_convert(rpc_node) @METRICS.timer('NodeStatesController.raid') @method.expose(status_code=http_client.NO_CONTENT) @method.body('target_raid_config') @args.validate(node_ident=args.uuid_or_name, target_raid_config=args.types(dict)) def raid(self, node_ident, target_raid_config): """Set the target raid config of the node. :param node_ident: the UUID or logical name of a node. :param target_raid_config: Desired target RAID configuration of the node. It may be an empty dictionary as well. :raises: UnsupportedDriverExtension, if the node's driver doesn't support RAID configuration. :raises: InvalidParameterValue, if validation of target raid config fails. :raises: NotAcceptable, if requested version of the API is less than 1.12. """ rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:set_raid_state', node_ident) if not api_utils.allow_raid_config(): raise exception.NotAcceptable() topic = api.request.rpcapi.get_topic_for(rpc_node) try: api.request.rpcapi.set_target_raid_config( api.request.context, rpc_node.uuid, target_raid_config, topic=topic) except exception.UnsupportedDriverExtension as e: # Change error code as 404 seems appropriate because RAID is a # standard interface and all drivers might not have it. e.code = http_client.NOT_FOUND raise @METRICS.timer('NodeStatesController.power') @method.expose(status_code=http_client.ACCEPTED) @args.validate(node_ident=args.uuid_or_name, target=args.string, timeout=args.integer) def power(self, node_ident, target, timeout=None): """Set the power state of the node. :param node_ident: the UUID or logical name of a node. :param target: The desired power state of the node. :param timeout: timeout (in seconds) positive integer (> 0) for any power state. ``None`` indicates to use default timeout. :raises: ClientSideError (HTTP 409) if a power operation is already in progress. :raises: InvalidStateRequested (HTTP 400) if the requested target state is not valid or if the node is in CLEANING state. 
:raises: NotAcceptable (HTTP 406) for soft reboot, soft power off or timeout parameter, if requested version of the API is less than 1.27. :raises: Invalid (HTTP 400) if timeout value is less than 1. """ rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:set_power_state', node_ident) # TODO(lucasagomes): Test if it's able to transition to the # target state from the current one topic = api.request.rpcapi.get_topic_for(rpc_node) if ((target in [ir_states.SOFT_REBOOT, ir_states.SOFT_POWER_OFF] or timeout) and not api_utils.allow_soft_power_off()): raise exception.NotAcceptable() if timeout is not None and timeout < 1: raise exception.Invalid( _("timeout has to be positive integer")) if target not in ALLOWED_TARGET_POWER_STATES: raise exception.InvalidStateRequested( action=target, node=node_ident, state=rpc_node.power_state) # Don't change power state for nodes being cleaned elif rpc_node.provision_state in (ir_states.CLEANWAIT, ir_states.CLEANING): raise exception.InvalidStateRequested( action=target, node=node_ident, state=rpc_node.provision_state) api.request.rpcapi.change_node_power_state(api.request.context, rpc_node.uuid, target, timeout=timeout, topic=topic) # Set the HTTP Location Header url_args = '/'.join([node_ident, 'states']) api.response.location = link.build_url('nodes', url_args) @METRICS.timer('NodeStatesController.boot_mode') @method.expose(status_code=http_client.ACCEPTED) @args.validate(node_ident=args.uuid_or_name, target=args.string) def boot_mode(self, node_ident, target): """Asynchronous set the boot mode of the node. :param node_ident: the UUID or logical name of a node. :param target: The desired boot_mode for the node (uefi/bios). :raises: InvalidParameterValue (HTTP 400) if the requested target state is not valid. :raises: NotFound (HTTP 404) if requested version of the API is less than 1.76. :raises: Conflict (HTTP 409) if a node is in adopting state or another transient state. 
""" rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:set_boot_mode', node_ident) topic = api.request.rpcapi.get_topic_for(rpc_node) if (api.request.version.minor < versions.MINOR_76_NODE_CHANGE_BOOT_MODE): raise exception.NotFound( (_("This endpoint is supported starting with the API version " "1.%(min_version)s") % {'min_version': versions.MINOR_76_NODE_CHANGE_BOOT_MODE})) if target not in ALLOWED_TARGET_BOOT_MODES: msg = (_("Invalid boot mode %(mode)s requested for node. " "Allowed boot modes are: " "%(modes)s") % {'mode': target, 'modes': ', '.join(ALLOWED_TARGET_BOOT_MODES)}) raise exception.InvalidParameterValue(msg) # NOTE(cenne): This currenly includes the ADOPTING state if rpc_node.provision_state in ir_states.UNSTABLE_STATES: msg = _("Node is in %(state)s state. Since node is transitioning, " "the boot mode will not be set as this may interfere " "with ongoing changes and result in erroneous modification" ". Try again later.") raise exception.Conflict(msg, action=target, node=node_ident, state=rpc_node.provision_state) api.request.rpcapi.change_node_boot_mode(api.request.context, rpc_node.uuid, target, topic=topic) # Set the HTTP Location Header url_args = '/'.join([node_ident, 'states']) api.response.location = link.build_url('nodes', url_args) @METRICS.timer('NodeStatesController.secure_boot') @method.expose(status_code=http_client.ACCEPTED) @args.validate(node_ident=args.uuid_or_name, target=args.boolean) def secure_boot(self, node_ident, target): """Asynchronous set the secure_boot state of the node. :param node_ident: the UUID or logical name of a node. :param target: Should secure_boot be enabled on node (True/False). :raises: InvalidParameterValue (HTTP 400) if the requested target state is not valid. :raises: NotFound (HTTP 404) if requested version of the API is less than 1.76. :raises: Conflict (HTTP 409) if a node is in adopting state. 
""" rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:set_secure_boot', node_ident) topic = api.request.rpcapi.get_topic_for(rpc_node) if (api.request.version.minor < versions.MINOR_76_NODE_CHANGE_BOOT_MODE): raise exception.NotFound( (_("This endpoint is supported starting with the API version " "1.%(min_version)s") % {'min_version': versions.MINOR_76_NODE_CHANGE_BOOT_MODE})) # NOTE(cenne): This is to exclude target=None or other invalid values if target not in (True, False): msg = (_("Invalid secure_boot %(state)s requested for node. " "Allowed secure_boot states are: True, False) ") % {'state': target}) raise exception.InvalidParameterValue(msg) # NOTE(cenne): This currenly includes the ADOPTING state if rpc_node.provision_state in ir_states.UNSTABLE_STATES: msg = _("Node is in %(state)s state. Since node is transitioning, " "the boot mode will not be set as this may interfere " "with ongoing changes and result in erroneous modification" ". Try again later.") raise exception.Conflict(msg, action=target, node=node_ident, state=rpc_node.provision_state ) api.request.rpcapi.change_node_secure_boot(api.request.context, rpc_node.uuid, target, topic=topic) # Set the HTTP Location Header url_args = '/'.join([node_ident, 'states']) api.response.location = link.build_url('nodes', url_args) def _do_provision_action(self, rpc_node, target, configdrive=None, clean_steps=None, deploy_steps=None, rescue_password=None, disable_ramdisk=None): topic = api.request.rpcapi.get_topic_for(rpc_node) # Note that there is a race condition. The node state(s) could change # by the time the RPC call is made and the TaskManager manager gets a # lock. 
if target in (ir_states.ACTIVE, ir_states.REBUILD, ir_states.DEPLOY): rebuild = (target == ir_states.REBUILD) if deploy_steps: _check_deploy_steps(deploy_steps) api.request.rpcapi.do_node_deploy(context=api.request.context, node_id=rpc_node.uuid, rebuild=rebuild, configdrive=configdrive, topic=topic, deploy_steps=deploy_steps) elif (target == ir_states.VERBS['unrescue']): api.request.rpcapi.do_node_unrescue( api.request.context, rpc_node.uuid, topic) elif (target == ir_states.VERBS['rescue']): if not (rescue_password and rescue_password.strip()): msg = (_('A non-empty "rescue_password" is required when ' 'setting target provision state to %s') % ir_states.VERBS['rescue']) raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) api.request.rpcapi.do_node_rescue( api.request.context, rpc_node.uuid, rescue_password, topic) elif target in (ir_states.DELETED, ir_states.UNDEPLOY): api.request.rpcapi.do_node_tear_down( api.request.context, rpc_node.uuid, topic) elif target == ir_states.VERBS['inspect']: api.request.rpcapi.inspect_hardware( api.request.context, rpc_node.uuid, topic=topic) elif target == ir_states.VERBS['clean']: if not clean_steps: msg = (_('"clean_steps" is required when setting target ' 'provision state to %s') % ir_states.VERBS['clean']) raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) _check_clean_steps(clean_steps) api.request.rpcapi.do_node_clean( api.request.context, rpc_node.uuid, clean_steps, disable_ramdisk, topic=topic) elif target in PROVISION_ACTION_STATES: api.request.rpcapi.do_provisioning_action( api.request.context, rpc_node.uuid, target, topic) else: msg = (_('The requested action "%(action)s" could not be ' 'understood.') % {'action': target}) raise exception.InvalidStateRequested(message=msg) @METRICS.timer('NodeStatesController.provision') @method.expose(status_code=http_client.ACCEPTED) @args.validate(node_ident=args.uuid_or_name, target=args.string, configdrive=args.types(type(None), dict, 
str), clean_steps=args.types(type(None), list), deploy_steps=args.types(type(None), list), rescue_password=args.string, disable_ramdisk=args.boolean) def provision(self, node_ident, target, configdrive=None, clean_steps=None, deploy_steps=None, rescue_password=None, disable_ramdisk=None): """Asynchronous trigger the provisioning of the node. This will set the target provision state of the node, and a background task will begin which actually applies the state change. This call will return a 202 (Accepted) indicating the request was accepted and is in progress; the client should continue to GET the status of this node to observe the status of the requested action. :param node_ident: UUID or logical name of a node. :param target: The desired provision state of the node or verb. :param configdrive: Optional. A gzipped and base64 encoded configdrive or a dict to build a configdrive from. Only valid when setting provision state to "active" or "rebuild". :param clean_steps: An ordered list of cleaning steps that will be performed on the node. A cleaning step is a dictionary with required keys 'interface' and 'step', and optional key 'args'. If specified, the value for 'args' is a keyword variable argument dictionary that is passed to the cleaning step method.:: { 'interface': , 'step': , 'args': {: , ..., : } } For example (this isn't a real example, this cleaning step doesn't exist):: { 'interface': 'deploy', 'step': 'upgrade_firmware', 'args': {'force': True} } This is required (and only valid) when target is "clean". :param deploy_steps: A list of deploy steps that will be performed on the node. A deploy step is a dictionary with required keys 'interface', 'step', 'priority' and 'args'. 
If specified, the value for 'args' is a keyword variable argument dictionary that is passed to the deploy step method.:: { 'interface': , 'step': , 'args': {: , ..., : } 'priority': } For example (this isn't a real example, this deploy step doesn't exist):: { 'interface': 'deploy', 'step': 'upgrade_firmware', 'args': {'force': True}, 'priority': 90 } This is used only when target is "active" or "rebuild" and is optional. :param rescue_password: A string representing the password to be set inside the rescue environment. This is required (and only valid), when target is "rescue". :param disable_ramdisk: Whether to skip booting ramdisk for cleaning. :raises: NodeLocked (HTTP 409) if the node is currently locked. :raises: ClientSideError (HTTP 409) if the node is already being provisioned. :raises: InvalidParameterValue (HTTP 400), if validation of clean_steps, deploy_steps or power driver interface fails. :raises: InvalidStateRequested (HTTP 400) if the requested transition is not possible from the current state. :raises: NodeInMaintenance (HTTP 400), if operation cannot be performed because the node is in maintenance mode. :raises: NoFreeConductorWorker (HTTP 503) if no workers are available. :raises: NotAcceptable (HTTP 406) if the API version specified does not allow the requested state transition or parameters. """ rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:set_provision_state', node_ident) api_utils.check_allow_management_verbs(target) if (target in (ir_states.ACTIVE, ir_states.REBUILD) and rpc_node.maintenance): raise exception.NodeInMaintenance(op=_('provisioning'), node=rpc_node.uuid) m = ir_states.machine.copy() m.initialize(rpc_node.provision_state) if not m.is_actionable_event(ir_states.VERBS.get(target, target)): # Normally, we let the task manager recognize and deal with # NodeLocked exceptions. However, that isn't done until the RPC # calls below. 
# In order to main backward compatibility with our API HTTP # response codes, we have this check here to deal with cases where # a node is already being operated on (DEPLOYING or such) and we # want to continue returning 409. Without it, we'd return 400. if rpc_node.reservation: raise exception.NodeLocked(node=rpc_node.uuid, host=rpc_node.reservation) raise exception.InvalidStateRequested( action=target, node=rpc_node.uuid, state=rpc_node.provision_state) api_utils.check_allow_configdrive(target, configdrive) api_utils.check_allow_clean_disable_ramdisk(target, disable_ramdisk) if clean_steps and target != ir_states.VERBS['clean']: msg = (_('"clean_steps" is only valid when setting target ' 'provision state to %s') % ir_states.VERBS['clean']) raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) api_utils.check_allow_deploy_steps(target, deploy_steps) if (rescue_password is not None and target != ir_states.VERBS['rescue']): msg = (_('"rescue_password" is only valid when setting target ' 'provision state to %s') % ir_states.VERBS['rescue']) raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) if (rpc_node.provision_state == ir_states.INSPECTWAIT and target == ir_states.VERBS['abort']): if not api_utils.allow_inspect_abort(): raise exception.NotAcceptable() self._do_provision_action(rpc_node, target, configdrive, clean_steps, deploy_steps, rescue_password, disable_ramdisk) # Set the HTTP Location Header url_args = '/'.join([node_ident, 'states']) api.response.location = link.build_url('nodes', url_args) def _check_clean_steps(clean_steps): """Ensure all necessary keys are present and correct in steps for clean :param clean_steps: a list of steps. For more details, see the clean_steps parameter of :func:`NodeStatesController.provision`. :raises: InvalidParameterValue if validation of steps fails. 
    """
    _check_steps(clean_steps, 'clean', _CLEAN_STEPS_SCHEMA)


def _check_deploy_steps(deploy_steps):
    """Ensure all necessary keys are present and correct in steps for deploy

    :param deploy_steps: a list of steps. For more details, see the
        deploy_steps parameter of :func:`NodeStatesController.provision`.
    :raises: InvalidParameterValue if validation of steps fails.
    """
    _check_steps(deploy_steps, 'deploy', _DEPLOY_STEPS_SCHEMA)


def _check_steps(steps, step_type, schema):
    """Ensure all necessary keys are present and correct in steps.

    Check that the user-specified steps are in the expected format and
    include the required information.

    :param steps: a list of steps. For more details, see the
        clean_steps and deploy_steps parameter of
        :func:`NodeStatesController.provision`.
    :param step_type: 'clean' or 'deploy' step type
    :param schema: JSON schema to use for validation.
    :raises: InvalidParameterValue if validation of steps fails.
    """
    try:
        # Structural validation (required keys, value types) is delegated
        # entirely to the JSON schema.
        jsonschema.validate(steps, schema)
    except jsonschema.ValidationError as exc:
        # Re-raise as an ironic exception so the API layer maps it to 400.
        raise exception.InvalidParameterValue(_('Invalid %s_steps: %s') %
                                              (step_type, exc))


def _get_chassis_uuid(node):
    """Return the UUID of a node's chassis, or None.

    :param node: a Node object.
    :returns: the UUID of the node's chassis, or None if the node has
        no chassis set.
    """
    if not node.chassis_id:
        return
    chassis = objects.Chassis.get_by_id(api.request.context,
                                        node.chassis_id)
    return chassis.uuid


def _replace_chassis_uuid_with_id(node_dict):
    """Replace 'chassis_uuid' in a node dict with the internal chassis ID.

    Mutates ``node_dict`` in place: pops the 'chassis_uuid' key and sets
    'chassis_id' to the matching database ID, or to None when no chassis
    UUID was supplied.

    :param node_dict: dict representation of a node.
    :returns: the Chassis object looked up, or None when no chassis_uuid
        was present in ``node_dict``.
    :raises: ChassisNotFound (re-coded to HTTP 400) if the chassis does
        not exist.
    """
    chassis_uuid = node_dict.pop('chassis_uuid', None)
    if not chassis_uuid:
        node_dict['chassis_id'] = None
        return

    try:
        chassis = objects.Chassis.get_by_uuid(api.request.context,
                                              chassis_uuid)
        node_dict['chassis_id'] = chassis.id
    except exception.ChassisNotFound as e:
        # Change error code because 404 (NotFound) is inappropriate
        # response for requests acting on nodes
        e.code = http_client.BAD_REQUEST  # BadRequest
        raise
    return chassis


def _make_trait_list(context, node_id, traits):
    """Return a TraitList object for the specified node and traits.

    The Trait objects will not be created in the database.

    :param context: a request context.
    :param node_id: the ID of a node.
    :param traits: a list of trait strings to add to the TraitList.
    :returns: a TraitList object.
    """
    trait_objs = [objects.Trait(context, node_id=node_id, trait=t)
                  for t in traits]
    return objects.TraitList(context, objects=trait_objs)


class NodeTraitsController(rest.RestController):

    def __init__(self, node_ident):
        super(NodeTraitsController, self).__init__()
        self.node_ident = node_ident

    @METRICS.timer('NodeTraitsController.get_all')
    @method.expose()
    def get_all(self):
        """List node traits."""
        node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:traits:list', self.node_ident)
        traits = objects.TraitList.get_by_node_id(api.request.context,
                                                  node.id)
        return {'traits': traits.get_trait_names()}

    @METRICS.timer('NodeTraitsController.put')
    @method.expose(status_code=http_client.NO_CONTENT)
    @method.body('body')
    @args.validate(trait=args.schema(api_utils.TRAITS_SCHEMA),
                   body=args.schema(TRAITS_SCHEMA))
    def put(self, trait=None, body=None):
        """Add a trait to a node.

        :param trait: String value; trait to add to a node, or None. Mutually
            exclusive with 'traits'. If not None, adds this trait to the node.
        :param traits: List of Strings; traits to set for a node, or None.
            Mutually exclusive with 'trait'. If not None, replaces the node's
            traits with this list.
        """
        context = api.request.context
        node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:traits:set', self.node_ident)

        traits = None
        if body and 'traits' in body:
            traits = body['traits']

        # Exactly one of 'trait' (single-trait URL form) or 'traits'
        # (replace-all body form) must be supplied.
        if (trait and traits is not None) or not (trait or traits is not None):
            msg = _("A single node trait may be added via PUT "
                    "/v1/nodes/<node identifier>/traits/<trait> with no body, "
                    "or all node traits may be replaced via PUT "
                    "/v1/nodes/<node identifier>/traits with the list of "
                    "traits specified in the request body.")
            raise exception.Invalid(msg)

        if trait:
            if api.request.body and api.request.json_body:
                # Ensure PUT nodes/uuid1/traits/trait1 with a non-empty body
                # fails.
                msg = _("No body should be provided when adding a trait")
                raise exception.Invalid(msg)

            traits = [trait]
            replace = False
            new_traits = {t.trait for t in node.traits} | {trait}
        else:
            replace = True
            new_traits = set(traits)

        # Update the node's traits to reflect the desired state.
        node.traits = _make_trait_list(context, node.id, sorted(new_traits))
        node.obj_reset_changes()
        chassis_uuid = _get_chassis_uuid(node)
        notify.emit_start_notification(context, node, 'update',
                                       chassis_uuid=chassis_uuid)
        with notify.handle_error_notification(context, node, 'update',
                                              chassis_uuid=chassis_uuid):
            topic = api.request.rpcapi.get_topic_for(node)
            api.request.rpcapi.add_node_traits(
                context, node.id, traits, replace=replace, topic=topic)
        notify.emit_end_notification(context, node, 'update',
                                     chassis_uuid=chassis_uuid)

        if not replace:
            # For single traits, set the HTTP Location Header.
            url_args = '/'.join((self.node_ident, 'traits', trait))
            api.response.location = link.build_url('nodes', url_args)

    @METRICS.timer('NodeTraitsController.delete')
    @method.expose(status_code=http_client.NO_CONTENT)
    @args.validate(trait=args.string)
    def delete(self, trait=None):
        """Remove one or all traits from a node.

        :param trait: String value; trait to remove from a node, or None. If
            None, all traits are removed.
        """
        context = api.request.context
        node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:traits:delete', self.node_ident)

        if trait:
            traits = [trait]
            new_traits = {t.trait for t in node.traits} - {trait}
        else:
            traits = None
            new_traits = set()

        # Update the node's traits to reflect the desired state.
        node.traits = _make_trait_list(context, node.id, sorted(new_traits))
        node.obj_reset_changes()
        chassis_uuid = _get_chassis_uuid(node)
        notify.emit_start_notification(context, node, 'update',
                                       chassis_uuid=chassis_uuid)
        with notify.handle_error_notification(context, node, 'update',
                                              chassis_uuid=chassis_uuid):
            topic = api.request.rpcapi.get_topic_for(node)
            try:
                api.request.rpcapi.remove_node_traits(
                    context, node.id, traits, topic=topic)
            except exception.NodeTraitNotFound:
                # NOTE(hshiina): Internal node ID should not be exposed.
                raise exception.NodeTraitNotFound(node_id=node.uuid,
                                                  trait=trait)
        notify.emit_end_notification(context, node, 'update',
                                     chassis_uuid=chassis_uuid)


def _get_fields_for_node_query(fields=None):
    """Compute the Node object fields needed to satisfy an API request.

    :param fields: list of API field names requested, or None for all.
    :returns: the list of DB object fields to load; API-computed fields
        (allocation_uuid, chassis_uuid, conductor) are removed from the
        result since they are filled in by the API layer, not the DB.
    :raises: Invalid if a requested field is not recognized.
    """
    valid_fields = ['automated_clean',
                    'bios_interface',
                    'boot_interface',
                    'boot_mode',
                    'clean_step',
                    'conductor_group',
                    'console_enabled',
                    'console_interface',
                    'deploy_interface',
                    'deploy_step',
                    'description',
                    'driver',
                    'driver_info',
                    'driver_internal_info',
                    'extra',
                    'fault',
                    'inspection_finished_at',
                    'inspection_started_at',
                    'inspect_interface',
                    'instance_info',
                    'instance_uuid',
                    'last_error',
                    'lessee',
                    'maintenance',
                    'maintenance_reason',
                    'management_interface',
                    'name',
                    'network_data',
                    'network_interface',
                    'owner',
                    'power_interface',
                    'power_state',
                    'properties',
                    'protected',
                    'protected_reason',
                    'provision_state',
                    'provision_updated_at',
                    'raid_config',
                    'raid_interface',
                    'rescue_interface',
                    'reservation',
                    'resource_class',
                    'retired',
                    'retired_reason',
                    'secure_boot',
                    'storage_interface',
                    'target_power_state',
                    'target_provision_state',
                    'target_raid_config',
                    'traits',
                    'vendor_interface']
    if not fields:
        return valid_fields
    else:
        object_fields = fields[:]
        # These fields are computed by the API layer, not loaded from
        # the DB object, so strip them from the object query.
        api_fulfilled_fields = ['allocation_uuid', 'chassis_uuid',
                                'conductor']
        for api_field in api_fulfilled_fields:
            if api_field in object_fields:
                object_fields.remove(api_field)
        query_fields = ['uuid', 'traits'] + api_fulfilled_fields \
            + valid_fields
        for field in fields:
            if field not in query_fields:
                msg = 'Field %s is not a valid field.' % field
                raise exception.Invalid(msg)
        return object_fields


def node_convert_with_links(rpc_node, fields=None, sanitize=True):
    """Convert an RPC Node object into an API response dict.

    Fills in the API-computed fields (conductor, allocation_uuid,
    chassis_uuid), attaches sub-resource links when all fields are
    requested, and optionally sanitizes the result.

    :param rpc_node: a Node object.
    :param fields: list of requested fields, or None for all fields.
    :param sanitize: when True, run node_sanitize() before returning.
    :returns: a dict representing the node.
    """
    # NOTE(TheJulia): This takes approximately 10% of the time to
    # collect and return requests to API consumer, specifically
    # for the nova sync query which is the most intense overhead
    # an integrated deployment can really face.
    node = api_utils.object_to_dict(
        rpc_node,
        link_resource='nodes',
        fields=_get_fields_for_node_query(fields))
    if node.get('traits') is not None:
        node['traits'] = rpc_node.traits.get_trait_names()
    if (api_utils.allow_expose_conductors()
            and (fields is None or 'conductor' in fields)):
        # NOTE(kaifeng) It is possible a node gets orphaned in certain
        # circumstances, set conductor to None in such case.
        try:
            host = api.request.rpcapi.get_conductor_for(rpc_node)
            node['conductor'] = host
        except (exception.NoValidHost, exception.TemporaryFailure):
            LOG.debug('Currently there is no conductor servicing node '
                      '%(node)s.', {'node': rpc_node.uuid})
            node['conductor'] = None
    # If allocations ever become the primary use path, this absolutely
    # needs to become a join. :\
    if (api_utils.allow_allocations()
            and (fields is None or 'allocation_uuid' in fields)):
        node['allocation_uuid'] = None
        if rpc_node.allocation_id:
            try:
                allocation = objects.Allocation.get_by_id(
                    api.request.context,
                    rpc_node.allocation_id)
                node['allocation_uuid'] = allocation.uuid
            except exception.AllocationNotFound:
                pass
    if fields is None or 'chassis_uuid' in fields:
        node['chassis_uuid'] = _get_chassis_uuid(rpc_node)

    if fields is not None:
        api_utils.check_for_invalid_fields(
            fields, set(node))

    show_states_links = (
        api_utils.allow_links_node_states_and_driver_properties())
    show_portgroups = api_utils.allow_portgroups_subcontrollers()
    show_volume = api_utils.allow_volume()

    url = api.request.public_url

    # Sub-resource links are only emitted on full (no explicit fields)
    # requests.
    if fields is None:
        node['ports'] = [link.make_link('self', url, 'nodes',
                                        node['uuid'] + "/ports"),
                         link.make_link('bookmark', url, 'nodes',
                                        node['uuid'] + "/ports",
                                        bookmark=True)]
        if show_states_links:
            node['states'] = [link.make_link('self', url, 'nodes',
                                             node['uuid'] + "/states"),
                              link.make_link('bookmark', url, 'nodes',
                                             node['uuid'] + "/states",
                                             bookmark=True)]
        if show_portgroups:
            node['portgroups'] = [
                link.make_link('self', url, 'nodes',
                               node['uuid'] + "/portgroups"),
                link.make_link('bookmark', url, 'nodes',
                               node['uuid'] + "/portgroups",
                               bookmark=True)]

        if show_volume:
            node['volume'] = [
                link.make_link('self', url, 'nodes',
                               node['uuid'] + "/volume"),
                link.make_link('bookmark', url, 'nodes',
                               node['uuid'] + "/volume",
                               bookmark=True)]

    if not sanitize:
        return node

    node_sanitize(node, fields)

    return node


def node_sanitize(node, fields, cdict=None,
                  show_driver_secrets=None, show_instance_secrets=None,
                  evaluate_additional_policies=None):
    """Removes sensitive and unrequested data.

    Will only keep the fields specified in the ``fields`` parameter.

    :param fields:
        list of fields to preserve, or ``None`` to preserve them all
    :type fields: list of str
    :param cdict: Context dictionary for policy values evaluation.
        If not provided, it will be executed by the method,
        however for enumerating node lists, it is more efficient
        to provide.
    :param show_driver_secrets: A boolean value to allow external
        single evaluation of policy instead of once per node. Default
        None.
    :param show_instance_secrets: A boolean value to allow external
        evaluation of policy instead of once per node. Default None.
    :param evaluate_additional_policies: A boolean value to allow
        external evaluation of policy instead of once per node.
        Default None.
    """
    # NOTE(TheJulia): As of ironic 18.0, this method is about 88% of
    # the time spent preparing to return a node to. If it takes us
    # ~ 4.5 seconds to get 1000 nodes, we spend approximately 4 seconds
    # PER 1000 in this call. When the calling method provides
    # cdict, show_driver_secrets, show_instance_secrets, and
    # evaluate_additional_policies, then performance increases
    # in excess of 200% as policy checks are costly.

    if not cdict:
        cdict = api.request.context.to_policy_values()

    # We need a new target_dict for each node as owner/lessee field have
    # explicit associations and target comparison.
    target_dict = dict(cdict)

    # These fields are node specific.
    owner = node.get('owner')
    lessee = node.get('lessee')

    if owner:
        target_dict['node.owner'] = owner

    if lessee:
        target_dict['node.lessee'] = lessee

    # Scrub the dictionary's contents down to what was requested.
    api_utils.sanitize_dict(node, fields)

    # NOTE(tenbrae): the 'show_password' policy setting name exists for
    #             legacy purposes and can not be changed. Changing it will
    #             cause upgrade problems for any operators who have
    #             customized the value of this field
    # NOTE(TheJulia): These methods use policy.check and normally return
    # False in a noauth or password auth based situation, because the
    # effective caller doesn't match the policy check rule.
    if show_driver_secrets is None:
        show_driver_secrets = policy.check("show_password",
                                           cdict, target_dict)
    if show_instance_secrets is None:
        show_instance_secrets = policy.check("show_instance_secrets",
                                             cdict, target_dict)

    # TODO(TheJulia): The above checks need to be migrated in some direction,
    # but until we have auditing clarity, it might not be a big deal.

    # Determine if we need to do the additional checks. Keep in mind
    # nova integrated with ironic is API read heavy, so it is ideal
    # to keep the policy checks for say system-member based roles to
    # a minimum as they are likely the regular API users as well.
    # Also, the default for the filter_threshold is system-member.
    if evaluate_additional_policies is None:
        evaluate_additional_policies = not policy.check_policy(
            "baremetal:node:get:filter_threshold",
            target_dict, cdict)

    node_keys = node.keys()

    if evaluate_additional_policies:
        # Perform extended sanitization of nodes based upon policy
        # baremetal:node:get:filter_threshold
        _node_sanitize_extended(node, node_keys, target_dict, cdict)

    if 'driver_info' in node_keys:
        if (evaluate_additional_policies
            and not policy.check("baremetal:node:get:driver_info",
                                 target_dict, cdict)):
            # Guard infrastructure internal details from being visible.
            node['driver_info'] = {
                'content': '** Redacted - requires baremetal:node:get:'
                           'driver_info permission. **'}
        if not show_driver_secrets:
            node['driver_info'] = strutils.mask_dict_password(
                node['driver_info'], "******")

    if not show_instance_secrets and 'instance_info' in node_keys:
        node['instance_info'] = strutils.mask_dict_password(
            node['instance_info'], "******")

        # NOTE(dtantsur): configdrive may be a dict
        if node['instance_info'].get('configdrive'):
            node['instance_info']['configdrive'] = "******"

        # NOTE(tenbrae): agent driver may store a swift temp_url on the
        # instance_info, which shouldn't be exposed to non-admin users.
        # Now that ironic supports additional policies, we need to hide
        # it here, based on this policy.
        # Related to bug #1613903
        if node['instance_info'].get('image_url'):
            node['instance_info']['image_url'] = "******"

    if node.get('driver_internal_info', {}).get('agent_secret_token'):
        node['driver_internal_info']['agent_secret_token'] = "******"

    if 'provision_state' in node_keys:
        # Update legacy state data for provision state, but only if
        # the key is present.
        update_state_in_older_versions(node)
    hide_fields_in_newer_versions(node)

    show_states_links = (
        api_utils.allow_links_node_states_and_driver_properties())
    show_portgroups = api_utils.allow_portgroups_subcontrollers()
    show_volume = api_utils.allow_volume()

    # Drop sub-resource link entries that this microversion does not
    # support.
    if not show_volume:
        node.pop('volume', None)
    if not show_portgroups:
        node.pop('portgroups', None)
    if not show_states_links:
        node.pop('states', None)


def _node_sanitize_extended(node, node_keys, target_dict, cdict):
    """Redact additional node fields based on fine-grained policies.

    Mutates ``node`` in place, replacing fields the caller is not
    entitled to see with redaction markers.
    """
    # NOTE(TheJulia): The net effect of this is that by default,
    # at least matching common/policy.py defaults, these should
    # be stripped out.
    if ('last_error' in node_keys
            and not policy.check("baremetal:node:get:last_error",
                                 target_dict, cdict)):
        # Guard the last error from being visible as it can contain
        # hostnames revealing infrastructure internal details.
        node['last_error'] = ('** Value Redacted - Requires '
                              'baremetal:node:get:last_error '
                              'permission. **')
    if ('reservation' in node_keys
            and not policy.check("baremetal:node:get:reservation",
                                 target_dict, cdict)):
        # Guard conductor names from being visible.
        node['reservation'] = ('** Redacted - requires baremetal:'
                               'node:get:reservation permission. **')
    if ('driver_internal_info' in node_keys
            and not policy.check("baremetal:node:get:driver_internal_info",
                                 target_dict, cdict)):
        # Guard conductor names from being visible.
        node['driver_internal_info'] = {
            'content': '** Redacted - Requires baremetal:node:get:'
                       'driver_internal_info permission. **'}


def node_list_convert_with_links(nodes, limit, url, fields=None, **kwargs):
    """Convert a list of Node objects into a paginated API collection.

    Policy checks are evaluated once here and passed down to
    node_sanitize() for each node, which is significantly cheaper than
    evaluating them per node.
    """
    cdict = api.request.context.to_policy_values()
    target_dict = dict(cdict)
    sanitizer_args = {
        'cdict': cdict,
        'show_driver_secrets': policy.check("show_password", cdict,
                                            target_dict),
        'show_instance_secrets': policy.check("show_instance_secrets",
                                              cdict, target_dict),
        'evaluate_additional_policies': not policy.check_policy(
            "baremetal:node:get:filter_threshold",
            target_dict, cdict),
    }
    return collection.list_convert_with_links(
        items=[node_convert_with_links(n, fields=fields,
                                       sanitize=False)
               for n in nodes],
        item_name='nodes',
        limit=limit,
        url=url,
        fields=fields,
        sanitize_func=node_sanitize,
        sanitizer_args=sanitizer_args,
        **kwargs
    )


class NodeVendorPassthruController(rest.RestController):
    """REST controller for VendorPassthru.

    This controller allow vendors to expose a custom functionality in
    the Ironic API. Ironic will merely relay the message from here to the
    appropriate driver, no introspection will be made in the message body.
    """

    _custom_actions = {
        'methods': ['GET']
    }

    @METRICS.timer('NodeVendorPassthruController.methods')
    @method.expose()
    @args.validate(node_ident=args.uuid_or_name)
    def methods(self, node_ident):
        """Retrieve information about vendor methods of the given node.

        :param node_ident: UUID or logical name of a node.
        :returns: dictionary with <vendor method name>:<method metadata>
                  entries.
        :raises: NodeNotFound if the node is not found.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:vendor_passthru', node_ident)
        # Raise an exception if node is not found
        if rpc_node.driver not in _VENDOR_METHODS:
            # Cache vendor methods per driver: they are static per driver,
            # so one RPC round-trip per driver is enough.
            topic = api.request.rpcapi.get_topic_for(rpc_node)
            ret = api.request.rpcapi.get_node_vendor_passthru_methods(
                api.request.context, rpc_node.uuid, topic=topic)
            _VENDOR_METHODS[rpc_node.driver] = ret

        return _VENDOR_METHODS[rpc_node.driver]

    @METRICS.timer('NodeVendorPassthruController._default')
    @method.expose()
    @method.body('data')
    @args.validate(node_ident=args.uuid_or_name, method=args.string)
    def _default(self, node_ident, method, data=None):
        """Call a vendor extension.

        :param node_ident: UUID or logical name of a node.
        :param method: name of the method in vendor driver.
        :param data: body of data to supply to the specified method.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:vendor_passthru', node_ident)

        # Raise an exception if node is not found
        topic = api.request.rpcapi.get_topic_for(rpc_node)
        resp = api_utils.vendor_passthru(rpc_node.uuid, method, topic,
                                         data=data)
        api.response.status_code = resp.status_code
        return resp.obj


class NodeMaintenanceController(rest.RestController):

    def _set_maintenance(self, rpc_node, maintenance_mode, reason=None):
        """Set or clear maintenance mode on a node and emit notifications."""
        context = api.request.context
        rpc_node.maintenance = maintenance_mode
        rpc_node.maintenance_reason = reason
        notify.emit_start_notification(context, rpc_node, 'maintenance_set')
        with notify.handle_error_notification(context, rpc_node,
                                              'maintenance_set'):
            try:
                topic = api.request.rpcapi.get_topic_for(rpc_node)
            except exception.NoValidHost as e:
                # 404 would be misleading here; the node exists but has no
                # conductor, which is a bad request from the API viewpoint.
                e.code = http_client.BAD_REQUEST
                raise

            new_node = api.request.rpcapi.update_node(context, rpc_node,
                                                      topic=topic)
        notify.emit_end_notification(context, new_node, 'maintenance_set')

    @METRICS.timer('NodeMaintenanceController.put')
    @method.expose(status_code=http_client.ACCEPTED)
    @args.validate(node_ident=args.uuid_or_name, reason=args.string)
    def put(self, node_ident,
            reason=None):
        """Put the node in maintenance mode.

        :param node_ident: the UUID or logical_name of a node.
        :param reason: Optional, the reason why it's in maintenance.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:set_maintenance', node_ident)

        self._set_maintenance(rpc_node, True, reason=reason)

    @METRICS.timer('NodeMaintenanceController.delete')
    @method.expose(status_code=http_client.ACCEPTED)
    @args.validate(node_ident=args.uuid_or_name)
    def delete(self, node_ident):
        """Remove the node from maintenance mode.

        :param node_ident: the UUID or logical name of a node.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:clear_maintenance', node_ident)

        self._set_maintenance(rpc_node, False)


class NodeVIFController(rest.RestController):

    def __init__(self, node_ident):
        self.node_ident = node_ident

    def _get_node_and_topic(self, policy_name):
        """Return the node and its conductor topic after a policy check."""
        rpc_node = api_utils.check_node_policy_and_retrieve(
            policy_name, self.node_ident)
        try:
            return rpc_node, api.request.rpcapi.get_topic_for(rpc_node)
        except exception.NoValidHost as e:
            e.code = http_client.BAD_REQUEST
            raise

    @METRICS.timer('NodeVIFController.get_all')
    @method.expose()
    def get_all(self):
        """Get a list of attached VIFs"""
        rpc_node, topic = self._get_node_and_topic('baremetal:node:vif:list')
        vifs = api.request.rpcapi.vif_list(api.request.context,
                                           rpc_node.uuid, topic=topic)
        return {'vifs': vifs}

    @METRICS.timer('NodeVIFController.post')
    @method.expose(status_code=http_client.NO_CONTENT)
    @method.body('vif')
    @args.validate(vif=VIF_VALIDATOR)
    def post(self, vif):
        """Attach a VIF to this node

        :param vif: a dictionary of information about a VIF.
            It must have an 'id' key, whose value is a unique
            identifier for that VIF.
""" rpc_node, topic = self._get_node_and_topic('baremetal:node:vif:attach') if api.request.version.minor >= versions.MINOR_67_NODE_VIF_ATTACH_PORT: if 'port_uuid' in vif and 'portgroup_uuid' in vif: msg = _("Cannot specify both port_uuid and portgroup_uuid") raise exception.Invalid(msg) api.request.rpcapi.vif_attach(api.request.context, rpc_node.uuid, vif_info=vif, topic=topic) @METRICS.timer('NodeVIFController.delete') @method.expose(status_code=http_client.NO_CONTENT) @args.validate(vif_id=args.uuid_or_name) def delete(self, vif_id): """Detach a VIF from this node :param vif_id: The ID of a VIF to detach """ rpc_node, topic = self._get_node_and_topic('baremetal:node:vif:detach') api.request.rpcapi.vif_detach(api.request.context, rpc_node.uuid, vif_id=vif_id, topic=topic) class NodeHistoryController(rest.RestController): detail_fields = ['uuid', 'created_at', 'severity', 'event_type', 'event', 'conductor', 'user'] standard_fields = ['uuid', 'created_at', 'severity', 'event'] def __init__(self, node_ident): super(NodeHistoryController).__init__() self.node_ident = node_ident def _history_event_convert_with_links(self, node_uuid, event, detail=False): """Add link and convert history event""" url = api.request.public_url if not detail: fields = self.standard_fields else: fields = self.detail_fields event_entry = api_utils.object_to_dict( event, link_resource='nodes', fields=fields) if not detail: # The spec for this feature calls to truncate the event # field if not detailed, which makes sense in some environments # with many events, espescialy if the event text is particullarlly # long. entry_len = len(event_entry['event']) if entry_len > 255: event_entry['event'] = event_entry['event'][0:251] + '...' else: event_entry['event'] = event_entry['event'][0:entry_len] # These records cannot be changed by the API consumer, # and updated_at gets handed up from the db model # regardless if we want it or not. As such, strip from # the reply. 
event_entry.pop('updated_at') event_entry['links'] = [ link.make_link( 'self', url, 'nodes', '%s/history/%s' % (node_uuid, event.uuid) ) ] return event_entry @METRICS.timer('NodeHistoryController.get_all') @method.expose() @args.validate(detail=args.boolean, marker=args.uuid, limit=args.integer) def get_all(self, detail=False, marker=None, limit=None): """List node history.""" node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:history:get', self.node_ident) fields = self.detail_fields if detail else self.standard_fields marker_obj = None if marker: marker_obj = objects.NodeHistory.get_by_uuid(api.request.context, marker) limit = api_utils.validate_limit(limit) events = objects.NodeHistory.list_by_node_id(api.request.context, node.id, marker=marker_obj, limit=limit) return collection.list_convert_with_links( items=[ self._history_event_convert_with_links( node.uuid, event, detail=detail) for event in events ], item_name='history', url=f'nodes/{self.node_ident}/history', fields=fields, marker=marker_obj, limit=limit, ) @METRICS.timer('NodeHistoryController.get_one') @method.expose() @args.validate(event=args.uuid_or_name) def get_one(self, event): """Get a node history entry""" node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:history:get', self.node_ident) # TODO(TheJulia): Need to check policy to make sure if policy # check fails, that the entry cannot be found. event = objects.NodeHistory.get_by_uuid(api.request.context, event) return self._history_event_convert_with_links( node.uuid, event, detail=True) class NodesController(rest.RestController): """REST controller for Nodes.""" # NOTE(lucasagomes): For future reference. 
If we happen # to need to add another sub-controller in this class let's # try to make it a parameter instead of an endpoint due # https://bugs.launchpad.net/ironic/+bug/1572651, e.g, instead of # v1/nodes/(ident)/detail we could have v1/nodes/(ident)?detail=True states = NodeStatesController() """Expose the state controller action as a sub-element of nodes""" vendor_passthru = NodeVendorPassthruController() """A resource used for vendors to expose a custom functionality in the API""" management = NodeManagementController() """Expose management as a sub-element of nodes""" maintenance = NodeMaintenanceController() """Expose maintenance as a sub-element of nodes""" from_chassis = False """A flag to indicate if the requests to this controller are coming from the top-level resource Chassis""" _custom_actions = { 'detail': ['GET'], 'validate': ['GET'], } invalid_sort_key_list = ['properties', 'driver_info', 'extra', 'instance_info', 'driver_internal_info', 'clean_step', 'deploy_step', 'raid_config', 'target_raid_config', 'traits', 'network_data'] _subcontroller_map = { 'ports': port.PortsController, 'portgroups': portgroup.PortgroupsController, 'vifs': NodeVIFController, 'volume': volume.VolumeController, 'traits': NodeTraitsController, 'bios': bios.NodeBiosController, 'allocation': allocation.NodeAllocationController, 'history': NodeHistoryController, } @pecan.expose() def _lookup(self, ident, *remainder): if ident in self._subcontroller_map: pecan.abort(http_client.NOT_FOUND) try: ident = args.uuid_or_name('node', ident) except exception.InvalidParameterValue as e: pecan.abort(http_client.BAD_REQUEST, e.args[0]) if not remainder: return if ((remainder[0] == 'portgroups' and not api_utils.allow_portgroups_subcontrollers()) or (remainder[0] == 'vifs' and not api_utils.allow_vifs_subcontroller()) or (remainder[0] == 'bios' and not api_utils.allow_bios_interface()) or (remainder[0] == 'allocation' and not api_utils.allow_allocations()) or (remainder[0] == 'history' and 
not api_utils.allow_node_history())): pecan.abort(http_client.NOT_FOUND) if remainder[0] == 'traits' and not api_utils.allow_traits(): # NOTE(mgoddard): Returning here will ensure we exhibit the # behaviour of previous releases for microversions without this # endpoint. return subcontroller = self._subcontroller_map.get(remainder[0]) if subcontroller: return subcontroller(node_ident=ident), remainder[1:] def _filter_by_conductor(self, nodes, conductor): filtered_nodes = [] for n in nodes: try: host = api.request.rpcapi.get_conductor_for(n) if host == conductor: filtered_nodes.append(n) except (exception.NoValidHost, exception.TemporaryFailure): # NOTE(kaifeng) Node gets orphaned in case some conductor # offline or all conductors are offline. pass return filtered_nodes def _get_nodes_collection(self, chassis_uuid, instance_uuid, associated, maintenance, retired, provision_state, marker, limit, sort_key, sort_dir, driver=None, resource_class=None, resource_url=None, fields=None, fault=None, conductor_group=None, detail=None, conductor=None, owner=None, lessee=None, project=None, description_contains=None): if self.from_chassis and not chassis_uuid: raise exception.MissingParameterValue( _("Chassis id not specified.")) limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) if sort_key in self.invalid_sort_key_list: raise exception.InvalidParameterValue( _("The sort_key value %(key)s is an invalid field for " "sorting") % {'key': sort_key}) marker_obj = None if marker: marker_obj = objects.Node.get_by_uuid(api.request.context, marker) # The query parameters for the 'next' URL parameters = {} possible_filters = { 'maintenance': maintenance, 'chassis_uuid': chassis_uuid, 'associated': associated, 'provision_state': provision_state, 'driver': driver, 'resource_class': resource_class, 'fault': fault, 'conductor_group': conductor_group, 'owner': owner, 'lessee': lessee, 'project': project, 'description_contains': description_contains, 
'retired': retired, 'instance_uuid': instance_uuid } filters = {} for key, value in possible_filters.items(): if value is not None: filters[key] = value if fields: obj_fields = fields[:] required_object_fields = ('allocation_id', 'chassis_id', 'uuid', 'owner', 'lessee', 'created_at', 'updated_at') for req_field in required_object_fields: if req_field not in obj_fields: obj_fields.append(req_field) else: # map the name for the call, as we did not pickup a specific # list of fields to return. obj_fields = fields # NOTE(TheJulia): When a data set of the nodeds list is being # requested, this method takes approximately 3-3.5% of the time # when requesting specific fields aligning with Nova's sync # process. (Local DB though) nodes = objects.Node.list(api.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, filters=filters, fields=obj_fields) # Special filtering on results based on conductor field if conductor: nodes = self._filter_by_conductor(nodes, conductor) parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} if associated: parameters['associated'] = associated if maintenance: parameters['maintenance'] = maintenance if retired: parameters['retired'] = retired if detail is not None: parameters['detail'] = detail if instance_uuid: # NOTE(rloo) if limit==1 and len(nodes)==1 (see # Collection.has_next()), a 'next' link will # be generated, which we don't want. # NOTE(TheJulia): This is done after the query as # instance_uuid is a unique constraint in the DB # and we cannot pass a limit of 0 to sqlalchemy # and expect a response. limit = 0 return node_list_convert_with_links(nodes, limit, url=resource_url, fields=fields, **parameters) def _check_names_acceptable(self, names, error_msg): """Checks all node 'name's are acceptable, it does not return a value. This function will raise an exception for unacceptable names. 
:param names: list of node names to check :param error_msg: error message in case of exception.ClientSideError, should contain %(name)s placeholder. :raises: exception.NotAcceptable :raises: exception.ClientSideError """ if not api_utils.allow_node_logical_names(): raise exception.NotAcceptable() reserved_names = get_nodes_controller_reserved_names() for name in names: if not api_utils.is_valid_node_name(name): raise exception.ClientSideError( error_msg % {'name': name}, status_code=http_client.BAD_REQUEST) if name in reserved_names: raise exception.ClientSideError( 'The word "%(name)s" is reserved and can not be used as a ' 'node name. Reserved words are: %(reserved)s.' % {'name': name, 'reserved': ', '.join(reserved_names)}, status_code=http_client.BAD_REQUEST) def _update_changed_fields(self, node, rpc_node): """Update rpc_node based on changed fields in a node. """ original_chassis_id = rpc_node.chassis_id chassis = _replace_chassis_uuid_with_id(node) # conductor_group is case-insensitive, and we use it to # calculate the conductor to send an update too. lowercase # it here instead of just before saving so we calculate # correctly. node['conductor_group'] = node['conductor_group'].lower() # Node object protected field is not nullable if node.get('protected') is None: node['protected'] = False # NOTE(mgoddard): Traits cannot be updated via a node PATCH. api_utils.patch_update_changed_fields( node, rpc_node, fields=set(objects.Node.fields) - {'traits'}, schema=node_patch_schema(), id_map={'chassis_id': chassis and chassis.id or None} ) if original_chassis_id and not rpc_node.chassis_id: if not api_utils.allow_remove_chassis_uuid(): raise exception.NotAcceptable() def _check_driver_changed_and_console_enabled(self, rpc_node, node_ident): """Checks if the driver and the console is enabled in a node. If it does, is necessary to prevent updating it because the new driver will not be able to stop a console started by the previous one. 
        :param rpc_node: RPC Node object to be verified.
        :param node_ident: the UUID or logical name of a node.
        :raises: exception.ClientSideError
        """
        delta = rpc_node.obj_what_changed()
        if 'driver' in delta and rpc_node.console_enabled:
            raise exception.ClientSideError(
                _("Node %s can not update the driver while the console is "
                  "enabled. Please stop the console first.") % node_ident,
                status_code=http_client.CONFLICT)

    @METRICS.timer('NodesController.get_all')
    @method.expose()
    @args.validate(chassis_uuid=args.uuid, instance_uuid=args.uuid,
                   associated=args.boolean, maintenance=args.boolean,
                   retired=args.boolean, provision_state=args.string,
                   marker=args.uuid, limit=args.integer,
                   sort_key=args.string, sort_dir=args.string,
                   driver=args.string, fields=args.string_list,
                   resource_class=args.string, fault=args.string,
                   conductor_group=args.string, detail=args.boolean,
                   conductor=args.string, owner=args.string,
                   description_contains=args.string, lessee=args.string,
                   project=args.string)
    def get_all(self, chassis_uuid=None, instance_uuid=None, associated=None,
                maintenance=None, retired=None, provision_state=None,
                marker=None, limit=None, sort_key='id', sort_dir='asc',
                driver=None, fields=None, resource_class=None, fault=None,
                conductor_group=None, detail=None, conductor=None, owner=None,
                description_contains=None, lessee=None, project=None):
        """Retrieve a list of nodes.

        :param chassis_uuid: Optional UUID of a chassis, to get only nodes
                             for that chassis.
        :param instance_uuid: Optional UUID of an instance, to find the node
                              associated with that instance.
        :param associated: Optional boolean whether to return a list of
                           associated or unassociated nodes. May be combined
                           with other parameters.
        :param maintenance: Optional boolean value that indicates whether
                            to get nodes in maintenance mode ("True"), or not
                            in maintenance mode ("False").
        :param retired: Optional boolean value that indicates whether
                        to get retired nodes.
        :param provision_state: Optional string value to get only nodes in
                                that provision state.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result. This value cannot be larger than the value of
                      max_limit in the [api] section of the ironic
                      configuration, or only max_limit resources will be
                      returned.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param driver: Optional string value to get only nodes using that
                       driver.
        :param resource_class: Optional string value to get only nodes with
                               that resource_class.
        :param conductor_group: Optional string value to get only nodes with
                                that conductor_group.
        :param conductor: Optional string value to get only nodes managed by
                          that conductor.
        :param owner: Optional string value that set the owner whose nodes
                      are to be returned.
        :param lessee: Optional string value that set the lessee whose nodes
                       are to be returned.
        :param project: Optional string value that set the project - lessee
                        or owner - whose nodes are to be returned.
        :param fields: Optional, a list with a specified set of fields of the
                       resource to be returned.
        :param fault: Optional string value to get only nodes with that
                      fault.
        :param description_contains: Optional string value to get only nodes
                                     with description field contains matching
                                     value.
""" project = api_utils.check_list_policy('node', project) api_utils.check_allow_specify_fields(fields) api_utils.check_allowed_fields(fields) api_utils.check_allowed_fields([sort_key]) api_utils.check_for_invalid_state_and_allow_filter(provision_state) api_utils.check_allow_specify_driver(driver) api_utils.check_allow_specify_resource_class(resource_class) api_utils.check_allow_filter_by_fault(fault) api_utils.check_allow_filter_by_conductor_group(conductor_group) api_utils.check_allow_filter_by_conductor(conductor) api_utils.check_allow_filter_by_owner(owner) api_utils.check_allow_filter_by_lessee(lessee) fields = api_utils.get_request_return_fields(fields, detail, _DEFAULT_RETURN_FIELDS) extra_args = {'description_contains': description_contains} return self._get_nodes_collection(chassis_uuid, instance_uuid, associated, maintenance, retired, provision_state, marker, limit, sort_key, sort_dir, driver=driver, resource_class=resource_class, resource_url='nodes', fields=fields, fault=fault, conductor_group=conductor_group, detail=detail, conductor=conductor, owner=owner, lessee=lessee, project=project, **extra_args) @METRICS.timer('NodesController.detail') @method.expose() @args.validate(chassis_uuid=args.uuid, instance_uuid=args.uuid, associated=args.boolean, maintenance=args.boolean, retired=args.boolean, provision_state=args.string, marker=args.uuid, limit=args.integer, sort_key=args.string, sort_dir=args.string, driver=args.string, resource_class=args.string, fault=args.string, conductor_group=args.string, conductor=args.string, owner=args.string, description_contains=args.string, lessee=args.string, project=args.string) def detail(self, chassis_uuid=None, instance_uuid=None, associated=None, maintenance=None, retired=None, provision_state=None, marker=None, limit=None, sort_key='id', sort_dir='asc', driver=None, resource_class=None, fault=None, conductor_group=None, conductor=None, owner=None, description_contains=None, lessee=None, project=None): """Retrieve a 
list of nodes with detail. :param chassis_uuid: Optional UUID of a chassis, to get only nodes for that chassis. :param instance_uuid: Optional UUID of an instance, to find the node associated with that instance. :param associated: Optional boolean whether to return a list of associated or unassociated nodes. May be combined with other parameters. :param maintenance: Optional boolean value that indicates whether to get nodes in maintenance mode ("True"), or not in maintenance mode ("False"). :param retired: Optional boolean value that indicates whether to get nodes which are retired. :param provision_state: Optional string value to get only nodes in that provision state. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param driver: Optional string value to get only nodes using that driver. :param resource_class: Optional string value to get only nodes with that resource_class. :param fault: Optional string value to get only nodes with that fault. :param conductor_group: Optional string value to get only nodes with that conductor_group. :param owner: Optional string value that set the owner whose nodes are to be retrurned. :param lessee: Optional string value that set the lessee whose nodes are to be returned. :param project: Optional string value that set the project - lessee or owner - whose nodes are to be returned. :param description_contains: Optional string value to get only nodes with description field contains matching value. 
""" project = api_utils.check_list_policy('node', project) api_utils.check_for_invalid_state_and_allow_filter(provision_state) api_utils.check_allow_specify_driver(driver) api_utils.check_allow_specify_resource_class(resource_class) api_utils.check_allow_filter_by_fault(fault) api_utils.check_allow_filter_by_conductor_group(conductor_group) api_utils.check_allow_filter_by_owner(owner) api_utils.check_allow_filter_by_lessee(lessee) api_utils.check_allowed_fields([sort_key]) # /detail should only work against collections parent = api.request.path.split('/')[:-1][-1] if parent != "nodes": raise exception.HTTPNotFound() api_utils.check_allow_filter_by_conductor(conductor) extra_args = {'description_contains': description_contains} return self._get_nodes_collection(chassis_uuid, instance_uuid, associated, maintenance, retired, provision_state, marker, limit, sort_key, sort_dir, driver=driver, resource_class=resource_class, resource_url='nodes/detail', fault=fault, conductor_group=conductor_group, conductor=conductor, owner=owner, lessee=lessee, project=project, **extra_args) @METRICS.timer('NodesController.validate') @method.expose() @args.validate(node=args.uuid_or_name, node_uuid=args.uuid) def validate(self, node=None, node_uuid=None): """Validate the driver interfaces, using the node's UUID or name. Note that the 'node_uuid' interface is deprecated in favour of the 'node' interface :param node: UUID or name of a node. :param node_uuid: UUID of a node. """ if node is not None: # We're invoking this interface using positional notation, or # explicitly using 'node'. Try and determine which one. 
            # Only accept a name here when the requested API version
            # supports logical names; otherwise the value must be a UUID.
            if (not api_utils.allow_node_logical_names()
                    and not uuidutils.is_uuid_like(node)):
                raise exception.NotAcceptable()

        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:validate', node_uuid or node)

        topic = api.request.rpcapi.get_topic_for(rpc_node)
        return api.request.rpcapi.validate_driver_interfaces(
            api.request.context, rpc_node.uuid, topic)

    @METRICS.timer('NodesController.get_one')
    @method.expose()
    @args.validate(node_ident=args.uuid_or_name, fields=args.string_list)
    def get_one(self, node_ident, fields=None):
        """Retrieve information about the given node.

        :param node_ident: UUID or logical name of a node.
        :param fields: Optional, a list with a specified set of fields of the
                       resource to be returned.
        """
        if self.from_chassis:
            raise exception.OperationNotPermitted()

        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:get', node_ident, with_suffix=True)

        api_utils.check_allow_specify_fields(fields)
        api_utils.check_allowed_fields(fields)

        return node_convert_with_links(rpc_node, fields=fields)

    @METRICS.timer('NodesController.post')
    @method.expose(status_code=http_client.CREATED)
    @method.body('node')
    @args.validate(node=node_validator)
    def post(self, node):
        """Create a new node.

        :param node: a node within the request body.

        **Example Node creation request:**

        .. literalinclude:: ../../../../api-ref/source/samples/node-create-request-dynamic.json
           :language: javascript
        """
        if self.from_chassis:
            raise exception.OperationNotPermitted()

        context = api.request.context
        api_utils.check_policy('baremetal:node:create')

        reject_fields_in_newer_versions(node)

        # NOTE(tenbrae): get_topic_for checks if node.driver is in the hash
        #                ring and raises NoValidHost if it is not.
        #                We need to ensure that node has a UUID before it can
        #                be mapped onto the hash ring.
        if not node.get('uuid'):
            node['uuid'] = uuidutils.generate_uuid()

        # NOTE(jroll) this is special-cased to "" and not None,
        # because it is used in hash ring calculations
        if not node.get('conductor_group'):
            node['conductor_group'] = ''

        if node.get('name') is not None:
            error_msg = _("Cannot create node with invalid name '%(name)s'")
            self._check_names_acceptable([node['name']], error_msg)
        node['provision_state'] = api_utils.initial_node_provision_state()

        if not node.get('resource_class'):
            node['resource_class'] = CONF.default_resource_class

        chassis = _replace_chassis_uuid_with_id(node)
        chassis_uuid = chassis and chassis.uuid or None

        new_node = objects.Node(context, **node)

        try:
            topic = api.request.rpcapi.get_topic_for(new_node)
        except exception.NoValidHost as e:
            # NOTE(tenbrae): convert from 404 to 400 because client can see
            #                list of available drivers and shouldn't request
            #                one that doesn't exist.
            e.code = http_client.BAD_REQUEST
            raise

        notify.emit_start_notification(context, new_node, 'create',
                                       chassis_uuid=chassis_uuid)
        with notify.handle_error_notification(context, new_node, 'create',
                                              chassis_uuid=chassis_uuid):
            new_node = api.request.rpcapi.create_node(context,
                                                      new_node, topic)

        # Set the HTTP Location Header
        api.response.location = link.build_url('nodes', new_node.uuid)
        api_node = node_convert_with_links(new_node)
        chassis_uuid = api_node.get('chassis_uuid')
        notify.emit_end_notification(context, new_node, 'create',
                                     chassis_uuid=chassis_uuid)
        return api_node

    def _validate_patch(self, patch, reset_interfaces):
        """Sanity-check a node PATCH document before it is applied.

        :param patch: a json PATCH document.
        :param reset_interfaces: whether hardware interfaces are to be reset.
        :raises: exception.OperationNotPermitted if invoked via the chassis
            sub-controller.
        :raises: exception.Invalid if the patch contains disallowed updates.
        """
        if self.from_chassis:
            raise exception.OperationNotPermitted()

        api_utils.patch_validate_allowed_fields(patch, PATCH_ALLOWED_FIELDS)

        reject_patch_in_newer_versions(patch)

        traits = api_utils.get_patch_values(patch, '/traits')
        if traits:
            msg = _("Cannot update node traits via node patch.
Node traits " "should be updated via the node traits API.") raise exception.Invalid(msg) driver = api_utils.get_patch_values(patch, '/driver') if reset_interfaces and not driver: msg = _("The reset_interfaces parameter can only be used when " "changing the node's driver.") raise exception.Invalid(msg) description = api_utils.get_patch_values(patch, '/description') if description and len(description[0]) > _NODE_DESCRIPTION_MAX_LENGTH: msg = _("Cannot update node with description exceeding %s " "characters") % _NODE_DESCRIPTION_MAX_LENGTH raise exception.Invalid(msg) network_data_fields = api_utils.get_patch_values( patch, '/network_data') for network_data in network_data_fields: validate_network_data(network_data) def _authorize_patch_and_get_node(self, node_ident, patch): # deal with attribute-specific policy rules policy_checks = [] generic_update = False for p in patch: if p['path'].startswith('/instance_info'): policy_checks.append('baremetal:node:update_instance_info') elif p['path'].startswith('/extra'): policy_checks.append('baremetal:node:update_extra') elif (p['path'].startswith('/automated_clean') and strutils.bool_from_string(p['value'], default=None) is False): policy_checks.append('baremetal:node:disable_cleaning') elif p['path'].startswith('/driver_info'): policy_checks.append('baremetal:node:update:driver_info') elif p['path'].startswith('/properties'): policy_checks.append('baremetal:node:update:properties') elif p['path'].startswith('/chassis_uuid'): policy_checks.append('baremetal:node:update:chassis_uuid') elif p['path'].startswith('/instance_uuid'): policy_checks.append('baremetal:node:update:instance_uuid') elif p['path'].startswith('/lessee'): policy_checks.append('baremetal:node:update:lessee') elif p['path'].startswith('/owner'): policy_checks.append('baremetal:node:update:owner') elif p['path'].startswith('/driver'): policy_checks.append('baremetal:node:update:driver_interfaces') elif ((p['path'].lstrip('/').rsplit(sep="_", maxsplit=1)[0] in 
driver_base.ALL_INTERFACES) and (p['path'].lstrip('/').rsplit(sep="_", maxsplit=1)[-1] == "interface")): # TODO(TheJulia): Replace the above check with something like # elif (p['path'].lstrip('/').removesuffix('_interface') # when the minimum supported version is Python 3.9. policy_checks.append('baremetal:node:update:driver_interfaces') elif p['path'].startswith('/network_data'): policy_checks.append('baremetal:node:update:network_data') elif p['path'].startswith('/conductor_group'): policy_checks.append('baremetal:node:update:conductor_group') elif p['path'].startswith('/name'): policy_checks.append('baremetal:node:update:name') elif p['path'].startswith('/retired'): policy_checks.append('baremetal:node:update:retired') else: generic_update = True # always do at least one check if generic_update or not policy_checks: policy_checks.append('baremetal:node:update') return api_utils.check_multiple_node_policies_and_retrieve( policy_checks, node_ident, with_suffix=True) @METRICS.timer('NodesController.patch') @method.expose() @method.body('patch') @args.validate(node_ident=args.uuid_or_name, reset_interfaces=args.boolean, patch=args.patch) def patch(self, node_ident, reset_interfaces=None, patch=None): """Update an existing node. :param node_ident: UUID or logical name of a node. :param reset_interfaces: whether to reset hardware interfaces to their defaults. Only valid when updating the driver field. :param patch: a json PATCH document to apply to this node. 
""" if (reset_interfaces is not None and not api_utils.allow_reset_interfaces()): raise exception.NotAcceptable() self._validate_patch(patch, reset_interfaces) context = api.request.context rpc_node = self._authorize_patch_and_get_node(node_ident, patch) remove_inst_uuid_patch = [{'op': 'remove', 'path': '/instance_uuid'}] if rpc_node.maintenance and patch == remove_inst_uuid_patch: LOG.debug('Removing instance uuid %(instance)s from node %(node)s', {'instance': rpc_node.instance_uuid, 'node': rpc_node.uuid}) # Check if node is transitioning state, although nodes in some states # can be updated. elif (rpc_node.target_provision_state and rpc_node.provision_state not in ir_states.UPDATE_ALLOWED_STATES): msg = _("Node %s can not be updated while a state transition " "is in progress.") raise exception.ClientSideError( msg % node_ident, status_code=http_client.CONFLICT) elif (rpc_node.provision_state == ir_states.INSPECTING and api_utils.allow_inspect_wait_state()): msg = _('Cannot update node "%(node)s" while it is in state ' '"%(state)s".') % {'node': rpc_node.uuid, 'state': ir_states.INSPECTING} raise exception.ClientSideError(msg, status_code=http_client.CONFLICT) elif api_utils.get_patch_values(patch, '/owner'): # check if updating a provisioned node's owner is allowed if rpc_node.provision_state == ir_states.ACTIVE: try: api_utils.check_owner_policy( 'node', 'baremetal:node:update_owner_provisioned', rpc_node['owner'], rpc_node['lessee']) except exception.HTTPForbidden: msg = _('Cannot update owner of node "%(node)s" while it ' 'is in state "%(state)s".') % { 'node': rpc_node.uuid, 'state': ir_states.ACTIVE} raise exception.ClientSideError( msg, status_code=http_client.CONFLICT) # check if node has an associated allocation with an owner if rpc_node.allocation_id: try: allocation = objects.Allocation.get_by_id( context, rpc_node.allocation_id) if allocation.owner is not None: msg = _('Cannot update owner of node "%(node)s" while ' 'it is allocated to an allocation 
with an ' ' owner.') % {'node': rpc_node.uuid} raise exception.ClientSideError( msg, status_code=http_client.CONFLICT) except exception.AllocationNotFound: pass names = api_utils.get_patch_values(patch, '/name') if len(names): error_msg = (_("Node %s: Cannot change name to invalid name ") % node_ident) error_msg += "'%(name)s'" self._check_names_acceptable(names, error_msg) node_dict = rpc_node.as_dict() # NOTE(lucasagomes): # 1) Remove chassis_id because it's an internal value and # not present in the API object # 2) Add chassis_uuid node_dict['chassis_uuid'] = _get_chassis_uuid(rpc_node) node_dict = api_utils.apply_jsonpatch(node_dict, patch) api_utils.patched_validate_with_schema( node_dict, node_patch_schema(), node_patch_validator) self._update_changed_fields(node_dict, rpc_node) # NOTE(tenbrae): we calculate the rpc topic here in case node.driver # has changed, so that update is sent to the # new conductor, not the old one which may fail to # load the new driver. try: topic = api.request.rpcapi.get_topic_for(rpc_node) except exception.NoValidHost as e: # NOTE(tenbrae): convert from 404 to 400 because client can see # list of available drivers and shouldn't request # one that doesn't exist. 
e.code = http_client.BAD_REQUEST raise self._check_driver_changed_and_console_enabled(rpc_node, node_ident) chassis_uuid = _get_chassis_uuid(rpc_node) notify.emit_start_notification(context, rpc_node, 'update', chassis_uuid=chassis_uuid) with notify.handle_error_notification(context, rpc_node, 'update', chassis_uuid=chassis_uuid): new_node = api.request.rpcapi.update_node(context, rpc_node, topic, reset_interfaces) api_node = node_convert_with_links(new_node) chassis_uuid = api_node.get('chassis_uuid') notify.emit_end_notification(context, new_node, 'update', chassis_uuid=chassis_uuid) return api_node @METRICS.timer('NodesController.delete') @method.expose(status_code=http_client.NO_CONTENT) @args.validate(node_ident=args.uuid_or_name) def delete(self, node_ident, *args): """Delete a node. :param node_ident: UUID or logical name of a node. """ # occurs when deleting traits with an old API version if args: raise exception.NotFound() if self.from_chassis: raise exception.OperationNotPermitted() context = api.request.context rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:delete', node_ident, with_suffix=True) chassis_uuid = _get_chassis_uuid(rpc_node) notify.emit_start_notification(context, rpc_node, 'delete', chassis_uuid=chassis_uuid) with notify.handle_error_notification(context, rpc_node, 'delete', chassis_uuid=chassis_uuid): try: topic = api.request.rpcapi.get_topic_for(rpc_node) except exception.NoValidHost as e: e.code = http_client.BAD_REQUEST raise api.request.rpcapi.destroy_node(context, rpc_node.uuid, topic) notify.emit_end_notification(context, rpc_node, 'delete', chassis_uuid=chassis_uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/notification_utils.py0000664000175000017500000001622100000000000024312 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except 
in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import contextlib

from oslo_config import cfg
from oslo_log import log
from oslo_messaging import exceptions as oslo_msg_exc
from oslo_utils import excutils
from oslo_versionedobjects import exception as oslo_vo_exc

from ironic.common import exception
from ironic.common.i18n import _
from ironic.objects import allocation as allocation_objects
from ironic.objects import chassis as chassis_objects
from ironic.objects import deploy_template as deploy_template_objects
from ironic.objects import fields
from ironic.objects import node as node_objects
from ironic.objects import notification
from ironic.objects import port as port_objects
from ironic.objects import portgroup as portgroup_objects
from ironic.objects import volume_connector as volume_connector_objects
from ironic.objects import volume_target as volume_target_objects

LOG = log.getLogger(__name__)

CONF = cfg.CONF

# Maps a resource name to its (CRUD notification class, CRUD payload class)
# pair, used by _emit_api_notification below.
CRUD_NOTIFY_OBJ = {
    'allocation': (allocation_objects.AllocationCRUDNotification,
                   allocation_objects.AllocationCRUDPayload),
    'chassis': (chassis_objects.ChassisCRUDNotification,
                chassis_objects.ChassisCRUDPayload),
    'deploytemplate': (deploy_template_objects.DeployTemplateCRUDNotification,
                       deploy_template_objects.DeployTemplateCRUDPayload),
    'node': (node_objects.NodeCRUDNotification,
             node_objects.NodeCRUDPayload),
    'port': (port_objects.PortCRUDNotification,
             port_objects.PortCRUDPayload),
    'portgroup': (portgroup_objects.PortgroupCRUDNotification,
                  portgroup_objects.PortgroupCRUDPayload),
    'volumeconnector':
        (volume_connector_objects.VolumeConnectorCRUDNotification,
         volume_connector_objects.VolumeConnectorCRUDPayload),
    'volumetarget':
        (volume_target_objects.VolumeTargetCRUDNotification,
         volume_target_objects.VolumeTargetCRUDPayload),
}


def _emit_api_notification(context, obj, action, level, status, **kwargs):
    """Helper for emitting API notifications.

    :param context: request context.
    :param obj: resource rpc object.
    :param action: Action string to go in the EventType.
    :param level: Notification level. One of
                  `ironic.objects.fields.NotificationLevel.ALL`
    :param status: Status to go in the EventType. One of
                   `ironic.objects.fields.NotificationStatus.ALL`
    :param kwargs: kwargs to use when creating the notification payload.
    """
    resource = obj.__class__.__name__.lower()
    extra_args = kwargs
    try:
        try:
            if action == 'maintenance_set':
                notification_method = node_objects.NodeMaintenanceNotification
                payload_method = node_objects.NodePayload
            elif resource not in CRUD_NOTIFY_OBJ:
                notification_name = payload_name = _("is not defined")
                raise KeyError(_("Unsupported resource: %s") % resource)
            else:
                notification_method, payload_method = CRUD_NOTIFY_OBJ[resource]
            notification_name = notification_method.__name__
            payload_name = payload_method.__name__
        finally:
            # Prepare our exception message just in case
            exception_values = {"resource": resource,
                                "uuid": obj.uuid,
                                "action": action,
                                "status": status,
                                "level": level,
                                "notification_method": notification_name,
                                "payload_method": payload_name}
            exception_message = (_("Failed to send baremetal.%(resource)s."
                                   "%(action)s.%(status)s notification for "
                                   "%(resource)s %(uuid)s with level "
                                   "%(level)s, notification method "
                                   "%(notification_method)s, payload method "
                                   "%(payload_method)s, error %(error)s"))
        payload = payload_method(obj, **extra_args)
        if resource == 'node':
            # Node payloads may carry credentials in driver fields.
            notification.mask_secrets(payload)
        notification_method(
            publisher=notification.NotificationPublisher(
                service='ironic-api', host=CONF.host),
            event_type=notification.EventType(
                object=resource, action=action, status=status),
            level=level,
            payload=payload).emit(context)
    except (exception.NotificationSchemaObjectError,
            exception.NotificationSchemaKeyError,
            exception.NotificationPayloadError,
            oslo_msg_exc.MessageDeliveryFailure,
            oslo_vo_exc.VersionedObjectsException) as e:
        exception_values['error'] = e
        LOG.warning(exception_message, exception_values)
    except Exception as e:
        exception_values['error'] = e
        LOG.exception(exception_message, exception_values)


def emit_start_notification(context, obj, action, **kwargs):
    """Helper for emitting API 'start' notifications.

    :param context: request context.
    :param obj: resource rpc object.
    :param action: Action string to go in the EventType.
    :param kwargs: kwargs to use when creating the notification payload.
    """
    _emit_api_notification(context, obj, action,
                           fields.NotificationLevel.INFO,
                           fields.NotificationStatus.START,
                           **kwargs)


@contextlib.contextmanager
def handle_error_notification(context, obj, action, **kwargs):
    """Context manager to handle any error notifications.

    :param context: request context.
    :param obj: resource rpc object.
    :param action: Action string to go in the EventType.
    :param kwargs: kwargs to use when creating the notification payload.
    """
    try:
        yield
    except Exception:
        # Emit the error notification, then re-raise the original error.
        with excutils.save_and_reraise_exception():
            _emit_api_notification(context, obj, action,
                                   fields.NotificationLevel.ERROR,
                                   fields.NotificationStatus.ERROR,
                                   **kwargs)


def emit_end_notification(context, obj, action, **kwargs):
    """Helper for emitting API 'end' notifications.
:param context: request context. :param obj: resource rpc object. :param action: Action string to go in the EventType. :param kwargs: kwargs to use when creating the notification payload. """ _emit_api_notification(context, obj, action, fields.NotificationLevel.INFO, fields.NotificationStatus.END, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/port.py0000664000175000017500000007646700000000000021412 0ustar00zuulzuul00000000000000# Copyright 2013 UnitedStack Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client from ironic_lib import metrics_utils from oslo_log import log from oslo_utils import uuidutils from pecan import rest from ironic import api from ironic.api.controllers import link from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import notification_utils as notify from ironic.api.controllers.v1 import utils as api_utils from ironic.api import method from ironic.common import args from ironic.common import exception from ironic.common.i18n import _ from ironic.common import states as ir_states from ironic import objects METRICS = metrics_utils.get_metrics_logger(__name__) LOG = log.getLogger(__name__) _DEFAULT_RETURN_FIELDS = ['uuid', 'address'] PORT_SCHEMA = { 'type': 'object', 'properties': { 'address': {'type': 'string'}, 'extra': {'type': ['object', 'null']}, 'is_smartnic': {'type': ['string', 'boolean', 'null']}, 'local_link_connection': {'type': ['null', 'object']}, 'node_uuid': {'type': 'string'}, 'physical_network': {'type': ['string', 'null'], 'maxLength': 64}, 'portgroup_uuid': {'type': ['string', 'null']}, 'pxe_enabled': {'type': ['string', 'boolean', 'null']}, 'uuid': {'type': ['string', 'null']}, }, 'required': ['address', 'node_uuid'], 'additionalProperties': False, } PORT_PATCH_SCHEMA = PORT_SCHEMA PATCH_ALLOWED_FIELDS = [ 'address', 'extra', 'is_smartnic', 'local_link_connection', 'node_uuid', 'physical_network', 'portgroup_uuid', 'pxe_enabled' ] PORT_VALIDATOR_EXTRA = args.dict_valid( address=args.mac_address, node_uuid=args.uuid, is_smartnic=args.boolean, local_link_connection=api_utils.LOCAL_LINK_VALIDATOR, portgroup_uuid=args.uuid, pxe_enabled=args.boolean, uuid=args.uuid, ) PORT_VALIDATOR = args.and_valid( args.schema(PORT_SCHEMA), PORT_VALIDATOR_EXTRA ) PORT_PATCH_VALIDATOR = args.and_valid( args.schema(PORT_PATCH_SCHEMA), PORT_VALIDATOR_EXTRA ) def hide_fields_in_newer_versions(port): # if requested version is < 1.18, hide internal_info field if not 
api_utils.allow_port_internal_info(): port.pop('internal_info', None) # if requested version is < 1.19, hide local_link_connection and # pxe_enabled fields if not api_utils.allow_port_advanced_net_fields(): port.pop('pxe_enabled', None) port.pop('local_link_connection', None) # if requested version is < 1.24, hide portgroup_uuid field if not api_utils.allow_portgroups_subcontrollers(): port.pop('portgroup_uuid', None) # if requested version is < 1.34, hide physical_network field. if not api_utils.allow_port_physical_network(): port.pop('physical_network', None) # if requested version is < 1.53, hide is_smartnic field. if not api_utils.allow_port_is_smartnic(): port.pop('is_smartnic', None) def convert_with_links(rpc_port, fields=None, sanitize=True): port = api_utils.object_to_dict( rpc_port, link_resource='ports', fields=( 'address', 'extra', 'internal_info', 'is_smartnic', 'local_link_connection', 'physical_network', 'pxe_enabled', ) ) api_utils.populate_node_uuid(rpc_port, port) if rpc_port.portgroup_id: pg = objects.Portgroup.get(api.request.context, rpc_port.portgroup_id) port['portgroup_uuid'] = pg.uuid else: port['portgroup_uuid'] = None _validate_fields(port, fields) if not sanitize: return port port_sanitize(port, fields=fields) return port def _validate_fields(port, fields=None): if fields is not None: api_utils.check_for_invalid_fields(fields, port) def port_sanitize(port, fields=None): """Removes sensitive and unrequested data. Will only keep the fields specified in the ``fields`` parameter. 
:param fields: list of fields to preserve, or ``None`` to preserve them all :type fields: list of str """ hide_fields_in_newer_versions(port) api_utils.sanitize_dict(port, fields) def list_convert_with_links(rpc_ports, limit, url, fields=None, **kwargs): ports = [] for rpc_port in rpc_ports: try: port = convert_with_links(rpc_port, fields=fields, sanitize=False) except exception.NodeNotFound: # NOTE(dtantsur): node was deleted after we fetched the port # list, meaning that the port was also deleted. Skip it. LOG.debug('Skipping port %s as its node was deleted', rpc_port.uuid) continue except exception.PortgroupNotFound: # NOTE(dtantsur): port group was deleted after we fetched the # port list, it may mean that the port was deleted too, but # we don't know it. Pretend that the port group was removed. LOG.debug('Removing port group UUID from port %s as the port ' 'group was deleted', rpc_port.uuid) rpc_port.portgroup_id = None port = convert_with_links(rpc_port, fields=fields, sanitize=False) ports.append(port) return collection.list_convert_with_links( items=ports, item_name='ports', limit=limit, url=url, fields=fields, sanitize_func=port_sanitize, **kwargs ) class PortsController(rest.RestController): """REST controller for Ports.""" _custom_actions = { 'detail': ['GET'], } invalid_sort_key_list = ['extra', 'internal_info', 'local_link_connection'] advanced_net_fields = ['pxe_enabled', 'local_link_connection'] def __init__(self, node_ident=None, portgroup_ident=None): super(PortsController, self).__init__() self.parent_node_ident = node_ident self.parent_portgroup_ident = portgroup_ident def _get_ports_collection(self, node_ident, address, portgroup_ident, marker, limit, sort_key, sort_dir, resource_url=None, fields=None, detail=None, project=None): """Retrieve a collection of ports. :param node_ident: UUID or name of a node, to get only ports for that node. :param address: MAC address of a port, to get the port which has this MAC address. 
:param portgroup_ident: UUID or name of a portgroup, to get only ports for that portgroup. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param resource_url: Optional, base url to be used for links :param fields: Optional, a list with a specified set of fields of the resource to be returned. :param detail: Optional, show detailed list of ports :param project: Optional, filter by project :returns: a list of ports. """ limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Port.get_by_uuid(api.request.context, marker) if sort_key in self.invalid_sort_key_list: raise exception.InvalidParameterValue( _("The sort_key value %(key)s is an invalid field for " "sorting") % {'key': sort_key}) node_ident = self.parent_node_ident or node_ident portgroup_ident = self.parent_portgroup_ident or portgroup_ident if node_ident and portgroup_ident: raise exception.OperationNotPermitted() if portgroup_ident: # FIXME: Since all we need is the portgroup ID, we can # make this more efficient by only querying # for that column. This will get cleaned up # as we move to the object interface. portgroup = api_utils.get_rpc_portgroup(portgroup_ident) ports = objects.Port.list_by_portgroup_id(api.request.context, portgroup.id, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, project=project) elif node_ident: # FIXME(comstud): Since all we need is the node ID, we can # make this more efficient by only querying # for that column. This will get cleaned up # as we move to the object interface. 
node = api_utils.get_rpc_node(node_ident) ports = objects.Port.list_by_node_id(api.request.context, node.id, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, project=project) elif address: ports = self._get_ports_by_address(address, project=project) else: ports = objects.Port.list(api.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, project=project) parameters = {} if detail is not None: parameters['detail'] = detail return list_convert_with_links(ports, limit, url=resource_url, fields=fields, sort_key=sort_key, sort_dir=sort_dir, **parameters) def _get_ports_by_address(self, address, project=None): """Retrieve a port by its address. :param address: MAC address of a port, to get the port which has this MAC address. :param project: Optional, filter by project :returns: a list with the port, or an empty list if no port is found. """ try: port = objects.Port.get_by_address(api.request.context, address, project=project) return [port] except exception.PortNotFound: return [] def _check_allowed_port_fields(self, fields): """Check if fetching a particular field of a port is allowed. Check if the required version is being requested for fields that are only allowed to be fetched in a particular API version. 
:param fields: list or set of fields to check :raises: NotAcceptable if a field is not allowed """ if fields is None: return if (not api_utils.allow_port_advanced_net_fields() and set(fields).intersection(self.advanced_net_fields)): raise exception.NotAcceptable() if ('portgroup_uuid' in fields and not api_utils.allow_portgroups_subcontrollers()): raise exception.NotAcceptable() if ('physical_network' in fields and not api_utils.allow_port_physical_network()): raise exception.NotAcceptable() if ('is_smartnic' in fields and not api_utils.allow_port_is_smartnic()): raise exception.NotAcceptable() if ('local_link_connection/network_type' in fields and not api_utils.allow_local_link_connection_network_type()): raise exception.NotAcceptable() if (isinstance(fields, dict) and fields.get('local_link_connection') is not None): if (not api_utils.allow_local_link_connection_network_type() and 'network_type' in fields['local_link_connection']): raise exception.NotAcceptable() @METRICS.timer('PortsController.get_all') @method.expose() @args.validate(node=args.uuid_or_name, node_uuid=args.uuid, address=args.mac_address, marker=args.uuid, limit=args.integer, sort_key=args.string, sort_dir=args.string, fields=args.string_list, portgroup=args.uuid_or_name, detail=args.boolean) def get_all(self, node=None, node_uuid=None, address=None, marker=None, limit=None, sort_key='id', sort_dir='asc', fields=None, portgroup=None, detail=None): """Retrieve a list of ports. Note that the 'node_uuid' interface is deprecated in favour of the 'node' interface :param node: UUID or name of a node, to get only ports for that node. :param node_uuid: UUID of a node, to get only ports for that node. :param address: MAC address of a port, to get the port which has this MAC address. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. 
This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param fields: Optional, a list with a specified set of fields of the resource to be returned. :param portgroup: UUID or name of a portgroup, to get only ports for that portgroup. :raises: NotAcceptable, HTTPNotFound """ project = api_utils.check_port_list_policy( parent_node=self.parent_node_ident, parent_portgroup=self.parent_portgroup_ident) if self.parent_node_ident: node = self.parent_node_ident if self.parent_portgroup_ident: portgroup = self.parent_portgroup_ident api_utils.check_allow_specify_fields(fields) self._check_allowed_port_fields(fields) self._check_allowed_port_fields([sort_key]) if portgroup and not api_utils.allow_portgroups_subcontrollers(): raise exception.NotAcceptable() fields = api_utils.get_request_return_fields(fields, detail, _DEFAULT_RETURN_FIELDS) if not node_uuid and node: # We're invoking this interface using positional notation, or # explicitly using 'node'. Try and determine which one. # Make sure only one interface, node or node_uuid is used if (not api_utils.allow_node_logical_names() and not uuidutils.is_uuid_like(node)): raise exception.NotAcceptable() return self._get_ports_collection(node_uuid or node, address, portgroup, marker, limit, sort_key, sort_dir, resource_url='ports', fields=fields, detail=detail, project=project) @METRICS.timer('PortsController.detail') @method.expose() @args.validate(node=args.uuid_or_name, node_uuid=args.uuid, address=args.mac_address, marker=args.uuid, limit=args.integer, sort_key=args.string, sort_dir=args.string, portgroup=args.uuid_or_name) def detail(self, node=None, node_uuid=None, address=None, marker=None, limit=None, sort_key='id', sort_dir='asc', portgroup=None): """Retrieve a list of ports with detail. 
Note that the 'node_uuid' interface is deprecated in favour of the 'node' interface :param node: UUID or name of a node, to get only ports for that node. :param node_uuid: UUID of a node, to get only ports for that node. :param address: MAC address of a port, to get the port which has this MAC address. :param portgroup: UUID or name of a portgroup, to get only ports for that portgroup. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :raises: NotAcceptable, HTTPNotFound """ project = api_utils.check_port_list_policy( parent_node=self.parent_node_ident, parent_portgroup=self.parent_portgroup_ident) self._check_allowed_port_fields([sort_key]) if portgroup and not api_utils.allow_portgroups_subcontrollers(): raise exception.NotAcceptable() if not node_uuid and node: # We're invoking this interface using positional notation, or # explicitly using 'node'. Try and determine which one. # Make sure only one interface, node or node_uuid is used if (not api_utils.allow_node_logical_names() and not uuidutils.is_uuid_like(node)): raise exception.NotAcceptable() # NOTE(lucasagomes): /detail should only work against collections parent = api.request.path.split('/')[:-1][-1] if parent != "ports": raise exception.HTTPNotFound() return self._get_ports_collection(node_uuid or node, address, portgroup, marker, limit, sort_key, sort_dir, resource_url='ports/detail', project=project) @METRICS.timer('PortsController.get_one') @method.expose() @args.validate(port_uuid=args.uuid, fields=args.string_list) def get_one(self, port_uuid, fields=None): """Retrieve information about the given port. :param port_uuid: UUID of a port. 
:param fields: Optional, a list with a specified set of fields of the resource to be returned. :raises: NotAcceptable, HTTPNotFound """ if self.parent_node_ident or self.parent_portgroup_ident: raise exception.OperationNotPermitted() rpc_port, rpc_node = api_utils.check_port_policy_and_retrieve( 'baremetal:port:get', port_uuid) api_utils.check_allow_specify_fields(fields) self._check_allowed_port_fields(fields) return convert_with_links(rpc_port, fields=fields) @METRICS.timer('PortsController.post') @method.expose(status_code=http_client.CREATED) @method.body('port') @args.validate(port=PORT_VALIDATOR) def post(self, port): """Create a new port. :param port: a port within the request body. :raises: NotAcceptable, HTTPNotFound, Conflict """ if self.parent_node_ident or self.parent_portgroup_ident: raise exception.OperationNotPermitted() # NOTE(lucasagomes): Create the node_id attribute on-the-fly # to satisfy the api -> rpc object # conversion. # NOTE(TheJulia): The get of the node *does* check if the node # can be accessed. We need to be able to get the node regardless # in order to perform the actual policy check. raise_node_not_found = False node = None owner = None lessee = None node_uuid = port.get('node_uuid') try: node = api_utils.replace_node_uuid_with_id(port) owner = node.owner lessee = node.lessee except exception.NotFound: raise_node_not_found = True # While the rule is for the port, the base object that controls access # is the node. api_utils.check_owner_policy('node', 'baremetal:port:create', owner, lessee=lessee, conceal_node=False) if raise_node_not_found: # Delayed raise of NodeNotFound because we want to check # the access policy first. 
raise exception.NodeNotFound(node=node_uuid, code=http_client.BAD_REQUEST) context = api.request.context self._check_allowed_port_fields(port) portgroup = None if port.get('portgroup_uuid'): try: portgroup = objects.Portgroup.get(api.request.context, port.pop('portgroup_uuid')) if portgroup.node_id != node.id: raise exception.BadRequest(_('Port can not be added to a ' 'portgroup belonging to a ' 'different node.')) # NOTE(lucasagomes): Create the portgroup_id attribute # on-the-fly to satisfy the api -> # rpc object conversion. port['portgroup_id'] = portgroup.id except exception.PortgroupNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for a POST request to create a Port e.code = http_client.BAD_REQUEST # BadRequest raise e if port.get('is_smartnic'): try: api_utils.LOCAL_LINK_SMART_NIC_VALIDATOR( 'local_link_connection', port.get('local_link_connection')) except exception.Invalid: raise exception.Invalid( "Smart NIC port must have port_id " "and hostname in local_link_connection") physical_network = port.get('physical_network') if physical_network is not None and not physical_network: raise exception.Invalid('A non-empty value is required when ' 'setting physical_network') if (portgroup and (port.get('pxe_enabled'))): if not portgroup.standalone_ports_supported: msg = _("Port group %s doesn't support standalone ports. 
" "This port cannot be created as a member of that " "portgroup as the port's 'pxe_enabled' field was " "set to True.") raise exception.Conflict( msg % portgroup.uuid) # NOTE(yuriyz): UUID is mandatory for notifications payload if not port.get('uuid'): port['uuid'] = uuidutils.generate_uuid() rpc_port = objects.Port(context, **port) notify_extra = { 'node_uuid': node.uuid, 'portgroup_uuid': portgroup and portgroup.uuid or None } notify.emit_start_notification(context, rpc_port, 'create', **notify_extra) with notify.handle_error_notification(context, rpc_port, 'create', **notify_extra): topic = api.request.rpcapi.get_topic_for(node) new_port = api.request.rpcapi.create_port(context, rpc_port, topic) notify.emit_end_notification(context, new_port, 'create', **notify_extra) # Set the HTTP Location Header api.response.location = link.build_url('ports', new_port.uuid) return convert_with_links(new_port) @METRICS.timer('PortsController.patch') @method.expose() @method.body('patch') @args.validate(port_uuid=args.uuid, patch=args.patch) def patch(self, port_uuid, patch): """Update an existing port. :param port_uuid: UUID of a port. :param patch: a json PATCH document to apply to this port. 
:raises: NotAcceptable, HTTPNotFound """ if self.parent_node_ident or self.parent_portgroup_ident: raise exception.OperationNotPermitted() api_utils.patch_validate_allowed_fields(patch, PATCH_ALLOWED_FIELDS) context = api.request.context fields_to_check = set() for field in (self.advanced_net_fields + ['portgroup_uuid', 'physical_network', 'is_smartnic', 'local_link_connection/network_type']): field_path = '/%s' % field if (api_utils.get_patch_values(patch, field_path) or api_utils.is_path_removed(patch, field_path)): fields_to_check.add(field) self._check_allowed_port_fields(fields_to_check) rpc_port, rpc_node = api_utils.check_port_policy_and_retrieve( 'baremetal:port:update', port_uuid) port_dict = rpc_port.as_dict() # NOTE(lucasagomes): # 1) Remove node_id because it's an internal value and # not present in the API object # 2) Add node_uuid port_dict.pop('node_id', None) port_dict['node_uuid'] = rpc_node.uuid # NOTE(vsaienko): # 1) Remove portgroup_id because it's an internal value and # not present in the API object # 2) Add portgroup_uuid portgroup = None if port_dict.get('portgroup_id'): portgroup = objects.Portgroup.get_by_id( context, port_dict.pop('portgroup_id')) port_dict['portgroup_uuid'] = portgroup and portgroup.uuid or None port_dict = api_utils.apply_jsonpatch(port_dict, patch) try: if api_utils.is_path_updated(patch, '/portgroup_uuid'): if port_dict.get('portgroup_uuid'): portgroup = objects.Portgroup.get_by_uuid( context, port_dict['portgroup_uuid']) else: portgroup = None except exception.PortGroupNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for a PATCH request to change a Port e.code = http_client.BAD_REQUEST # BadRequest raise try: if port_dict['node_uuid'] != rpc_node.uuid: rpc_node = objects.Node.get( api.request.context, port_dict['node_uuid']) except exception.NodeNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for a PATCH request to change a Port e.code = 
http_client.BAD_REQUEST # BadRequest raise api_utils.patched_validate_with_schema( port_dict, PORT_PATCH_SCHEMA, PORT_PATCH_VALIDATOR) api_utils.patch_update_changed_fields( port_dict, rpc_port, fields=objects.Port.fields, schema=PORT_PATCH_SCHEMA, id_map={ 'node_id': rpc_node.id, 'portgroup_id': portgroup and portgroup.id or None } ) if (rpc_node.provision_state == ir_states.INSPECTING and api_utils.allow_inspect_wait_state()): msg = _('Cannot update port "%(port)s" on "%(node)s" while it is ' 'in state "%(state)s".') % {'port': rpc_port.uuid, 'node': rpc_node.uuid, 'state': ir_states.INSPECTING} raise exception.ClientSideError(msg, status_code=http_client.CONFLICT) if (api_utils.is_path_updated(patch, '/physical_network') and rpc_port['physical_network'] is not None and not rpc_port['physical_network']): raise exception.Invalid('A non-empty value is required when ' 'setting physical_network') notify_extra = {'node_uuid': rpc_node.uuid, 'portgroup_uuid': portgroup and portgroup.uuid or None} notify.emit_start_notification(context, rpc_port, 'update', **notify_extra) with notify.handle_error_notification(context, rpc_port, 'update', **notify_extra): topic = api.request.rpcapi.get_topic_for(rpc_node) new_port = api.request.rpcapi.update_port(context, rpc_port, topic) api_port = convert_with_links(new_port) notify.emit_end_notification(context, new_port, 'update', **notify_extra) return api_port @METRICS.timer('PortsController.delete') @method.expose(status_code=http_client.NO_CONTENT) @args.validate(port_uuid=args.uuid) def delete(self, port_uuid): """Delete a port. :param port_uuid: UUID of a port. 
:raises: OperationNotPermitted, HTTPNotFound """ if self.parent_node_ident or self.parent_portgroup_ident: raise exception.OperationNotPermitted() rpc_port, rpc_node = api_utils.check_port_policy_and_retrieve( 'baremetal:port:delete', port_uuid) context = api.request.context portgroup_uuid = None if rpc_port.portgroup_id: portgroup = objects.Portgroup.get_by_id(context, rpc_port.portgroup_id) portgroup_uuid = portgroup.uuid notify_extra = {'node_uuid': rpc_node.uuid, 'portgroup_uuid': portgroup_uuid} notify.emit_start_notification(context, rpc_port, 'delete', **notify_extra) with notify.handle_error_notification(context, rpc_port, 'delete', **notify_extra): topic = api.request.rpcapi.get_topic_for(rpc_node) api.request.rpcapi.destroy_port(context, rpc_port, topic) notify.emit_end_notification(context, rpc_port, 'delete', **notify_extra) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/portgroup.py0000664000175000017500000005427100000000000022454 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client from ironic_lib import metrics_utils from oslo_utils import uuidutils import pecan from ironic import api from ironic.api.controllers import link from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import notification_utils as notify from ironic.api.controllers.v1 import port from ironic.api.controllers.v1 import utils as api_utils from ironic.api import method from ironic.common import args from ironic.common import exception from ironic.common.i18n import _ from ironic.common import states as ir_states from ironic import objects METRICS = metrics_utils.get_metrics_logger(__name__) _DEFAULT_RETURN_FIELDS = ['uuid', 'address', 'name'] PORTGROUP_SCHEMA = { 'type': 'object', 'properties': { 'address': {'type': ['string', 'null']}, 'extra': {'type': ['object', 'null']}, 'mode': {'type': ['string', 'null']}, 'name': {'type': ['string', 'null']}, 'node_uuid': {'type': 'string'}, 'properties': {'type': ['object', 'null']}, 'standalone_ports_supported': {'type': ['string', 'boolean', 'null']}, 'uuid': {'type': ['string', 'null']}, }, 'required': ['node_uuid'], 'additionalProperties': False, } PORTGROUP_PATCH_SCHEMA = PORTGROUP_SCHEMA PORTGROUP_VALIDATOR_EXTRA = args.dict_valid( address=args.mac_address, node_uuid=args.uuid, standalone_ports_supported=args.boolean, uuid=args.uuid ) PORTGROUP_VALIDATOR = args.and_valid( args.schema(PORTGROUP_SCHEMA), PORTGROUP_VALIDATOR_EXTRA ) PORTGROUP_PATCH_VALIDATOR = args.and_valid( args.schema(PORTGROUP_PATCH_SCHEMA), PORTGROUP_VALIDATOR_EXTRA ) PATCH_ALLOWED_FIELDS = [ 'address', 'extra', 'mode', 'name', 'node_uuid', 'properties', 'standalone_ports_supported' ] def convert_with_links(rpc_portgroup, fields=None, sanitize=True): """Add links to the portgroup.""" portgroup = api_utils.object_to_dict( rpc_portgroup, link_resource='portgroups', fields=( 'address', 'extra', 'internal_info', 'mode', 'name', 'properties', 'standalone_ports_supported' ) ) 
api_utils.populate_node_uuid(rpc_portgroup, portgroup) url = api.request.public_url portgroup['ports'] = [ link.make_link('self', url, 'portgroups', rpc_portgroup.uuid + "/ports"), link.make_link('bookmark', url, 'portgroups', rpc_portgroup.uuid + "/ports", bookmark=True) ] if fields is not None: api_utils.check_for_invalid_fields(fields, portgroup) if not sanitize: return portgroup api_utils.sanitize_dict(portgroup, fields) return portgroup def list_convert_with_links(rpc_portgroups, limit, url, fields=None, **kwargs): return collection.list_convert_with_links( items=[convert_with_links(p, fields=fields, sanitize=False) for p in rpc_portgroups], item_name='portgroups', limit=limit, url=url, fields=fields, sanitize_func=api_utils.sanitize_dict, **kwargs ) class PortgroupsController(pecan.rest.RestController): """REST controller for portgroups.""" _custom_actions = { 'detail': ['GET'], } invalid_sort_key_list = ['extra', 'internal_info', 'properties'] _subcontroller_map = { 'ports': port.PortsController, } @pecan.expose() def _lookup(self, ident, *remainder): if not api_utils.allow_portgroups(): pecan.abort(http_client.NOT_FOUND) try: ident = args.uuid_or_name('portgroup', ident) except exception.InvalidParameterValue as e: pecan.abort(http_client.BAD_REQUEST, e.args[0]) if not remainder: return subcontroller = self._subcontroller_map.get(remainder[0]) if subcontroller: if api_utils.allow_portgroups_subcontrollers(): return subcontroller( portgroup_ident=ident, node_ident=self.parent_node_ident), remainder[1:] pecan.abort(http_client.NOT_FOUND) def __init__(self, node_ident=None): super(PortgroupsController, self).__init__() self.parent_node_ident = node_ident def _get_portgroups_collection(self, node_ident, address, marker, limit, sort_key, sort_dir, resource_url=None, fields=None, detail=None, project=None): """Return portgroups collection. :param node_ident: UUID or name of a node. :param address: MAC address of a portgroup. 
:param marker: Pagination marker for large data sets. :param limit: Maximum number of resources to return in a single result. :param sort_key: Column to sort results by. Default: id. :param sort_dir: Direction to sort. "asc" or "desc". Default: asc. :param resource_url: Optional, URL to the portgroup resource. :param fields: Optional, a list with a specified set of fields of the resource to be returned. :param project: Optional, project ID to filter the request by. """ limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Portgroup.get_by_uuid(api.request.context, marker) if sort_key in self.invalid_sort_key_list: raise exception.InvalidParameterValue( _("The sort_key value %(key)s is an invalid field for " "sorting") % {'key': sort_key}) node_ident = self.parent_node_ident or node_ident if node_ident: # FIXME: Since all we need is the node ID, we can # make this more efficient by only querying # for that column. This will get cleaned up # as we move to the object interface. node = api_utils.get_rpc_node(node_ident) portgroups = objects.Portgroup.list_by_node_id( api.request.context, node.id, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, project=project) elif address: portgroups = self._get_portgroups_by_address(address, project=project) else: portgroups = objects.Portgroup.list(api.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, project=project) parameters = {} if detail is not None: parameters['detail'] = detail return list_convert_with_links(portgroups, limit, url=resource_url, fields=fields, sort_key=sort_key, sort_dir=sort_dir, **parameters) def _get_portgroups_by_address(self, address, project=None): """Retrieve a portgroup by its address. :param address: MAC address of a portgroup, to get the portgroup which has this MAC address. :returns: a list with the portgroup, or an empty list if no portgroup is found. 
""" try: portgroup = objects.Portgroup.get_by_address(api.request.context, address, project=project) return [portgroup] except exception.PortgroupNotFound: return [] @METRICS.timer('PortgroupsController.get_all') @method.expose() @args.validate(node=args.uuid_or_name, address=args.mac_address, marker=args.uuid, limit=args.integer, sort_key=args.string, sort_dir=args.string, fields=args.string_list, detail=args.boolean) def get_all(self, node=None, address=None, marker=None, limit=None, sort_key='id', sort_dir='asc', fields=None, detail=None): """Retrieve a list of portgroups. :param node: UUID or name of a node, to get only portgroups for that node. :param address: MAC address of a portgroup, to get the portgroup which has this MAC address. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ if not api_utils.allow_portgroups(): raise exception.NotFound() if self.parent_node_ident: # Override the node, since this is being called by another # controller with a linked view. 
node = self.parent_node_ident project = api_utils.check_port_list_policy( portgroup=True, parent_node=self.parent_node_ident) api_utils.check_allowed_portgroup_fields(fields) api_utils.check_allowed_portgroup_fields([sort_key]) fields = api_utils.get_request_return_fields(fields, detail, _DEFAULT_RETURN_FIELDS) return self._get_portgroups_collection(node, address, marker, limit, sort_key, sort_dir, fields=fields, resource_url='portgroups', detail=detail, project=project) @METRICS.timer('PortgroupsController.detail') @method.expose() @args.validate(node=args.uuid_or_name, address=args.mac_address, marker=args.uuid, limit=args.integer, sort_key=args.string, sort_dir=args.string) def detail(self, node=None, address=None, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of portgroups with detail. :param node: UUID or name of a node, to get only portgroups for that node. :param address: MAC address of a portgroup, to get the portgroup which has this MAC address. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ if not api_utils.allow_portgroups(): raise exception.NotFound() if self.parent_node_ident: # If we have a parent node, then we need to override this method's # node filter. 
node = self.parent_node_ident project = api_utils.check_port_list_policy( portgroup=True, parent_node=self.parent_node_ident) api_utils.check_allowed_portgroup_fields([sort_key]) # NOTE: /detail should only work against collections parent = api.request.path.split('/')[:-1][-1] if parent != "portgroups": raise exception.HTTPNotFound() return self._get_portgroups_collection( node, address, marker, limit, sort_key, sort_dir, resource_url='portgroups/detail', project=project) @METRICS.timer('PortgroupsController.get_one') @method.expose() @args.validate(portgroup_ident=args.uuid_or_name, fields=args.string_list) def get_one(self, portgroup_ident, fields=None): """Retrieve information about the given portgroup. :param portgroup_ident: UUID or logical name of a portgroup. :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ if not api_utils.allow_portgroups(): raise exception.NotFound() rpc_portgroup, rpc_node = api_utils.check_port_policy_and_retrieve( 'baremetal:portgroup:get', portgroup_ident, portgroup=True) if self.parent_node_ident: raise exception.OperationNotPermitted() api_utils.check_allowed_portgroup_fields(fields) rpc_portgroup = api_utils.get_rpc_portgroup_with_suffix( portgroup_ident) return convert_with_links(rpc_portgroup, fields=fields) @METRICS.timer('PortgroupsController.post') @method.expose(status_code=http_client.CREATED) @method.body('portgroup') @args.validate(portgroup=PORTGROUP_VALIDATOR) def post(self, portgroup): """Create a new portgroup. :param portgroup: a portgroup within the request body. """ if not api_utils.allow_portgroups(): raise exception.NotFound() raise_node_not_found = False node = None owner = None lessee = None node_uuid = portgroup.get('node_uuid') try: # The replace_node_uuid_with_id also checks access to the node # and will raise an exception if access is not permitted. 
node = api_utils.replace_node_uuid_with_id(portgroup) owner = node.owner lessee = node.lessee except exception.NotFound: raise_node_not_found = True # While the rule is for the port, the base object that controls access # is the node. api_utils.check_owner_policy('node', 'baremetal:portgroup:create', owner, lessee=lessee, conceal_node=False) if raise_node_not_found: # Delayed raise of NodeNotFound because we want to check # the access policy first. raise exception.NodeNotFound(node=node_uuid, code=http_client.BAD_REQUEST) context = api.request.context if self.parent_node_ident: raise exception.OperationNotPermitted() if (not api_utils.allow_portgroup_mode_properties() and (portgroup.get('mode') or portgroup.get('properties'))): raise exception.NotAcceptable() if (portgroup.get('name') and not api_utils.is_valid_logical_name(portgroup['name'])): error_msg = _("Cannot create portgroup with invalid name " "'%(name)s'") % {'name': portgroup['name']} raise exception.ClientSideError( error_msg, status_code=http_client.BAD_REQUEST) # NOTE(yuriyz): UUID is mandatory for notifications payload if not portgroup.get('uuid'): portgroup['uuid'] = uuidutils.generate_uuid() new_portgroup = objects.Portgroup(context, **portgroup) notify.emit_start_notification(context, new_portgroup, 'create', node_uuid=node.uuid) with notify.handle_error_notification(context, new_portgroup, 'create', node_uuid=node.uuid): new_portgroup.create() notify.emit_end_notification(context, new_portgroup, 'create', node_uuid=node.uuid) # Set the HTTP Location Header api.response.location = link.build_url('portgroups', new_portgroup.uuid) return convert_with_links(new_portgroup) @METRICS.timer('PortgroupsController.patch') @method.expose() @method.body('patch') @args.validate(portgroup_ident=args.uuid_or_name, patch=args.patch) def patch(self, portgroup_ident, patch): """Update an existing portgroup. :param portgroup_ident: UUID or logical name of a portgroup. 
:param patch: a json PATCH document to apply to this portgroup. """ if not api_utils.allow_portgroups(): raise exception.NotFound() context = api.request.context rpc_portgroup, rpc_node = api_utils.check_port_policy_and_retrieve( 'baremetal:portgroup:update', portgroup_ident, portgroup=True) if self.parent_node_ident: raise exception.OperationNotPermitted() if (not api_utils.allow_portgroup_mode_properties() and (api_utils.is_path_updated(patch, '/mode') or api_utils.is_path_updated(patch, '/properties'))): raise exception.NotAcceptable() api_utils.patch_validate_allowed_fields(patch, PATCH_ALLOWED_FIELDS) names = api_utils.get_patch_values(patch, '/name') for name in names: if (name and not api_utils.is_valid_logical_name(name)): error_msg = _("Portgroup %(portgroup)s: Cannot change name to" " invalid name '%(name)s'") % {'portgroup': portgroup_ident, 'name': name} raise exception.ClientSideError( error_msg, status_code=http_client.BAD_REQUEST) portgroup_dict = rpc_portgroup.as_dict() # NOTE: # 1) Remove node_id because it's an internal value and # not present in the API object # 2) Add node_uuid portgroup_dict.pop('node_id') portgroup_dict['node_uuid'] = rpc_node.uuid portgroup_dict = api_utils.apply_jsonpatch(portgroup_dict, patch) if 'mode' not in portgroup_dict: msg = _("'mode' is a mandatory attribute and can not be removed") raise exception.ClientSideError(msg) try: if portgroup_dict['node_uuid'] != rpc_node.uuid: rpc_node = objects.Node.get(api.request.context, portgroup_dict['node_uuid']) except exception.NodeNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for a POST request to patch a Portgroup e.code = http_client.BAD_REQUEST # BadRequest raise api_utils.patched_validate_with_schema( portgroup_dict, PORTGROUP_PATCH_SCHEMA, PORTGROUP_PATCH_VALIDATOR) api_utils.patch_update_changed_fields( portgroup_dict, rpc_portgroup, fields=objects.Portgroup.fields, schema=PORTGROUP_PATCH_SCHEMA, id_map={'node_id': rpc_node.id} ) if 
(rpc_node.provision_state == ir_states.INSPECTING and api_utils.allow_inspect_wait_state()): msg = _('Cannot update portgroup "%(portgroup)s" on node ' '"%(node)s" while it is in state "%(state)s".') % { 'portgroup': rpc_portgroup.uuid, 'node': rpc_node.uuid, 'state': ir_states.INSPECTING} raise exception.ClientSideError(msg, status_code=http_client.CONFLICT) notify.emit_start_notification(context, rpc_portgroup, 'update', node_uuid=rpc_node.uuid) with notify.handle_error_notification(context, rpc_portgroup, 'update', node_uuid=rpc_node.uuid): topic = api.request.rpcapi.get_topic_for(rpc_node) new_portgroup = api.request.rpcapi.update_portgroup( context, rpc_portgroup, topic) api_portgroup = convert_with_links(new_portgroup) notify.emit_end_notification(context, new_portgroup, 'update', node_uuid=rpc_node.uuid) return api_portgroup @METRICS.timer('PortgroupsController.delete') @method.expose(status_code=http_client.NO_CONTENT) @args.validate(portgroup_ident=args.uuid_or_name) def delete(self, portgroup_ident): """Delete a portgroup. :param portgroup_ident: UUID or logical name of a portgroup. 
""" if not api_utils.allow_portgroups(): raise exception.NotFound() rpc_portgroup, rpc_node = api_utils.check_port_policy_and_retrieve( 'baremetal:portgroup:delete', portgroup_ident, portgroup=True) context = api.request.context if self.parent_node_ident: raise exception.OperationNotPermitted() notify.emit_start_notification(context, rpc_portgroup, 'delete', node_uuid=rpc_node.uuid) with notify.handle_error_notification(context, rpc_portgroup, 'delete', node_uuid=rpc_node.uuid): topic = api.request.rpcapi.get_topic_for(rpc_node) api.request.rpcapi.destroy_portgroup(context, rpc_portgroup, topic) notify.emit_end_notification(context, rpc_portgroup, 'delete', node_uuid=rpc_node.uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/ramdisk.py0000664000175000017500000002505500000000000022043 0ustar00zuulzuul00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client from oslo_config import cfg from oslo_log import log from pecan import rest from ironic import api from ironic.api.controllers.v1 import node as node_ctl from ironic.api.controllers.v1 import utils as api_utils from ironic.api import method from ironic.common import args from ironic.common import exception from ironic.common.i18n import _ from ironic.common import states from ironic.common import utils from ironic import objects CONF = cfg.CONF LOG = log.getLogger(__name__) _LOOKUP_RETURN_FIELDS = ['uuid', 'properties', 'instance_info', 'driver_internal_info'] AGENT_VALID_STATES = ['start', 'end', 'error'] def config(token): return { 'metrics': { 'backend': CONF.metrics.agent_backend, 'prepend_host': CONF.metrics.agent_prepend_host, 'prepend_uuid': CONF.metrics.agent_prepend_uuid, 'prepend_host_reverse': CONF.metrics.agent_prepend_host_reverse, 'global_prefix': CONF.metrics.agent_global_prefix }, 'metrics_statsd': { 'statsd_host': CONF.metrics_statsd.agent_statsd_host, 'statsd_port': CONF.metrics_statsd.agent_statsd_port }, 'heartbeat_timeout': CONF.api.ramdisk_heartbeat_timeout, 'agent_token': token, # Since this is for the Victoria release, we send this as an # explicit True statement for newer agents to lock the setting # and behavior into place. 
'agent_token_required': True, } def convert_with_links(node): token = node.driver_internal_info.get('agent_secret_token') node = node_ctl.node_convert_with_links(node, _LOOKUP_RETURN_FIELDS) return {'node': node, 'config': config(token)} class LookupController(rest.RestController): """Controller handling node lookup for a deploy ramdisk.""" def lookup_allowed(self, node): if utils.fast_track_enabled(node): return ( node.provision_state in states.FASTTRACK_LOOKUP_ALLOWED_STATES ) else: return node.provision_state in states.LOOKUP_ALLOWED_STATES @method.expose() @args.validate(addresses=args.string_list, node_uuid=args.uuid) def get_all(self, addresses=None, node_uuid=None): """Look up a node by its MAC addresses and optionally UUID. If the "restrict_lookup" option is set to True (the default), limit the search to nodes in certain transient states (e.g. deploy wait). :param addresses: list of MAC addresses for a node. :param node_uuid: UUID of a node. :raises: NotFound if requested API version does not allow this endpoint. :raises: NotFound if suitable node was not found or node's provision state is not allowed for the lookup. :raises: IncompleteLookup if neither node UUID nor any valid MAC address was provided. 
""" if not api_utils.allow_ramdisk_endpoints(): raise exception.NotFound() api_utils.check_policy('baremetal:driver:ipa_lookup') # Validate the list of MAC addresses if addresses is None: addresses = [] valid_addresses = [] invalid_addresses = [] for addr in addresses: try: mac = utils.validate_and_normalize_mac(addr) valid_addresses.append(mac) except exception.InvalidMAC: invalid_addresses.append(addr) if invalid_addresses: node_log = ('' if not node_uuid else '(Node UUID: %s)' % node_uuid) LOG.warning('The following MAC addresses "%(addrs)s" are ' 'invalid and will be ignored by the lookup ' 'request %(node)s', {'addrs': ', '.join(invalid_addresses), 'node': node_log}) if not valid_addresses and not node_uuid: raise exception.IncompleteLookup() try: if node_uuid: node = objects.Node.get_by_uuid( api.request.context, node_uuid) else: node = objects.Node.get_by_port_addresses( api.request.context, valid_addresses) except exception.NotFound: # NOTE(dtantsur): we are reraising the same exception to make sure # we don't disclose the difference between nodes that are not found # at all and nodes in a wrong state by different error messages. 
raise exception.NotFound() if CONF.api.restrict_lookup and not self.lookup_allowed(node): raise exception.NotFound() if api_utils.allow_agent_token(): try: topic = api.request.rpcapi.get_topic_for(node) except exception.NoValidHost as e: e.code = http_client.BAD_REQUEST raise found_node = api.request.rpcapi.get_node_with_token( api.request.context, node.uuid, topic=topic) else: found_node = node return convert_with_links(found_node) class HeartbeatController(rest.RestController): """Controller handling heartbeats from deploy ramdisk.""" @method.expose(status_code=http_client.ACCEPTED) @args.validate(node_ident=args.uuid_or_name, callback_url=args.string, agent_version=args.string, agent_token=args.string, agent_verify_ca=args.string, agent_status=args.string, agent_status_message=args.string) def post(self, node_ident, callback_url, agent_version=None, agent_token=None, agent_verify_ca=None, agent_status=None, agent_status_message=None): """Process a heartbeat from the deploy ramdisk. :param node_ident: the UUID or logical name of a node. :param callback_url: the URL to reach back to the ramdisk. :param agent_version: The version of the agent that is heartbeating. ``None`` indicates that the agent that is heartbeating is a version before sending agent_version was introduced so agent v3.0.0 (the last release before sending agent_version was introduced) will be assumed. :param agent_token: randomly generated validation token. :param agent_verify_ca: TLS certificate to use to connect to the agent. :param agent_status: Current status of the heartbeating agent. Used by anaconda ramdisk to send status back to Ironic. The valid states are 'start', 'end', 'error' :param agent_status_message: Optional status message describing current agent_status :raises: NodeNotFound if node with provided UUID or name was not found. :raises: InvalidUuidOrName if node_ident is not valid name or UUID. :raises: NoValidHost if RPC topic for node could not be retrieved. 
:raises: NotFound if requested API version does not allow this endpoint. """ if not api_utils.allow_ramdisk_endpoints(): raise exception.NotFound() if agent_version and not api_utils.allow_agent_version_in_heartbeat(): raise exception.InvalidParameterValue( _('Field "agent_version" not recognised')) if ((agent_status or agent_status_message) and not api_utils.allow_status_in_heartbeat()): raise exception.InvalidParameterValue( _('Fields "agent_status" and "agent_status_message" ' 'not recognised.') ) api_utils.check_policy('baremetal:node:ipa_heartbeat') if (agent_verify_ca is not None and not api_utils.allow_verify_ca_in_heartbeat()): raise exception.InvalidParameterValue( _('Field "agent_verify_ca" not recognised in this version')) rpc_node = api_utils.get_rpc_node_with_suffix(node_ident) dii = rpc_node['driver_internal_info'] agent_url = dii.get('agent_url') # If we have an agent_url on file, and we get something different # we should fail because this is unexpected behavior of the agent. if agent_url is not None and agent_url != callback_url: LOG.error('Received heartbeat for node %(node)s with ' 'callback URL %(url)s. This is not expected, ' 'and the heartbeat will not be processed.', {'node': rpc_node.uuid, 'url': callback_url}) raise exception.Invalid( _('Detected change in ramdisk provided ' '"callback_url"')) # NOTE(TheJulia): If tokens are required, lets go ahead and fail the # heartbeat very early on. if agent_token is None: LOG.error('Agent heartbeat received for node %(node)s ' 'without an agent token.', {'node': node_ident}) raise exception.InvalidParameterValue( _('Agent token is required for heartbeat processing.')) if agent_status is not None and agent_status not in AGENT_VALID_STATES: valid_states = ','.join(AGENT_VALID_STATES) LOG.error('Agent heartbeat received for node %(node)s ' 'has an invalid agent status: %(agent_status)s. 
' 'Valid states are %(valid_states)s ', {'node': node_ident, 'agent_status': agent_status, 'valid_states': valid_states}) msg = (_('Agent status is invalid. Valid states are %s.') % valid_states) raise exception.InvalidParameterValue(msg) try: topic = api.request.rpcapi.get_topic_for(rpc_node) except exception.NoValidHost as e: e.code = http_client.BAD_REQUEST raise api.request.rpcapi.heartbeat( api.request.context, rpc_node.uuid, callback_url, agent_version, agent_token, agent_verify_ca, agent_status, agent_status_message, topic=topic) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/utils.py0000664000175000017500000021157700000000000021557 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from http import client as http_client import inspect import io import re import string import jsonpatch import jsonschema from jsonschema import exceptions as json_schema_exc import os_traits from oslo_config import cfg from oslo_policy import policy as oslo_policy from oslo_utils import uuidutils from pecan import rest from ironic import api from ironic.api.controllers import link from ironic.api.controllers.v1 import versions from ironic.common import args from ironic.common import exception from ironic.common import faults from ironic.common.i18n import _ from ironic.common import policy from ironic.common import states from ironic.common import utils from ironic.conductor import steps as conductor_steps from ironic import objects from ironic.objects import fields as ofields CONF = cfg.CONF _JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchConflict, jsonpatch.JsonPatchException, jsonpatch.JsonPointerException, KeyError, IndexError) # Minimum API version to use for certain verbs MIN_VERB_VERSIONS = { # v1.4 added the MANAGEABLE state and two verbs to move nodes into # and out of that state. 
Reject requests to do this in older versions states.VERBS['manage']: versions.MINOR_4_MANAGEABLE_STATE, states.VERBS['provide']: versions.MINOR_4_MANAGEABLE_STATE, states.VERBS['inspect']: versions.MINOR_6_INSPECT_STATE, states.VERBS['abort']: versions.MINOR_13_ABORT_VERB, states.VERBS['clean']: versions.MINOR_15_MANUAL_CLEAN, states.VERBS['adopt']: versions.MINOR_17_ADOPT_VERB, states.VERBS['rescue']: versions.MINOR_38_RESCUE_INTERFACE, states.VERBS['unrescue']: versions.MINOR_38_RESCUE_INTERFACE, states.VERBS['deploy']: versions.MINOR_73_DEPLOY_UNDEPLOY_VERBS, states.VERBS['undeploy']: versions.MINOR_73_DEPLOY_UNDEPLOY_VERBS, } V31_FIELDS = [ 'boot_interface', 'console_interface', 'deploy_interface', 'inspect_interface', 'management_interface', 'power_interface', 'raid_interface', 'vendor_interface', ] STANDARD_TRAITS = os_traits.get_traits() CUSTOM_TRAIT_PATTERN = "^%s[A-Z0-9_]+$" % os_traits.CUSTOM_NAMESPACE CUSTOM_TRAIT_REGEX = re.compile(CUSTOM_TRAIT_PATTERN) TRAITS_SCHEMA = {'anyOf': [ {'type': 'string', 'minLength': 1, 'maxLength': 255, 'pattern': CUSTOM_TRAIT_PATTERN}, {'type': 'string', 'enum': STANDARD_TRAITS}, ]} LOCAL_LINK_BASE_SCHEMA = { 'type': 'object', 'properties': { 'port_id': {'type': 'string'}, 'switch_id': {'type': 'string'}, 'hostname': {'type': 'string'}, 'switch_info': {'type': 'string'}, 'network_type': {'type': 'string', 'enum': ['managed', 'unmanaged']}, }, 'additionalProperties': False } LOCAL_LINK_SCHEMA = copy.deepcopy(LOCAL_LINK_BASE_SCHEMA) # set mandatory fields for a local link LOCAL_LINK_SCHEMA['required'] = ['port_id', 'switch_id'] LOCAL_LINK_SMART_NIC_SCHEMA = copy.deepcopy(LOCAL_LINK_BASE_SCHEMA) # set mandatory fields for a smart nic LOCAL_LINK_SMART_NIC_SCHEMA['required'] = ['port_id', 'hostname'] # no other mandatory fields for a network_type=unmanaged link LOCAL_LINK_UNMANAGED_SCHEMA = copy.deepcopy(LOCAL_LINK_BASE_SCHEMA) LOCAL_LINK_UNMANAGED_SCHEMA['properties']['network_type']['enum'] = [ 'unmanaged'] 
LOCAL_LINK_UNMANAGED_SCHEMA['required'] = ['network_type'] LOCAL_LINK_CONN_SCHEMA = {'anyOf': [ LOCAL_LINK_SCHEMA, LOCAL_LINK_SMART_NIC_SCHEMA, LOCAL_LINK_UNMANAGED_SCHEMA, {'type': 'object', 'additionalProperties': False}, ]} DEPLOY_STEP_SCHEMA = { 'type': 'object', 'properties': { 'args': {'type': 'object'}, 'interface': { 'type': 'string', 'enum': list(conductor_steps.DEPLOYING_INTERFACE_PRIORITY) }, 'priority': {'anyOf': [ {'type': 'integer', 'minimum': 0}, {'type': 'string', 'minLength': 1, 'pattern': '^[0-9]+$'} ]}, 'step': {'type': 'string', 'minLength': 1}, }, 'required': ['interface', 'step', 'args', 'priority'], 'additionalProperties': False, } def local_link_normalize(name, value): if not value: return value # Check switch_id is either a valid mac address or # OpenFlow datapath_id and normalize it. try: value['switch_id'] = utils.validate_and_normalize_mac( value['switch_id']) except exception.InvalidMAC: try: value['switch_id'] = utils.validate_and_normalize_datapath_id( value['switch_id']) except exception.InvalidDatapathID: raise exception.InvalidSwitchID(switch_id=value['switch_id']) except KeyError: # In Smart NIC case 'switch_id' is optional. pass return value LOCAL_LINK_VALIDATOR = args.and_valid( args.schema(LOCAL_LINK_CONN_SCHEMA), local_link_normalize ) LOCAL_LINK_SMART_NIC_VALIDATOR = args.schema(LOCAL_LINK_SMART_NIC_SCHEMA) def object_to_dict(obj, include_created_at=True, include_updated_at=True, include_uuid=True, link_resource=None, link_resource_args=None, fields=None): """Helper function to convert RPC objects to REST API dicts. 
:param obj: RPC object to convert to a dict :param include_created_at: Whether to include standard base class attribute created_at :param include_updated_at: Whether to include standard base class attribute updated_at :param include_uuid: Whether to include standard base class attribute uuid :param link_resource: When specified, generate a ``links`` value with a ``self`` and ``bookmark`` using this resource name :param link_resource_args: Resource arguments to be added to generated links. When not specified, the object ``uuid`` will be used. :param fields: Key names for dict values to populate directly from object attributes :returns: A dict containing values from the object """ url = api.request.public_url to_dict = {} all_fields = [] if include_uuid: all_fields.append('uuid') if include_created_at: all_fields.append('created_at') if include_updated_at: all_fields.append('updated_at') if fields: all_fields.extend(fields) for field in all_fields: value = to_dict[field] = getattr(obj, field) empty_value = None if isinstance(obj.fields[field], ofields.ListOfStringsField): empty_value = [] elif isinstance(obj.fields[field], ofields.FlexibleDictField): empty_value = {} elif isinstance(obj.fields[field], ofields.DateTimeField): if value: value = value.isoformat() if value is not None: to_dict[field] = value else: to_dict[field] = empty_value if link_resource: if not link_resource_args: link_resource_args = obj.uuid to_dict['links'] = [ link.make_link('self', url, link_resource, link_resource_args), link.make_link('bookmark', url, link_resource, link_resource_args, bookmark=True) ] return to_dict def populate_node_uuid(obj, to_dict): """Look up the node referenced in the object and populate a dict. 
The node is fetched with the object ``node_id`` attribute and the dict ``node_uuid`` value is populated with the node uuid :param obj: object to get the node_id attribute :param to_dict: dict to populate with a ``node_uuid`` value :raises: exception.NodeNotFound if the node is not found """ if not obj.node_id: to_dict['node_uuid'] = None return to_dict['node_uuid'] = objects.Node.get_by_id( api.request.context, obj.node_id).uuid def replace_node_uuid_with_id(to_dict): """Replace ``node_uuid`` dict value with ``node_id`` ``node_id`` is found by fetching the node by uuid lookup. :param to_dict: Dict to set ``node_id`` value on :returns: The node object from the lookup :raises: NodeNotFound with status_code set to 400 BAD_REQUEST when node is not found. """ try: node = objects.Node.get_by_uuid(api.request.context, to_dict.pop('node_uuid')) to_dict['node_id'] = node.id # if they cannot get the node, then this will error # helping guard access to all users of this method as # users which may have rights at a minimum need to be able # to see the node they are trying to do something with. check_owner_policy('node', 'baremetal:node:get', node['owner'], node['lessee'], conceal_node=node.uuid) except exception.NodeNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for requests acting on non-nodes e.code = http_client.BAD_REQUEST # BadRequest raise return node def replace_node_id_with_uuid(to_dict): """Replace ``node_id`` dict value with ``node_uuid`` ``node_uuid`` is found by fetching the node by id lookup. :param to_dict: Dict to set ``node_uuid`` value on :returns: The node object from the lookup :raises: NodeNotFound with status_code set to 400 BAD_REQUEST when node is not found. 
""" try: node = objects.Node.get_by_id(api.request.context, to_dict.pop('node_id')) to_dict['node_uuid'] = node.uuid except exception.NodeNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for requests acting on non-nodes e.code = http_client.BAD_REQUEST # BadRequest raise return node def patch_update_changed_fields(from_dict, rpc_object, fields, schema, id_map=None): """Update rpc object based on changed fields in a dict. Only fields which have a corresponding schema field are updated when changed. Other values can be updated using the id_map. :param from_dict: Dict containing changed field values :param rpc_object: Object to update changed fields on :param fields: Field names on the rpc object :param schema: jsonschema to get field names of the dict :param id_map: Optional dict mapping object field names to arbitrary values when there is no matching field in the schema """ schema_fields = schema['properties'] def _patch_val(field, patch_val): if field in rpc_object and rpc_object[field] != patch_val: rpc_object[field] = patch_val for field in fields: if id_map and field in id_map: _patch_val(field, id_map[field]) elif field in schema_fields: _patch_val(field, from_dict.get(field)) def patched_validate_with_schema(patched_dict, schema, validator=None): """Validate a patched dict object against a validator or schema. This function has the side-effect of deleting any dict value which is not in the schema. This allows database-loaded objects to be pruned of their internal values before validation. :param patched_dict: dict representation of the object with patch updates applied :param schema: Any dict key not in the schema will be deleted from the dict. 
If no validator is specified then the resulting ``patched_dict`` will be validated agains the schema :param validator: Optional validator to use if there is extra validation required beyond the schema :raises: exception.Invalid if validation fails """ schema_fields = schema['properties'] for field in set(patched_dict): if field not in schema_fields: patched_dict.pop(field, None) if not validator: validator = args.schema(schema) validator('patch', patched_dict) def patch_validate_allowed_fields(patch, allowed_fields): """Validate that a patch list only modifies allowed fields. :param patch: List of patch dicts to validate :param allowed_fields: List of fields which are allowed to be patched :returns: The list of fields which will be patched :raises: exception.Invalid if any patch changes a field not in ``allowed_fields`` """ fields = set() for p in patch: path = p['path'].split('/')[1] if path not in allowed_fields: msg = _("Cannot patch %s. Only the following can be updated: %s") raise exception.Invalid( msg % (p['path'], ', '.join(allowed_fields))) fields.add(path) return fields def sanitize_dict(to_sanitize, fields): """Removes sensitive and unrequested data. Will only keep the fields specified in the ``fields`` parameter (plus the ``links`` field). :param to_sanitize: dict to sanitize :param fields: list of fields to preserve, or ``None`` to preserve them all :type fields: list of str """ if fields is None: return for key in set(to_sanitize): if key not in fields and key != 'links': to_sanitize.pop(key, None) def validate_limit(limit): if limit is None: return CONF.api.max_limit if limit <= 0: raise exception.ClientSideError(_("Limit must be positive")) return min(CONF.api.max_limit, limit) def validate_sort_dir(sort_dir): if sort_dir not in ['asc', 'desc']: raise exception.ClientSideError(_("Invalid sort direction: %s. 
" "Acceptable values are " "'asc' or 'desc'") % sort_dir) return sort_dir def apply_jsonpatch(doc, patch): """Apply a JSON patch, one operation at a time. If the patch fails to apply, this allows us to determine which operation failed, making the error message a little less cryptic. :param doc: The JSON document to patch. :param patch: The JSON patch to apply. :returns: The result of the patch operation. :raises: PatchError if the patch fails to apply. :raises: exception.ClientSideError if the patch adds a new root attribute. """ # Prevent removal of root attributes. for p in patch: if p['op'] == 'add' and p['path'].count('/') == 1: if p['path'].lstrip('/') not in doc: msg = _('Adding a new attribute (%s) to the root of ' 'the resource is not allowed') raise exception.ClientSideError(msg % p['path']) # Apply operations one at a time, to improve error reporting. for patch_op in patch: try: doc = jsonpatch.apply_patch(doc, jsonpatch.JsonPatch([patch_op])) except _JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch_op, reason=e) return doc def get_patch_values(patch, path): """Get the patch values corresponding to the specified path. If there are multiple values specified for the same path, for example :: [{'op': 'add', 'path': '/name', 'value': 'abc'}, {'op': 'add', 'path': '/name', 'value': 'bca'}] return all of them in a list (preserving order) :param patch: HTTP PATCH request body. :param path: the path to get the patch values for. :returns: list of values for the specified path in the patch. """ return [p['value'] for p in patch if p['path'] == path and p['op'] != 'remove'] def is_path_removed(patch, path): """Returns whether the patch includes removal of the path (or subpath of). :param patch: HTTP PATCH request body. :param path: the path to check. :returns: True if path or subpath being removed, False otherwise. 
""" path = path.rstrip('/') for p in patch: if ((p['path'] == path or p['path'].startswith(path + '/')) and p['op'] == 'remove'): return True def is_path_updated(patch, path): """Returns whether the patch includes operation on path (or its subpath). :param patch: HTTP PATCH request body. :param path: the path to check. :returns: True if path or subpath being patched, False otherwise. """ path = path.rstrip('/') for p in patch: return p['path'] == path or p['path'].startswith(path + '/') def allow_node_logical_names(): # v1.5 added logical name aliases return api.request.version.minor >= versions.MINOR_5_NODE_NAME def _get_with_suffix(get_func, ident, exc_class): """Helper to get a resource taking into account API .json suffix.""" try: return get_func(ident) except exc_class: if not api.request.environ['HAS_JSON_SUFFIX']: raise # NOTE(dtantsur): strip .json prefix to maintain compatibility # with the guess_content_type_from_ext feature. Try to return it # back if the resulting resource was not found. return get_func(ident + '.json') def get_rpc_node(node_ident): """Get the RPC node from the node uuid or logical name. :param node_ident: the UUID or logical name of a node. :returns: The RPC Node. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: NodeNotFound if the node is not found. """ # Check to see if the node_ident is a valid UUID. If it is, treat it # as a UUID. if uuidutils.is_uuid_like(node_ident): return objects.Node.get_by_uuid(api.request.context, node_ident) # We can refer to nodes by their name, if the client supports it if allow_node_logical_names(): if is_valid_logical_name(node_ident): return objects.Node.get_by_name(api.request.context, node_ident) raise exception.InvalidUuidOrName(name=node_ident) # Ensure we raise the same exception as we did for the Juno release raise exception.NodeNotFound(node=node_ident) def get_rpc_node_with_suffix(node_ident): """Get the RPC node from the node uuid or logical name. 
If HAS_JSON_SUFFIX flag is set in the pecan environment, try also looking for node_ident with '.json' suffix. Otherwise identical to get_rpc_node. :param node_ident: the UUID or logical name of a node. :returns: The RPC Node. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: NodeNotFound if the node is not found. """ return _get_with_suffix(get_rpc_node, node_ident, exception.NodeNotFound) def get_rpc_portgroup(portgroup_ident): """Get the RPC portgroup from the portgroup UUID or logical name. :param portgroup_ident: the UUID or logical name of a portgroup. :returns: The RPC portgroup. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: PortgroupNotFound if the portgroup is not found. """ # Check to see if the portgroup_ident is a valid UUID. If it is, treat it # as a UUID. if uuidutils.is_uuid_like(portgroup_ident): return objects.Portgroup.get_by_uuid(api.request.context, portgroup_ident) # We can refer to portgroups by their name if utils.is_valid_logical_name(portgroup_ident): return objects.Portgroup.get_by_name(api.request.context, portgroup_ident) raise exception.InvalidUuidOrName(name=portgroup_ident) def get_rpc_portgroup_with_suffix(portgroup_ident): """Get the RPC portgroup from the portgroup UUID or logical name. If HAS_JSON_SUFFIX flag is set in the pecan environment, try also looking for portgroup_ident with '.json' suffix. Otherwise identical to get_rpc_portgroup. :param portgroup_ident: the UUID or logical name of a portgroup. :returns: The RPC portgroup. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: PortgroupNotFound if the portgroup is not found. """ return _get_with_suffix(get_rpc_portgroup, portgroup_ident, exception.PortgroupNotFound) def get_rpc_allocation(allocation_ident): """Get the RPC allocation from the allocation UUID or logical name. :param allocation_ident: the UUID or logical name of an allocation. :returns: The RPC allocation. 
:raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: AllocationNotFound if the allocation is not found. """ # Check to see if the allocation_ident is a valid UUID. If it is, treat it # as a UUID. if uuidutils.is_uuid_like(allocation_ident): return objects.Allocation.get_by_uuid(api.request.context, allocation_ident) # We can refer to allocations by their name if utils.is_valid_logical_name(allocation_ident): return objects.Allocation.get_by_name(api.request.context, allocation_ident) raise exception.InvalidUuidOrName(name=allocation_ident) def get_rpc_allocation_with_suffix(allocation_ident): """Get the RPC allocation from the allocation UUID or logical name. If HAS_JSON_SUFFIX flag is set in the pecan environment, try also looking for allocation_ident with '.json' suffix. Otherwise identical to get_rpc_allocation. :param allocation_ident: the UUID or logical name of an allocation. :returns: The RPC allocation. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: AllocationNotFound if the allocation is not found. """ return _get_with_suffix(get_rpc_allocation, allocation_ident, exception.AllocationNotFound) def get_rpc_deploy_template(template_ident): """Get the RPC deploy template from the UUID or logical name. :param template_ident: the UUID or logical name of a deploy template. :returns: The RPC deploy template. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: DeployTemplateNotFound if the deploy template is not found. """ # Check to see if the template_ident is a valid UUID. If it is, treat it # as a UUID. 
if uuidutils.is_uuid_like(template_ident): return objects.DeployTemplate.get_by_uuid(api.request.context, template_ident) # We can refer to templates by their name if utils.is_valid_logical_name(template_ident): return objects.DeployTemplate.get_by_name(api.request.context, template_ident) raise exception.InvalidUuidOrName(name=template_ident) def get_rpc_deploy_template_with_suffix(template_ident): """Get the RPC deploy template from the UUID or logical name. If HAS_JSON_SUFFIX flag is set in the pecan environment, try also looking for template_ident with '.json' suffix. Otherwise identical to get_rpc_deploy_template. :param template_ident: the UUID or logical name of a deploy template. :returns: The RPC deploy template. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: DeployTemplateNotFound if the deploy template is not found. """ return _get_with_suffix(get_rpc_deploy_template, template_ident, exception.DeployTemplateNotFound) def is_valid_node_name(name): """Determine if the provided name is a valid node name. Check to see that the provided node name is valid, and isn't a UUID. :param name: the node name to check. :returns: True if the name is valid, False otherwise. """ return is_valid_logical_name(name) and not uuidutils.is_uuid_like(name) def is_valid_logical_name(name): """Determine if the provided name is a valid hostname.""" if api.request.version.minor < versions.MINOR_10_UNRESTRICTED_NODE_NAME: return utils.is_hostname_safe(name) else: return utils.is_valid_logical_name(name) class PassthruResponse(object): """Object to hold the "response" from a passthru call""" def __init__(self, obj, status_code=None): #: Store the result object from the view self.obj = obj #: Store an optional status_code self.status_code = status_code def vendor_passthru(ident, method, topic, data=None, driver_passthru=False): """Call a vendor passthru API extension. 
Call the vendor passthru API extension and process the method response to set the right return code for methods that are asynchronous or synchronous; Attach the return value to the response object if it's being served statically. :param ident: The resource identification. For node's vendor passthru this is the node's UUID, for driver's vendor passthru this is the driver's name. :param method: The vendor method name. :param topic: The RPC topic. :param data: The data passed to the vendor method. Defaults to None. :param driver_passthru: Boolean value. Whether this is a node or driver vendor passthru. Defaults to False. :returns: A WSME response object to be returned by the API. """ if not method: raise exception.ClientSideError(_("Method not specified")) if data is None: data = {} http_method = api.request.method.upper() params = (api.request.context, ident, method, http_method, data, topic) if driver_passthru: response = api.request.rpcapi.driver_vendor_passthru(*params) else: response = api.request.rpcapi.vendor_passthru(*params) status_code = http_client.ACCEPTED if response['async'] else http_client.OK return_value = response['return'] # Attach the return value to the response object if response.get('attach'): if isinstance(return_value, str): # If unicode, convert to bytes return_value = return_value.encode('utf-8') return_value = io.BytesIO(return_value) return PassthruResponse(return_value, status_code=status_code) def check_for_invalid_fields(fields, object_fields): """Check for requested non-existent fields. Check if the user requested non-existent fields. :param fields: A list of fields requested by the user :object_fields: A list of fields supported by the object. :raises: InvalidParameterValue if invalid fields were requested. 
""" invalid_fields = set(fields) - set(object_fields) if invalid_fields: raise exception.InvalidParameterValue( _('Field(s) "%s" are not valid') % ', '.join(invalid_fields)) def check_allow_specify_fields(fields): """Check if fetching a subset of the resource attributes is allowed. Version 1.8 of the API allows fetching a subset of the resource attributes, this method checks if the required version is being requested. """ if (fields is not None and api.request.version.minor < versions.MINOR_8_FETCHING_SUBSET_OF_FIELDS): raise exception.NotAcceptable() VERSIONED_FIELDS = { 'driver_internal_info': versions.MINOR_3_DRIVER_INTERNAL_INFO, 'name': versions.MINOR_5_NODE_NAME, 'inspection_finished_at': versions.MINOR_6_INSPECT_STATE, 'inspection_started_at': versions.MINOR_6_INSPECT_STATE, 'clean_step': versions.MINOR_7_NODE_CLEAN, 'raid_config': versions.MINOR_12_RAID_CONFIG, 'target_raid_config': versions.MINOR_12_RAID_CONFIG, 'network_interface': versions.MINOR_20_NETWORK_INTERFACE, 'resource_class': versions.MINOR_21_RESOURCE_CLASS, 'storage_interface': versions.MINOR_33_STORAGE_INTERFACE, 'traits': versions.MINOR_37_NODE_TRAITS, 'rescue_interface': versions.MINOR_38_RESCUE_INTERFACE, 'bios_interface': versions.MINOR_40_BIOS_INTERFACE, 'fault': versions.MINOR_42_FAULT, 'deploy_step': versions.MINOR_44_NODE_DEPLOY_STEP, 'conductor_group': versions.MINOR_46_NODE_CONDUCTOR_GROUP, 'automated_clean': versions.MINOR_47_NODE_AUTOMATED_CLEAN, 'protected': versions.MINOR_48_NODE_PROTECTED, 'protected_reason': versions.MINOR_48_NODE_PROTECTED, 'conductor': versions.MINOR_49_CONDUCTORS, 'owner': versions.MINOR_50_NODE_OWNER, 'description': versions.MINOR_51_NODE_DESCRIPTION, 'allocation_uuid': versions.MINOR_52_ALLOCATION, 'events': versions.MINOR_54_EVENTS, 'retired': versions.MINOR_61_NODE_RETIRED, 'retired_reason': versions.MINOR_61_NODE_RETIRED, 'lessee': versions.MINOR_65_NODE_LESSEE, 'network_data': versions.MINOR_66_NODE_NETWORK_DATA, 'boot_mode': 
versions.MINOR_75_NODE_BOOT_MODE, 'secure_boot': versions.MINOR_75_NODE_BOOT_MODE, } for field in V31_FIELDS: VERSIONED_FIELDS[field] = versions.MINOR_31_DYNAMIC_INTERFACES def allow_field(field): """Check if a field is allowed in the current version.""" return api.request.version.minor >= VERSIONED_FIELDS[field] def disallowed_fields(): """Generator of fields not allowed in the current request.""" for field in VERSIONED_FIELDS: if not allow_field(field): yield field def check_allowed_fields(fields): """Check if fetching a particular field is allowed. This method checks if the required version is being requested for fields that are only allowed to be fetched in a particular API version. """ if fields is None: return for field in disallowed_fields(): if field in fields: raise exception.NotAcceptable() def check_allowed_portgroup_fields(fields): """Check if fetching a particular field of a portgroup is allowed. This method checks if the required version is being requested for fields that are only allowed to be fetched in a particular API version. """ if fields is None: return if (('mode' in fields or 'properties' in fields) and not allow_portgroup_mode_properties()): raise exception.NotAcceptable() def check_allow_management_verbs(verb): min_version = MIN_VERB_VERSIONS.get(verb) if min_version is not None and api.request.version.minor < min_version: raise exception.NotAcceptable() def check_for_invalid_state_and_allow_filter(provision_state): """Check if filtering nodes by provision state is allowed. Version 1.9 of the API allows filter nodes by provision state. """ if provision_state is not None: if (api.request.version.minor < versions.MINOR_9_PROVISION_STATE_FILTER): raise exception.NotAcceptable() valid_states = states.machine.states if provision_state not in valid_states: raise exception.InvalidParameterValue( _('Provision state "%s" is not valid') % provision_state) def check_allow_specify_driver(driver): """Check if filtering nodes by driver is allowed. 
Version 1.16 of the API allows filter nodes by driver. """ if (driver is not None and api.request.version.minor < versions.MINOR_16_DRIVER_FILTER): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_16_DRIVER_FILTER}) def check_allow_specify_resource_class(resource_class): """Check if filtering nodes by resource_class is allowed. Version 1.21 of the API allows filtering nodes by resource_class. """ if (resource_class is not None and api.request.version.minor < versions.MINOR_21_RESOURCE_CLASS): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_21_RESOURCE_CLASS}) def check_allow_filter_driver_type(driver_type): """Check if filtering drivers by classic/dynamic is allowed. Version 1.30 of the API allows this. """ if driver_type is not None and not allow_dynamic_drivers(): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_30_DYNAMIC_DRIVERS}) def check_allow_driver_detail(detail): """Check if getting detailed driver info is allowed. Version 1.30 of the API allows this. """ if detail is not None and not allow_dynamic_drivers(): raise exception.NotAcceptable(_( "Request not acceptable. 
The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_30_DYNAMIC_DRIVERS}) _CONFIG_DRIVE_SCHEMA = { 'anyOf': [ { 'type': 'object', 'properties': { 'meta_data': {'type': 'object'}, 'network_data': {'type': 'object'}, 'user_data': { 'type': ['object', 'array', 'string', 'null'] }, 'vendor_data': {'type': 'object'}, }, 'additionalProperties': False }, { 'type': ['string', 'null'] } ] } # Include newlines and spaces since they're common in base64 values. _B64_ALPHABET = frozenset(string.ascii_letters + string.digits + '+/=\n\r\t ') def check_allow_configdrive(target, configdrive=None): if not configdrive: return allowed_targets = [states.ACTIVE] if allow_node_rebuild_with_configdrive(): allowed_targets.append(states.REBUILD) if target not in allowed_targets: msg = (_('Adding a config drive is only supported when setting ' 'provision state to %s') % ', '.join(allowed_targets)) raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) try: jsonschema.validate(configdrive, _CONFIG_DRIVE_SCHEMA) except json_schema_exc.ValidationError as e: msg = _('Invalid configdrive format: %s') % e raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) if isinstance(configdrive, dict): if not allow_build_configdrive(): msg = _('Providing a JSON object for configdrive is only supported' ' starting with API version %(base)s.%(opr)s') % { 'base': versions.BASE_VERSION, 'opr': versions.MINOR_56_BUILD_CONFIGDRIVE} raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) if ('vendor_data' in configdrive and not allow_configdrive_vendor_data()): msg = _('Providing vendor_data in configdrive is only supported' ' starting with API version %(base)s.%(opr)s') % { 'base': versions.BASE_VERSION, 'opr': versions.MINOR_59_CONFIGDRIVE_VENDOR_DATA} raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) else: # : is not a valid base64 symbol, so we can use this 
simple check if '://' in configdrive: return # This is not 100% robust but it does solve the case of invalid # JSON assumed to be a base64 string. letters = set(configdrive) if letters - _B64_ALPHABET: msg = _('Invalid configdrive format: it is neither a JSON, nor ' 'a URL, nor a base64 string') raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) def check_allow_filter_by_fault(fault): """Check if filtering nodes by fault is allowed. Version 1.42 of the API allows filtering nodes by fault. """ if (fault is not None and api.request.version.minor < versions.MINOR_42_FAULT): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_42_FAULT}) if fault is not None and fault not in faults.VALID_FAULTS: msg = (_('Unrecognized fault "%(fault)s" is specified, allowed faults ' 'are %(valid_faults)s') % {'fault': fault, 'valid_faults': faults.VALID_FAULTS}) raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) def check_allow_filter_by_conductor_group(conductor_group): """Check if filtering nodes by conductor_group is allowed. Version 1.46 of the API allows filtering nodes by conductor_group. """ if (conductor_group is not None and api.request.version.minor < versions.MINOR_46_NODE_CONDUCTOR_GROUP): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_46_NODE_CONDUCTOR_GROUP}) def check_allow_filter_by_owner(owner): """Check if filtering nodes by owner is allowed. Version 1.50 of the API allows filtering nodes by owner. """ if (owner is not None and api.request.version.minor < versions.MINOR_50_NODE_OWNER): raise exception.NotAcceptable(_( "Request not acceptable. 
The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_50_NODE_OWNER}) def check_allow_filter_by_lessee(lessee): """Check if filtering nodes by lessee is allowed. Version 1.62 of the API allows filtering nodes by lessee. """ if (lessee is not None and api.request.version.minor < versions.MINOR_65_NODE_LESSEE): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_65_NODE_LESSEE}) def initial_node_provision_state(): """Return node state to use by default when creating new nodes. Previously the default state for new nodes was AVAILABLE. Starting with API 1.11 it is ENROLL. """ return (states.AVAILABLE if api.request.version.minor < versions.MINOR_11_ENROLL_STATE else states.ENROLL) def allow_raid_config(): """Check if RAID configuration is allowed for the node. Version 1.12 of the API allows RAID configuration for the node. """ return api.request.version.minor >= versions.MINOR_12_RAID_CONFIG def allow_soft_power_off(): """Check if Soft Power Off is allowed for the node. Version 1.27 of the API allows Soft Power Off, including Soft Reboot, for the node. """ return api.request.version.minor >= versions.MINOR_27_SOFT_POWER_OFF def allow_inject_nmi(): """Check if Inject NMI is allowed for the node. Version 1.29 of the API allows Inject NMI for the node. """ return api.request.version.minor >= versions.MINOR_29_INJECT_NMI def allow_links_node_states_and_driver_properties(): """Check if links are displayable. Version 1.14 of the API allows the display of links to node states and driver properties. """ return (api.request.version.minor >= versions.MINOR_14_LINKS_NODESTATES_DRIVERPROPERTIES) def allow_port_internal_info(): """Check if accessing internal_info is allowed for the port. Version 1.18 of the API exposes internal_info readonly field for the port. 
""" return (api.request.version.minor >= versions.MINOR_18_PORT_INTERNAL_INFO) def allow_port_advanced_net_fields(): """Check if we should return local_link_connection and pxe_enabled fields. Version 1.19 of the API added support for these new fields in port object. """ return (api.request.version.minor >= versions.MINOR_19_PORT_ADVANCED_NET_FIELDS) def allow_ramdisk_endpoints(): """Check if heartbeat and lookup endpoints are allowed. Version 1.22 of the API introduced them. """ return api.request.version.minor >= versions.MINOR_22_LOOKUP_HEARTBEAT def allow_portgroups(): """Check if we should support portgroup operations. Version 1.23 of the API added support for PortGroups. """ return (api.request.version.minor >= versions.MINOR_23_PORTGROUPS) def allow_portgroups_subcontrollers(): """Check if portgroups can be used as subcontrollers. Version 1.24 of the API added support for Portgroups as subcontrollers """ return (api.request.version.minor >= versions.MINOR_24_PORTGROUPS_SUBCONTROLLERS) def allow_remove_chassis_uuid(): """Check if chassis_uuid can be removed from node. Version 1.25 of the API added support for chassis_uuid removal """ return (api.request.version.minor >= versions.MINOR_25_UNSET_CHASSIS_UUID) def allow_portgroup_mode_properties(): """Check if mode and properties can be added to/queried from a portgroup. Version 1.26 of the API added mode and properties fields to portgroup object. """ return (api.request.version.minor >= versions.MINOR_26_PORTGROUP_MODE_PROPERTIES) def allow_vifs_subcontroller(): """Check if node/vifs can be used. Version 1.28 of the API added support for VIFs to be attached to Nodes. """ return (api.request.version.minor >= versions.MINOR_28_VIFS_SUBCONTROLLER) def allow_dynamic_drivers(): """Check if dynamic driver API calls are allowed. Version 1.30 of the API added support for all of the driver composition related calls in the /v1/drivers API. 
""" return (api.request.version.minor >= versions.MINOR_30_DYNAMIC_DRIVERS) def allow_dynamic_interfaces(): """Check if dynamic interface fields are allowed. Version 1.31 of the API added support for viewing and setting the fields in ``V31_FIELDS`` on the node object. """ return (api.request.version.minor >= versions.MINOR_31_DYNAMIC_INTERFACES) def allow_volume(): """Check if volume connectors and targets are allowed. Version 1.32 of the API added support for volume connectors and targets """ return api.request.version.minor >= versions.MINOR_32_VOLUME def allow_storage_interface(): """Check if we should support storage_interface node and driver fields. Version 1.33 of the API added support for storage interfaces. """ return (api.request.version.minor >= versions.MINOR_33_STORAGE_INTERFACE) def allow_port_physical_network(): """Check if port physical network field is allowed. Version 1.34 of the API added the physical network field to the port object. We also check whether the target version of the Port object supports the physical_network field as this may not be the case during a rolling upgrade. """ return ((api.request.version.minor >= versions.MINOR_34_PORT_PHYSICAL_NETWORK) and objects.Port.supports_physical_network()) def allow_node_rebuild_with_configdrive(): """Check if we should support node rebuild with configdrive. Version 1.35 of the API added support for node rebuild with configdrive. """ return (api.request.version.minor >= versions.MINOR_35_REBUILD_CONFIG_DRIVE) def allow_agent_version_in_heartbeat(): """Check if agent version is allowed to be passed into heartbeat. Version 1.36 of the API added the ability for agents to pass their version information to Ironic on heartbeat. """ return (api.request.version.minor >= versions.MINOR_36_AGENT_VERSION_HEARTBEAT) def allow_rescue_interface(): """Check if we should support rescue and unrescue operations and interface. Version 1.38 of the API added support for rescue and unrescue. 
""" return api.request.version.minor >= versions.MINOR_38_RESCUE_INTERFACE def allow_bios_interface(): """Check if we should support bios interface and endpoints. Version 1.40 of the API added support for bios interface. """ return api.request.version.minor >= versions.MINOR_40_BIOS_INTERFACE def get_controller_reserved_names(cls): """Get reserved names for a given controller. Inspect the controller class and return the reserved names within it. Reserved names are names that can not be used as an identifier for a resource because the names are either being used as a custom action or is the name of a nested controller inside the given class. :param cls: The controller class to be inspected. """ reserved_names = [ name for name, member in inspect.getmembers(cls) if isinstance(member, rest.RestController)] if hasattr(cls, '_custom_actions'): reserved_names += list(cls._custom_actions) return reserved_names def allow_traits(): """Check if traits are allowed for the node. Version 1.37 of the API allows traits for the node. """ return api.request.version.minor >= versions.MINOR_37_NODE_TRAITS def allow_inspect_wait_state(): """Check if inspect wait is allowed for the node. Version 1.39 of the API adds 'inspect wait' state to substitute 'inspecting' state during asynchronous hardware inspection. """ return api.request.version.minor >= versions.MINOR_39_INSPECT_WAIT def allow_inspect_abort(): """Check if inspection abort is allowed. Version 1.41 of the API added support for inspection abort """ return api.request.version.minor >= versions.MINOR_41_INSPECTION_ABORT def allow_detail_query(): """Check if passing a detail=True query string is allowed. Version 1.43 allows a user to pass the detail query string to list the resource with all the fields. 
""" return api.request.version.minor >= versions.MINOR_43_ENABLE_DETAIL_QUERY def allow_query_bios(): """Check if BIOS queries should be allowed based on version""" return api.request.version.minor >= versions.MINOR_74_BIOS_REGISTRY def allow_reset_interfaces(): """Check if passing a reset_interfaces query string is allowed.""" return api.request.version.minor >= versions.MINOR_45_RESET_INTERFACES def allow_node_history(): """Check if node history access is permitted by API version.""" return api.request.version.minor >= versions.MINOR_78_NODE_HISTORY def get_request_return_fields(fields, detail, default_fields, check_detail_version=allow_detail_query, check_fields_version=None): """Calculate fields to return from an API request The fields query and detail=True query can not be passed into a request at the same time. To use the detail query we need to be on a version of the API greater than expected, likewise some APIs require a certain version for the fields query. This function raises an InvalidParameterValue exception if any of these conditions are not met. If these checks pass then this function will return either the fields passed in or the default fields provided. :param fields: The fields query passed into the API request. :param detail: The detail query passed into the API request. :param default_fields: The default fields to return if fields=None and detail=None. :param check_detail_version: Function to check if detail query is allowed based on the version. :param check_fields_version: Function to check if fields query is allowed based on the version. :raises: InvalidParameterValue if there is an invalid combination of query strings or API version. :returns: 'fields' passed in value or 'default_fields' """ if detail is not None and not check_detail_version(): raise exception.InvalidParameterValue( "Invalid query parameter ?detail=%s received." 
% detail) if (fields is not None and callable(check_fields_version) and not check_fields_version()): raise exception.InvalidParameterValue( "Invalid query parameter ?fields=%s received." % fields) if fields is not None and detail: raise exception.InvalidParameterValue( "Can not specify ?detail=True and fields in the same request.") if fields is None and not detail: return default_fields return fields def allow_expose_conductors(): """Check if accessing conductor endpoints is allowed. Version 1.49 of the API exposed conductor endpoints and conductor field for the node. """ return api.request.version.minor >= versions.MINOR_49_CONDUCTORS def check_allow_filter_by_conductor(conductor): """Check if filtering nodes by conductor is allowed. Version 1.49 of the API allows filtering nodes by conductor. """ if conductor is not None and not allow_expose_conductors(): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_49_CONDUCTORS}) def allow_allocations(): """Check if accessing allocation endpoints is allowed. Version 1.52 of the API exposed allocation endpoints and allocation_uuid field for the node. """ return api.request.version.minor >= versions.MINOR_52_ALLOCATION def allow_port_is_smartnic(): """Check if port is_smartnic field is allowed. Version 1.53 of the API added is_smartnic field to the port object. """ return ((api.request.version.minor >= versions.MINOR_53_PORT_SMARTNIC) and objects.Port.supports_is_smartnic()) def allow_expose_events(): """Check if accessing events endpoint is allowed. Version 1.54 of the API added the events endpoint. """ return api.request.version.minor >= versions.MINOR_54_EVENTS def allow_deploy_templates(): """Check if accessing deploy template endpoints is allowed. Version 1.55 of the API exposed deploy template endpoints. 
""" return api.request.version.minor >= versions.MINOR_55_DEPLOY_TEMPLATES def check_policy(policy_name): """Check if the specified policy is authorised for this request. :policy_name: Name of the policy to check. :raises: HTTPForbidden if the policy forbids access. """ # NOTE(lbragstad): Mapping context attributes into a target dictionary is # effectively a noop from an authorization perspective because the values # we're comparing are coming from the same place. cdict = api.request.context.to_policy_values() policy.authorize(policy_name, cdict, api.request.context) def check_policy_true(policy_name): """Check if the specified policy is authorised for this request. :policy_name: Name of the policy to check. :returns: True if policy is matched, otherwise false. """ # NOTE(lbragstad): Mapping context attributes into a target dictionary is # effectively a noop from an authorization perspective because the values # we're comparing are coming from the same place. cdict = api.request.context.to_policy_values() return policy.check_policy(policy_name, cdict, api.request.context) def check_owner_policy(object_type, policy_name, owner, lessee=None, conceal_node=False): """Check if the policy authorizes this request on an object. :param: object_type: type of object being checked :param: policy_name: Name of the policy to check. :param: owner: the owner :param: lessee: the lessee :param: conceal_node: the UUID of the node IF we should conceal the existence of the node with a 404 Error instead of a 403 Error. :raises: HTTPForbidden if the policy forbids access. """ cdict = api.request.context.to_policy_values() target_dict = dict(cdict) target_dict[object_type + '.owner'] = owner if lessee: target_dict[object_type + '.lessee'] = lessee try: policy.authorize(policy_name, target_dict, api.request.context) except exception.HTTPForbidden: if conceal_node: # The caller does NOT have access to the node and we've been told # we should return a 404 instead of HTTPForbidden. 
raise exception.NodeNotFound(node=conceal_node) else: raise def check_node_policy_and_retrieve(policy_name, node_ident, with_suffix=False): """Check if the specified policy authorizes this request on a node. :param: policy_name: Name of the policy to check. :param: node_ident: the UUID or logical name of a node. :param: with_suffix: whether the RPC node should include the suffix :raises: HTTPForbidden if the policy forbids access. :raises: NodeNotFound if the node is not found. :return: RPC node identified by node_ident """ conceal_node = False try: if with_suffix: rpc_node = get_rpc_node_with_suffix(node_ident) else: rpc_node = get_rpc_node(node_ident) except exception.NodeNotFound: raise # Project scoped users will get a 404 where as system # scoped should get a 403 cdict = api.request.context.to_policy_values() if cdict.get('project_id', False): conceal_node = node_ident try: # Always check the ability to see the node BEFORE anything else. check_owner_policy('node', 'baremetal:node:get', rpc_node['owner'], rpc_node['lessee'], conceal_node=conceal_node) except exception.NotAuthorized: raise exception.NodeNotFound(node=node_ident) # If we've reached here, we can see the node and we have # access to view it. check_owner_policy('node', policy_name, rpc_node['owner'], rpc_node['lessee'], conceal_node=False) return rpc_node def check_allocation_policy_and_retrieve(policy_name, allocation_ident): """Check if the specified policy authorizes request on allocation. :param: policy_name: Name of the policy to check. :param: allocation_ident: the UUID or logical name of a node. :raises: HTTPForbidden if the policy forbids access. :raises: AllocationNotFound if the node is not found. :return: RPC node identified by node_ident """ try: rpc_allocation = get_rpc_allocation_with_suffix( allocation_ident) # If the user is not allowed to view the allocation, then # we need to check that and respond with a 404. 
check_owner_policy('allocation', 'baremetal:allocation:get', rpc_allocation['owner']) except exception.NotAuthorized: raise exception.AllocationNotFound(allocation=allocation_ident) # The primary policy check for allocation. check_owner_policy('allocation', policy_name, rpc_allocation['owner']) return rpc_allocation def check_multiple_node_policies_and_retrieve(policy_names, node_ident, with_suffix=False): """Check if the specified policies authorize this request on a node. :param: policy_names: List of policy names to check. :param: node_ident: the UUID or logical name of a node. :param: with_suffix: whether the RPC node should include the suffix :raises: HTTPForbidden if the policy forbids access. :raises: NodeNotFound if the node is not found. :return: RPC node identified by node_ident """ rpc_node = None for policy_name in policy_names: if rpc_node is None: rpc_node = check_node_policy_and_retrieve(policy_names[0], node_ident, with_suffix) else: check_owner_policy('node', policy_name, rpc_node['owner'], rpc_node['lessee']) return rpc_node def check_list_policy(object_type, owner=None): """Check if the list policy authorizes this request on an object. :param: object_type: type of object being checked :param: owner: owner filter for list query, if any :raises: HTTPForbidden if the policy forbids access. :return: owner that should be used for list query, if needed """ cdict = api.request.context.to_policy_values() try: policy.authorize('baremetal:%s:list_all' % object_type, cdict, api.request.context) except (exception.HTTPForbidden, oslo_policy.InvalidScope): # In the event the scoped policy fails, falling back to the # policy governing a filtered view. 
project_owner = cdict.get('project_id') if (not project_owner or (owner and owner != project_owner)): raise policy.authorize('baremetal:%s:list' % object_type, cdict, api.request.context) return project_owner return owner def check_port_policy_and_retrieve(policy_name, port_ident, portgroup=False): """Check if the specified policy authorizes this request on a port. :param: policy_name: Name of the policy to check. :param: port_ident: The name, uuid, or other valid ID value to find a port or portgroup by. :raises: HTTPForbidden if the policy forbids access. :raises: NodeNotFound if the node is not found. :return: RPC port identified by port_ident associated node """ context = api.request.context cdict = context.to_policy_values() owner = None lessee = None try: if not portgroup: rpc_port = objects.Port.get(context, port_ident) else: rpc_port = objects.Portgroup.get(context, port_ident) except (exception.PortNotFound, exception.PortgroupNotFound): # don't expose non-existence of port unless requester # has generic access to policy raise target_dict = dict(cdict) try: rpc_node = objects.Node.get_by_id(context, rpc_port.node_id) owner = rpc_node['owner'] lessee = rpc_node['lessee'] except exception.NodeNotFound: # There is no spoon, err, node. rpc_node = None pass target_dict = dict(cdict) target_dict['node.owner'] = owner target_dict['node.lessee'] = lessee try: policy.authorize('baremetal:node:get', target_dict, context) except exception.NotAuthorized: if not portgroup: raise exception.PortNotFound(port=port_ident) else: raise exception.PortgroupNotFound(portgroup=port_ident) policy.authorize(policy_name, target_dict, context) return rpc_port, rpc_node def check_port_list_policy(portgroup=False, parent_node=None, parent_portgroup=None): """Check if the specified policy authorizes this request on a port. :param portgroup: Boolean value, default false, indicating if the list policy check is for a portgroup as the policy names are different between ports and portgroups. 
:param parent_node: The UUID of a node, if any, to apply a policy check to as well before applying other policy check operations. :param parent_portgroup: The UUID of the parent portgroup if the list of ports was retrieved via the /v1/portgroups//ports. :raises: HTTPForbidden if the policy forbids access. :return: owner that should be used for list query, if needed """ cdict = api.request.context.to_policy_values() # No node is associated with this request, yet. rpc_node = None conceal_linked_node = None if parent_portgroup: # lookup the portgroup via the db, and then set parent_node rpc_portgroup = objects.Portgroup.get_by_uuid(api.request.context, parent_portgroup) rpc_node = objects.Node.get_by_id(api.request.context, rpc_portgroup.node_id) parent_node = rpc_node.uuid if parent_node and not rpc_node: try: rpc_node = objects.Node.get_by_uuid(api.request.context, parent_node) conceal_linked_node = rpc_node.uuid except exception.NotFound: # NOTE(TheJulia): This only covers portgroups since # you can't go from ports to other items. raise exception.PortgroupNotFound(portgroup=parent_portgroup) if parent_node: try: check_owner_policy( 'node', 'baremetal:node:get', rpc_node.owner, rpc_node.lessee, conceal_node=conceal_linked_node) except exception.NotAuthorized: if parent_portgroup: # If this call was invoked with a parent portgroup # then we need to signal the parent portgroup was not # found. raise exception.PortgroupNotFound( portgroup=parent_portgroup) if parent_node: # This should likely never be hit, because # the existence of a parent node should # trigger the node not found exception to be # explicitly raised. 
raise exception.NodeNotFound( node=parent_node) raise try: if not portgroup: policy.authorize('baremetal:port:list_all', cdict, api.request.context) else: policy.authorize('baremetal:portgroup:list_all', cdict, api.request.context) except exception.HTTPForbidden: owner = cdict.get('project_id') if not owner: raise if not portgroup: policy.authorize('baremetal:port:list', cdict, api.request.context) else: policy.authorize('baremetal:portgroup:list', cdict, api.request.context) return owner def check_volume_list_policy(parent_node=None): """Check if the specified policy authorizes this request on a volume. :param parent_node: The UUID of a node, if any, to apply a policy check to as well before applying other policy check operations. :raises: HTTPForbidden if the policy forbids access. :return: owner that should be used for list query, if needed """ cdict = api.request.context.to_policy_values() # No node is associated with this request, yet. rpc_node = None conceal_linked_node = None if parent_node: try: rpc_node = objects.Node.get_by_uuid(api.request.context, parent_node) conceal_linked_node = rpc_node.uuid except exception.NotFound: raise exception.NodeNotFound(node=parent_node) if parent_node: try: check_owner_policy( 'node', 'baremetal:node:get', rpc_node.owner, rpc_node.lessee, conceal_node=conceal_linked_node) except exception.NotAuthorized: if parent_node: # This should likely never be hit, because # the existence of a parent node should # trigger the node not found exception to be # explicitly raised. raise exception.NodeNotFound( node=parent_node) raise try: policy.authorize('baremetal:volume:list_all', cdict, api.request.context) except exception.HTTPForbidden: project_id = cdict.get('project_id') if not project_id: raise policy.authorize('baremetal:volume:list', cdict, api.request.context) return project_id def check_volume_policy_and_retrieve(policy_name, vol_ident, target=False): """Check if the specified policy authorizes this request on a volume. 
:param: policy_name: Name of the policy to check. :param: vol_ident: The name, uuid, or other valid ID value to find a volume target or connector by. :param: target: Boolean value to indicate if the check is for a volume target or connector. Default value is False, implying connector. :raises: HTTPForbidden if the policy forbids access. :raises: VolumeConnectorNotFound if the node is not found. :raises: VolumeTargetNotFound if the node is not found. :return: RPC port identified by port_ident associated node """ context = api.request.context cdict = context.to_policy_values() owner = None lessee = None try: if not target: rpc_vol = objects.VolumeConnector.get(context, vol_ident) else: rpc_vol = objects.VolumeTarget.get(context, vol_ident) except (exception.VolumeConnectorNotFound, exception.VolumeTargetNotFound): # don't expose non-existence of volume unless requester # has generic access to policy raise target_dict = dict(cdict) try: rpc_node = objects.Node.get_by_id(context, rpc_vol.node_id) owner = rpc_node['owner'] lessee = rpc_node['lessee'] except exception.NodeNotFound: pass target_dict = dict(cdict) target_dict['node.owner'] = owner target_dict['node.lessee'] = lessee try: policy.authorize('baremetal:node:get', target_dict, context) except exception.NotAuthorized: if not target: raise exception.VolumeConnectorNotFound(connector=vol_ident) else: raise exception.VolumeTargetNotFound(target=vol_ident) policy.authorize(policy_name, target_dict, context) return rpc_vol, rpc_node def allow_build_configdrive(): """Check if building configdrive is allowed. Version 1.56 of the API added support for building configdrive. """ return api.request.version.minor >= versions.MINOR_56_BUILD_CONFIGDRIVE def allow_configdrive_vendor_data(): """Check if configdrive can contain a vendor_data key. Version 1.59 of the API added support for configdrive vendor_data. 
""" return (api.request.version.minor >= versions.MINOR_59_CONFIGDRIVE_VENDOR_DATA) def allow_allocation_update(): """Check if updating an existing allocation is allowed or not. Version 1.57 of the API added support for updating an allocation. """ return api.request.version.minor >= versions.MINOR_57_ALLOCATION_UPDATE def allow_allocation_backfill(): """Check if backfilling allocations is allowed. Version 1.58 of the API added support for backfilling allocations. """ return api.request.version.minor >= versions.MINOR_58_ALLOCATION_BACKFILL def allow_allocation_owner(): """Check if allocation owner field is allowed. Version 1.60 of the API added the owner field to the allocation object. """ return api.request.version.minor >= versions.MINOR_60_ALLOCATION_OWNER def allow_agent_token(): """Check if agent token is available.""" return api.request.version.minor >= versions.MINOR_62_AGENT_TOKEN def allow_local_link_connection_network_type(): """Check if network_type is allowed in ports link_local_connection""" return (api.request.version.minor >= versions.MINOR_64_LOCAL_LINK_CONNECTION_NETWORK_TYPE) def allow_verify_ca_in_heartbeat(): """Check if heartbeat accepts agent_verify_ca.""" return api.request.version.minor >= versions.MINOR_68_HEARTBEAT_VERIFY_CA def allow_deploy_steps(): """Check if deploy_steps are available.""" return api.request.version.minor >= versions.MINOR_69_DEPLOY_STEPS def allow_status_in_heartbeat(): """Check if heartbeat accepts agent_status and agent_status_message.""" return api.request.version.minor >= versions.MINOR_72_HEARTBEAT_STATUS def check_allow_deploy_steps(target, deploy_steps): """Check if deploy steps are allowed""" if not deploy_steps: return if not allow_deploy_steps(): raise exception.NotAcceptable(_( "Request not acceptable. 
The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_69_DEPLOY_STEPS}) allowed_states = (states.ACTIVE, states.REBUILD) if target not in allowed_states: msg = (_('"deploy_steps" is only valid when setting target ' 'provision state to %s or %s') % allowed_states) raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) def check_allow_clean_disable_ramdisk(target, disable_ramdisk): if disable_ramdisk is None: return elif api.request.version.minor < versions.MINOR_70_CLEAN_DISABLE_RAMDISK: raise exception.NotAcceptable( _("disable_ramdisk is not acceptable in this API version")) elif target != "clean": raise exception.BadRequest( _("disable_ramdisk is supported only with manual cleaning")) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/versions.py0000664000175000017500000002077400000000000022264 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from ironic.common import release_mappings CONF = cfg.CONF # This is the version 1 API BASE_VERSION = 1 # Here goes a short log of changes in every version. # Refer to doc/source/contributor/webapi-version-history.rst for a detailed # explanation of what each version contains. 
# # v1.0: corresponds to Juno API, not supported since Kilo # v1.1: API at the point in time when versioning support was added, # covers the following commits from Kilo cycle: # 827db7fe: Add Node.maintenance_reason # 68eed82b: Add API endpoint to set/unset the node maintenance mode # bc973889: Add sync and async support for passthru methods # e03f443b: Vendor endpoints to support different HTTP methods # e69e5309: Make vendor methods discoverable via the Ironic API # edf532db: Add logic to store the config drive passed by Nova # v1.2: Renamed NOSTATE ("None") to AVAILABLE ("available") # v1.3: Add node.driver_internal_info # v1.4: Add MANAGEABLE state # v1.5: Add logical node names # v1.6: Add INSPECT* states # v1.7: Add node.clean_step # v1.8: Add ability to return a subset of resource fields # v1.9: Add ability to filter nodes by provision state # v1.10: Logical node names support RFC 3986 unreserved characters # v1.11: Nodes appear in ENROLL state by default # v1.12: Add support for RAID # v1.13: Add 'abort' verb to CLEANWAIT # v1.14: Make the following endpoints discoverable via API: # 1. '/v1/nodes//states' # 2. '/v1/drivers//properties' # v1.15: Add ability to do manual cleaning of nodes # v1.16: Add ability to filter nodes by driver. # v1.17: Add 'adopt' verb for ADOPTING active nodes. # v1.18: Add port.internal_info. # v1.19: Add port.local_link_connection and port.pxe_enabled. # v1.20: Add node.network_interface # v1.21: Add node.resource_class # v1.22: Ramdisk lookup and heartbeat endpoints. # v1.23: Add portgroup support. # v1.24: Add subcontrollers: node.portgroup, portgroup.ports. # Add port.portgroup_uuid field. # v1.25: Add possibility to unset chassis_uuid from node. # v1.26: Add portgroup.mode and portgroup.properties. # v1.27: Add soft reboot, soft power off and timeout. # v1.28: Add vifs subcontroller to node # v1.29: Add inject nmi. # v1.30: Add dynamic driver interactions. # v1.31: Add dynamic interfaces fields to node. 
# v1.32: Add volume support.
# v1.33: Add node storage interface
# v1.34: Add physical network field to port.
# v1.35: Add ability to provide configdrive when rebuilding node.
# v1.36: Add Ironic Python Agent version support.
# v1.37: Add node traits.
# v1.38: Add rescue and unrescue provision states
# v1.39: Add inspect wait provision state.
# v1.40: Add bios.properties.
#        Add bios_interface to the node object.
# v1.41: Add inspection abort support.
# v1.42: Expose fault field to node.
# v1.43: Add detail=True flag to all API endpoints
# v1.44: Add node deploy_step field
# v1.45: reset_interfaces parameter to node's PATCH
# v1.46: Add conductor_group to the node object.
# v1.47: Add automated_clean to the node object.
# v1.48: Add protected to the node object.
# v1.49: Add conductor to the node object and /v1/conductors.
# v1.50: Add owner to the node object.
# v1.51: Add description to the node object.
# v1.52: Add allocation API.
# v1.53: Add support for Smart NIC port
# v1.54: Add events support.
# v1.55: Add deploy templates API.
# v1.56: Add support for building configdrives.
# v1.57: Add support for updating an existing allocation.
# v1.58: Add support for backfilling allocations.
# v1.59: Add support for vendor data in configdrives.
# v1.60: Add owner to the allocation object.
# v1.61: Add retired and retired_reason to the node object.
# v1.62: Add agent_token support for agent communication.
# v1.63: Add support for indicators
# v1.64: Add network_type to port.local_link_connection
# v1.65: Add lessee to the node object.
# v1.66: Add support for node network_data field.
# v1.67: Add support for port_uuid/portgroup_uuid in node vif_attach
# v1.68: Add agent_verify_ca to heartbeat.
# v1.69: Add deploy_steps to provisioning
# v1.70: Add disable_ramdisk to manual cleaning.
# v1.71: Add signifier for Scope based roles.
# v1.72: Add agent_status and agent_status_message to /v1/heartbeat # v1.73: Add support for deploy and undeploy verbs # v1.74: Add bios registry to /v1/nodes/{node}/bios/{setting} # v1.75: Add boot_mode, secure_boot fields to node object. # v1.76: Add support for changing boot_mode and secure_boot state # v1.77: Add fields selector to drivers list and driver detail. # v1.78: Add node history endpoint MINOR_0_JUNO = 0 MINOR_1_INITIAL_VERSION = 1 MINOR_2_AVAILABLE_STATE = 2 MINOR_3_DRIVER_INTERNAL_INFO = 3 MINOR_4_MANAGEABLE_STATE = 4 MINOR_5_NODE_NAME = 5 MINOR_6_INSPECT_STATE = 6 MINOR_7_NODE_CLEAN = 7 MINOR_8_FETCHING_SUBSET_OF_FIELDS = 8 MINOR_9_PROVISION_STATE_FILTER = 9 MINOR_10_UNRESTRICTED_NODE_NAME = 10 MINOR_11_ENROLL_STATE = 11 MINOR_12_RAID_CONFIG = 12 MINOR_13_ABORT_VERB = 13 MINOR_14_LINKS_NODESTATES_DRIVERPROPERTIES = 14 MINOR_15_MANUAL_CLEAN = 15 MINOR_16_DRIVER_FILTER = 16 MINOR_17_ADOPT_VERB = 17 MINOR_18_PORT_INTERNAL_INFO = 18 MINOR_19_PORT_ADVANCED_NET_FIELDS = 19 MINOR_20_NETWORK_INTERFACE = 20 MINOR_21_RESOURCE_CLASS = 21 MINOR_22_LOOKUP_HEARTBEAT = 22 MINOR_23_PORTGROUPS = 23 MINOR_24_PORTGROUPS_SUBCONTROLLERS = 24 MINOR_25_UNSET_CHASSIS_UUID = 25 MINOR_26_PORTGROUP_MODE_PROPERTIES = 26 MINOR_27_SOFT_POWER_OFF = 27 MINOR_28_VIFS_SUBCONTROLLER = 28 MINOR_29_INJECT_NMI = 29 MINOR_30_DYNAMIC_DRIVERS = 30 MINOR_31_DYNAMIC_INTERFACES = 31 MINOR_32_VOLUME = 32 MINOR_33_STORAGE_INTERFACE = 33 MINOR_34_PORT_PHYSICAL_NETWORK = 34 MINOR_35_REBUILD_CONFIG_DRIVE = 35 MINOR_36_AGENT_VERSION_HEARTBEAT = 36 MINOR_37_NODE_TRAITS = 37 MINOR_38_RESCUE_INTERFACE = 38 MINOR_39_INSPECT_WAIT = 39 MINOR_40_BIOS_INTERFACE = 40 MINOR_41_INSPECTION_ABORT = 41 MINOR_42_FAULT = 42 MINOR_43_ENABLE_DETAIL_QUERY = 43 MINOR_44_NODE_DEPLOY_STEP = 44 MINOR_45_RESET_INTERFACES = 45 MINOR_46_NODE_CONDUCTOR_GROUP = 46 MINOR_47_NODE_AUTOMATED_CLEAN = 47 MINOR_48_NODE_PROTECTED = 48 MINOR_49_CONDUCTORS = 49 MINOR_50_NODE_OWNER = 50 MINOR_51_NODE_DESCRIPTION = 51 
MINOR_52_ALLOCATION = 52 MINOR_53_PORT_SMARTNIC = 53 MINOR_54_EVENTS = 54 MINOR_55_DEPLOY_TEMPLATES = 55 MINOR_56_BUILD_CONFIGDRIVE = 56 MINOR_57_ALLOCATION_UPDATE = 57 MINOR_58_ALLOCATION_BACKFILL = 58 MINOR_59_CONFIGDRIVE_VENDOR_DATA = 59 MINOR_60_ALLOCATION_OWNER = 60 MINOR_61_NODE_RETIRED = 61 MINOR_62_AGENT_TOKEN = 62 MINOR_63_INDICATORS = 63 MINOR_64_LOCAL_LINK_CONNECTION_NETWORK_TYPE = 64 MINOR_65_NODE_LESSEE = 65 MINOR_66_NODE_NETWORK_DATA = 66 MINOR_67_NODE_VIF_ATTACH_PORT = 67 MINOR_68_HEARTBEAT_VERIFY_CA = 68 MINOR_69_DEPLOY_STEPS = 69 MINOR_70_CLEAN_DISABLE_RAMDISK = 70 MINOR_71_RBAC_SCOPES = 71 MINOR_72_HEARTBEAT_STATUS = 72 MINOR_73_DEPLOY_UNDEPLOY_VERBS = 73 MINOR_74_BIOS_REGISTRY = 74 MINOR_75_NODE_BOOT_MODE = 75 MINOR_76_NODE_CHANGE_BOOT_MODE = 76 MINOR_77_DRIVER_FIELDS_SELECTOR = 77 MINOR_78_NODE_HISTORY = 78 # When adding another version, update: # - MINOR_MAX_VERSION # - doc/source/contributor/webapi-version-history.rst with a detailed # explanation of what changed in the new version # - common/release_mappings.py, RELEASE_MAPPING['master']['api'] MINOR_MAX_VERSION = MINOR_78_NODE_HISTORY # String representations of the minor and maximum versions _MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION) _MAX_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_MAX_VERSION) def min_version_string(): """Returns the minimum supported API version (as a string)""" return _MIN_VERSION_STRING def max_version_string(): """Returns the maximum supported API version (as a string). If the service is pinned, the maximum API version is the pinned version. Otherwise, it is the maximum supported API version. 
""" release_ver = release_mappings.RELEASE_MAPPING.get( CONF.pin_release_version) if release_ver: return release_ver['api'] else: return _MAX_VERSION_STRING ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/volume.py0000664000175000017500000000525700000000000021722 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client import pecan from pecan import rest from ironic import api from ironic.api.controllers import link from ironic.api.controllers.v1 import utils as api_utils from ironic.api.controllers.v1 import volume_connector from ironic.api.controllers.v1 import volume_target from ironic.api import method from ironic.common import exception def convert(node_ident=None): url = api.request.public_url volume = {} if node_ident: resource = 'nodes' rargs = '%s/volume/' % node_ident else: resource = 'volume' rargs = '' volume['links'] = [ link.make_link('self', url, resource, rargs), link.make_link('bookmark', url, resource, rargs, bookmark=True)] volume['connectors'] = [ link.make_link('self', url, resource, rargs + 'connectors'), link.make_link('bookmark', url, resource, rargs + 'connectors', bookmark=True)] volume['targets'] = [ link.make_link('self', url, resource, rargs + 'targets'), link.make_link('bookmark', url, resource, rargs + 'targets', bookmark=True)] return volume class 
VolumeController(rest.RestController): """REST controller for volume root""" _subcontroller_map = { 'connectors': volume_connector.VolumeConnectorsController, 'targets': volume_target.VolumeTargetsController } def __init__(self, node_ident=None): super(VolumeController, self).__init__() self.parent_node_ident = node_ident @method.expose() def get(self): if not api_utils.allow_volume(): raise exception.NotFound() api_utils.check_policy('baremetal:volume:get') return convert(self.parent_node_ident) @pecan.expose() def _lookup(self, subres, *remainder): if not api_utils.allow_volume(): pecan.abort(http_client.NOT_FOUND) subcontroller = self._subcontroller_map.get(subres) if subcontroller: return subcontroller(node_ident=self.parent_node_ident), remainder ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/volume_connector.py0000664000175000017500000004077400000000000023777 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client from ironic_lib import metrics_utils from oslo_utils import uuidutils from pecan import rest from ironic import api from ironic.api.controllers import link from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import notification_utils as notify from ironic.api.controllers.v1 import utils as api_utils from ironic.api import method from ironic.common import args from ironic.common import exception from ironic.common.i18n import _ from ironic import objects METRICS = metrics_utils.get_metrics_logger(__name__) _DEFAULT_RETURN_FIELDS = ['uuid', 'node_uuid', 'type', 'connector_id'] CONNECTOR_SCHEMA = { 'type': 'object', 'properties': { 'connector_id': {'type': 'string'}, 'extra': {'type': ['object', 'null']}, 'node_uuid': {'type': 'string'}, 'type': {'type': 'string'}, 'uuid': {'type': ['string', 'null']}, }, 'required': ['connector_id', 'node_uuid', 'type'], 'additionalProperties': False, } CONNECTOR_VALIDATOR_EXTRA = args.dict_valid( node_uuid=args.uuid, uuid=args.uuid, ) CONNECTOR_VALIDATOR = args.and_valid( args.schema(CONNECTOR_SCHEMA), CONNECTOR_VALIDATOR_EXTRA ) PATCH_ALLOWED_FIELDS = [ 'connector_id', 'extra', 'node_uuid', 'type' ] def convert_with_links(rpc_connector, fields=None, sanitize=True): connector = api_utils.object_to_dict( rpc_connector, link_resource='volume/connectors', fields=('connector_id', 'extra', 'type') ) api_utils.populate_node_uuid(rpc_connector, connector) if fields is not None: api_utils.check_for_invalid_fields(fields, connector) if not sanitize: return connector api_utils.sanitize_dict(connector, fields) return connector def list_convert_with_links(rpc_connectors, limit, url, fields=None, detail=None, **kwargs): if detail: kwargs['detail'] = detail return collection.list_convert_with_links( items=[convert_with_links(p, fields=fields, sanitize=False) for p in rpc_connectors], item_name='connectors', limit=limit, url=url, fields=fields, 
sanitize_func=api_utils.sanitize_dict, **kwargs ) class VolumeConnectorsController(rest.RestController): """REST controller for VolumeConnectors.""" invalid_sort_key_list = ['extra'] def __init__(self, node_ident=None): super(VolumeConnectorsController, self).__init__() self.parent_node_ident = node_ident def _get_volume_connectors_collection(self, node_ident, marker, limit, sort_key, sort_dir, resource_url=None, fields=None, detail=None, project=None): limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.VolumeConnector.get_by_uuid( api.request.context, marker) if sort_key in self.invalid_sort_key_list: raise exception.InvalidParameterValue( _("The sort_key value %(key)s is an invalid field for " "sorting") % {'key': sort_key}) node_ident = self.parent_node_ident or node_ident if node_ident: # FIXME(comstud): Since all we need is the node ID, we can # make this more efficient by only querying # for that column. This will get cleaned up # as we move to the object interface. node = api_utils.get_rpc_node(node_ident) connectors = objects.VolumeConnector.list_by_node_id( api.request.context, node.id, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, project=project) else: connectors = objects.VolumeConnector.list(api.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, project=project) return list_convert_with_links(connectors, limit, url=resource_url, fields=fields, sort_key=sort_key, sort_dir=sort_dir, detail=detail) @METRICS.timer('VolumeConnectorsController.get_all') @method.expose() @args.validate(node=args.uuid_or_name, marker=args.uuid, limit=args.integer, sort_key=args.string, sort_dir=args.string, fields=args.string_list, detail=args.boolean) def get_all(self, node=None, marker=None, limit=None, sort_key='id', sort_dir='asc', fields=None, detail=None, project=None): """Retrieve a list of volume connectors. 
:param node: UUID or name of a node, to get only volume connectors for that node. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: "asc". :param fields: Optional, a list with a specified set of fields of the resource to be returned. :param detail: Optional, whether to retrieve with detail. :returns: a list of volume connectors, or an empty list if no volume connector is found. :raises: InvalidParameterValue if sort_key does not exist :raises: InvalidParameterValue if sort key is invalid for sorting. :raises: InvalidParameterValue if both fields and detail are specified. """ project = api_utils.check_volume_list_policy( parent_node=self.parent_node_ident) if fields is None and not detail: fields = _DEFAULT_RETURN_FIELDS if fields and detail: raise exception.InvalidParameterValue( _("Can't fetch a subset of fields with 'detail' set")) resource_url = 'volume/connectors' return self._get_volume_connectors_collection( node, marker, limit, sort_key, sort_dir, resource_url=resource_url, fields=fields, detail=detail, project=project) @METRICS.timer('VolumeConnectorsController.get_one') @method.expose() @args.validate(connector_uuid=args.uuid, fields=args.string_list) def get_one(self, connector_uuid, fields=None): """Retrieve information about the given volume connector. :param connector_uuid: UUID of a volume connector. :param fields: Optional, a list with a specified set of fields of the resource to be returned. :returns: API-serializable volume connector object. :raises: OperationNotPermitted if accessed with specifying a parent node. :raises: VolumeConnectorNotFound if no volume connector exists with the specified UUID. 
""" rpc_connector, _ = api_utils.check_volume_policy_and_retrieve( 'baremetal:volume:get', connector_uuid, target=False) if self.parent_node_ident: raise exception.OperationNotPermitted() return convert_with_links(rpc_connector, fields=fields) @METRICS.timer('VolumeConnectorsController.post') @method.expose(status_code=http_client.CREATED) @method.body('connector') @args.validate(connector=CONNECTOR_VALIDATOR) def post(self, connector): """Create a new volume connector. :param connector: a volume connector within the request body. :returns: API-serializable volume connector object. :raises: OperationNotPermitted if accessed with specifying a parent node. :raises: VolumeConnectorTypeAndIdAlreadyExists if a volume connector already exists with the same type and connector_id :raises: VolumeConnectorAlreadyExists if a volume connector with the same UUID already exists """ context = api.request.context owner = None lessee = None raise_node_not_found = False node_uuid = connector.get('node_uuid') try: node = api_utils.replace_node_uuid_with_id(connector) owner = node.owner lessee = node.lessee except exception.NotFound: raise_node_not_found = True api_utils.check_owner_policy('node', 'baremetal:volume:create', owner, lessee=lessee, conceal_node=False) if raise_node_not_found: raise exception.InvalidInput(fieldname='node_uuid', value=node_uuid) if self.parent_node_ident: raise exception.OperationNotPermitted() # NOTE(hshiina): UUID is mandatory for notification payload if not connector.get('uuid'): connector['uuid'] = uuidutils.generate_uuid() new_connector = objects.VolumeConnector(context, **connector) notify.emit_start_notification(context, new_connector, 'create', node_uuid=node.uuid) with notify.handle_error_notification(context, new_connector, 'create', node_uuid=node.uuid): new_connector.create() notify.emit_end_notification(context, new_connector, 'create', node_uuid=node.uuid) # Set the HTTP Location Header api.response.location = 
link.build_url('volume/connectors', new_connector.uuid) return convert_with_links(new_connector) @METRICS.timer('VolumeConnectorsController.patch') @method.expose() @method.body('patch') @args.validate(connector_uuid=args.uuid, patch=args.patch) def patch(self, connector_uuid, patch): """Update an existing volume connector. :param connector_uuid: UUID of a volume connector. :param patch: a json PATCH document to apply to this volume connector. :returns: API-serializable volume connector object. :raises: OperationNotPermitted if accessed with specifying a parent node. :raises: PatchError if a given patch can not be applied. :raises: VolumeConnectorNotFound if no volume connector exists with the specified UUID. :raises: InvalidParameterValue if the volume connector's UUID is being changed :raises: NodeLocked if node is locked by another conductor :raises: NodeNotFound if the node associated with the connector does not exist :raises: VolumeConnectorTypeAndIdAlreadyExists if another connector already exists with the same values for type and connector_id fields :raises: InvalidUUID if invalid node UUID is passed in the patch. :raises: InvalidStateRequested If a node associated with the volume connector is not powered off. 
""" context = api.request.context rpc_connector, rpc_node = api_utils.check_volume_policy_and_retrieve( 'baremetal:volume:update', connector_uuid, target=False) if self.parent_node_ident: raise exception.OperationNotPermitted() api_utils.patch_validate_allowed_fields(patch, PATCH_ALLOWED_FIELDS) for value in api_utils.get_patch_values(patch, '/node_uuid'): if not uuidutils.is_uuid_like(value): message = _("Expected a UUID for node_uuid, but received " "%(uuid)s.") % {'uuid': str(value)} raise exception.InvalidUUID(message=message) connector_dict = rpc_connector.as_dict() # NOTE(smoriya): # 1) Remove node_id because it's an internal value and # not present in the API object # 2) Add node_uuid rpc_node = api_utils.replace_node_id_with_uuid(connector_dict) connector_dict = api_utils.apply_jsonpatch(connector_dict, patch) try: if connector_dict['node_uuid'] != rpc_node.uuid: rpc_node = objects.Node.get( api.request.context, connector_dict['node_uuid']) except exception.NodeNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for a PATCH request to change a Port e.code = http_client.BAD_REQUEST # BadRequest raise api_utils.patched_validate_with_schema( connector_dict, CONNECTOR_SCHEMA, CONNECTOR_VALIDATOR) api_utils.patch_update_changed_fields( connector_dict, rpc_connector, fields=objects.VolumeConnector.fields, schema=CONNECTOR_SCHEMA, id_map={'node_id': rpc_node.id} ) notify.emit_start_notification(context, rpc_connector, 'update', node_uuid=rpc_node.uuid) with notify.handle_error_notification(context, rpc_connector, 'update', node_uuid=rpc_node.uuid): topic = api.request.rpcapi.get_topic_for(rpc_node) new_connector = api.request.rpcapi.update_volume_connector( context, rpc_connector, topic) api_connector = convert_with_links(new_connector) notify.emit_end_notification(context, new_connector, 'update', node_uuid=rpc_node.uuid) return api_connector @METRICS.timer('VolumeConnectorsController.delete') 
@method.expose(status_code=http_client.NO_CONTENT) @args.validate(connector_uuid=args.uuid) def delete(self, connector_uuid): """Delete a volume connector. :param connector_uuid: UUID of a volume connector. :raises: OperationNotPermitted if accessed with specifying a parent node. :raises: NodeLocked if node is locked by another conductor :raises: NodeNotFound if the node associated with the connector does not exist :raises: VolumeConnectorNotFound if the volume connector cannot be found :raises: InvalidStateRequested If a node associated with the volume connector is not powered off. """ context = api.request.context rpc_connector, rpc_node = api_utils.check_volume_policy_and_retrieve( 'baremetal:volume:delete', connector_uuid, target=False) if self.parent_node_ident: raise exception.OperationNotPermitted() notify.emit_start_notification(context, rpc_connector, 'delete', node_uuid=rpc_node.uuid) with notify.handle_error_notification(context, rpc_connector, 'delete', node_uuid=rpc_node.uuid): topic = api.request.rpcapi.get_topic_for(rpc_node) api.request.rpcapi.destroy_volume_connector(context, rpc_connector, topic) notify.emit_end_notification(context, rpc_connector, 'delete', node_uuid=rpc_node.uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/v1/volume_target.py0000664000175000017500000004400400000000000023261 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the
# License for the specific language governing permissions and limitations
# under the License.

from http import client as http_client

from ironic_lib import metrics_utils
from oslo_utils import uuidutils
from pecan import rest

from ironic import api
from ironic.api.controllers import link
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import notification_utils as notify
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api import method
from ironic.common import args
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import policy
from ironic import objects

METRICS = metrics_utils.get_metrics_logger(__name__)

# Fields returned by default when neither ?fields= nor ?detail= is given.
_DEFAULT_RETURN_FIELDS = ['uuid', 'node_uuid', 'volume_type', 'boot_index',
                          'volume_id']

# JSON schema used to validate the POST/PATCH representation of a target.
TARGET_SCHEMA = {
    'type': 'object',
    'properties': {
        'boot_index': {'type': 'integer'},
        'extra': {'type': ['object', 'null']},
        'node_uuid': {'type': 'string'},
        'properties': {'type': ['object', 'null']},
        'volume_id': {'type': 'string'},
        'volume_type': {'type': 'string'},
        'uuid': {'type': ['string', 'null']},
    },
    'required': ['boot_index', 'node_uuid', 'volume_id', 'volume_type'],
    'additionalProperties': False,
}

TARGET_VALIDATOR_EXTRA = args.dict_valid(
    node_uuid=args.uuid,
    uuid=args.uuid,
)

# Schema validation plus UUID-format checks for node_uuid/uuid.
TARGET_VALIDATOR = args.and_valid(
    args.schema(TARGET_SCHEMA),
    TARGET_VALIDATOR_EXTRA
)

# Fields a JSON PATCH request is allowed to touch.
PATCH_ALLOWED_FIELDS = [
    'boot_index',
    'extra',
    'node_uuid',
    'properties',
    'volume_id',
    'volume_type'
]


def convert_with_links(rpc_target, fields=None, sanitize=True):
    """Convert an RPC VolumeTarget to an API-serializable dict with links.

    :param rpc_target: a VolumeTarget RPC object.
    :param fields: optional list of fields the caller asked for; validated
                   against the converted dict.
    :param sanitize: when False, return the full dict (callers that batch
                     convert sanitize later themselves).
    :returns: a dict representation of the target.
    """
    target = api_utils.object_to_dict(
        rpc_target,
        link_resource='volume/targets',
        fields=(
            'boot_index',
            'extra',
            'properties',
            'volume_id',
            'volume_type'
        )
    )
    # The API exposes node_uuid, not the internal node_id.
    api_utils.populate_node_uuid(rpc_target, target)

    if fields is not None:
        api_utils.check_for_invalid_fields(fields, target)

    if not sanitize:
        return target

    api_utils.sanitize_dict(target, fields)
    return target


def list_convert_with_links(rpc_targets, limit, url, fields=None,
                            detail=None, **kwargs):
    """Convert a list of RPC targets to an API collection dict.

    :param rpc_targets: iterable of VolumeTarget RPC objects.
    :param limit: page size limit used for the "next" link.
    :param url: resource URL used to build pagination links.
    :param fields: optional field subset applied during sanitization.
    :param detail: when truthy, propagated into the pagination links.
    """
    if detail:
        kwargs['detail'] = detail
    return collection.list_convert_with_links(
        items=[convert_with_links(p, fields=fields, sanitize=False)
               for p in rpc_targets],
        item_name='targets',
        limit=limit,
        url=url,
        fields=fields,
        sanitize_func=api_utils.sanitize_dict,
        **kwargs
    )


class VolumeTargetsController(rest.RestController):
    """REST controller for VolumeTargets."""

    # These JSON columns cannot be used as sort keys at the DB layer.
    invalid_sort_key_list = ['extra', 'properties']

    def __init__(self, node_ident=None):
        super(VolumeTargetsController, self).__init__()
        # Set when mounted under /v1/nodes/<ident>/volume/targets;
        # node-scoped access is read-only (see the mutating methods).
        self.parent_node_ident = node_ident

    def _redact_target_properties(self, target):
        # Filters what could contain sensitive information. For iSCSI
        # volumes this can include iscsi connection details which may
        # be sensitive.
        redacted = ('** Value redacted: Requires permission '
                    'baremetal:volume:view_target_properties '
                    'access. Permission denied. **')
        redacted_message = {
            'redacted_contents': redacted
        }
        target.properties = redacted_message

    def _get_volume_targets_collection(self, node_ident, marker, limit,
                                       sort_key, sort_dir, resource_url=None,
                                       fields=None, detail=None,
                                       project=None):
        # Shared implementation behind get_all: validates paging/sorting
        # parameters, lists targets (optionally per node), redacts
        # properties when the caller lacks the view policy.
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.VolumeTarget.get_by_uuid(
                api.request.context, marker)

        if sort_key in self.invalid_sort_key_list:
            raise exception.InvalidParameterValue(
                _("The sort_key value %(key)s is an invalid field for "
                  "sorting") % {'key': sort_key})

        # A node-scoped controller always filters on its parent node.
        node_ident = self.parent_node_ident or node_ident

        if node_ident:
            # FIXME(comstud): Since all we need is the node ID, we can
            #                 make this more efficient by only querying
            #                 for that column. This will get cleaned up
            #                 as we move to the object interface.
            node = api_utils.get_rpc_node(node_ident)
            targets = objects.VolumeTarget.list_by_node_id(
                api.request.context, node.id, limit, marker_obj,
                sort_key=sort_key, sort_dir=sort_dir, project=project)
        else:
            targets = objects.VolumeTarget.list(api.request.context,
                                                limit, marker_obj,
                                                sort_key=sort_key,
                                                sort_dir=sort_dir,
                                                project=project)
        cdict = api.request.context.to_policy_values()
        if not policy.check_policy('baremetal:volume:view_target_properties',
                                   cdict, cdict):
            for target in targets:
                self._redact_target_properties(target)

        return list_convert_with_links(targets, limit,
                                       url=resource_url,
                                       fields=fields,
                                       sort_key=sort_key,
                                       sort_dir=sort_dir,
                                       detail=detail)

    @METRICS.timer('VolumeTargetsController.get_all')
    @method.expose()
    @args.validate(node=args.uuid_or_name, marker=args.uuid,
                   limit=args.integer, sort_key=args.string,
                   sort_dir=args.string, fields=args.string_list,
                   detail=args.boolean)
    def get_all(self, node=None, marker=None, limit=None, sort_key='id',
                sort_dir='asc', fields=None, detail=None, project=None):
        """Retrieve a list of volume targets.

        :param node: UUID or name of a node, to get only volume targets
                     for that node.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result. This value cannot be larger than the value of
                      max_limit in the [api] section of the ironic
                      configuration, or only max_limit resources will be
                      returned.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: "asc".
        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned.
        :param detail: Optional, whether to retrieve with detail.
        :param project: Optional, an associated node project (owner,
                        or lessee) to filter the query upon.
        :returns: a list of volume targets, or an empty list if no volume
                  target is found.
        :raises: InvalidParameterValue if sort_key does not exist
        :raises: InvalidParameterValue if sort key is invalid for sorting.
        :raises: InvalidParameterValue if both fields and detail are
                 specified.
        """
        project = api_utils.check_volume_list_policy(
            parent_node=self.parent_node_ident)
        if fields is None and not detail:
            fields = _DEFAULT_RETURN_FIELDS

        # ?fields= and ?detail=true are mutually exclusive.
        if fields and detail:
            raise exception.InvalidParameterValue(
                _("Can't fetch a subset of fields with 'detail' set"))

        resource_url = 'volume/targets'
        return self._get_volume_targets_collection(node, marker, limit,
                                                   sort_key, sort_dir,
                                                   resource_url=resource_url,
                                                   fields=fields,
                                                   detail=detail,
                                                   project=project)

    @METRICS.timer('VolumeTargetsController.get_one')
    @method.expose()
    @args.validate(target_uuid=args.uuid, fields=args.string_list)
    def get_one(self, target_uuid, fields=None):
        """Retrieve information about the given volume target.

        :param target_uuid: UUID of a volume target.
        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned.
        :returns: API-serializable volume target object.
        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: VolumeTargetNotFound if no volume target with this UUID
                 exists
        """
        rpc_target, _ = api_utils.check_volume_policy_and_retrieve(
            'baremetal:volume:get', target_uuid, target=True)
        if self.parent_node_ident:
            raise exception.OperationNotPermitted()

        cdict = api.request.context.to_policy_values()
        if not policy.check_policy('baremetal:volume:view_target_properties',
                                   cdict, cdict):
            self._redact_target_properties(rpc_target)

        return convert_with_links(rpc_target, fields=fields)

    @METRICS.timer('VolumeTargetsController.post')
    @method.expose(status_code=http_client.CREATED)
    @method.body('target')
    @args.validate(target=TARGET_VALIDATOR)
    def post(self, target):
        """Create a new volume target.

        :param target: a volume target within the request body.
        :returns: API-serializable volume target object.
        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: VolumeTargetBootIndexAlreadyExists if a volume target
                 already exists with the same node ID and boot index
        :raises: VolumeTargetAlreadyExists if a volume target with the same
                 UUID exists
        """
        context = api.request.context
        raise_node_not_found = False
        node = None
        owner = None
        lessee = None
        node_uuid = target.get('node_uuid')
        try:
            # Replaces the API-level node_uuid with the internal node_id
            # and returns the node, from which owner/lessee are read for
            # the policy check below.
            node = api_utils.replace_node_uuid_with_id(target)
            owner = node.owner
            lessee = node.lessee
        except exception.NotFound:
            # Defer raising until after the policy check so that a missing
            # node does not leak information to unauthorized callers.
            raise_node_not_found = True
        api_utils.check_owner_policy('node', 'baremetal:volume:create',
                                     owner, lessee=lessee,
                                     conceal_node=False)
        if raise_node_not_found:
            raise exception.InvalidInput(fieldname='node_uuid',
                                         value=node_uuid)
        if self.parent_node_ident:
            raise exception.OperationNotPermitted()

        # NOTE(hshiina): UUID is mandatory for notification payload
        if not target.get('uuid'):
            target['uuid'] = uuidutils.generate_uuid()

        new_target = objects.VolumeTarget(context, **target)

        notify.emit_start_notification(context, new_target, 'create',
                                       node_uuid=node.uuid)
        with notify.handle_error_notification(context, new_target,
                                              'create',
                                              node_uuid=node.uuid):
            new_target.create()
        notify.emit_end_notification(context, new_target, 'create',
                                     node_uuid=node.uuid)
        # Set the HTTP Location Header
        api.response.location = link.build_url('volume/targets',
                                               new_target.uuid)
        return convert_with_links(new_target)

    @METRICS.timer('VolumeTargetsController.patch')
    @method.expose()
    @method.body('patch')
    @args.validate(target_uuid=args.uuid, patch=args.patch)
    def patch(self, target_uuid, patch):
        """Update an existing volume target.

        :param target_uuid: UUID of a volume target.
        :param patch: a json PATCH document to apply to this volume target.
        :returns: API-serializable volume target object.
        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: PatchError if a given patch can not be applied.
        :raises: InvalidParameterValue if the volume target's UUID is being
                 changed
        :raises: NodeLocked if the node is already locked
        :raises: NodeNotFound if the node associated with the volume target
                 does not exist
        :raises: VolumeTargetNotFound if the volume target cannot be found
        :raises: VolumeTargetBootIndexAlreadyExists if a volume target
                 already exists with the same node ID and boot index values
        :raises: InvalidUUID if invalid node UUID is passed in the patch.
        :raises: InvalidStateRequested If a node associated with the
                 volume target is not powered off.
        """
        context = api.request.context
        api_utils.check_volume_policy_and_retrieve('baremetal:volume:update',
                                                   target_uuid,
                                                   target=True)
        if self.parent_node_ident:
            raise exception.OperationNotPermitted()

        api_utils.patch_validate_allowed_fields(patch, PATCH_ALLOWED_FIELDS)

        # Reject malformed node_uuid values early, before applying the patch.
        values = api_utils.get_patch_values(patch, '/node_uuid')
        for value in values:
            if not uuidutils.is_uuid_like(value):
                message = _("Expected a UUID for node_uuid, but received "
                            "%(uuid)s.") % {'uuid': str(value)}
                raise exception.InvalidUUID(message=message)

        rpc_target = objects.VolumeTarget.get_by_uuid(context, target_uuid)
        target_dict = rpc_target.as_dict()
        # NOTE(smoriya):
        # 1) Remove node_id because it's an internal value and
        #    not present in the API object
        # 2) Add node_uuid
        rpc_node = api_utils.replace_node_id_with_uuid(target_dict)
        target_dict = api_utils.apply_jsonpatch(target_dict, patch)
        try:
            if target_dict['node_uuid'] != rpc_node.uuid:
                # TODO(TheJulia): I guess the intention is to
                # permit the mapping to be changed
                # should we even allow this at all?
                rpc_node = objects.Node.get(
                    api.request.context, target_dict['node_uuid'])
        except exception.NodeNotFound as e:
            # Change error code because 404 (NotFound) is inappropriate
            # response for a PATCH request to change a volume target
            e.code = http_client.BAD_REQUEST  # BadRequest
            raise

        api_utils.patched_validate_with_schema(
            target_dict, TARGET_SCHEMA, TARGET_VALIDATOR)

        # Copy changed fields back onto the RPC object (node_uuid mapped
        # back to the internal node_id via id_map).
        api_utils.patch_update_changed_fields(
            target_dict, rpc_target,
            fields=objects.VolumeTarget.fields,
            schema=TARGET_SCHEMA,
            id_map={'node_id': rpc_node.id}
        )

        notify.emit_start_notification(context, rpc_target, 'update',
                                       node_uuid=rpc_node.uuid)
        with notify.handle_error_notification(context, rpc_target, 'update',
                                              node_uuid=rpc_node.uuid):
            # The conductor that manages the node performs the update.
            topic = api.request.rpcapi.get_topic_for(rpc_node)
            new_target = api.request.rpcapi.update_volume_target(
                context, rpc_target, topic)
        api_target = convert_with_links(new_target)
        notify.emit_end_notification(context, new_target, 'update',
                                     node_uuid=rpc_node.uuid)
        return api_target

    @METRICS.timer('VolumeTargetsController.delete')
    @method.expose(status_code=http_client.NO_CONTENT)
    @args.validate(target_uuid=args.uuid)
    def delete(self, target_uuid):
        """Delete a volume target.

        :param target_uuid: UUID of a volume target.
        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: NodeLocked if node is locked by another conductor
        :raises: NodeNotFound if the node associated with the target does
                 not exist
        :raises: VolumeTargetNotFound if the volume target cannot be found
        :raises: InvalidStateRequested If a node associated with the
                 volume target is not powered off.
        """
        context = api.request.context
        api_utils.check_volume_policy_and_retrieve('baremetal:volume:delete',
                                                   target_uuid,
                                                   target=True)
        if self.parent_node_ident:
            raise exception.OperationNotPermitted()

        rpc_target = objects.VolumeTarget.get_by_uuid(context, target_uuid)
        rpc_node = objects.Node.get_by_id(context, rpc_target.node_id)
        notify.emit_start_notification(context, rpc_target, 'delete',
                                       node_uuid=rpc_node.uuid)
        with notify.handle_error_notification(context, rpc_target, 'delete',
                                              node_uuid=rpc_node.uuid):
            # Route the destroy to the conductor managing this node.
            topic = api.request.rpcapi.get_topic_for(rpc_node)
            api.request.rpcapi.destroy_volume_target(context, rpc_target,
                                                     topic)
        notify.emit_end_notification(context, rpc_target, 'delete',
                                     node_uuid=rpc_node.uuid)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/controllers/version.py0000664000175000017500000000310300000000000021546 0ustar00zuulzuul00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ironic import api
from ironic.api.controllers import link

ID_VERSION1 = 'v1'


def all_versions():
    """Return the list of all known API major versions (only v1)."""
    return [default_version()]


def default_version():
    """Return a dict representing the current default version

    id: The ID of the (major) version, also acts as the release number

    links: A list containing one link that points to the current version
           of the API

    status: Status of the version, one of CURRENT, SUPPORTED, DEPRECATED

    min_version: Minimum supported (major.minor) version of API.

    version: The current, maximum supported (major.minor) version of API.
    """
    # NOTE(dtantsur): avoid circular imports
    from ironic.api.controllers.v1 import versions
    return {
        'id': ID_VERSION1,
        'links': [
            link.make_link('self', api.request.public_url,
                           ID_VERSION1, '', bookmark=True)
        ],
        'status': 'CURRENT',
        'min_version': versions.min_version_string(),
        'version': versions.max_version_string()
    }
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/functions.py0000664000175000017500000001364100000000000017523 0ustar00zuulzuul00000000000000
# Copyright 2011-2019 the WSME authors and contributors
# (See https://opendev.org/x/wsme/)
#
# This module is part of WSME and is also released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
import logging

log = logging.getLogger(__name__)


def iswsmefunction(f):
    """Return True if *f* has already been given a WSME definition."""
    return hasattr(f, '_wsme_definition')


def wrapfunc(f):
    """Wrap *f*, keeping a reference to the original in the wrapper."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    # Remembered so getargspec() can inspect the real function signature.
    wrapper._wsme_original_func = f
    return wrapper


def getargspec(f):
    """Return (args, varargs, varkw, defaults) of the unwrapped function."""
    f = getattr(f, '_wsme_original_func', f)
    func_argspec = inspect.getfullargspec(f)
    # Only the first four fields of FullArgSpec are used (legacy argspec).
    return func_argspec[0:4]


class FunctionArgument(object):
    """An argument definition of an api entry"""

    def __init__(self, name, datatype, mandatory, default):
        #: argument name
        self.name = name
        #: Data type
        self.datatype = datatype
        #: True if the argument is mandatory
        self.mandatory = mandatory
        #: Default value if argument is omitted
        self.default = default

    def resolve_type(self, registry):
        # Replace a type placeholder with the real type from the registry.
        self.datatype = registry.resolve_type(self.datatype)


class FunctionDefinition(object):
    """An api entry definition"""

    def __init__(self, func):
        #: Function name
        self.name = func.__name__
        #: Function documentation
        self.doc = func.__doc__
        #: Return type
        self.return_type = None
        #: The function arguments (list of :class:`FunctionArgument`)
        self.arguments = []
        #: If the body carries the data of a single argument, its type
        self.body_type = None
        #: Status code
        self.status_code = 200
        #: True if extra arguments should be ignored, NOT inserted in
        #: the kwargs of the function and not raise UnknownArgument
        #: exceptions
        self.ignore_extra_args = False
        #: Dictionary of protocol-specific options.
        self.extra_options = None

    @staticmethod
    def get(func):
        """Returns the :class:`FunctionDefinition` of a method."""
        if not hasattr(func, '_wsme_definition'):
            fd = FunctionDefinition(func)
            func._wsme_definition = fd
        return func._wsme_definition

    def get_arg(self, name):
        """Returns a :class:`FunctionArgument` from its name"""
        for arg in self.arguments:
            if arg.name == name:
                return arg
        return None

    def resolve_types(self, registry):
        # Resolve the return/body types and every argument type against
        # the given type registry.
        self.return_type = registry.resolve_type(self.return_type)
        self.body_type = registry.resolve_type(self.body_type)
        for arg in self.arguments:
            arg.resolve_type(registry)

    def set_options(self, body=None, ignore_extra_args=False, status_code=200,
                    rest_content_types=('json', 'xml'), **extra_options):
        # Record the per-function exposure options; unknown keyword
        # options are kept verbatim in extra_options.
        self.body_type = body
        self.status_code = status_code
        self.ignore_extra_args = ignore_extra_args
        self.rest_content_types = rest_content_types
        self.extra_options = extra_options

    def set_arg_types(self, argspec, arg_types):
        # Pair each declared argument name with its announced type and
        # compute whether it is mandatory from the defaults tuple.
        args, varargs, keywords, defaults = argspec
        if args[0] == 'self':
            args = args[1:]
        arg_types = list(arg_types)
        if self.body_type is not None:
            # The body argument is the trailing parameter.
            arg_types.append(self.body_type)
        for i, argname in enumerate(args):
            datatype = arg_types[i]
            mandatory = defaults is None or i < (len(args) - len(defaults))
            default = None
            if not mandatory:
                default = defaults[i - (len(args) - len(defaults))]
            self.arguments.append(FunctionArgument(argname, datatype,
                                                   mandatory, default))


class signature(object):
    """Decorator that specify the argument types of an exposed function.

    :param return_type: Type of the value returned by the function
    :param argN: Type of the Nth argument
    :param body: If the function takes a final argument that is supposed to be
                 the request body by itself, its type.
    :param status_code: HTTP return status code of the function.
    :param ignore_extra_args: Allow extra/unknown arguments (default to False)

    Most of the time this decorator is not supposed to be used directly,
    unless you are not using WSME on top of another framework.

    If an adapter is used, it will provide either a specialised version of
    this decorator, or a new decorator named @wsexpose that takes the same
    parameters (it will in addition expose the function, hence its name).
    """

    def __init__(self, *types, **options):
        self.return_type = types[0] if types else None
        self.arg_types = []
        if len(types) > 1:
            self.arg_types.extend(types[1:])
        if 'body' in options:
            self.arg_types.append(options['body'])
        self.wrap = options.pop('wrap', False)
        self.options = options

    def __call__(self, func):
        # Capture the argspec before wrapping so defaults are preserved.
        argspec = getargspec(func)
        if self.wrap:
            func = wrapfunc(func)
        fd = FunctionDefinition.get(func)
        if fd.extra_options is not None:
            raise ValueError("This function is already exposed")
        fd.return_type = self.return_type
        fd.set_options(**self.options)
        if self.arg_types:
            fd.set_arg_types(argspec, self.arg_types)
        return func


sig = signature
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/hooks.py0000664000175000017500000001535300000000000016640 0ustar00zuulzuul00000000000000
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import client as http_client
import re

from oslo_config import cfg
from oslo_log import log
from pecan import hooks

from ironic.common import context
from ironic.common import policy
from ironic.conductor import rpcapi
from ironic.db import api as dbapi

LOG = log.getLogger(__name__)

# Process-wide flag: the deprecated-policy-argument scan runs only once.
CHECKED_DEPRECATED_POLICY_ARGS = False

INBOUND_HEADER = 'X-Openstack-Request-Id'
GLOBAL_REQ_ID = 'openstack.global_request_id'
# Shape of a valid inbound request id: "req-<uuid>".
ID_FORMAT = (r'^req-[a-f0-9]{8}-[a-f0-9]{4}-'
             r'[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$')


def policy_deprecation_check():
    """Warn once about deprecated argument names used in policy file rules."""
    global CHECKED_DEPRECATED_POLICY_ARGS
    if not CHECKED_DEPRECATED_POLICY_ARGS:
        enforcer = policy.get_enforcer()
        # Map of deprecated policy-rule argument -> supported replacement.
        substitution_dict = {
            'user': 'user_id',
            'domain_id': 'user_domain_id',
            'domain_name': 'user_domain_id',
            'tenant': 'project_name',
        }
        policy_rules = enforcer.file_rules.values()
        for rule in policy_rules:
            str_rule = str(rule)
            for deprecated, replacement in substitution_dict.items():
                if re.search(r'\b%s\b' % deprecated, str_rule):
                    LOG.warning(
                        "Deprecated argument %(deprecated)s is used in policy "
                        "file rule (%(rule)s), please use %(replacement)s "
                        "argument instead. The possibility to use deprecated "
                        "arguments will be removed in the Pike release.",
                        {'deprecated': deprecated,
                         'replacement': replacement,
                         'rule': str_rule})
                    if deprecated == 'domain_name':
                        LOG.warning(
                            "Please note that user_domain_id is an ID of the "
                            "user domain, while the deprecated domain_name "
                            "is its name. The policy rule has to be updated "
                            "accordingly.")
        CHECKED_DEPRECATED_POLICY_ARGS = True


class ConfigHook(hooks.PecanHook):
    """Attach the config object to the request so controllers can get to it."""

    def before(self, state):
        state.request.cfg = cfg.CONF


class DBHook(hooks.PecanHook):
    """Attach the dbapi object to the request so controllers can get to it."""

    def before(self, state):
        state.request.dbapi = dbapi.get_instance()


class ContextHook(hooks.PecanHook):
    """Configures a request context and attaches it to the request."""

    def __init__(self, public_api_routes):
        self.public_api_routes = public_api_routes
        super(ContextHook, self).__init__()

    def before(self, state):
        is_public_api = state.request.environ.get('is_public_api', False)

        # set the global_request_id if we have an inbound request id
        gr_id = state.request.headers.get(INBOUND_HEADER, "")
        if re.match(ID_FORMAT, gr_id):
            state.request.environ[GLOBAL_REQ_ID] = gr_id

        ctx = context.RequestContext.from_environ(state.request.environ,
                                                  is_public_api=is_public_api)
        # Do not pass any token with context for noauth mode
        if cfg.CONF.auth_strategy != 'keystone':
            ctx.auth_token = None

        policy_deprecation_check()

        state.request.context = ctx

    def after(self, state):
        if state.request.context == {}:
            # An incorrect url path will not create RequestContext
            return
        # NOTE(lintan): RequestContext will generate a request_id if no one
        # passing outside, so it always contain a request_id.
        request_id = state.request.context.request_id
        state.response.headers['Openstack-Request-Id'] = request_id


class RPCHook(hooks.PecanHook):
    """Attach the rpcapi object to the request so controllers can get to it."""

    def before(self, state):
        state.request.rpcapi = rpcapi.ConductorAPI()


class NoExceptionTracebackHook(hooks.PecanHook):
    """Workaround rpc.common: deserialize_remote_exception.

    deserialize_remote_exception builds rpc exception traceback into error
    message which is then sent to the client. Such behavior is a security
    concern so this hook is aimed to cut-off traceback from the error message.
    """

    # NOTE(max_lobur): 'after' hook used instead of 'on_error' because
    # 'on_error' never fired for wsme+pecan pair. wsme @wsexpose decorator
    # catches and handles all the errors, so 'on_error' dedicated for unhandled
    # exceptions never fired.
    def after(self, state):
        # Omit empty body. Some errors may not have body at this level yet.
        if not state.response.body:
            return

        # Do nothing if there is no error.
        # Status codes in the range 200 (OK) to 399 (400 = BAD_REQUEST) are not
        # an error.
        if (http_client.OK <= state.response.status_int
                < http_client.BAD_REQUEST):
            return

        json_body = state.response.json
        # Do not remove traceback when traceback config is set
        if cfg.CONF.debug_tracebacks_in_api:
            return

        faultstring = json_body.get('faultstring')
        traceback_marker = 'Traceback (most recent call last):'
        if faultstring and traceback_marker in faultstring:
            # Cut-off traceback.
            faultstring = faultstring.split(traceback_marker, 1)[0]
            # Remove trailing newlines and spaces if any.
            json_body['faultstring'] = faultstring.rstrip()

            # Replace the whole json. Cannot change original one because it's
            # generated on the fly.
            state.response.json = json_body


class PublicUrlHook(hooks.PecanHook):
    """Attach the right public_url to the request.

    Attach the right public_url to the request so resources can create
    links even when the API service is behind a proxy or SSL terminator.
    """

    def before(self, state):
        if cfg.CONF.oslo_middleware.enable_proxy_headers_parsing:
            # The http_proxy_to_wsgi middleware has already rewritten
            # application_url from the X-Forwarded-* headers.
            state.request.public_url = state.request.application_url
        else:
            state.request.public_url = (cfg.CONF.api.public_endpoint
                                        or state.request.host_url)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/method.py0000664000175000017500000001051700000000000016772 0ustar00zuulzuul00000000000000
#
# Copyright 2015 Rackspace, Inc
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools from http import client as http_client import json import sys import traceback from oslo_config import cfg from oslo_log import log import pecan LOG = log.getLogger(__name__) pecan_json_decorate = pecan.expose( content_type='application/json', generic=False) def expose(status_code=None): def decorate(f): @functools.wraps(f) def callfunction(self, *args, **kwargs): try: result = f(self, *args, **kwargs) if status_code: pecan.response.status = status_code except Exception: try: exception_info = sys.exc_info() orig_exception = exception_info[1] orig_code = getattr(orig_exception, 'code', None) result = format_exception( exception_info, cfg.CONF.debug_tracebacks_in_api ) finally: del exception_info if orig_code and orig_code in http_client.responses: pecan.response.status = orig_code else: pecan.response.status = 500 def _empty(): # This is for a pecan workaround originally in WSME, # but the original issue description is in an issue tracker # that is now offline pecan.request.pecan['content_type'] = None pecan.response.content_type = None # never return content for NO_CONTENT if pecan.response.status_code == 204: return _empty() # don't encode None for ACCEPTED responses if result is None and pecan.response.status_code == 202: return _empty() return json.dumps(result) pecan_json_decorate(callfunction) return callfunction return decorate def body(body_arg): """Decorator which places HTTP request body JSON into a method argument :param body_arg: Name of argument to populate with body JSON """ def inner_function(function): @functools.wraps(function) def inner_body(*args, **kwargs): if pecan.request.body: data = pecan.request.json else: data = {} if isinstance(data, dict): # remove any keyword arguments which pecan has # extracted from the body for field in data.keys(): kwargs.pop(field, None) kwargs[body_arg] = data return function(*args, **kwargs) return inner_body return inner_function def format_exception(excinfo, debug=False): """Extract informations 
that can be sent to the client.""" error = excinfo[1] code = getattr(error, 'code', None) if code and code in http_client.responses and (400 <= code < 500): faultstring = (error.faultstring if hasattr(error, 'faultstring') else str(error)) faultcode = getattr(error, 'faultcode', 'Client') r = dict(faultcode=faultcode, faultstring=faultstring) LOG.debug("Client-side error: %s", r['faultstring']) r['debuginfo'] = None return r else: faultstring = str(error) debuginfo = "\n".join(traceback.format_exception(*excinfo)) LOG.error('Server-side error: "%s". Detail: \n%s', faultstring, debuginfo) faultcode = getattr(error, 'faultcode', 'Server') r = dict(faultcode=faultcode, faultstring=faultstring) if debug: r['debuginfo'] = debuginfo else: r['debuginfo'] = None return r ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648108935.9066668 ironic-20.1.0/ironic/api/middleware/0000775000175000017500000000000000000000000017251 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/api/middleware/__init__.py0000664000175000017500000000176000000000000021366 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# ---------------------------------------------------------------------------
# ironic/api/middleware/__init__.py
# ---------------------------------------------------------------------------
from ironic.api.middleware import auth_public_routes
from ironic.api.middleware import json_ext
from ironic.api.middleware import parsable_error

ParsableErrorMiddleware = parsable_error.ParsableErrorMiddleware
AuthPublicRoutes = auth_public_routes.AuthPublicRoutes
JsonExtensionMiddleware = json_ext.JsonExtensionMiddleware

__all__ = ('ParsableErrorMiddleware',
           'AuthPublicRoutes',
           'JsonExtensionMiddleware')

# ---------------------------------------------------------------------------
# ironic/api/middleware/auth_public_routes.py
# ---------------------------------------------------------------------------
import re

from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import utils


class AuthPublicRoutes(object):
    """A wrapper on authentication middleware.

    Does not perform verification of authentication tokens
    for public routes in the API.
    """

    def __init__(self, app, auth, public_api_routes=None):
        """Wrap an application with the auth middleware.

        :param app: the Ironic WSGI application (used for public routes).
        :param auth: the authentication middleware (used otherwise).
        :param public_api_routes: iterable of route templates that may be
            accessed without authentication.
        :raises: ConfigInvalid if a route does not compile as a regex.
        """
        api_routes = [] if public_api_routes is None else public_api_routes
        self._ironic_app = app
        self._middleware = auth
        # TODO(mrda): Remove .xml and ensure that doesn't result in a
        # 401 Authentication Required instead of 404 Not Found
        route_pattern_tpl = '%s(\\.json|\\.xml)?$'

        try:
            self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl)
                                      for route_tpl in api_routes]
        except re.error as e:
            raise exception.ConfigInvalid(
                error_msg=_('Cannot compile public API routes: %s') % e)

    def __call__(self, env, start_response):
        path = utils.safe_rstrip(env.get('PATH_INFO'), '/')

        # The information whether the API call is being performed against the
        # public API is required for some other components. Saving it to the
        # WSGI environment is reasonable thereby.
        env['is_public_api'] = any(map(lambda pattern: re.match(pattern, path),
                                       self.public_api_routes))

        if env['is_public_api']:
            return self._ironic_app(env, start_response)

        return self._middleware(env, start_response)

# ---------------------------------------------------------------------------
# ironic/api/middleware/json_ext.py
# ---------------------------------------------------------------------------
from oslo_log import log

from ironic.common import utils

LOG = log.getLogger(__name__)


class JsonExtensionMiddleware(object):
    """Simplified processing of .json extension.

    Previously Ironic API used the "guess_content_type_from_ext" feature.
    It was never needed, as we never allowed non-JSON content types anyway.
    Now that it is removed, this middleware strips .json extension for
    backward compatibility.
    """

    def __init__(self, app):
        self.app = app

    def __call__(self, env, start_response):
        path = utils.safe_rstrip(env.get('PATH_INFO'), '/')
        if path and path.endswith('.json'):
            # Fix: the code removes a trailing ".json" extension, i.e. a
            # suffix; the log message previously said "prefix".
            LOG.debug('Stripping .json suffix from %s for compatibility '
                      'with pecan', path)
            # Drop the 5 characters of ".json" from the routed path.
            env['PATH_INFO'] = path[:-5]
            env['HAS_JSON_SUFFIX'] = True
        else:
            env['HAS_JSON_SUFFIX'] = False
        return self.app(env, start_response)
class ParsableErrorMiddleware(object):
    """Replace error body with something the client can parse."""

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        # Mutable state shared between the start_response replacement below
        # and the body post-processing after the app has run. It records the
        # numeric status and, for errors, the (filtered) header list.
        state = {}

        def replacement_start_response(status, headers, exc_info=None):
            """Overrides the default response to make errors parsable."""
            try:
                state['status_code'] = int(status.split(' ')[0])
            except (ValueError, TypeError):  # pragma: nocover
                raise Exception(_(
                    'ErrorDocumentMiddleware received an invalid '
                    'status %s') % status)
            else:
                if (state['status_code'] // 100) not in (2, 3):
                    # Drop Content-Length/Content-Type now; they are
                    # recomputed once the replacement error body is known.
                    headers = [hv for hv in headers
                               if hv[0] not in ('Content-Length',
                                                'Content-Type')]
                    # Keep the list so it can be extended later.
                    state['headers'] = headers
                return start_response(status, headers, exc_info)

        # The default for ironic is application/json. However, Pecan will try
        # to output HTML errors if no Accept header is provided.
        if 'HTTP_ACCEPT' not in environ or environ['HTTP_ACCEPT'] == '*/*':
            environ['HTTP_ACCEPT'] = 'application/json'

        app_iter = self.app(environ, replacement_start_response)

        if (state['status_code'] // 100) in (2, 3):
            # Success/redirect: pass the body through untouched.
            return app_iter

        # Error: wrap the plain-text body in a JSON envelope and restore
        # the headers we stripped, now with the correct length.
        text = '\n'.join(chunk.decode('utf-8') for chunk in app_iter)
        payload = json.dumps({'error_message': text}).encode('utf-8')
        state['headers'].append(('Content-Type', 'application/json'))
        state['headers'].append(('Content-Length', str(len(payload))))
        return [payload]
# NOTE(dtantsur): WSGI containers may need to override the passed argv.
def initialize_wsgi_app(argv=sys.argv):
    """Build and return the Ironic API WSGI application.

    :param argv: arguments used to load configuration. The default binds
        ``sys.argv`` at import time so that WSGI containers can supply
        their own argument vector instead.
    :returns: an ``app.VersionSelectorApplication`` instance.
    """
    i18n.install('ironic')
    # Parse configuration and make sure the RPC transport is set up before
    # the application object is created.
    service.prepare_command(argv)
    service.ensure_rpc_transport()
    LOG.debug("Configuration:")
    CONF.log_opt_values(LOG, log.DEBUG)
    return app.VersionSelectorApplication()


# ---------------------------------------------------------------------------
# ironic/cmd/__init__.py (next file in the archive; Apache-2.0 licensed,
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.)
# ---------------------------------------------------------------------------
# NOTE(yuriyz): Do eventlet monkey patching here, instead of in
# ironic/__init__.py. This allows the API service to run without monkey
# patching under Apache (which uses its own concurrency model). Mixing
# concurrency models can cause undefined behavior and potentially API
# timeouts.
import os

# This environment variable is read by eventlet, so it is set before the
# 'import eventlet' below.
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'

import eventlet

# os=False: the os module is left unpatched (see the NOTE above about
# running the API under Apache).
eventlet.monkey_patch(os=False)

# Monkey patch the original current_thread to use the up-to-date _active
# global variable. See https://bugs.launchpad.net/bugs/1863021 and
# https://github.com/eventlet/eventlet/issues/592
# all the noqa below are for I202 due to 'import eventlet' above
import __original_module_threading as orig_threading  # noqa
import threading  # noqa
orig_threading.current_thread.__globals__['_active'] = threading._active

from ironic.common import i18n  # noqa

# Install translation support early so later imports can use it.
i18n.install('ironic')
"""The Ironic Service API.""" import sys from oslo_config import cfg from oslo_log import log from ironic.common import service as ironic_service from ironic.common import wsgi_service CONF = cfg.CONF LOG = log.getLogger(__name__) def main(): # Parse config file and command line options, then start logging ironic_service.prepare_service('ironic_api', sys.argv) ironic_service.ensure_rpc_transport() # Build and start the WSGI app launcher = ironic_service.process_launcher() server = wsgi_service.WSGIService('ironic_api', CONF.api.enable_ssl_api) launcher.launch_service(server, workers=server.workers) sys.exit(launcher.wait()) if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/cmd/conductor.py0000664000175000017500000000541700000000000017507 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" The Ironic Management Service """ import sys from oslo_config import cfg from oslo_log import log from oslo_service import service from ironic.common import rpc_service from ironic.common import service as ironic_service CONF = cfg.CONF LOG = log.getLogger(__name__) def warn_about_unsafe_shred_parameters(conf): iterations = conf.deploy.shred_random_overwrite_iterations overwrite_with_zeros = conf.deploy.shred_final_overwrite_with_zeros if iterations == 0 and overwrite_with_zeros is False: LOG.warning('With shred_random_overwrite_iterations set to 0 and ' 'shred_final_overwrite_with_zeros set to False, disks ' 'may NOT be shredded at all, unless they support ATA ' 'Secure Erase. This is a possible SECURITY ISSUE!') def issue_startup_warnings(conf): warn_about_unsafe_shred_parameters(conf) def main(): # NOTE(lucasagomes): Safeguard to prevent 'ironic.conductor.manager' # from being imported prior to the configuration options being loaded. # If this happened, the periodic decorators would always use the # default values of the options instead of the configured ones. For # more information see: https://bugs.launchpad.net/ironic/+bug/1562258 # and https://bugs.launchpad.net/ironic/+bug/1279774. assert 'ironic.conductor.manager' not in sys.modules # Parse config file and command line options, then start logging ironic_service.prepare_service('ironic_conductor', sys.argv) ironic_service.ensure_rpc_transport(CONF) mgr = rpc_service.RPCService(CONF.host, 'ironic.conductor.manager', 'ConductorManager') issue_startup_warnings(CONF) launcher = service.launch(CONF, mgr, restart_method='mutate') # NOTE(dtantsur): handling start-up failures before launcher.wait() helps # notify systemd about them. Otherwise the launcher will report successful # service start-up before checking the threads. 
mgr.wait_for_start() sys.exit(launcher.wait()) if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648108905.0 ironic-20.1.0/ironic/cmd/dbsync.py0000664000175000017500000003503700000000000016772 0ustar00zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Run storage database migration. """ import sys from oslo_config import cfg from ironic.common import context from ironic.common import exception from ironic.common.i18n import _ from ironic.common import service from ironic.conf import CONF from ironic.db import api as db_api from ironic.db import migration from ironic import version dbapi = db_api.get_instance() # NOTE(rloo): This is a list of functions to perform online data migrations # (from previous releases) for this release, in batches. It may be empty. # The migration functions should be ordered by execution order; from earlier # to later releases. # # Each migration function takes two arguments -- the context and maximum # number of objects to migrate, and returns a 2-tuple -- the total number of # objects that need to be migrated at the beginning of the function, and the # number migrated. If the function determines that no migrations are needed, # it returns (0, 0). 
# # The last migration step should always remain the last one -- it migrates # all objects to their latest known versions. # # Example of a function docstring: # # def sample_data_migration(context, max_count): # """Sample method to migrate data to new format. # # :param context: an admin context # :param max_count: The maximum number of objects to migrate. Must be # >= 0. If zero, all the objects will be migrated. # :returns: A 2-tuple -- the total number of objects that need to be # migrated (at the beginning of this call) and the number # of migrated objects. # """ # NOTE(vdrok): Do not access objects' attributes, instead only provide object # and attribute name tuples, so that not to trigger the load of the whole # object, in case it is lazy loaded. The attribute will be accessed when needed # by doing getattr on the object ONLINE_MIGRATIONS = ( # NOTE(rloo): Don't remove this; it should always be last (dbapi, 'update_to_latest_versions'), ) class DBCommand(object): def check_obj_versions(self, ignore_missing_tables=False): """Check the versions of objects. Check that the object versions are compatible with this release of ironic. It does this by comparing the objects' .version field in the database, with the expected versions of these objects. Returns None if compatible; a string describing the issue otherwise. """ if migration.version() is None: # no tables, nothing to check return msg = None try: if not dbapi.check_versions( permit_initial_version=ignore_missing_tables): msg = (_('The database is not compatible with this ' 'release of ironic (%s). Please run ' '"ironic-dbsync online_data_migrations" using ' 'the previous release.\n') % version.version_info.release_string()) except exception.DatabaseVersionTooOld: msg = (_('The database version is not compatible with this ' 'release of ironic (%s). This can happen if you are ' 'attempting to upgrade from a version older than ' 'the previous release (skip versions upgrade). 
' 'This is an unsupported upgrade method. ' 'Please run "ironic-dbsync upgrade" using the previous ' 'releases for a fast-forward upgrade.\n') % version.version_info.release_string()) return msg def _check_versions(self, ignore_missing_tables=False): msg = self.check_obj_versions( ignore_missing_tables=ignore_missing_tables) if not msg: return else: sys.stderr.write(msg) # NOTE(rloo): We return 1 in online_data_migrations() to indicate # that there are more objects to migrate, so don't use 1 here. sys.exit(2) def upgrade(self): self._check_versions(ignore_missing_tables=True) migration.upgrade(CONF.command.revision) def revision(self): migration.revision(CONF.command.message, CONF.command.autogenerate) def stamp(self): migration.stamp(CONF.command.revision) def version(self): print(migration.version()) def create_schema(self): migration.create_schema() def online_data_migrations(self): self._check_versions() self._run_online_data_migrations(max_count=CONF.command.max_count, options=CONF.command.options) def _run_migration_functions(self, context, max_count, options): """Runs the migration functions. Runs the data migration functions in the ONLINE_MIGRATIONS list. It makes sure the total number of object migrations doesn't exceed the specified max_count. A migration of an object will typically migrate one row of data inside the database. :param context: an admin context :param max_count: the maximum number of objects (rows) to migrate; a value >= 1. :param options: migration options - dict mapping migration name to a dictionary of options for this migration. :raises: Exception from the migration function :returns: Boolean value indicating whether migrations are done. Returns False if max_count objects have been migrated (since at that point, it is unknown whether all migrations are done). Returns True if migrations are all done (i.e. fewer than max_count objects were migrated when the migrations are done). 
""" total_migrated = 0 for migration_func_obj, migration_func_name in ONLINE_MIGRATIONS: migration_func = getattr(migration_func_obj, migration_func_name) migration_opts = options.get(migration_func_name, {}) num_to_migrate = max_count - total_migrated try: total_to_do, num_migrated = migration_func(context, num_to_migrate, **migration_opts) except Exception as e: print(_("Error while running %(migration)s: %(err)s.") % {'migration': migration_func.__name__, 'err': e}, file=sys.stderr) raise print(_('%(migration)s() migrated %(done)i of %(total)i objects.') % {'migration': migration_func.__name__, 'total': total_to_do, 'done': num_migrated}) total_migrated += num_migrated if total_migrated >= max_count: # NOTE(rloo). max_count objects have been migrated so we have # to stop. We return False because there is no look-ahead so # we don't know if the migrations have been all done. All we # know is that we've migrated max_count. It is possible that # the migrations are done and that there aren't any more to # migrate after this, but that would involve checking: # 1. num_migrated == total_to_do (easy enough), AND # 2. whether there are other migration functions and whether # they need to do any object migrations (not so easy to # check) return False return True def _run_online_data_migrations(self, max_count=None, options=None): """Perform online data migrations for the release. Online data migrations are done by running all the data migration functions in the ONLINE_MIGRATIONS list. If max_count is None, all the functions will be run in batches of 50 objects, until the migrations are done. Otherwise, this will run (some of) the functions until max_count objects have been migrated. :param max_count: the maximum number of individual object migrations or modified rows, a value >= 1. If None, migrations are run in a loop in batches of 50, until completion. :param options: options to pass to migrations. List of values in the form of .