././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.3824005 ironic-14.0.1.dev163/0000755000175000017500000000000000000000000014354 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/.mailmap0000644000175000017500000000022400000000000015773 0ustar00coreycorey00000000000000# Format is: # # Joe Gordon ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/.stestr.conf0000644000175000017500000000010200000000000016616 0ustar00coreycorey00000000000000[DEFAULT] test_path=${TESTS_DIR:-./ironic/tests/unit/} top_dir=./ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538405.0 ironic-14.0.1.dev163/AUTHORS0000644000175000017500000005232500000000000015433 0ustar00coreycorey00000000000000119Vik Abhishek Kekane Adam Gandelman Adam Kimball Akhila Kishore Akilan Pughazhendi Alberto Planas Alessandro Pilotti Alex Meade Alexander Gordeev Alexandra Settle Alexandra Settle Alexey Galkin Alexis Lee Aline Bousquet Ana Krivokapic Andrea Frittoli Andreas Jaeger Andreas Jaeger Andrew Bogott Andrey Kurilin Andrey Shestakov Angus Thomas Anh Tran Anita Kuno Ankit Kumar Anne Gentle Annie Lezil Anshul Jain Anson Y.W Anton Arefiev Anup Navare Anusha Ramineni Anusha Ramineni Aparna Arata Notsu Armando Migliaccio Arne Wiebalck Arne Wiebalck Artem Rozumenko Arun S A G Atsushi SAKAI Bernard Van De Walle Bertrand Lallau Bharath kumar Bill Dodd Bob Ball Bob Fournier Boris Pavlovic Brian Elliott Brian Waldon Bruno Cornec Béla Vancsics Caio Oliveira Cameron.C Cao Shufeng Cao Xuan Hoang Carmelo Ragusa Carol Bouchard Chang Bo Guo ChangBo Guo(gcb) Charlle Daniel Charlle Dias Chris Behrens Chris Dearborn Chris Jones Chris Krelle Chris Krelle Chris Krelle Chris St. 
Pierre Christian Berendt Christopher Dearborn Christopher Dearborn Chuck Short Chuck Short Clark Boylan Claudiu Belu Clenimar Filemon Clif Houck Clint Byrum Colleen Murphy Corey Bryant Cuong Nguyen D G Lee Dan Prince Dan Smith Dan Smith Daniel Abad Dao Cong Tien Daryl Walleck Davanum Srinivas Davanum Srinivas David Edery David Hewson David Kang David McNally David Shrewsbury Davide Guerri Debayan Ray Derek Higgins Devananda van der Veen Dima Shulyak Dirk Mueller Dmitry Galkin Dmitry Nikishov Dmitry Tantsur Dmitry Tantsur Dmitry Tantsur DongCan Dongcan Ye Dongdong Zhou Doug Hellmann Edan David Edwin Zhai Eli Qiao Elizabeth Elwell Ellen Hui Emilien Macchi Erhan Ekici Eric Fried Eric Guo Eric Windisch Faizan Barmawer Fang Jinxing Fellype Cavalcante Fengqian Gao Flavio Percoco Félix Bouliane Gabriel Assis Bezerra Galyna Zholtkevych Gary Kotton Gaëtan Trellu Ghe Rivero Ghe Rivero Ghe Rivero Gleb Stepanov Gonéri Le Bouder Graham Hayes Gregory Haynes Grzegorz Grasza Gábor Antal Ha Van Tu Hadi Bannazadeh Hamdy Khader Hans Lindgren Haomeng, Wang Harald Jensas Harald Jensås Harshada Mangesh Kakad He Yongli Hieu LE Hironori Shiina Hoang Trung Hieu Honza Pokorny Hugo Nicodemos Hugo Nicodemos IWAMOTO Toshihiro Ian Wienand Igor Kalnitsky Ihar Hrachyshka Ilya Etingof Ilya Pekelny Imre Farkas Ionut Balutoiu Iury Gregory Melo Ferreira Iury Gregory Melo Ferreira Jacek Tomasiak Jakub Libosvar James E. Blair James E. Blair James Slagle Jan Gutter Jan Horstmann Jason Kölker Javier Pena Jay Faulkner Jens Harbott Jeremy Stanley Jerry Jesse Andrews Jesse Pretorius Jim Rollenhagen Jing Sun Joanna Taryma Joe Gordon Johannes Erdfelt John Garbutt John Garbutt John L. Villalovos John L. 
Villalovos John Trowbridge Jonathan Provost Josh Gachnang Joshua Harlow Joshua Harlow Juan Antonio Osorio Robles Julia Kreger Julian Edwards Julien Danjou Junya Akahira KATO Tomoyuki Kaifeng Wang Kan Ken Igarashi Ken'ichi Ohmichi Kobi Samoray Kun Huang Kurt Taylor Kurt Taylor Kyle Stevenson Kyrylo Romanenko Lance Bragstad Lars Kellogg-Stedman Laura Moore Lenny Verkhovsky LiYucai Lilia Lilia Sampaio Lin Tan Lin Tan Lokesh S Lucas Alvares Gomes Luong Anh Tuan M V P Nitesh Madhuri Kumari Madhuri Kumari Manuel Buil MaoyangLiu Marc Methot Marcin Juszkiewicz Marco Morais Marcus Rafael Mario Villaplana Mark Atwood Mark Beierl Mark Goddard Mark Goddard Mark McLoughlin Mark Silence Martin Kletzander Martin Roy Martyn Taylor Mathieu Gagné Mathieu Mitchell Matt Joyce Matt Keeann Matt Riedemann Matt Riedemann Matt Wagner Matthew Gilliard Matthew Thode Matthew Treinish Mauro S. M. Rodrigues Max Lobur Max Lobur Michael Davies Michael Kerrin Michael Krotscheck Michael Still Michael Tupitsyn Michael Turek Michael Turek Michal Arbet Michey Mehta michey.mehta@hp.com Mike Bayer Mike Turek MikeG451 Mikhail Durnosvistov Mikyung Kang Miles Gould Mitsuhiro SHIGEMATSU Mitsuhiro SHIGEMATSU Monty Taylor Moshe Levi Motohiro OTSUKA Motohiro Otsuka Nam Nguyen Hoai Naohiro Tamura Ngo Quoc Cuong Nguyen Hai Nguyen Hung Phuong Nguyen Phuong An Nguyen Van Duc Nguyen Van Trung Nikolay Fedotov Nisha Agarwal Nisha Brahmankar Noam Angel OctopusZhang Oleksiy Petrenko Om Kumar Ondřej Nový OpenStack Release Bot Pablo Fernando Cargnelutti Paul Belanger Pavlo Shchelokovskyy Pavlo Shchelokovskyy Peeyush Gupta Peng Yong Peter Kendall Phil Day Philippe Godin Pierre Riteau PollyZ Pradip Kadam Pádraig Brady Qian Min Chen Qianbiao NG R-Vaishnavi Rachit7194 Rafi Khardalian Rakesh H S Ramakrishnan G Ramamani Yeleswarapu Raphael Glon Raphael Glon Ricardo Araújo Santos Riccardo Pittau Richard Pioso Rick Harris Robert Collins Robert Collins Rohan Kanade Rohan Kanade Roman Bogorodskiy Roman Dashevsky Roman Podoliaka 
Roman Prykhodchenko Roman Prykhodchenko Ruby Loo Ruby Loo Ruby Loo Ruby Loo Ruby Loo Rushil Chugh Russell Bryant Russell Haering Ryan Bridges SHIGEMATSU Mitsuhiro Sam Betts Sana Khan Sandhya Balakrishnan Sandy Walsh Sanjay Kumar Singh Sascha Peilicke Sascha Peilicke Sasha Chuzhoy Satoru Moriya Sean Dague Sean Dague Sean McGinnis Serge Kovaleff Sergey Lukjanov Sergey Lupersolsky Sergey Lupersolsky Sergey Nikitin Sergey Vilgelm Sergii Golovatiuk Shane Wang Shilla Saebi Shinn'ya Hoshino Shivanand Tendulker Shivanand Tendulker Shuangtai Tian Shuichiro MAKIGAKI Shuquan Huang Sinval Vieira Sirushti Murugesan SofiiaAndriichenko Solio Sarabia Srinivasa Acharya Stanislaw Pitucha Stenio Araujo Stephen Finucane Steve Baker Steven Dake Steven Hardy Stig Telfer Sukhdev Kapur Sukhdev Kapur Surya Seetharaman Takashi NATSUME Tan Lin Tang Chen Tao Li Thiago Paiva Thierry Carrez Thomas Bechtold Thomas Goirand Thomas Herve TienDC Tim Burke Tom Fifield Tony Breeds Tran Ha Tuyen Tuan Do Anh TuanLAF Tushar Kalra Tzu-Mainn Chen Vadim Hmyrov Vanou Ishii Varsha Varun Gadiraju Vasyl Saienko Vic Howard Victor Lowther Victor Sergeyev Vikas Jain Vinay B S Vincent S. 
Cojot Vishvananda Ishaya Vladyslav Drok Vu Cong Tuan Wang Jerry Wang Wei Wanghua Wei Du Will Szumski Xavier Xian Dong, Meng Xian Dong, Meng Xiaobin Qu XiaojueGuan XieYingYun Yaguo Zhou Yatin Kumbhare Yibo Cai Yolanda Robla Yolanda Robla Mota Yuiko Takada Yuiko Takada Mori Yuiko Takada Mori Yun Mao Yuriy Taraday Yuriy Yekovenko Yuriy Zveryanskyy Yushiro FURUKAWA Zachary Zane Bitter Zenghui Shi Zhang Yang Zhao Lei Zhenguo Niu Zhenguo Niu Zhenzan Zhou ZhiQiang Fan ZhiQiang Fan ZhongShengping Zhongyue Luo Zhongyue Luo akhiljain23 anascko ankit baiwenteng baiyuan bin yu blue55 brandonzhao caoyuan chao liu chenaidong1 chenghang chenglch chenjiao chenxiangui chenxing daz dekehn digambar divakar-padiyar-nandavar dparalen ericxiett fpxie gaoxiaoyong gaozx gecong1973 gengchc2 ghanshyam ghanshyam ghanshyam houming-wang huang.zhiping jiang wei jiangfei jiangwt100 jiapei jinxingfang jinxingfang junbo jxiaobin kesper kesper klyang lei-zhang-99cloud licanwei lijunjie lin shengrong linggao liumk liusheng liushuobj lukasz lvdongbing maelk mallikarjuna.kolagatla max_lobur melissaml michaeltchapman mkumari mpardhi23 mvpnitesh nishagbkar paresh-sao pawnesh.kumar pengyuesheng poojajadhav pradeepcsekar rabi rajinir rajinir ricolin root ryo.kurahashi saripurigopi shangxiaobj shenjiatong shenxindi shuangyang.qian sjing sonu.kumar spranjali srobert stephane suichangyin sunqingliang6 takanorimiyagishi tanlin tianhui tiendc tonybrad vishal mahajan vmud213 vsaienko wangdequn wanghao wanghongtaozz wangkf wangkf wangqi wangxiyuan wangzhengwei weizhao whaom whitekid whoami-rajat wu.chunyang wudong xgwang5843 xiexs yangxurong yatin yuan liang yufei yuhui_inspur yunhong jiang yushangbin yuyafei zackchen zhang.lei zhangbailin zhangdebo zhangjl zhangyanxian zhangyanxian zhangyanying zhu.fanglei zhufl zhurong zouyee zshi 翟小君 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 
ironic-14.0.1.dev163/CONTRIBUTING.rst0000644000175000017500000000076400000000000017024 0ustar00coreycorey00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored since OpenStack projects use a Gerrit instance hosted on OpenDev. https://review.opendev.org Contributor documentation for the Ironic project can be found in the OpenStack Ironic documentation. https://docs.openstack.org/ironic/latest/contributor/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538405.0 ironic-14.0.1.dev163/ChangeLog0000644000175000017500000074227100000000000016143 0ustar00coreycorey00000000000000CHANGES ======= * Change default ram value * Added node multitenancy doc * Support burning configdrive into boot ISO * Add sushy-cli to client libraries release list * Fix AttributeError in check allowed port fields * Fix gunicorn name on Py3@CentOS7 in devstack * Add node lessee field * [doc] Images need some metadata for software RAID * Drop netaddr - use netutils.is\_valid\_ipv6() * Allow INSPECTWAIT state for lookup * Improve \`redfish\` set-boot-mode implementation * Change multinode job to voting * Cleanup Python 2.7 support * Use auth values from neutron conf when managing Neutron ports * Fetch netmiko session log * Doc - IPv6 Provisioning * Additional IP addresses to IPv6 stateful ports * Add network\_type to port local\_link\_connection * Make oslo.i18n an optional dependency * Make oslo.reports an optional dependency * Do not autoescape all Jinja2 templates * Make deploy step failure logging indicate the error * Fix the remaining hacking issues * Bump hacking to 3.0.0 * Extend install\_bootloader command timeout * Document deploy\_boot\_mode and boot\_option for standalone deployments * Remove future usage * Fix enabled\_hardware\_types from 
idrac-wsman to idrac * Document our policies for stable branches * Retry agent get\_command\_status upon failures * Add troubleshooting on IPMI section * Default IRONIC\_RAMDISK\_TYPE to dib * Generalize clean step functions to support deploy steps * Raise human-friendly messages on attempt to use pre-deploy steps drivers * Hash the rescue\_password * DRAC: Fix a failure to create virtual disk bug * [doc] Add documentation for retirement support * Add info on how to enable ironic-tempest-plugin * Follow-up releasenote use\_secrets * Add indicators REST API endpoints * Do not use random to generate token * Signal agent token is required * Support centos 7 rootwrap data directory * Refactoring: split out wrap\_ipv6 * Refactoring: move iSCSI deploy code to iscsi\_deploy.py * Clean up nits from adding additional node update policies * Allow specifying target devices for software RAID * Documentation clarifications for software RAID * Drop rootwrap.d/ironic-lib.filters file * Expand user-image doc * Move ipmi logging to a separate option * Change readfp to read\_file * Make image\_checksum optional if other checksum is present * Remove compatibility with pre-deploy steps drivers * Extend power sync timeout for Ericsson SDI * Skip clean steps from 'fake' interfaces in the documentation * Rename ironic-tox-unit-with-driver-libs-python3 * Send our token back to the agent * Enable agent\_token for virtual media boot * Add separate policies for updating node instance\_info and extra * Follow up to console port allocation * Change force\_raw\_images to use sha256 if md5 is selected * Make reservation checks caseless * [doc] Missing --name option * Bump minimum supported ansible version to 2.7 * Set abstract for ironic-base * Refactoring: move generic agent clean step functions to agent\_base * Docs: split away user image building and highlight whole disk images * Redfish: Add root\_prefix to Sushy * Cleanup docs building * Rename \`create\_isolinux\_image\_for\_uefi\` 
function as misleading * Finalize removal of ipxe\_enabled option * Start removing ipxe support from the pxe interface * Pre-shared agent token * DRAC: Fix RAID create\_config clean step * Expose allocation owner to additional policy checks * Project Contributing updates for Goal * Refactoring: rename agent\_base\_vendor to agent\_base * Use FIPS-compatible SHA256 for comparing files * Revert "Move ironic-standalone to non-voting" * Move ironic-standalone to non-voting * Make \`redfish\_system\_id\` property optional * Lower tempest concurrency * Refactoring: finish splitting do\_node\_deploy 14.0.0 ------ * Fix up release notes for 14.0.0 * Actually use ironic-python-agent from source in source builds * Update release mappings for Ussuri * Automatic port allocation for the serial console * Remove the [pxe]ipxe\_enabled configuration option * tell reno to ignore the kilo branch * Update API version history for v1.61 * [Trivial] Remove redundant brackets * Split cleaning-related functions from manager.py into a new module * Split deployment-related functions from manager.py into a new module * Disable debug output in doc building * Fix bash comparisons for grenade multinode switch * Fix jsonpatch related tests * Fix ipxe interface to perform ipxe boot without ipxe\_enabled enabled * Fix typo in setup-network.sh script * Support node retirement * Make ironic-api compatible with WSGI containers other than mod\_wsgi * Don't require root partition when installing a whole disk image * Clean up api controller base classes * Deprecate irmc hardware type * Subclass wsme.exc.ClientSideError * Use str type instead of wsme.types.text * Use bionic job for bifrost integration * Follow up to root device hints in instance\_info * Deprecate ibmc * Fix incorrect ibmc\_address parsing on Python 3.8 * Fix entry paths for cleaning and deployment * Nodes in maintenance didn't fail, when they should have * Fix API docs for target\_power\_state response * Document using CentOS 8 DIB IPA 
images for Ussuri and newer * Lower RAM for DIB jobs to 2 GiB * Remove reference to deprecated [disk\_utils]iscsi\_verify\_attempts * Add node info and exc name when getting rootfs info from Glance * Fix fast\_track + agent\_url update fix * CI: make the metalsmith job voting and gating * devstack: install bindep for diskimage-builder * Allow reading root\_device from instance\_info * Implement managed in-band inspection boot for ilo-virtual-media * Add a missing versionadded for configdrive[vendor\_data] * Make qemu hook running with python3 * Refactor glance retry code to use retrying lib * Fix duplicated words issue like "are are placed" * devstack: switch to using CentOS 8 DIB ramdisks by default * Remove the deprecated [glance]glance\_num\_retries * Fix missing job\_id parameter in the log message * Fix get\_boot\_option logic for software raid * Allow node owners to administer associated ports * Explicitly use ipxe as boot interface for iPXE testing * Replace disk-image-create with ironic-python-agent-builder * Remove those switches for python2 * Fix invalid assertIsNone statements * Add librsvg2\* to bindep * Stop using six library * Add notes on the pxe template for aarch64 * Enforce running tox with correct python version based on env * Tell the multinode subnode and grenade to use /opt * Disable automated clean on newer jobs * Extend service timeout * Tune down multinode concurrency * Restrict ability to change owner on provisioned or allocated node * Correct power state handling for managed in-band inspection * Implement managed in-band inspection boot for redfish-virtual-media * redfish-vmedia: correctly pass ipa-debug * Add a CI job to UEFI boot over Redfish virtual media * Fix use of urlparse.urljoin * Import importlib directly * Increasing BUILD\_TIMEOUT value for multinode job * Remove deprecated ironic-agent element * Add owner to allocations and create relevant policies * CI: do not enable rescue on indirect jobs * Update nova 
os-server-external-events response logic * DRAC: Drives conversion from raid to jbod * Changed to bug fix to follow-on idrac job patch * Fixes issue with checking whether ISO is passed * docs: add a missing heading * Add a CI job to legacy boot over Redfish virtual media * Fix UEFI NVRAM collision in devstack * Remove references to 'firewall\_driver' * Make redfish CI jobs pulling sushy-tools from git * Prevent localhost from being used as ironic-inspector callback URL * Add an ironic-inspector job with managed boot * Add timeout when querying agent's command statuses * docs: update the local development quickstart to use JSON RPC * Drop python 2.7 support and testing * Remove unused migration tests * Wire in in-band inspection for PXE boot and neutron-based networking * Foundation for boot/network management for in-band inspection * Add \`instance\_info/kernel\_append\_params\` to \`redfish\` * Add indicator management to redfish hw type * Mock out the correct greenthread sleep method * Don't install syslinux-nonlinux on rhel7 * Ensure text-only console in devstack * Pass correct flags during PXE cleanup in iPXEBoot * Drop [agent]heartbeat\_timeout * Remove old online migration codes * Block ability update callback\_url * Stop supporting incompatible heartbeat interfaces * Allow node owners to administer nodes * Fix variable name in cleanup\_baremetal\_basic\_ops func * Switch legacy jobs to Py3 * Ensure \`isolinux.bin\` is present and configured in devstack * Fix \`snmp\` unit test * Backward compatibility for the ramdisk\_params change * Allow vendor\_data to be included in a configdrive dict * Improve iDrac Documentation * Correct handling of ramdisk\_params in (i)PXE boot * Software RAID: Identify the root fs via its UUID from image metadata * Change integration jobs to run under Python3 * Using loop instead of with\_X * CI: add ironic-python-agent-builder to the multinode job * Update release with information about zuul job * Add virtual media boot section to 
the docs * CI: limit rescue testing to only two jobs * Mask secrets when logging in json\_rpc * Use new shiny Devices class instead of old ugly Device * Switch to ussuri job * Do not ignore 'fields' query parameter when building next url * Update sushy library version * Minor string formatting follow-up to idrac jbod patch * Document systemd-nspawn as a nice trick for patching a ramdisk * DRAC: Drives conversion from JBOD to RAID * Setup ipa-builder before building ramdisk * Fix EFIBOOT image upload in devstack * Fix drive sensors collection in \`redfish\` mgmt interface * Add Redfish vmedia boot interface to idrac HW type * Change MTU logic to allow for lower MTUs automatically * DRAC: Fix a bug for clear\_job\_queue clean step with non-BIOS pending job * Documentation for iLO hardware type deploy steps * ironic-tempest-functional-python3 unused variables * docs: use openstackdocstheme extlink extension * grub configuration should use user kernel & ramdisk * Raising minimum version of oslo.db * DRAC: Fix a bug for delete\_config with multiple controllers * Use correct function to stop service * Fix devstack installation failure * DRAC: Fix a bug for job creation when only required * Add a CI job with a DIB-built ramdisk * Remove old online migrations and new models * Remove earliest version from releasing docs, update examples * Change log level based on node status * enable\_python3\_package should not be necessary anymore * Update doc for CI * Add versions to release notes series * Document pre-built ramdisk images (including DIB) * Run DIB with tracing enabled and increase the DHCP timeout * Improve documentation about releasing deliverables * Update master for stable/train 13.0.0 ------ * Update release mappings for Train * Release notes cleanup for 13.0.0 (mk2) * Document PXE retries * Update env. 
variables in the documentation * Add iDRAC RAID deploy steps * Don't resume deployment or cleaning on heartbeat when polling * Make multinode jobs non-voting * devstack: wait for conductor to start and register itself * Allow retrying PXE boot if it takes too long * Lower MTU override * Devstack: Fix iPXE apache log location bug * Serve virtual media boot images from ironic conductor * Add Redfish inspect interface to idrac HW type * Add deploy steps for iLO Management interface * Do not log an error on heartbeat in deploying/cleaning/rescuing * Add an option to abort cleaning and deployment if node is in maintenance * CI: move libvirt images to /opt for standalone and multinode jobs * Add first idrac HW type Redfish interface support * Remove cisco references and add release note * Add \`FLOPPY\` boot device constant * Combined gate fixes * Read in non-blocking fashion when starting console * Release notes cleanup for 13.0.0 * CI: move the fast-track job to the experimental pipeline * Remove support for CoreOS images * Fix gate failure related to jsonschema * Minor: change a misleading InvalidState error message * Build pdf doc * iLO driver doc update * Use openstack cli in image creation guide * iLO driver doc update * devstack: save iPXE httpd logs * Prelude for 13.0.0 * Add a release note for iscsi\_verify\_attempts deprecation * Fix typo in handling of exception FailedToGetIPAddressOnPort * Add iLO RAID deploy steps * add table of available cleaning steps to documentation * Prepare for deprecation of iscsi\_verify\_attempts in ironic-lib * Add software raid release note to ironic * Add ironic-specs link to readme.rst * Fixed problem with UEFI iSCSI boot for nic adapters * DRAC : clear\_job\_queue clean step to fix pending bios config jobs * Add deploy steps for iLO BIOS interface * Follow-up for deploy steps for Redfish BIOS interface * Adding file uri support for ipa image location * Adjust placement query for reserved nodes * Add indicator management harness 
to ManagementInterface * Adds dhcp-all-interfaces element * Do not wait for console being started on timeout * Out-of-band \`erase\_devices\` clean step for Proliant Servers * Pass target\_raid\_config field to ironic variable * Allow deleting unbound ports on active node * Follow up to Option to send all portgroup data * Lower standalone concurrency to 3 from 4 * Make ironic\_log Ansible callback Python 3 ready * Remove ironic command bash completion * devstack: Fix libvirtd/libvirt-bin detection * Add iPXE boot interface to 'ilo' hardware type * Move to unsafe caching * Allow to configure additional ipmitool retriable errors * Fix exception on provisioning with idrac hw type * Add logic to determine Ironic node is HW or not into configure\_ironic\_dirs * Install sushy if redfish is a hardware type * Add \`filename\` parameter to Redfish virtual media boot URL * Add set\_boot\_device hook in \`redfish\` boot interface * Add Redfish Virtual Media Boot support * Follow-up to power sync reno * Add new method 'apply\_configuration' to RAIDInterface * Do not tear down node upon cleaning failure * Switch non-multinode jobs to new-style neutron services * Add deploy steps for Redfish BIOS interface * Ansible: fix partition\_configdrive for logical root\_devices * Support power state change callbacks to nova using ksa\_adapter * Docu: Fix broken link * Fixing broken links * DRAC : Fix issue for RAID-0 creation for multiple disks for PERC H740P * Uses IPA-B to build in addition to CoreOS * Asynchronous out of band deploy steps fails to execute * Clean up RAID documentation * Enable testing software RAID in the standalone job * devstack: allow creating more than one volume for a VM * Allow configuring global deploy and rescue kernel/ramdisk * Fix missing print format error * Update software RAID configuration documentation * Use HTTPProxyToWSGI middleware from oslo * RAID creation fails with 'ilo5' RAID interface * RAID create fails if 'controller' is missing in 
'target\_raid\_config' * Use openstacksdk for accessing ironic-inspector * CI Documentation * Enable no IP address to be returned * Change debug to error for heartbeats * CI: stop using pyghmi from git master * Fixes power-on failure for 'ilo' hardware type * Creation of UEFI ISO fails with efiboot.img * Remove deprecated Neutron authentication options * Follow-up to the IntelIPMIHardware patch * Ansible driver: fix deployment with serial specified as root device hint * Enable testing adoption in the CI * Fix serial/wwn gathering for ansible+python3 * Update api-ref location * IPA does not boot up after cleaning reboot for 'redfish' bios interface * Revert "Add logic to determine Ironic node is HW or not into configure\_ironic\_dirs" * Filter security group list on the ID's we expect * Clean lower-constraints.txt * [Trivial] Fix is\_fast\_track parameter doc string * Failure in get\_sensor\_data() of 'redfish' management interface * Abstract away pecan.request/response * Fix potential race condition on node power on and reboot * iLO firmware update fails with 'update\_firmware\_sum' clean step * Bump keystonauth and warlock versions * Don't install ubuntu efi debs on cent * Remove the PXE driver page * Ansible module: fix deployment for private and/or shared images * Add logic to determine Ironic node is HW or not into install\_ironic * Add logic to determine Ironic node is HW or not into configure\_ironic\_dirs * Deal with iPXE boot interface incompatibility in Train * Bump openstackdocstheme to 1.20.0 * Remove deprecated app.wsgi script * devstack: Install arch specific debs only when deploying to that arch * DRAC: Upgraded RAID delete\_config cleaning step * Fix invalid assert state * CI: remove quotation marks from TEMPEST\_PLUGINS variable * Remove CIMC/UCS drivers * Add IntelIPMIHardware * Collect sensor data in \`\`redfish\`\` hardware type * [Trivial] Software RAID: Documentation edits * Software RAID: Add documentation * Blacklist sphinx 2.1.0 (autodoc 
bug) * Follow-up on UEFI/Grub2 job * Adds bandit template and exclude some of tests * Add documentation for IntelIPMI hardware * Add check on get\_endpoint returning None * Option to send all portgroup data 12.2.0 ------ * Replace deprecated with\_lockmode with with\_for\_update * Spruce up release notes for 12.2.0 release * Update API history and release mapping for 12.2.0 * Refactoring: flatten the glance service module * Remove the deprecated glance authentication options * DRAC: Adding reset\_idrac and known\_good\_state cleaning steps * devstack: add missing variables for ironic-python-agent-builder * Remove ipxe tags when ipx6 is in use * Update qemu hook to facilitate Multicast * redfish: handle missing Bios attribute * Fix :param: in docstring * Updates ironic for using ironic-python-agent-builder * Do not log an exception if Allocation is deleted during handling * Add release note updating status of smartnics * Switch to use exception from ironic-lib * Change constraints opendev.org to release.openstack.org * Incorporate bandit support in CI * Remove elilo support * Ansible module: fix configdrive partition creation step * Remove deprecated option [DEFAULT]enabled\_drivers * Fix regex string in the hacking check * Add api-ref for allocation update * Add a pxe/uefi/grub2 CI job * Bump lower mock version to 3.0.0 * Start using importlib for Python 3.x * Remove XML support in parsable\_error middleware * Fix binary file upload to Swift * fix typo in code comment * Software RAID: Trigger grub installation on the holder disks * Move stray reno file * Trivial: correct configuration option copy-pased from inspector * Remove commit\_required in iDRAC hardware type * Make the multinode grenade job voting again * devstack: configure rabbit outside of API configuration * Blacklist python-cinderclient 4.0.0 * Publish baremetal endpoint via mdns * Fix inaccurate url links * Update sphinx requirements * Allocation API: correct setting name to None * Allocation API: 
backfilling allocations * Fix GRUB config path when building EFI ISO * Add DHCP server part to make the document more detail * Do not try to return mock as JSON in unit tests * Remove deprecated option [ilo]power\_retry * Add API to allow update allocation name and extra field * Update Python 3 test runtimes for Train * Replace hardcoded "stack" user to $STACK\_USER * Run vbmcd as stack user in devstack * Adding enabled\_boot\_interface attribute in tempest config * Add openstack commands in node deployment guide * Add a high level vision reflection document * Add iDRAC driver realtime RAID creation and deletion * Correct spelling errors * Replace git.openstack.org URLs with opendev.org URLs * Direct bridge to be setup * Fix pyghmi path * OpenDev Migration Patch * Removes \`hash\_distribution\_replicas\` configuration option * Truncate node text fields when too long * Add note for alternative checksums * Make the JSON RPC server work with both IPv4 and IPv6 * Jsonschema 3.0.1: Binding the schema to draft-04 * Place upper bound on python-dracclient version * devstack: Remove syslinux dependency * Do not try to create temporary URLs with zero lifetime * Ansible module: fix partition\_configdrive.sh file * Use the PUBLIC\_BRIDGE for vxlan * Move devstack emulators configs under /etc/ironic * Uncap jsonschema in requirements * Split ibmc power/reboot classes * Temporarily mark grenade multinode as non-voting * Improve VirtualBMC use in Devstack * Run IPMI, SNMP and Redfish BMC emulators as stack * Add UEFI firmware to Redfish emulator config * Add systemd unit for sushy emulator in devstack * Ansible module: fix clean error handling * [Trivial] Fix typo in agent\_base\_vendor unit test * Fix exception generation errors * Add a request\_timeout to neutron * doc: update ibmc driver support servers document * Ansible module fix: stream\_url * Make it possible to send sensor data for all nodes * Slightly rephrase note in tenant networking docs * Bump 
sphinxcontrib-pecanwsme to 0.10.0 * ipmi: Ignore sensor debug data * Make 'noop' the explicit default of default\_storage\_interface * Docs: correct expected host format for drac\_address * Check for deploy.deploy deploy step in heartbeat * Workaround for sendfile size limit * Workaround for uefi job with ubuntu bionic * Replace openstack.org git:// URLs with https:// * Remove vbmc log file in devstack * Add versions to release notes series * Imported Translations from Zanata * Update master for stable/stein 12.1.0 ------ * Fix capabilities passed as string in agent prepare * Respect $USE\_PYTHON3 settings for gunicorn * Add systemd unit for vbmcd in devstack * Workaround for postgres job with ubuntu bionic * Add release note on conntrack issue on bionic * Update release-mappings and api version data for Stein release * Pass kwargs to exception to get better formatted error message * Advance python-dracclient version requirement * Add prelude and update release notes for 12.1.0 * Optimize: HUAWEI iBMC driver utils * Set boot\_mode in node properties during OOB Introspection * Fix idrac driver unit test backwards compat issue * Deploy Templates: factor out ironic.conductor.steps * Make metrics usable * Kg key for IPMIv2 authentication * Add fast-track testing * fast tracked deployment support * Update doc for UEFI first * Fix lower-constraints job * Fix idrac Job.state renamed to Job.status * Deprecates \`hash\_distribution\_replicas\` config option * Add Huawei iBMC driver support * Fix misuse of assertTrue * Allow methods to be both deploy and clean steps * Adding ansible python interpreter as driver\_info * Return 405 for old versions in allocation and deploy template APIs * honor ipmi\_port in serial console drivers * Follow up to available node protection * Migrate ironic-grenade-dsvm-multinode-multitenant job to Ubuntu Bionic * Deploy templates: conductor and API nits * Deploy Templates: documentation * Fixing a bash test in devstack ironic lib * Deploy 
Templates: API reference * Fix formatting issue in doc * Update dist filter for devstack ubuntu * Add a non-voting metalsmith job for local boot coverage * Document building configdrive on the server side * Check microversions before validations for allocations and deploy templates * Add python3 unit test with drivers installed * Fix missing print format error * Fix typo and docstring in pxe/ipxe * Stop requiring root\_gb for whole-disk images * driver-requirements: mark UcsSdk as Python 2 only * Set boot\_mode in node properties during Redfish introspection * Add option to set python interpreter for ansible * Document using a URL for image\_checksum * [docs] IPv6 support for iLO * Temporary marking ironic-standalone non-voting * Allow building configdrive from JSON in the API * Allocation API: optimize check on candidate nodes * Fix TypeError: \_\_str\_\_ returned non-string (type ImageRefValidationFailed) * Deploy templates: API & notifications * Deploy templates: conductor * Drop installing python-libvirt system package * Test API max version is in RELEASE\_MAPPINGS * Update the log message for ilo drivers * Deploy templates: fix updating steps in Python 3 * Fix pysendfile requirement marker * Add option to protect available nodes from accidental deletion * Deploy Templates: add 'extra' field to DB & object * Trivial: Fix error message when waiting for power state * Allocation API: fix minor issues in the API reference * Allocation API: reference documentation * Adding bios\_interface reference to api docs * Set available\_nodes in tempest conf * Update the proliantutils version in documentation * [trivial] Removing python 3.5 template jobs * Deploy Templates: Fix DB & object nits * Add check for object versions * [Trivial] Fix incorrect logging in destroy\_allocation * Allocation API: taking over allocations of offline conductors * Allocation API: resume allocations on conductor restart * Devstack - run vbmc as sudo * Documentation update for iLO Drivers * 
Follow up - API - Implement /events endpoint * Follow up to node description * ensure that socat serial proxy keeps running * Deprecate Cisco drivers * Follow up to ISO image build patch * API - Implement /events endpoint * Add a requisite for metadata with BFV * [Follow Up] Add support for Smart NICs * Support using JSON-RPC instead of oslo.messaging * Deploy templates: data model, DB API & objects * [Follow Up] Expose is\_smartnic in port API * Prioritize sloppy nodes for power sync * Expose conductors: api-ref * Remove duplicated jobs and refactor jobs * Allocation API: fix a small inconsistency * Expose is\_smartnic in port API * [Trivial] Allocation API: correct syntax in API version history docs * Allocation API: REST API implementation * Make power sync unit test operational * Allow case-insensitivity when setting conductor\_group via API * Optionally preserve original system boot order upon instance deployment * Add support for Smart NICs * Add a voting CI job running unit tests with driver-requirements * [Refactor] Make caching BIOS settings explicit * [docs] OOB RAID implementation for ilo5 based HPE Proliant servers * Make iLO BIOS interface clean steps asynchronous * Provides mount point as cinder requires it to attach volume * Add description field to node: api-ref * Add description field to node * Fix test for 'force\_persistent\_boot\_device' (i)PXE driver\_info option * Fix iPXE boot interface with ipxe\_enabled=False * Allocation API: conductor API (without HA and take over) * Removing deprecated drac\_host property * Add is\_smartnic to Port data model * Remove uses of logger name "oslo\_messaging" * [Trivial] Fix typo in noop interface comment * Remove duplicated fault code * Fix listing nodes with conductor could raise * Parallelize periodic power sync calls follow up * Build ISO out of EFI system partition image * Make versioned notifications topics configurable * Build UEFI-only ISO for UEFI boot * Parallelize periodic power sync calls * Limit 
the timeout value of heartbeat\_timeout * Replace use of Q\_USE\_PROVIDERNET\_FOR\_PUBLIC * Make ipmi\_force\_boot\_device more user friendly * Follow-up logging change * Remove dsvm from zuulv3 jobs * Allocation API: allow picking random conductor for RPC topic * Fix updating nodes with removed or broken drivers * Fix ironic port creation after Redfish inspection * Allocation API: minor fixes to DB and RPC * Allocation API: allow skipping retries in TaskManager * Allocation API: database and RPC * Allow missing \`\`local\_gb\`\` property * Fix typo in release note * Fix IPv6 iPXE support * OOB RAID implementation for ilo5 based HPE Proliant servers * Fix SushyError namespacing in Redfish inspection * Allow disabling TFTP image cache * Add pxe template per node * Fix the misspelling of "configuration" * Switch to cirros 0.4.0 * Update tox version to 2.0 * Disable metadata\_csum when creating ext4 filesystems * Switch the default NIC driver to e1000 * Change openstack-dev to openstack-discuss * Fix XClarity driver management defect * Ignore newly introduced tables in pre-upgrade versions check * Switch CI back to xenial 12.0.0 ------ * Add "owner" information field * Introduce configuration option [ipmi]ipmi\_disable\_timeout * Enroll XClarity machines in Ironic's devstack setting * spelling error * api-ref: update node.resource\_class description * Add a note regarding IPA multidevice fix * Allow disabling instance image cache * Add a prelude for ironic 12.0 * Set proper version numbering * Change multinode jobs to default to local boot * Follow-up Retries and timeout for IPA command * Fix "import xxx as xxx" grammar * Kill misbehaving \`ipmitool\` process * Fix OOB introspection to use pxe\_enabled flag in idrac driver * Add configurable Redfish client authentication * Expose conductors: api * Fix node exclusive lock not released on console start/restart * Fix IPv6 Option Passing * Let neutron regenerate mac on port unbind * Slim down grenade jobs * Extend job 
build timeout * Mark several tests to not test cleaning * Add BIOS interface to Redfish hardware type * Avoid cpu\_arch None values in iscsi deployments * Expose conductors: db and rpc * Fix Chinese quotes * Add ipmi\_disable\_timeout to avoid problematic IPMI command * Correct author email address * Ensure we unbind flat network ports and clear BM mac addresses * Retries and timeout for IPA command * Support for protecting nodes from undeploying and rebuilding * Add download link apache configuration with mod\_wsgi * spelling error * Add Redfish inspect interface follow up * Add the noop management interface to the manual-management hardware type * Add missing ws separator between words * Switch ironic-tempest-...-tinyipa-multinode to zuulv3 * Add a non-voting bifrost job to ironic * Increase RAM for the ironic node in UEFI job * Reuse Redfish sessions follow up * Improve logs when hard linking images fails * Don't fail when node is in CLEANFAIL state * Fix ipv6 URL formatting for pxe/iPXE * Fix redfish test\_get\_system\_resource\_not\_found test * Improve sushy mocks * Recommend to set boot mode explicitly * Add Redfish inspect interface * Fix CPU count returned by introspection in Ironic iDRAC driver * Add ironic-status upgrade check command framework * Passing thread pool size to IPA for parallel erasure * Change BFV job to use ipxe interface * [devstack] Allow setting TFTP max blocksize * Reuse Redfish sessions * Migration step to update objects to latest version * Cleanup of remaining pxe focused is\_ipxe\_enabled * Remove the xclarity deprecation * Follow-up to fix not exist deploy image of patch 592247 * Remove pywsman reference * Fix DHCPv6 support * Revert "Add openstack/placement as a required project for ironic-grenade\*" * Add api-ref for conductor group * Follow-up patch for I71feefa3d0593fd185a286bec4ce38607203641d * Fix ironic developer quickstart document * Add note to pxe configuration doc * Create base pxe class * Wrap up PXE private method to 
pxe\_utils move * Enhanced checksum support * Enable configuration of conversion flags for iscsi * Document how to implement a new deploy step * Refactor API code for checking microversions * Allow streaming raw partition images * Remove Vagrant * ipxe boot interface * Remove oneview drivers * Completely remove support for deprecated Glance V1 * Avoid race with nova on power sync and rescue * Log a warning for Gen8 Inspection * Doc: Adds cinder as a service requires creds * Fix unit test run on OS X * Fixes a race condition in the hash ring code * Add automated\_clean field to the API * Stop console at tearing down without unsetting console\_enabled * Add functionality for individual cleaning on nodes * Documentation for 'ramdisk' deploy with 'ilo-virtual-media' boot * Add documentation for soft power for ilo hardware type * Add documentation for 'inject nmi' for ilo hardware type * Remove unnecessary checks in periodic task methods * Remove token expiration * Adds support for soft power operations to 'ilo' power interface * Add openstack/placement as a required project for ironic-grenade\* * Remove tox checkconfig * Add admin documentation for rescue mode in iLO driver * Correct headings in README.rst * Minor fixes for docs on changing hardware types * Add admin documentation for rescue interface * pxe/ipxe: Move common calls out pxe.py * Switch ironic-tempest-dsvm-functional-python3 to zuulv3 * Switch ironic-tempest-dsvm-functional-python2 to zuulv3 * Switch grenade nic driver to e1000 * Remove ironic experimental jobs * Restore the nova-api redirect * Update docs to portgroup with creating windows images * Use templates for cover and lower-constraints * Remove wrong install-guide-jobs in zuul setup * Fix grenade tests * Add a more detailed release note for Dell BOSS RAID1 fix * Honors return value from BIOS interface cleansteps * Reuse checksum calculation from oslo * Adds support for 'ramdisk' deploy with 'ilo-virtual-media' boot * Remove inspecting state 
support from inspect\_hardware * Adds support for 'Inject NMI' to 'ilo' management interface * Docs for agent http provisioning * Ensure pagination marker is always set * Direct deploy serve HTTP images from conductor * Fix doc builds for ironic * Fix async keyword for Python 3.7 * Add vendor step placement suggestion * Prevent HTML from appearing in API error messages * Replace assertRaisesRegexp with assertRaisesRegex * Add version discovery information to the /v1 endpoint * Replace assertRaisesRegexp with assertRaisesRegex * Fix provisioning failure with \`ramdisk\` deploy interface * Minor fixes to contributor vision * Add automated\_clean field * Use HostAddressOpt for opts that accept IP and hostnames * Remove the duplicated word * add python 3.6 unit test job * switch documentation job to new PTI * import zuul job settings from project-config * Prevents deletion of ports for active nodes * Disable periodic tasks if interval set to 0 * Reformat instructions related with various OS * Imported Translations from Zanata * Add conductor\_group docs * Switch ironic-tempest-dsvm-ironic-inspector to zuulv3 * Switch ironic-tempest-dsvm-bfv to zuulv3 * A minor update to documentation of \`ilo\` hardware type * Imported Translations from Zanata * Update reno for stable/rocky * Fix not exist deploy image within irmc-virtual-media booting 11.1.0 ------ * Switch the "snmp" hardware type to "noop" management * Add "noop" management and use it in the "ipmi" hardware type * Update docs on ironic boot mode management * Follow-up to always link MAC address files * Simplify subclasses for PXERamdiskDeploy * Node gets stuck in ING state when conductor goes down * Add notes on Redfish boot mode management * Prepare for Rocky release * Update the reno for the reset\_interfaces feature * Use max version of an object * A vision * Improve the "Ironic behind mod wsgi" documentation * Deploy steps documentation * Mark the ZeroMQ driver deprecated * Remove rabbit\_max\_retries option * 
Fix iDRAC hardware type does not work with UEFI * Pass prep\_boot\_part\_uuid to install\_bootloader for ppc64\* partition images * Remove redundant swift vars * Document locale requirement for local testing * Switch ironic-tempest-dsvm-ipa-partition-pxe\_ipmitool-tinyipa-python3 * Improve doc of Node serial console * Follow-up patch to ramdisk interface * Ramdisk deploy driver doc * Change PXE logic to always link macs with UEFI * Add documentation for BIOS settings * Fix for failure of cleaning for iRMC restore\_bios\_config * Refactor RAID configuration via iRMC driver * Adds ramdisk deploy driver * Follow-up patch for 7c5a04c1149f14900f504f32e000a7b4e69e661f * Switch ironic-tempest-dsvm-ipa-partition-uefi-pxe\_ipmitool-tinyipa * Switch ironic-tempest-dsvm-ipa-wholedisk-bios-pxe\_snmp-tinyipa * Switch ironic-tempest-dsvm-ipa-wholedisk-bios-agent\_ipmitool-tinyipa * Switch ironic-tempest-dsvm-pxe\_ipmitool-postgres * Documentation update of iLO BIOS settings * Follow-up to improve pep8 checking with hacking * Fix for failure in cleaning in iRMC driver * Add deploy\_step to NodePayload.SCHEMA * Add conductor\_group to node notifications * Deprecate xclarity hardware type * Be more precise with conductor group API tests * Simplify hash ring tests * Add documentation for changing node's hardware type * Fix the list of irrelevant-files * snmp: Keep get\_next method backward-compatible * Fix for failure in cleaning * Expose node.conductor\_group in the REST API * Use conductor group for hash ring calculations * Expose BIOS interface * Ignore bashate E044 * Remove deprecated option [ipmi]retry\_timeout * iLO BIOS interface implementation * Make pxelinux.cfg folder configurable * Use openstack client instead of neutron client * Replace port 35357 with 5000 for "auth\_url" * Add conductor\_group field to config, node and conductor objects * Add reset\_interfaces parameter to node's PATCH * Don't handle warnings as errors * Follow up Add CUSTOM\_CPU\_FPGA Traits value to 
ironic inspection * Follow-up changes to iRMC bios interface * Minor changes for deploy\_steps framework * Caching of PDU autodiscovery * Migrate ironic \`snmp\` driver to the latest pysnmp API * Add conductor\_group field to nodes and conductors tables * Add mock object for get\_bios\_settings * Fix bug to doc:configdrive * Add notes for future job migrations * Assert a build timeout for zuul templated CI jobs * Fixed link to Storyboard instead of launchpad * Update CI jobs for rescue mode * Fix bug to doc:kernel-boot-parameters * Deploy steps - API & notifications * Deploy steps - conductor & drivers * Add CUSTOM\_CPU\_FPGA Traits value to ironic inspection * Implement iRMC BIOS configuration * Deploy steps - versioned objects * Deploy steps - DB model * Follow-up to RAID configuration via iRMC driver patch * Poweroff server after 10 tries * Make the lower-constraints tox env actually use lower-constraints * Fix typo of function naming conventions in test\_deploy\_utils.py * Update the doc regarding the removal of classic drivers * Update boot-from-volume feature docs * [doc] Use openstack client commands to replace neutron client * Detect skip version upgrades from version earlier than Pike * Update API version history with release 11.0.0 * Bump osprofiler minimum requirement to 1.5.0 * Add 11.0 to release mapping * Add read&write SNMP community names to \`snmp\` driver * Add unit tests that "remove" is acceptable on /XXX\_interface node fields * Fix 11.0 prelude formatting * Change docs bug link to storyboard 11.0.0 ------ * Support RAID configuration for BM via iRMC driver * Fix list node vifs api error * Remove support for creating and loading classic drivers * Ensure we allow Ironic API traffic from baremetal network * Add a prelude for version 11 * iDRAC RAID10 creation with greater than 16 drives * Remove doc of classic drivers from the admin guide * Modifying 'whole\_disk\_image\_url' and 'whole\_disk\_image\_checksum' variable * Follow-up to update doc 
for oneview driver * Small change of doc title for the drivers * Fix wrong in apidoc\_excluded\_paths * Switch ironic-tempest-dsvm-ipa-partition-redfish-tinyipa * Switch ironic-dsvm-standalone to zuulv3 native * Follow-up to update doc for ilo driver * Add BayTech MRP27 snmp driver type * Improve pep8 checking along with hacking * Follow-up to update doc for irmc driver * DevStack: Tiny changes following iRMC classic driver removal * include all versions of Node in release\_mappings * Deprecate [inspector]enabled option * Do not disable inspector periodic tasks if [inspector]enabled is False * Remove the ipmitool classic drivers * Add snmp driver auto discovery * During cleaning, use current node.driver\_internal\_info * Rename test class * Remove the iRMC classic drivers * Remove the OneView classic drivers * Remove the deprecated pxe\_snmp driver * Remove the deprecated classic drivers for Cisco UCS hardware * Remove the iDRAC classic drivers * Separate unit tests into different classes * Add helper method for testing node fields * Fix conductor manager unit tests * Remove the ilo classic drivers * Move parse\_instance\_info\_capabilities() to common utils.py * Fix error when deleting a non-existent port * BIOS Settings: update admin doc * BIOS Settings: add bios\_interface field in NodePayload * BIOS Settings: update default BIOS setting version in db utils * Add documentation for XClarity Driver * Release note clean-ups for ironic release * Move boot-related code to boot\_mode\_utils.py * Raise TemporaryFailure if no conductors are online * BIOS Settings: add sync\_node\_setting * Fix for Unable to create RAID1 on Dell BOSS card * Add an external storage interface * fix typos * fix typos * Add detail=[True, False] query string to API list endpoints * Adds enable\_ata\_secure\_erase option * Remove the remaining fake drivers * Document that nova-compute attaches VIF to active nodes on start up * Added Redfish boot mode management * iRMC: Support ipmitool power 
interface with irmc hardware * Doc: Remove -r option for running a specific unit test * Fix stestr has no lower bound in test-requirements * Adds boot mode support to ManagementInterface * Modify the Ironic api-ref's parameters in parameters.yaml * rectify 'a image ID' to 'an image ID' * change 'a ordinary file ' to 'an ordinary file' * Validating fault value when querying with fault field * change 'a optional path' to 'an optional path' * Update links in README * Remove the fake\_ipmitool, fake\_ipmitool\_socat and fake\_snmp drivers * Add release notes link to README * BIOS Settings: add admin doc * Remove deprecated [keystone] config section * Make method public to support out-of-band cleaning * Remove the fake\_agent, fake\_pxe and fake\_inspector drivers * Consolidate the setting of ironic-extra-vars * Remove deprecated ansible driver options * Remove dulicate uses for zuul-cloner * Comply with PTI for Python testing * fix tox python3 overrides * Remove the "fake" and "fake\_soft\_power" classic drivers * Completely stop using the "fake" classic driver in unit tests * Power fault recovery follow up * Adds more \`ipmitool\` errors as retryable * Stop using pxe\_ipmitool in grenade * Fix FakeBIOS to allow tempest testing * Power fault recovery: Notification objects * Power fault recovery: API implementation * Add mock to doc requirements to fix doc build * Fix task\_manager process\_event docstring * Implements baremetal inspect abort * Add the ability to setup enabled bios interfaces in devstack * [Doc] Scheduling needs validated 'management' interface * Fix authentication issues along with add multi extra volumes * Stop passing IP address to IPA by PXE * Add Node BIOS support - REST API * Follow up to power fault recovery db tests * Power fault recovery: apply fault * Reraise exception with converting node ID * Gracefully handle NodeLocked exceptions during heartbeat * SNMPv3 security features added to the \`snmp\` driver * Allow customizing libvirt NIC driver 
* Convert conductor manager unit tests to hardware types * Remove excessive usage of mock\_the\_extension\_manager in unit tests - part 2 * Improve exception handling in agent\_base\_vendor * Check pep8 without ignoring D000 * Missing import of "\_" * Remove endpoint\_type from configuration * Power fault recovery: db and rpc implementation * Change exception msg of BIOS caching * Remove excessive usage of mock\_the\_extension\_manager in unit tests - part 1 * Mark xclarity password as secret * Fix E501 errors * Fix tenant DeprecationWarning from oslo\_context * update "auth\_url" in documents * Fix tenant DeprecationWarning from oslo\_context * Tear down console during unprovisioning * Fix XClarity parameters discrepancy * Follow up to inspect wait implementation * Silence F405 errors * Fix W605 Errors * Fix E305 Errors * Fix W504 errors * Gate fix: Cap hacking to avoid gate failure * Preserve env when running vbmc * Make validation failure on node deploy a 4XX code * Install OSC during quickstart * Ignore new errors until we're able to fix them * BIOS Settings: Add BIOS caching * BIOS Settings: Add BIOSInterface * Remove ip parameter from ipxe command line * Clarify image\_source with BFV * Update install guide to require resource classes * Fix error thrown by logging in common/neutron.py * Add note to oneview docs re: derprecation * Deprecate Oneview * Switch to the fake-hardware hardware type for API tests * Remove the Keystone API V2.0 endpoint registration * Move API (functional) tests to separate jobs * Add unit test for check of glance image status * Devstack plugin support for Redfish and Hardware * Collect periodic tasks from all enabled hardware interfaces * Stop verifying updated driver in creating task * BIOS Settings: Add RPC object * fix a typo * Trivial: Update pypi url to new url * Add more parameter explanation when create a node * Fix test\_get\_nodeinfo\_list\_with\_filters * Install reno to venv for creating release note * Stop removing root 
uuid in vendor interfaces * Fix \`\`agent\`\` deploy interface to call \`\`boot.prepare\_instance\`\` * Update wording used in removal of VIFs * [devstack] Switch ironic to uWSGI * Make ansible error message clearer * BIOS Settings: Add DB API * BIOS Settings: Add bios\_interface db field * BIOS Settings: Add DB model * Clean up driver\_internal\_info after tear\_down * Run jobs if requirements change * Remove vifs upon teardown * uncap eventlet * Update auth\_uri option to www\_authenticate\_uri * Resolve pep8 E402 errors and no longer ignore E402 * Remove pycodestyle version pin. Add E402 and W503 to ignore * Pin pycodestyle to <=2.3.1 * Check for PXE-enabled ports when creating neutron ports * Implementation of inspect wait state * Update Launchpad references to Storyboard * Add reno for new config [disk\_utils]partprobe\_attempts * Implement a function to check the image status * Fix callback plugin for Ansible 2.5 compatibility * Follow the new PTI for document build * Clarify deprecation of "async" parameter * Fix incompatible requirement in lower-constraints * Reference architecture: small cloud with trusted tenants * Update and replace http with https for doc links * Assume node traits in instance trait validation * Adding grub2 bootloader support to devstack plugin * Describe unmasking fields in security document * Copy port[group] VIF info from extra to internal\_info * DevStack: Enroll node with iRMC hardware * Stop overriding tempdir in unit test * Uniformly capitalize parameter description * Gate: run ironic tests in the regular multinode job * Do not use async parameter * Remove the link to the old drivers wiki page * add lower-constraints job * Test driver-requirements changes on standalone job * Updated from global requirements * Exclude Ansible 2.5 from driver-reqs * Fix typos There are two 'the', delete one of them * fix typos in documentation * Fix nits in the XClarity Driver codebase * Validate instance\_info.traits against node traits * Prevent 
overwriting of last\_error on cleaning failures * Infiniband Port Configuration update[1] * Rework Bare Metal service overview in the install guide * Gate: stop setting IRONIC\_ENABLED\_INSPECT\_INTEFACES=inspector * Follow-up patch for rescue mode devstack change * devstack: enabled fake-hardware and fake interfaces * Updated from global requirements * Add descriptions for config option choices * devstack: add support for rescue mode * Updated from global requirements * Implements validate\_rescue() for IRMCVirtualMediaBoot * Updated from global requirements * Update config option for collecting sensor data * Use node traits during upgrade * multinode, multitenant grenade votes in gate * zuul: Remove duplicated TEMPEST\_PLUGIN entry * Use more granular mocking in test\_utils * change python-libguestfs to python-guestfs for ubuntu * Update links in README * Updated from global requirements * Remove useless variable * Don't validate local\_link\_connection when port has client-id * Updated from global requirements * Update docstring to agent client related codes * Move execution of 'tools/check-releasenotes.py' to pep8 * reloads mutable config values on SIGHUP * Make grenade-mulinode voting again * tox.ini: flake8: Remove I202 from ignore list * fix a typo in driver-property-response.json: s/doman/domain/ * Trivial: Remove the non ascii codes in tox.ini * Register traits on nodes in devstack * [devstack] block iPXE boot from HTTPS TempURLs * Fix issue with double mocking of utils.execute functions * Updates boot mode on the baremetal as per \`boot\_mode\` * Support nested objects and object lists in as\_dict * Revert "Don't try to lock for vif detach" * Rework logic handling reserved orphaned nodes in the conductor * Set 'initrd' to 'rescue\_ramdisk' for rescue with iPXE * Update iLO documentation for deprecating classical drivers * Increase the instance\_info column size to LONGTEXT on MySQL/MariaDB * Update release instructions wrt grenade * [ansible] use 
manual-mgmt hw type in unit tests * Use oslo\_db.sqlalchemy.test\_fixtures * Disable .pyc files for grenade multinode * Add docs for ansible deploy interface * Update comment and mock about autospec not working on staticmethods * Build instance PXE options for unrescue * Updated from global requirements * Fix default object versioning for Rocky * Allow sqalchemy filtering by id and uuid * Fix rare HTTP 400 from port list API * Clean nodes stuck in CLEANING state when ir-cond restarts * Imported Translations from Zanata * tox: stop validating locale files * Switch contributor documentation to hardware types * Stop using --os-baremetal-api-version in devstack by default * Conductor version cannot be null in Rocky * Add 'Other considerations' to security doc * Updated from global requirements * Implements validate\_rescue() for IloVirtualMediaBoot * Update to standalone ironic doc * Remove too large configdrive for handling error * Added known issue to iDRAC driver docs * Add missing noop implementations to fake-hardware * Stop running standalone tests for classic drivers * Stop running non-voting jobs in gate * Add optional healthcheck middleware * releasing docs: document stable jobs for the tempest plugin * Add meaningful exception in Neutron port show * Clean up CI playbooks * Fix broken log message * Add validate\_rescue() method to boot interface * Empty commit to bump minor pre-detected version * Remove test\_contains\_current\_release\_entry * Fix grammar errors * Clean up RPC versions and database migrations for Rocky * Remove validate\_boot\_option\_for\_trusted\_boot metric * Update reno for stable/queens 10.1.0 ------ * Add some missed test cases in node object tests * [reno] timeout parameter worked * Remove unnecessary lines from sample local.conf * Stop guessing mime types based on URLs * Clean up release notes before a release * Don't try to lock for vif detach * Revert grenade jobs to classic drivers * Handle case when a glance image contains no data 
* Add 10.1 and queens to the release mapping * Do not pass credentials to the ramdisk on cleaning * correct grammar, duplicate the found * Update iRMC document for classic driver deprecation * correct grammar, duplicate the found * Correct grammar, duplicate the found * Only set default network interface flat if enabled in config * Fix handling of 'timeout' parameter to power methods * Fixed some typos in test code * Replace chinese quotes to English quotes * Zuul: Remove project name * Modify error quotation marks * cleanup: Remove usage of some\_dict.keys() * Use zuul.override\_checkout instead of custom branch\_override var * Add validate\_rescue() method to network interface * [docs] Firmware based boot from volume for iLO drivers * Follow-up patch for api-ref documentation for rescue * Remove sample policy and config files * correct referenced url in comments * Remove unused code in unittest * Fix configure-networking docs * Migrate the remaining classic drivers to hardware types * Remove mode argument from boot.(prepare|clean\_up)\_ramdisk * Do not use asserts with business logic * Add option to specify mac adress in devstack/.../create-node.sh * Updated from global requirements * [api-ref] clarify what /v1/lookup returns * Update FAQ about updates of release notes * Add documentation for baremetal mech * Flat networks use node.uuid when binding ports * Add missing ilo vendor to the ilo hardware types * Follow-up for Switch OneView driver to hpOneView and ilorest libraries * Soft power operations for OneView hardware type * Deprecate classic drivers * Declare support for Python 3.5 in setup.cfg * Add api-ref and ironic state documentation for rescue * Mock check\_dir in ansible interface tests * Add documentation for node traits * Fix nits found in node traits * Follow-up for Implementation for UEFI iSCSI boot for ILO * Explicitly mark earliest-version for release notes * Remove unused code in common/neutron.py * Correct link address * Wait for 
ironic-neutron-agent to report state * Devstack - use neutron segments (routed provider networks) * Zuul: Remove project name * Add traits field to node notifications * Update description for config params of 'rescue' interface * Add rescue interface field to node-related notifications * Follow-up for API methods for rescue implementation * Add support for preparing rescue ramdisk in iLO PXE * Automatically migrate nodes to hardware types * Add API methods for [un]rescue * Fix unit tests for UEFI iSCSI boot for ILO * Follow-up for agent rescue implementation * iRMC:Support preparing rescue ramdisk in iRMC PXE * Redundant alias in import statement * Agent rescue implementation * Allow data migrations to accept options * Resolve race in validating neutron networks due to caching * Update api-ref for port group create * Implementation for UEFI iSCSI boot for ILO * Add node traits to API reference * Add a timeout for powering on/off a node on oneview * Fix persistent information when getting boot device * Remove python-oneviewclient from oneview hardware type * API: Node Traits API * Add RPC API and conductor manager for traits * Be more sane about cleaning * Fix node update with PostgreSQL * Switch the CI to hardware types * Migrate python-oneviewclient validations to oneview hardware type * Updated from global requirements * Add RPC object for traits * Allow setting {provisioning,cleaning,rescuing}\_network in driver\_info * Migrate oneview hardware type to use python-hpOneView * remeber spelling error * Add rescuewait timeout periodic task * Add rescue related methods to network interface * Add XClarity Driver * [docs] mention new nova scheduler option * Add a version argument to traits DB API * Mark multinode job as non-voting * Updated from global requirements * Fix docs for Sphinx 1.6.6 * fix a typo in ilo.rst: s/fimware/firmware/ * Do not send sensors data for nodes in maintenance mode 10.0.0 ------ * Adds RPC calls for rescue interface * Make the Python 3 job 
voting * Add additional context to contribution guide * node\_tag\_exists(): raise exception if bad node * Setup ansible interface in devstack * Remove the deprecated "giturl" option * Join nodes with traits * Update links * Node traits: Add DB API & model * Add release 10.0 to release mappings * Remove ironic\_tempest\_plugin/ directory * Do not validate root partition size for whole disk images in iscsi deploy * Switch non-vendor parts admin guide to hardware types * Clean up release notes before a release * Add Error Codes * Remove ironic\_tempest\_plugin/ directory * Fix initialization of auth token AuthProtocol * Rework exception handling on deploy failures in conductor * Add a provisioning target:adopt * Devstack: install qemu-system-x86 on RHEL * Add uWSGI support * Fix ironic node create cli * zuul: Update TLSPROXY based on branch * Run in superconductor cellsv2 mode for non-grenade jobs * Updated from global requirements * Add documentation covering storage multi-attach * Adds rescue\_interface to base driver class * Document the check done in "ironic-dbsync upgrade" * zuul: Add ability to specify a 'branch\_override' value * zuul: Remove some redundancy by consolidating the 'post.yaml' files * Use openstack port create instead of neutron port-create * ansible: handle mount of /sys the same way IPA does it * [ansible] add defaults to config * Prevent changes to the ironic\_tempest\_plugin/ directory * Finalize migration to keystoneauth adapters * Updated from global requirements * Follow up Add additional capabilities discovery for iRMC driver * Use NamedExtensionManager for drivers * Use the tempest plugin from openstack/ironic-tempest-plugin * Switch emphasis to hardware types in the installation guide * Use adapters for neutronclient * Remove deprecated ironic.common.policy.enforce() * Introduce hpOneView and ilorest to OneView * Auto-detect the defaults for [glance]swift\_{account,temp\_url\_key,endpoint\_url} * Add 'nova hypervisor-list' in example 
set of commands to compare the resources in Compute service and Bare Metal service * Receive and store agent version on heartbeat * tox: Use the default version of Python 3 for tox tests * Remove unused methond \_get\_connect\_string * Update comment on location of webapi-version-history.rst * Updated from global requirements * Do not access dbapi attributes on dbsync import * Fix swiftclient creation * Update docs to include API version pinning * Add networking-fujitsu ML2 driver to multitenacy doc * Updated from global requirements * 9.2.0 is the ironic version with rebuild configdrive * Pin API version during rolling upgrade * devstack to \`git pull sushy-tools\` if required * Add spec & priorities links to contributor doc * Fix HPE headers for oneview * Updated from global requirements * Fix the format command-line * Add information about neutron ML2 drivers to multitenancy docs * Apply pep8 check to app.wsgi * ironic.conf.sample includes default\_resource\_class * Add a configuration option for the default resource class * Rework drivers page in the admin documentation * Update bindep.txt for doc builds * Don't collect logs from powered off nodes * Add additional capabilities discovery for iRMC driver * Use adapters for inspectorclient * Use adapters for cinderclient * Imported Translations from Zanata * Followup to I07fb8115d254e877d8781207eaec203e3fdf8ad6 * Add missing gzip call to FAQ item on how to repack IPA * Rework keystone auth for glance * Remove setting of version/release from releasenotes * zuul.d: Remove unneeded required-projects * Updated from global requirements * Add 9.2 to release mappings * Remove provisioning network ports during tear down * Fix image type for partition-pxe\_ipmitool-tinyipa-python3 job 9.2.0 ----- * update description for Change Node Power State * Add no-vendor interface to the idrac hardware types * Updated from global requirements * Fail deploy if agent returns >= 400 * Don't run multinode jobs for changes to 
driver-requirements.txt * Revert "Introduce hpOneView and ilorest to OneView" * Revert "Migrate oneview driver to use python-hpOneView" * Revert "Fix persistent information when getting boot device" * Revert "Add a timeout for powering on/off a node on HPE OneView Driver" * Revert "Migrate python-oneviewclient validations to Ironic OneView drivers" * Revert "Remove python-oneviewclient from Ironic OneView drivers" * Revert "Get a new OneView client when needed" * Revert "Update python-ilorest-library to hardware type OneView" * Add missing 'autospec' to unit tests - /unit/objects/ * Add ansible deploy interface * Clean up release notes from the upcoming release * Fix misplaced reno note * Make the api format correctly * [devstack] stop setting or relying on standard properties * Remove some deprecated glance options * zuul.d/projects.yaml: Sort the job list * project.yaml: Remove 'branches:' & jobs that don't run on master * Miss node\_id in devstack lib * Update idrac hardware type documentation * Update Zuul 'gate' job * Rolling upgrades related dev documentation * Update python-ilorest-library to hardware type OneView * Add rescue\_interface to node DB table * Get a new OneView client when needed * Run tempest jobs when update requirements * Updated from global requirements * Remove unused IronicObjectIndirectionAPI from ironic-api * Add release note for fix to port 0 being valid * Simplify the logic of validate\_network\_port * Follow up Secure boot support for irmc-virtual-media driver * devstack: Clean up some of the devstack code * Remove python-oneviewclient from Ironic OneView drivers * Allow to set default ifaces in DevStack * Reword interface information in multitenancy docs * Ensure ping actually succed * Fix minor documentation missing dependency * Small fixes in the common reference architecture docs * [reno] Update ironic-dbsync's check object version * Migrate python-oneviewclient validations to Ironic OneView drivers * Remove unnesessary 
description for config parameters in cinder group * Update ironic.sample.conf * Fix the format issues of User guide * Zuul: add file extension to playbook path * Add I202 to flake ignore list * Revise deploy process documentation * Add a timeout for powering on/off a node on HPE OneView Driver * ironic-dbsync: check object versions * Update validating node information docs * Use jinja rendering from utils module * Add ability to provide configdrive when rebuilding * Finish the guide on upgrading to hardware types * Move ironic legacy jobs into the ironic tree * Fix missing logging format error * Add missing 'autospec' to unit tests - /unit/common/ * [bfv] Set the correct iqn for pxe * Fix "import xx as xx" grammer * Secure boot support for irmc-virtual-media driver * Change pxe dhcp options name to codes * Updated from global requirements * [docs] describe vendor passthru in hw types * Add bindep.txt file * Fix some mis-formatted log messages in oneview driver * Disallow rolling upgrade from Ocata to Queens * Add online data migrations for conductor version * [Devstack] Replace tap with veth * Support SUM based firmware update as clean step for iLO drivers * Add missing 'autospec' to unit tests - /unit/dhcp/ * Fix mis-formatted log messages * Use oslotest for base test case * Update tests to do not use deprecated test.services() * Follow-up patch 'Cleanup unit tests for ipmittool' * Makes ironic build reproducible * Remove 'next' for GET /nodes?limit=1&instance\_uuid= * ListType preserves the order of the input * Stop passing raw Exceptions as the reasons for ironic Image exceptions * Update after recent removal of cred manager aliases * ipmitool: reboot: Don't power off node if already off * Reduce complexity of node\_power\_action() function * Add default configuration files to data\_files * Documentation for 'oneview' hardware type * Cleanup unit tests for ipmittool * Use DocumentedRuleDefault instead of RuleDefault * main page: add links to docs on Upgrade to 
HW Types * Add documentation describing each Ironic state * Cleanup test-requirements * Fix API VIF tests when using flat network * Updated from global requirements * Migrate to stestr as unit tests runner * [reno] update for MAC address update fix * Revert "Change pxe dhcp options name to codes." * Drop neutron masking exception in vif\_attach * Rework update\_port\_address logic * api-ref portgroup\_id should be portgroup\_ident * Document setting discover\_hosts\_in\_cells\_interval in nova.conf * Adds more exception handling for ironic-conductor heartbeat * Updated from global requirements * Change pxe dhcp options name to codes * Updated from global requirements * Updated from global requirements * Reference architecture: common bits * Stop using Q\_PLUGIN\_EXTRA\_CONF\_{PATH|FILES} variables * Put unit test file in correct directory * Update vif\_attach from NeutronVIFPortIDMixin * Replace http with https for doc links * flake8: Enable some off-by-default checks * Update upgrade guide to use new pike release * [install docs] ironic -> openstack baremetal CLI * Using devstack configure\_rootwrap to configure ironic rootwrap * Use newer location for iso8601 UTC * reformat REST API Version History page * Fix persistent information when getting boot device * Migrate oneview driver to use python-hpOneView * [reno] Clarify fix for missing boot.prepare\_instance * [doc] Non-word updates to releasing doc * Introduce hpOneView and ilorest to OneView * Fix race condition in backfill\_version\_column() * Switch API ref to use versionadded syntax throughout * Replace DbMigrationError with DBMigrationError * [reno] Clarify fix for BFV & image\_source * Fix unit test for new fields in invaid API version * Put tests in correct location for ironic/api/controllers/v1/ * Troubleshooting docs: explain disabled compute services * Update documentation for \`\`ilo\`\` hardware type * Updated from global requirements * Boot from volume fails with 'iscsi' deploy interface * Boot 
from volume fails with 'iscsi' deploy interface * [contributor docs] ironic -> OSC baremetal CLI * Minor improvements to the resource classes documentation * Update Nova configuration documentation * Build docs with Python 2 for now * [doc] add FAQ about updating release notes * Follow-up for commit cb793d013610e6905f58c823e68580714991e2df * [docs] Update Releasing Ironic Projects * Add doc/source/\_static to .gitignore * Fix indentation in few of the documentation pages * Upgrade guide for \`snmp\` hardware type * tox.ini: Add 'py36' to the default envlist * devstack: Comment variables related to multi-tenant networking * Test ironic-dbsync online\_data\_migrations * Add a comment about default devstack images * Fix to use "." to source script files * Add #!/bin/bash to devstack/common\_settings * Add Sem-Ver flag to increment master branch version * conductor saves version in db * Update Pike release title to include version range * Updated from global requirements * remove REST API examples from RAID doc * [admin docs] ironic -> openstack baremetal CLI * [doc] change absolute to relative URL * Configuration documentation migrated * fix a typo in agent.py: s/doman/domain/ * Documentation for irmc hardware type * correct URLs in contributor docs & main index * Correct URLs in install docs * correct URLs in admin docs * Documentation for 'snmp' hardware type * Fix incorrect documentation urls * Updated from global requirements * Partially revert "Set resource class during upgrade" * Introduce keystoneauth adapters for clients * [doc] Replace http with https * Follow-up to \`\`ilo\`\` hardware type documentation * Set explicit default to enabled driver interfaces * Set resource class during upgrade * Fix names of capabilities for FibreChannel volume boot * iRMC: Follow-up: volume boot for virtual media boot interface * Do not restart n-cpu during upgrade * Make SNMP UDP transport settings configurable * Enable OSProfiler support in Ironic - follow-up * Wait for 
cleaning is completed after base smoke tests * Add 'hardware type' for Dell EMC iDRACs * Fix DRAC classic driver double manage/provide * [devstack] use resource classes by default * Add 9.1 to release\_mappings * Imported Translations from Zanata * Add 'force\_persistent\_boot\_device' to pxe props * devstack: Remove unused variable IRONIC\_VM\_NETWORK\_RANGE * Adds 9.0 to release\_mappings * Get rid of sourcing stackrc in grenade settings * Update reno for stable/pike * Revert "[reno] Add prelude for Pike release" 9.0.0 ----- * Add the new capabilities to the iLO InspectInterface * [docs] update irmc boot-from-volume * [releasenotes] update irmc's boot-from-volume support * [reno] Add prelude for Pike release * Add storage interface to enabling-drivers doc * Add admin guide for boot from volume * iRMC: Add documentation for remote volume boot * Remove ensure\_logs\_exist check during upgrade * Add functional API tests for volume connector and volume target * Follow-up to rolling upgrade docs * Update proliantutils version for Pike release * [reno] update * Documetation for 'ilo' hardware type * Follow up Secure boot support for irmc-pxe driver * Update the documentation links - code comments * Update the documentation links - install guide * Remove translator assignments from i18n * Add hardware types to support Cisco UCS Servers * Remove setting custom http\_timeout in grenade * Upgrade to hardware types: document changing interfaces for active nodes * Update the resource classes documentation based on recent progress * [devstack] switch to the latest API version and OSC commands * Prevent changes of a resource class for an active node * Guide on upgrading to hardware types * iRMC: Support volume boot for iRMC virtual media boot interface * Rolling upgrade procedure documentation * Release notes clean up for the next release * Fix missing print format error * Secure boot support for irmc-pxe driver * Adds hardware type for SNMP powered systems * Add a guide for 
Devstack configuration for boot-from-volume * Add a flag to always perform persistent boot on PXE interface * Put tests in correct location for ironic/api/controllers/v1/ * [tempest] also catch BadRequest in negative tests with physical\_network in old API * Use more specific asserts in tests * [Trivialfix]Fix typos in ironic * Remove WARNING from pin\_release\_version's help * Update ironic.conf.sample due to non-ironic code * Add new dbsync command with first online data migration * BFV Deploy skip minor logging, logic, and test fixes * Add hardware type for HPE OneView * [doc-migration] Add configuration folder for documentation * Add storage interface to api-ref * Add API for volume resources to api-ref * Disable automated cleaning for single node grenade * Optimize node locking on heartbeat * Remove file RELEASE-NOTES * Removed unnecessary setUp() call in unit tests * Adds doc for restore\_irmc\_bios\_config clean step * Remove SSH-based driver interfaces and drivers * [Tempest] fix negative tests on old API versions * Remove install-guide env which is no longer effective * Address review feedback for ipxe boot file fix * Change ramdisk log filename template * Remove usage of some of the deprecated methods * Updated from global requirements * grenade: Use test\_with\_retry to check if route is up * Don't use multicell setup for ironic & increase timeout * Tempest scenario test for boot-from-volume * Refactor VIFPortIDMixin: factor out common methods * Add negative attribute to negative port tests * Rolling upgrades support for create\_port RPCAPI * Fixes hashing issues for py3.5 * Generate iPXE boot script on start up * grenade: For multi-node grenade, do not upgrade nova * Changes log level of a message * Fix small issues in the installation documentation * Removes agent mixin from oneview drivers * Fix docstring and default value for local\_group\_info * [doc] update ironic's landing page * Adding note for ironic virt driver nova-compute changes * Added a 
condition for 'ilo' hardware type * Updated from global requirements * py3.5:Workaround fix for forcing virtualbmc installation with pip2 * [devstack] add support for running behind tls-proxy * Start passing portgroup information to Neutron * Add tempest tests for physical networks * Updated from global requirements * Refactor VIFPortIDMixin: rename * Doc for disk erase support in iLO drivers * DevStack: Add configuration for boot-from-volume * Refactor get\_physnets\_by\_portgroup\_id * Rolling upgrades support for port.physical\_network * Allow updating interfaces on a node in available state * replace 'interrace' with 'interface' * Improve port update API unit tests * Improve ports API reference * Expose ports' physical network attribute in API * Rename 'remove\_unavail\_fields' parameter * Updated from global requirements * Add missing parameter descriptions * Updated from global requirements * Generate iPXE boot script when deploying with boot from volume * Add Driver API change in 1.33 to history * Update URL home-page in documents according to document migration * Using non-persistent boot in PXE interface * Modifications for rolling upgrades * Update comments related to ipmi & old BMCs * Follow-up to fix for power action failure * Fix copy/paste error in VIF attach note * [reno] Clarify fix for inspect validation failures * [trivial] Fix argument descriptions * Remove \_ssh drivers from dev-quickstart * Fix broken links in tempest plugin README * Remove future plan from portgroup document * Enable OSProfiler support in Ironic * Revert "Wait until iDRAC is ready before out-of-band cleaning" * Force InnoDB engine on interfaces table * Add storage interface field to node-related notifications * Removed nonexistent option from quickstart snippet * Enable cinder storage interface for generic hardware * Mock random generator for BackoffLoopingCall in IPMI unittests * Raise HTTP 400 rather than 500 error * Make IP address of socat console configurable * Set 
nomulticell flag for starting nova-compute in grenade * Physical network aware VIF attachment * Update README to point at new doc location * Move ironic dbsync tool docs into doc/source/cli * Move doc/source/dev to doc/source/contributor * Move operator docs into into doc/source/admin * Move install guide into new doc/source/install location * Improve graceful shutdown of conductor process * switch from oslosphinx to openstackdocstheme * Fix quotes in documentation and schema description * Follow-up for bugfix 1694645 patch * Add REST API for volume connector and volume target operation * Add node power state validation to volume resource update/deletion * Make redfish power interface wait for the power state change * Refactor common keystone methods * Adds clean step 'restore\_irmc\_bios\_config' to iRMC drivers * Add CRUD notification objects for volume connector and volume target * Updated from global requirements * Don't retry power status if power action fails * Fix VIF list for noop network interface * Fetch Glance endpoint from Keystone if it's not provided in the configuration * Replace the usage of 'manager' with 'os\_primary' * Logic for skipping deployment with BFV * iPXE template support for iSCSI * Move \_abort\_attach\_volumes functionality to detach\_volumes * Allow to load a subset of object fields from DB * Unit test consistency: DB base and utils prefix * Updated from global requirements * Updated from global requirements * Remove unnecessary line in docstring * Validate portgroup physical network consistency * Wire in storage interface attach/detach operations * Wait until iDRAC is ready before out-of-band cleaning * Minor changes to object version-related code * Remove times.dbm prior to test run * Discover hosts while waiting for hypervisors to show up in devstack * Add docs for node.resource\_class and flavor creation * Updated from global requirements * Move port object creation to conductor * Make default\_boot\_option configurable in 
devstack * Trigger interface attach tests * Support setting inbound global-request-id * Follow-up docstring revision * Runs the script configure\_vm.py in py3.5 * Replace get\_transport with get\_rpc\_transport * Add version column * Add ldlinux.c32 to boot ISO for virtual media * Remove legacy auth loading * Add a note for specifying octal value of permission * Improve driver\_info/redfish\_verify\_ca value validation * Updated from global requirements * Stop sending custom context values over RPC * Replace assertTrue(isinstance()) with assertIsInstance() * Change volume metadata not to use nested dicts * Add physical network to port data model * Move deploy\_utils warnings to conductor start * Remove unused methods from GlanceImageService * [install-guide] explain the defaults calculation for hardware types * Improve driver\_info/redfish\_system\_id value validation * Add guru meditation report support * Adds parameters to run CI with hardware types * Fix description for [cinder] action\_retries option * Deprecate elilo support * Updated from global requirements * Update ipmitool installation and usage documentation * Replace test.attr with decorators.attr * Updated from global requirements * Replace test.attr with decorators.attr * remove explicit directions for release notes on current branch * Use cfg.URIOpt for URLs with required schemes * Updated from global requirements * Remove unneeded lookup policy check * Add Cinder storage driver * Add ipmitool vendor interface to the ipmi hardware type * Replace test.attr with decorators.attr * Fix directories permission for tftpboot * Comment the default values in policy.json.sample * Replace deprecated .assertRaisesRegexp() * Updated from global requirements * Remove remaining vendor passthru lookup/heartbeat * Prevent tests from using utils.execute() * Remove unit tests that test oslo\_concurrency.processutils.execute * Remove single quoted strings in json sample * Refactor install-guide: update node enrollment * 
Refactor install-guide: driver and hardware types configuration * Minor clean up in iLO drivers unit tests * Remove translation of log messages * Enable getting volume targets by their volume\_id * Check if sort key is allowed in API version * Updated from global requirements * Remove logging translation calls from ironic.common * [install-guide] add section on Glance+Swift config * Fix attribute name of cinder volume * Update reno for new ilo hardware type * Remove log translations from ironic/drivers Part-1 * Update developer quickstart doc about required OS version * Add 'iscsi' deploy support for 'ilo' hardware type * Trivial fix typos while reading doc * Fix docstrings in conductor manager * [devstack] start virtualpdu using full path * [Devstack] Increase default NIC numbers for VMs to 2 * Remove usage of parameter enforce\_type * Properly allow Ironic headers in REST API * Updated from global requirements * Fix a typo * DevStack: Install gunicorn and sushy based on g-r constraints * Fix keystone.py 'get\_service\_url' method parameter * Add functional api tests for node resource class * Refactor install-guide: integration with other services * Remove references to EOLed version of Ironic from the install guide * DevStack: Setup a Redfish environment * Add hardware type for HPE ProLiant servers based on iLO 4 * Bring the redfish driver address parameter closer to one of other drivers * [Grenade]: Do not run ir-api on primary node after upgrade * Validate outlet index in SNMP driver * [Devstack] Rework VMs connection logic * Fix oslo.messaging log level * Add context to IronicObject.\_from\_db\_object() * Add release notes for 8.0.0 * [api-ref] remove reference to old lookup/heartbeat * Follow-up patch to redfish documentation * [devstack] use the generic function to setup logging * Fix cleaning documents * Remove obsolete sentence from comment * TrivialFix: Remove logging import unused * Remove translation of log messages from ironic/drivers/modules/irmc * 
Run db\_sync after upgrade * Remove translation of log messages from ironic/drivers/modules/ucs * Start enforcing config variables type in tests * Add documentation for the redfish driver * Read disk identifier after config drive setup * Add a paragraph about image validation to Install Guide * Make terminal timeout value configurable * Remove nova mocks from documentation configuration * Remove fake\_ipmitool\_socat driver from the documentation * Add redfish driver * Ensure we install latest libivrt * Set env variables when all needed files are source * save\_and\_reraise\_exception() instead of raise * Follow-up patch of 7f12be1b14e371e269464883cb7dbcb75910e16f * VirtualPDU use libvirt group instead of libvirtd * Fix unit tests for oslo.config 4.0 * Always set host\_id when adding neutron ports * Add /baremetal path instead of port 6385 * Add SUSE instructions to the install guide * Remove pre-allocation model for OneView drivers * Remove log translations from iLO drivers * Follow-up patch of 565b31424ef4e1441cae022486fa6334a2811d21 * Setup logging in unit tests * Remove deprecated DHCP provider methods * Make config generator aware of 'default\_log\_levels' override * [Devstack] Fix libvirt group usage * Common cinder interface additional improvements * Config drive support for ceph radosgw * Improve error message for deleting node from error state * Updated from global requirements * Add comments re RPC versions being in sync * Help a user to enable console redirection * Fix some reST field lists in docstrings * Avoid double ".img" postfix of image file path in devstack installation * add portgroups in the task\_manager docstrings * Remove unneeded exception handling from agent driver * Updated from global requirements * Remove translation of log messages from ironic/dhcp and ironic/cmd * Updated from global requirements * Bypassing upload deploy ramdisk/kernel to glance when deploy iso is given * Drop commented import * Enforce releasenotes file naming * 
Remove unused methods in common/paths and common/rpc * Remove translation of log messages from ironic/api * Fix access to CONF in dhcp\_options\_for\_instance * Add string comparison for 'IRONIC\_DEPLOY\_DRIVER' * Modify the spelling mistakes Change explictly to explicitly 8.0.0 ----- * Revert "[Devstack] Rework VMs connection logic" * Fix base object serialization checks * Node should reflect what was saved * Changes 'deploy' and 'boot' interface for 'pxe\_ilo' driver * Use standard deploy interfaces for iscsi\_ilo and agent\_ilo * Refactor iLO drivers code to clean 'boot' and 'deploy' operations * Updated from global requirements * Add base cinder common interface * Updates to RPC and object version pinning * Add release note for messaging alias removal * Remove deprecated method build\_instance\_info\_for\_deploy() * Remove deprecated, untested ipminative driver * [Devstack] Rework VMs connection logic * Docs: bump tempest microversion caps after branching * Add assertion of name to test\_list\_portgroups test * Skip PortNotFound when unbinding port * Remove unnecessary setUp function in testcase * Remove deprecated [ilo]/clean\_priority\_erase\_devices config * Remove extra blank space in ClientSide error msg * Updated from global requirements * Convert BaseDriver.\*\_interfaces to tuples * [Devstack] cleanup upgrade settings * [doc] Update examples in devstack section * devstack: install python-dracclient if DRAC enabled * Call clean\_up\_instance() during node teardown for Agent deploy * Don't pass sqlite\_db in db\_options.set\_defaults() * Fix some api field lists in docstrings * Copy and append to static lists * Define minimum required API ver for portgroups * Add RPC and object version pinning * Updated from global requirements * Fix docstrings for creating methods in baremetal api tests * Extend tests and checks for node VIFs * Remove translation of log messages from ironic/conductor * Add functional API tests for portgroups * Revert the move of the 
logger setup * [devstack] Use global requirements for virtualbmc * Updates documentation to install PySqlite3 * Remove log translation function calls from ironic.db * Fix local copy of scenario manager * Add standalone tests using direct HTTP links * devstack: When Python 3 enabled, use Python 3 * Remove old oslo.messaging transport aliases * Fix file\_has\_content function for Py3 * Fix usage of various deprecated methods * Prune local copy of tempest.scenario.manager.py * devstack: Don't modprobe inside containers * Include a copy of tempest.scenario.manager module * flake8: Specify 'ironic' as name of app * Updated from global requirements * Fix API doc URL in GET / response * Add ironic standlaone test with ipmi dynamic driver * Update new proliantutils version to 2.2.1 * Add Ironic standalone tests * Fix typos of filename in api-ref * Updated from global requirements * Fix the exception message in tempest plugin * Speed up test\_touch\_conductor\_deadlock() * Cleanup hung iscsi session * Refactor waiters in our tempest plugin * Deprecate support for glance v1 * This adds a tempest test for creating a chassis with a specific UUID * Address a shell syntax mistake * Update ironic.conf.sample * grenade: Only 'enable\_plugin ironic' if not already in conf * Remove overwriting the default value of db\_max\_retries * Do not load credentials on import in tempest plugin clients.py * Update the Ironic Upgrade guide * Validation before perform node deallocation * Add wsgi handling to ironic-api in devstack * Fix updating node.driver to classic * devstack: Make sentry \_IRONIC\_DEVSTACK\_LIB a global variable * Use Sphinx 1.5 warning-is-error * Fixed release note for DBDeadLock handling * Remove references to py34 from developer guide * Delete release note to fix build * Correct typos in doc files * Clean up eventlet monkey patch comment and reno * Moved fix-socat-command release note * Allow to attach/detach VIFs to active ironic nodes * Move eventlet monkey patch code * 
Updated from global requirements * doc: update FAQ for release notes * Update test requirement * Add tempest plugin API tests for driver * Updated from global requirements * Remove gettext.install() for unit tests * Fix missing \_ import in driver\_factory * Add support for DBDeadlock handling * Fix BaseBaremetalTest.\_assertExpected docstring * Updated ramdisk API docstrings * Trivial: Change hardcoded values in tempest plugin * Developer guide should not include Python 3.4 * Add testcases for iLO drivers * Deduplicate \_assertExpected method in tests * Remove unused logging import * Use specific end version since liberty is EOL * Use flake8-import-order * Document PXE with Spanning Tree in troubleshooting FAQ * Skip VIF tests for standalone ironic * Switch to new location for oslo.db test cases * Explicitly use python 2 for the unit-with-driver-libs tox target * Add ironic port group CRUD notifications * Remove logging import unused * Update release nodes for Ocata * reno 'upgrades' should be 'upgrade' * Updated from global requirements * Update docs create port group 7.0.0 ----- * Clean up release notes for 7.0.0 * Add a summary release note for ocata * Walk over all objects when doing VIF detach * Fix unit tests with UcsSdk installed * Mock client initializations for irmc and oneview * Follow up patch for SNMPv3 support * Add a tox target for unit tests with driver libraries * Fix missed '\_' import * Change misc to test\_utils for tempest test * Source lib/ironic in grenade settings * Update api-ref for dynamic drivers * Switch to use test\_utils.call\_until\_true * Add port groups configuration documentation * Remove most unsupported drivers * SNMP agent support for OOB inspection for iLO Drivers * No node interface settings for classic drivers * Unbind tenant ports before rebuild * Remove a py34 environment from tox * Fix object save after refresh failure * Pass session directly to swiftclient * Adds network check in upgrade phase in devstack * Fix log 
formating in ironic/common/neutron * Follow-up iRMC power driver for soft reboot/poff * Use https instead of http for git.openstack.org * Validate the network interface before cleaning * log if 'flat' interface and no cleaning network * exception from driver\_factory.default\_interface() * devstack: Adding a README for ironic-bm-logs directory * [devstack] Allow using "ipmi" hardware type * Remove trailing slash from base\_url in tempest plugin * Improve enabled\_\*\_interfaces config help and validation * Prepare for using standard python tests * [Devstack] fix waiting resources on subnode * Log an actual error message when failed to load new style credentials * Speed up irmc power unit tests * Add bumping sem-ver to the releasing docs * Make \_send\_sensors\_data concurrent * [devstack] remove deprecated IRONIC\_IPMIINFO\_FILE * Fail conductor startup if invalid defaults exist * Add dynamic interfaces fields to base node notification * Improve conductor driver validation at startup * Remove iSCSI deploy support for IPA Mitaka * Do not change admin\_state for tenant port * Use delay configoption for ssh.SSHPower drivers * Add the timeout parameter to relevant methods in the fake power interface * Adding clean-steps via json string examples * Allow duplicate execution of update node DB api method * Remove deprecated heartbeat policy check * Add sem-ver flag so pbr generates correct version * Fix a few docstring warnings * Remove deprecated [deploy]erase\_devices\_iterations * Remove support for driver object periodic tasks * Log reason for hardware type registration failure * Duplicated code in ..api.get\_active\_driver\_dict() * Add hardware type 'irmc' for FUJITSU PRIMERGY servers * Allow using resource classes * DevStack: Only install edk2-ovmf on Fedora * [Devstack] Add stack user to libvirt group * Add soft reboot, soft power off and power timeout to api-ref * Add dynamic interfaces fields to nodes API * Add dynamic driver functionality to REST API * 
[Devstack] Download both disk and uec images * [Devstack] Set DEFAULT\_IMAGE\_NAME variable * Update the outdated link in user-guide * Add Inject NMI to api-ref * Don't override device\_owner for tenant network ports * Validate port info before assume we may use it * Switch to decorators.idempotent\_id * Updated from global requirements * Minor updates to multi-tenancy documentation * Follow-up iRMC driver doc update * Devstack: Create a "no ansi" logfile for the baremetal console logs * Add hardware type for IPMI using ipmitool * [Devstack] enable only pxe|agent\_ipmitool by default * Update iRMC driver doc for soft reboot and soft power off * Fix broken link in the iLO driver docs * DevStack: Fix cleaning up nodes with NVRAM (UEFI) * iRMC power driver for soft reboot and soft power off * Update proliantutils version required for Ocata release * Fix rel note format of the new feature Inject NMI * iRMC management driver for Inject NMI * Revert "Revert "Remove ClusteredComputeManager"" * Use context manager for better file handling * Updated from global requirements * Fix typo in the metrics.rst file * Allow to use no nova installation * Fix api-ref warnings * Turn NOTE into docstring * Updated from global requirements * Correctly cache "abortable" flag for manual clean steps * Use global vars for storing image deploy path's * Ipmitool management driver for Inject NMI * Generic management I/F for Inject NMI * Clean up driver\_factory.enabled\_supported\_interfaces * Add hardware types to the hash ring * Default ironic to not use nested KVM * Do not use user token in neutron client * Use only Glance V2 by default (with a compatibility option) * Enable manual-management hardware type in devstack * Register/unregister hardware interfaces for conductors * Validate the generated swift temp url * Move to tooz hash ring implementation * Add VIFs attach/detach to api-ref * DevStack: Configure nodes/environment to boot in UEFI mode * Add tests for Payloads with SCHEMAs * 
make sure OVS\_PHYSICAL\_BRIDGE is up before bring up vlan interface * Update troubleshooting docs on no valid host found error * Expose default interface calculation from driver\_factory * Add default column to ConductorHardwareInterfaces * Do not fail in Inspector.\_\_init\_\_ if [inspector]enabled is False * Use TENANT\_VIF\_KEY constant everywhere * Updated from global requirements * Allow to attach/detach VIF to portgroup * Refactor DRAC driver boot-device tests * Updated from global requirements * Remove check for UEFI + Whole disk images * Updated from global requirements * Update validate\_ports from BaremetalBasicOps * Ipmitool power driver for soft reboot and soft power off * Allow to set min,max API microversion in tempest * Skip VIF api tests for old api versions * Fix assertEqual parmeters position in unittests * Ensures that OneView nodes are free for use by Ironic * Move default image logic from DevStack to Ironic * Document HCTL for root device hints * Removes unnecessary utf-8 encoding * Move heartbeat processing to separate mixin class * Add Virtual Network Interface REST APIs * Fix logging if power interface does not support timeout * Add lsblk to ironic-lib filters * Fix setting persistent boot device does not work * Updated from global requirements * Add docs about creating release note when metrics change * Fix take over of ACTIVE nodes in AgentDeploy * Fix take over for ACTIVE nodes in PXEBoot * Don't translate exceptions w/ no message * Correct logging of loaded drivers/hardware types/interfaces * Move baremetal tempest config setting from devstack * Change object parameter of swift functions * Remove greenlet useless requirement * Fixes grammar in the hash\_partition\_exponent description * Revert "Disable placement-api by default" * Remove service argument from tempest plugin client manager * Fix the comma's wrong locations * Remove netaddr useless requirement * Generic power interface for soft reboot and soft power off * Create a table to 
track loaded interfaces * Remove trailing backtick * Updated from global requirements * Remove 'fork' option from socat command * Add Virtual Network Interface RPC APIs * Catch unknown exceptions in validate driver ifaces * Disable placement-api by default * Update regenerate-samples.sh api-ref script * Updated from global requirements * Add Virtual Network Interface Driver APIs * 'updated\_at' field value after node is updated * Add node console notifications * Add node maintenance notifications * Add ironic resources CRUD notifications * Auto-set nullable notification payload fields when needed * Update dev-quickstart: interval value cannot be -1 * Fix wrong exception message when deploy failed * Add storage\_interface to base driver class * Update multi-tenancy documentation * Add storage\_interface to node DB table * Add API reference for portgroup's mode and properties * Set access\_policy for messaging's dispatcher * Add a NodePayload test * Add test to ensure policy is always authorized * Fix bashate warning in devstack plugin * Forbid removing portgroup mode * Configure tempest for multitenancy/flat network * Wrap iscsi portal in []'s if IPv6 * Fix policy dict checkers * Updated from global requirements * Introduce generic hardware types * Remove grenade config workaround * Add portgroup configuration fields * Onetime boot when set\_boot\_device isn't persistent * Revert "Change liberty's reno page to use the tag" * Update multitenancy docs * Use oslo\_serialization.base64 to follow OpenStack Python3 * Updated from global requirements * Support defining and loading hardware types * Change liberty's reno page to use the tag * DevStack: Make $IRONIC\_IMAGE\_NAME less dependent of the name in DevStack * Fix error when system uses /usr/bin/qemu-kvm, as in CentOS 7.2 * Adds another validation step when using dynamic allocation * Fix return values in OneView deploy interface * Clarify the comment about the object hashes * Reusing oneview\_client when possible * 
Enhance wait\_for\_bm\_node\_status waiter * Use polling in set\_console\_mode tempest test * Make CONF.debug also reflect on IPA * Fail ironic startup if no protocol prefix in ironic api address * Remove agent vendor passthru completely * Remove iBoot, WoL and AMT drivers * Remove agent vendor passthru from OneView drivers * Move CONF.service\_available.ironic to our plugin * devstack: add vnc listen address * Autospec ironic-lib mocks, fix test error string * Remove deprecation of snmp drivers * Allow setting dhcp\_provider in devstack * Fix default value of "ignore\_req\_list" config option * Add unit test for create\_node RPC call * Documentation for Security Groups for baremetal servers * Remove agent vendor passthru from iLO drvers * Updated from global requirements * Add release names & numbers to API version history * Remove the VALID\_ROOT\_DEVICE\_HINTS list * Make "enabled\_drivers" config option more resilient to failures * Fix double dots at the end of a message to single dot * Clean up object code * Use IronicObject.\_from\_db\_object\_list method * Update help for 'provisioning\_network' option * Updated from global requirements * Add virtualpdu to ironic devstack plugin * Auto enable the deploy driver * Add volume\_connectors and volume\_targets to task * Renaming audit map conf sample file * Support names for {cleaning,provisioning}\_network * Allow use \*\_ipmitool with vbmc on multinode * Add RPCs to support volume target operations * Fix import method to follow community guideline * Add VolumeTarget object * Unneeded testing in DB migration of volume connector * Add volume\_targets table to database * Cleanup adding Ironic to cluster on upgrade case * Move interface validation from API to conductor side * Update the links in iLO documentation * Turn off tempest's multitenant network tests * Make all IronicExceptions RPC-serializable * Do not source old/localrc twise in grenade * Fix docs error about OOB RAID support * Remove agent vendor 
passthru from most drivers * Follow-up for volume connector db\_id * Remove file prefix parameter from lockutils methods * Install syslinux package only for Wheezy / Trusty * Show team and repo badges on README * Drac: Deprecate drac\_host property * Update keystone\_authtoken configuration sample in the install guide * Add RPCs to support volume connector operation * Add VolumeConnector object * Add volume\_connectors table to save connector information * Minor changes to neutron security groups code * Drop bad skip check in tempest plugin * Correct DB Interface migration test * Updated from global requirements * Add support for Security Groups for baremetal servers * mask private keys for the ssh power driver * Remove deprecated Neutron DHCP provider methods * Add notification documentation to install guide * Fix the message in the set\_raid\_config method * Convert iPXE boot script to Jinja template * Fix PXE setup for fresh Ubuntu Xenial * Add node (database and objects) fields for all interfaces * Move \`deploy\_forces\_oob\_reboot\` to deploy drivers * Add route to Neutron private network * Rely on portgroup standalone\_ports\_supported * Add node provision state change notification * Update the alembic migration section in the developer FAQ * Add notification documentation to administrator's guide * Revert "Remove ClusteredComputeManager" * Remove ClusteredComputeManager * Followup to 0335e81a8787 * Update iptables rules and services IPs for multinode * Add devstack setup\_vxlan\_network() * Skip some steps for multinode case * Timing metrics: iRMC drivers * Use function is\_valid\_mac from oslo.utils * Docs: Document using operators with root device hints * Add portgroup to api-ref * Updated from global requirements * Add user and project domains to ironic context * Bring configurations from tempest to ironic\_tempest\_plugin * Do not pass ipa-driver-name as kernel parameter * Timing metrics: OneView drivers * Add unit test for microversion validator * 
Update ironic node names for multinode case * Update devstack provision net config for multihost * Add CI documentation outline * Add possibility to remove chassis\_uuid from a node * Create dummy interfaces for use with hardware types * [install-guide] describe service clients auth * Simplify base interfaces in ironic.drivers.base * Integrate portgroups with ports to support LAG * Updated from global requirements * Increase verbosity of devstack/lib/ironic * Update to hacking 0.12.0 and use new checks * Add PS4 for better logfile information of devstack runs * Update guide section for messaging setup * Updated from global requirements * Replaces uuid.uuid4 with uuidutils.generate\_uuid() * Enable PXE for systems using petitboot * Fix typo of 'authenticaiton' * Add a unit test for microversion validation V1.22 * Clean up unit test of API root test * DevStack: Fix standard PXE on Ubuntu Xenial * Skip db configuration on subnodes * Ignore required\_services for multinode topology * Add PortGroups API * DevStack: Support for creating UEFI VMs * Updated from global requirements * Clarify ironic governance requirements and process * API: lookup() ignore malformed MAC addresses * TrivialFix: Fix typo in config file * DRAC get\_bios\_config() passthru causes exception * Fix exception handling in iscsi\_deploy.continue\_deploy * Log currently known iSCSI devices when we retry waiting for iSCSI target * Use kvm for ironic VMs when possible * Correct log the node UUID on failure * Updated from global requirements * Change 'writeable' to 'writable' * Add the way to get the deploy ram disks * Remove use of 'vconfig' command in devstack ironic script * Imported Translations from Zanata * Updated from global requirements * Revert "Set SUBNETPOOL\_PREFIX\_V4 to FIXED\_RANGE" * Fix typo in release note filename * Use function import\_versioned\_module from oslo.utils * Updated from global requirements * Remove "dhcp" command from the iPXE template * Fixes a small documentation 
typo in snmp * IPMI command should depend on console type * Trivial fix of notifications doc * Mock ironic-lib properly in test\_deploy\_utils * Remove ..agent.build\_instance\_info\_for\_deploy() in Pike * Trivial: fix typo in docstring * Add a missing error check in ipmitool driver's reboot * Adding Timing metrics for DRAC drivers * Remove 'agent\_last\_heartbeat' from node.driver\_internal\_info * Add power state change notifications * Skip create\_ovs\_taps() for multitenancy case * Remove unnecessary '.' before ':' in ironic rst * Updated from global requirements * Imported Translations from Zanata * Replace parse\_root\_device\_hints with the ironic-lib version one * Fixes parameters validation in SSH power manager * Fix API docs to include API version history * fix a typo in document * Updated from global requirements * Update guide for PXE multi-architecture setup * Remove "agent\_last\_heartbeat" internal field from agent drivers * No need to clear "target\_provision\_state" again from conductor * Trivial: fix warning message formatting * Updated from global requirements * Fix some typos * Add docs about releasing ironic projects * Fix unit tests failing with ironic-lib 2.1.1 * Do not hide unexpected exceptions in inspection code * Avoid name errors in oneview periodics * A few fixes in Multitenancy document * Introduce default\_boot\_option configuration option * Fix broken xenial job * Fix setting custom IRONIC\_VM\_NETWORK\_BRIDGE * Update configure\_tenant\_networks * Remove wrong check from conductor periodic task * Remove reservation from sync power states db filter * Fix a typo in deploy.py * Updated from global requirements * Fix some PEP8 issues and Openstack Licensing * Clarify when oneview node can be managed by ironic * Add tense guide to release note FAQ * Refactor \_test\_build\_pxe\_config\_options tests * Imported Translations from Zanata * OneView driver docs explaining hardware inspection * Enable release notes translation * Clean up 
provision ports when reattempting deploy * Remove unnecessary option from plugin settings * Cleanup unused (i)PXE kernel parameters * Set SUBNETPOOL\_PREFIX\_V4 to FIXED\_RANGE * Enable DeprecationWarning in test environments * Fix \_lookup() method for node API routing * Log node state transitions at INFO level * Update ironic config docs for keystone v3 * Clean exceptions handling in conductor manager * Move build\_instance\_info\_for\_deploy to deploy\_utils * Fix undisplayed notes in Quick-Start * Keep numbering of list in Install Guide * Add description for vendor passthru methods * [install-guide] describe pxe.ipxe\_swift\_tempurl * Fix docstrings in tempest plugin baremetal json client * Add entry\_point for oslo policy scripts * Remove unneeded exception handling from conductor * Remove unused methods in common/utils.py * Do not use mutable object as func default param * Trivial: Fix some typos in comments and docstring * doc: Add oslo.i18n usage link * Replace assertTrue(isinstance()) with assertIsInstance() * Fix typo: remove redundant 'the' * Support multi arch deployment * Updated from global requirements * Use method delete\_if\_exists from oslo.utils * Use assertRaises() instead of fail() * Cleanup get\_ilo\_license() * Fix grenade jobs * Add a missing whitespace to an error message * Invalid URL and Typo in enrollment.rst * Update configuration reference link to latest draft * Update external links to developer documentation * Fail test if excepted error was not raised * Add inspection feature for the OneView drivers * Use correct option value for standalone install * Move flavor create under 'VIRT\_DRIVER == ironic' * Change links to point to new install guide * Fix inexact config option name in multitenancy.rst * Fix typos in docstring/comments * Have bashate run for entire project * Change 'decom' to clean/cleaning * Fix docstring typo in test\_common.py * Fix invalid git url in devstack/local.conf sample * Fix absolute links to install-guide.rst 
in developer docs * Update developer's guide "Installation Guide" link * Add link to new guide in old install guide * Fixing Typo * [install-guide] Import "Setup the drivers for the Bare Metal service" * [install-guide] Import "Trusted boot with partition image" * [install-guide] Import "Building or downloading a deploy ramdisk image" * [install-guide] Import "Appending kernel parameters to boot instances" * [install-guide] Import configdrive * [install-guide] Import HTTPS, standalone and root device hints * [install-guide] Import "Enrollment" and "Troubleshooting" sections * [install-guide] Import "Local boot with partition images" * [install-guide] Import "Flavor creation" * [install-guide] Import "Image requirements" * [install-guide] Import "integration with other OpenStack components" * [install-guide] Import Install and configure sections * [install-guide] Import "Bare Metal service overview" * Remove unused method is\_valid\_ipv6\_cidr * Support https in devstack plugin * Use six.StringIO instead of six.moves.StringIO * Remove unneeded try..except in heartbeat * Fix a typo in helper.py * Add more details to MIGRATIONS\_TIMEOUT note * Fixes wrong steps to perform migration of nodes * Increase timeout for migration-related tests * Update reno index for Newton * Add i18n \_() to string * Change the logic of selecting image for tests * Always return chassis UUID in node's API representation * Updated from global requirements * Fix iLO drivers to not clear local\_gb if its not detected 6.2.0 ----- * Clean up release notes for 6.2.0 * Fix DRAC passthru 'list\_unfinished\_jobs' desc * DevStack: Use Jinja2 for templating when creating new VMs * DRAC: list unfinished jobs * Fix broken unit tests for get\_ilo\_object * Sync ironic-lib.filters from ironic-lib * Documentation change for feature updates in iLO drivers * Remove websockify from requirements * Add a note about security groups in install guide * Remove unnecessary setUp * Adds a missing space in a help 
string * Remove duplicated line wrt configdrive * Notification event types have status 'error' * Refactor common checks when instantiating the ipmitool classes * Grub2 by default for PXE + UEFI * Support configdrive in iscsi deploy for whole disk images * Remove NotificationEventTypeError as not needed * Mark untested drivers as unsupported * [trivial] Fix typo in docstring * Replace "phase" with "status" in notification base * Updated from global requirements * Fix test syntax error in devstack/lib/ironic * Separate WSGIService from RPCService * Fix link from doc index to user guide * Update proliantutils version required for Newton release * Remove unused argument in Tempest Plugin * Fix docstrings in Tempest Plugin REST client for Ironic API * Fix docstrings to match with method arguments * Remove cyclic import between rpcapi and objects.base * Fix nits on DRAC OOB inspection patch * Fix DRAC failure during automated cleaning * Replace six iteration methods with standard ones * Timing metrics: iLO drivers * Use assertEqual() instead of assertDictEqual() * Configure clean network to provision network * Updated from global requirements * \_\_ne\_\_() unit tests & have special methods use (self, other) * Add metrics to administrator guide * Add \_\_ne\_\_() function for API Version object * Update unit tests for neutron interface * Update ironic/ironic.conf.sample * Allow using TempURLs for deploy images * Log a warning for unsupported drivers and interfaces * Add a basic install guide * [api-ref] Remove temporary block in conf.py * Deny chassis with too long description * Update the string format * [api-ref] Correcting type of r\_addresses parameter * Remove unused file: safe\_utils.py * DRAC OOB inspection * Remove neutron client workarounds * Update driver requirement for iRMC * Refresh fsm in task when a shared lock is upgraded * Updated from global requirements * Fix exception handling in NodesController.\_lookup * Remove unused LOG and CONF * Fix updating 
port.portgroup\_uuid for node * Add a newline at the end of release note files * Replace DOS line endings with Unix * Fix ironic-multitenant-network job * Update test\_update\_portgroup\_address\_no\_vif\_id test * Use assertIsInstance/assertNotIsInstance in tests * Add standalone\_ports\_supported to portgroup - DB * Config logABug feature for Ironic api-ref * DevStack: Configure retrieving logs from the deploy ramdisk * DRAC RAID configuration * Metrics for ConductorManager * Option to enroll nodes with drac driver * Allow suppressing ramdisk logs collection * Fix pep8 on Python3.5 * Fix incorrect order of params of assertEqual() * Updated from global requirements * Fix for check if dynamic allocation model is enabled * Add multi-tenancy section to security doc * Fix formatting strings in LOG.error * Mask instance secrets in API responses * Update documentation for keystone policy support * Fix typo in policy.json.sample * Add node serial console documentation * Prevent URL collisions with sub-controllers: nodes/ports * Centralize Config Options - patch merge, cleanup * Update the webapi version history reference * Fix fall back to newer keystonemiddleware options * OneView test nodes to use dynamic allocation * Updated from global requirements * Fix issues in dev-quickstart and index * Updated from global requirements * Add notification base classes and docs * Update hacking test-requirement * Documentation update * Removed unneeded vlan settings from neutron config * iLO drivers documentation update * Move console documentation to separate file * Switch Inspector interface to pass keystoneauth sessions * Adds instructions to perform nodes migration * Replace DB API call to object's method in iLO drivers * Move "server\_profile\_template\_uri" to REQUIRED\_ON\_PROPERTIES * Using assertIsNone() is preferred over assertEqual() * Updated from global requirements * Update api-ref for v1.22 * Updated from global requirements * Pass swiftclient header values as 
strings * Get ready for os-api-ref sphinx theme change * Log node uuid rather than id when acquiring node lock * Allow changing lock purpose on lock upgrade * Fix typo: interations -> iterations * Update code to use Pike as the code name * Operator documentation for multitenancy * Always set DEFAULT/host in devstack * Fix AgentDeploy take\_over() docstring * Clean imports in code * Copy iPXE script over only when needed * Fix incorrect order of params of assertEqual() * Fix iLO drivers inconsistent boot mode default value * Update readme file * Bring upgrade documentation up to date * Fix test\_find\_node\_by\_macs test * Use memory mode for sqlite in db test * Fix key word argument interface\_type -> interface * Use upper-constraints for all tox targets * Add nova scheduler\_host\_subset\_size option to docs * Fix the description of inspection time fields * DevStack: No need to change the ramdisk filesystem type * Fix incorrect order of params of assertEqual() in test\_objects.py * Fix assertEqual(10, 10) in unit/api/v1/test\_utils.py * Adding InfiniBand Support * Doc: Recommend users to update their systems * Centralize config options - [iscsi] * Centralize config options - [pxe] * Add "erase\_devices\_metadata\_priority" config option * Updated from global requirements * Update renos for fix to ipmi's set-boot-device * Remove unused [pxe]disk\_devices option * IPMINative: Check the boot mode when setting the boot device * IPMITool: Check the boot mode when setting the boot device * Fix ssh credential validation message * Remove CONF.import\_opt() from api/controllers/v1/node.py * Document retrieving logs from the deploy ramdisk * Fix updating port MAC address for active nodes * Remove incorrect CONF.import\_opt() from test\_ipmitool.py 6.1.0 ----- * Rename some variables in test\_ipminative.py * Update proliantutils version required for Newton release * Refactor OneView dynamic allocation release notes * Clean up release notes for 6.1.0 * Refactor multitenant 
networking release notes * DevStack guide: Bump IRONIC\_VM\_SPECS\_RAM to 1280 * Deprecate ClusteredComputeManager * 'As of' in documentation is incorrect * Updated Dev quickstart for viewing doc changes * Remove duplicate parameters from local.conf example * Check keyword arguments * Deprecate putting periodic tasks on a driver object * Updated from global requirements * Add metrics for the ipminative driver * test\_console\_utils: using mock\_open for builtin open() * Update devstack configure\_ironic\_ssh\_keypair * Trivial: Remove useless function call in glance service test * Simplify code by using mask\_dict\_password (again) * Officially deprecate agent passthru classes and API * Timing metrics: pxe boot and iscsi deploy driver * Fix the mistakes in Installation Guide doc * Use devstack test-config phase * Rename BaseApiTest.config to app\_config * Documentation fixes for iLO SSL Certificate feature * Metrics for agent client * Simplify code by using mask\_dict\_password * OneView driver docs explaining Dynamic Allocation * Docs: Run py34 tox test before py27 * Collect deployment logs from IPA * Fix typo * Remove oslo-incubator references * Promote agent vendor passthru to core API * Update add nova user to baremetal\_admin behaviour * Fix typo in Install-guide.rst file * Replacing generic OneViewError w/ InvalidNodeParameter * Add Dynamic Allocation feature for the OneView drivers * Fix \_\_all\_\_ module attributes * Fix tempest realted exceptions during docs build * Add keystone policy support to Ironic * Follow up to keystoneauth patch * Add a data migration to fill node.network\_interface * Test that network\_interface is explicitly set on POST/PATCH * Updated from global requirements * Create a custom StringField that can process functions * Revert "Devstack should use a prebuilt ramdisk by default" * Fix for "db type could not be determined" error message * Update devstack plugin with new auth options * Migrate to using keystoneauth Sessions * 
Updating dev quickstart to include compatiblity for newest distros * Update nova scheduler\_host\_manager config docs * Extend the "configuring ironic-api behind mod\_wsgi" guide * Add metrics for the ipmitool driver * Timing metrics for agent deploy classes * Pass agent metrics config via conductor * Minor docstring and unittests fixes for IPMIConsole * Move default network\_interface logic in node object * Updated from global requirements * Devstack should use a prebuilt ramdisk by default * Updated tests for db migration scripts * Centralize config options - [agent] * Log full config only once in conductor * Add node.resource\_class field * Add api-ref for new port fields * Add support for the audit middleware * Change comment regarding network\_interface * Fix rendering for version 1.14 * Use 'UUID', not 'uuid' in exception strings * IPMITool: add IPMISocatConsole and IPMIConsole class * Use assertEqual() instead of assertDictEqual() * Remove unused code when failing to start console * Trivial: Fix a trivial flake8 error * Centralize config options - [deploy] * Centralize config options - [api] * Added note to local.conf addressing firewall/proxy blocking Git protocol * Bug fixes and doc updates for adoption * Do the VM setup only when requested * Remove unused import * Remove duplicate copyright * Add build-essential to required packages for development * Implement new heartbeat for AgentDeploy * Add Python 3.5 tox venv * Updated from global requirements * Doc update for in-band cleaning support on more drivers * Updated from global requirements * Support to validate iLO SSL certificate in iLO drivers * Update {configure|cleanup}ironic\_provision\_network * Add test to verify ironic multitenancy * Add multitenancy devstack configuration examples * Following the hacking rule for string interpolation at logging * Centralize config options - [DEFAULT] * Add py35 to tox environments * Metric chassis, driver, node, and port API calls * Fix 
fake.FakeBoot.prepare\_ramdisk() signature * Follow-up to 317392 * Follow-up patch of 0fcf2e8b51e7dbbcde6d4480b8a7b9c807651546 * Updated from global requirements * Expose node's network\_interface field in API * Update devstack section of quickstart to use agent\_ipmitool * Grammar fix in code contribution guide * Deprecate [ilo]/clean\_priority\_erase\_devices config * Add configure\_provision\_network function * Update Ironic VM network connection * Centralize config options - [neutron] * Follow-up fixes to 206244 * Nova-compatible serial console: socat console\_utils * Updated from global requirements * Add multitenancy-related fields to port API object * Update the deploy drivers with network flipping logic * Add 'neutron' network interface * Fix docstring warnings * Add and document the "rotational" root device hint * Add network interface to base driver class * Increase devstack BM VM RAM for coreos to boot * Config variable to configure [glance] section * Add support for building ISO for deploy ramdisk * Add a doc about appending kernel parameters to boot instances * Trivial grammar fixes to the upgrade guide * Remove unused expected\_filter in the unit test * Updated from global requirements * Remove white space between print and () * Remove IBootOperationError exception * Delete bios\_wsman\_mock.py from DRAC driver * Correct reraising of exception * Allow to enroll nodes with oneview driver * Add internal\_info field to ports and portgroups * Centralize config options - [glance] * Document API max\_limit configuration option * Fix two types in ironic.conf.sample * Remove unused LOG * Remove iterated form of side effects * Improve the readability of configuration drive doc part * Drop IRONIC\_DEPLOY\_DRIVER\_ISCSI\_WITH\_IPA from documentation * Allow to use network interfaces in devstack * Updated from global requirements * Centralize config options - [virtualbox] * Centralize config options - [swift] * Centralize config options - [ssh] * Centralize 
config options - [snmp] * Add Ironic specs process to the code contribution guide * Add network\_interface node field to DB and object * Fix typo in inspection.rst * Add missing translation marker to clear\_node\_target\_power\_state * Throwing an exception when creating a node with tags * Follow-up patch of 9a1aeb76da2ed53e042a94ead8640af9374a10bf * Fix releasenotes formatting error * Improve tests for driver's parse\_driver\_info() * Centralize config options - [seamicro] * Centralize config options - [oneview] * Centralize config options - [keystone] * Centralize config options - [irmc] * Centralize config options - [ipmi] * Centralize config options - [inspector] * Centralize config options - [ilo] * Introduce new driver call and RPC for heartbeat * Remove unnecessary calls to dict.keys() * Fail early if ramdisk type is dib, and not building * Add dbapi and objects functions to get a node by associated MAC addresses * Drop references to RPC calls from user-visible errors * Centralize config options - [iboot] * Updated from global requirements * Replace dict.get(key) in api & conductor tests * Use PRIVATE\_NETWORK\_NAME for devstack plugin * Create common neutron module * Updated from global requirements * Properly set ephemeral size in agent drivers * Add validation of 'ilo\_deploy\_iso' in deploy.validate() * Restore diskimage-builder install 6.0.0 ----- * Updated from global requirements * Mask password on agent lookup according to policy * Clear target\_power\_state on conductor startup * Replace assertRaisesRegexp with assertRaisesRegex * Fix test in test\_agent\_client.py * Replace dict.get(key) in drivers unit tests * Docs: Fix some typos in the documentation * Removes the use of mutables as default args * Follow-up to Active Node Creation * Fix parameter create-node.sh * Replace dict.get(key) in drivers/modules/\*/ tests * Change port used for Ironic static http to 3928 * Centralize config options - [dhcp] * Centralize config options - [database] * 
Centralize config options - [conductor] * Centralize config options - [cisco\_ucs] * Centralize config options - [cimc] * Centralize config options - [console] * No need for 'default=None' in config variable * Fix typo in agent driver * Use assertIn and assertNotIn * Document testing an in-review patch with devstack * Replace vif\_portgroup\_id with vif\_port\_id * Use assert\_called\_once\_with in test\_cleanup\_cleanwait\_timeout * Trivial comments fix * Add Link-Local-Connection info to ironic port * Remove workaround for nova removing instance\_uuid during cleaning * Document support for APC AP7921 * Updated from global requirements * Add cleanwait timeout cleanup process * Add restrictions for changing portgroup-node association * Imported Translations from Zanata * Support for APC AP7922 * fix sed strings in developer doc * Replace dict.get(key) with dict[key] in unit tests * Fix JSON error in documentation * Remove support for the old ramdisk (DIB deploy-ironic element) * Updated from global requirements * Document packing and unpacking the deploy ramdisk * Fix nits related to Ports api-ref * Gracefully degrade start\_iscsi\_target for Mitaka ramdisk * Update the api-ref documentation for Drivers * Update comment from NOTE to TODO * Active Node Creation via adopt state * Update resources subnet CIDR * remove neutron stuff from devstack deb packages * Keep original error message when cleaning tear down fails * Add config option for ATA erase fallback in agent * Fix markup in documentation * Imported Translations from Zanata * Updated from global requirements * Add debug environment to tox * Correct RAID documentation JSON * Added ironic-ui horizon dashboard plugin to ironic docs * Updated from global requirements * Disable disk\_config compute-feature-enabled in tempest * Make sure create\_ovs\_taps creates unique taps * NOTIFICATION\_TRANSPORT should be global * Remove links to github for OpenStack things * Update the api-ref documentation for Ports * Add 
one use case for configdrive * Updated from global requirements * Remove hard-coded keystone version from setup * Use a single uuid parameter in api-ref * Use correct iscsi portal port in continue\_deploy * Fix raises to raise an instance of a class * Fix formatting of a release note * Remove support for 'hexraw' iPXE type * Use messaging notifications transport instead of default * Updated from global requirements * tempest: start using get\_configured\_admin\_credentials * Fix signature for request method * Remove backward compatibility code for agent url * Add 'How to get a decision on something' to FAQ * Follow-up patch of 8e5e69869df476788b3ccf7e5ba6c2210a98fc8a * Introduce provision states: AVAILABLE, ENROLL * minor changes to security documentation * Add support for API microversions in Tempest tests * Make use of oslo-config-generator * Mention RFEs in README * Make the ssh driver work on headless VirtualBox machines * Allow to specify node arch * Remove unused is\_valid\_cidr method * Updated from global requirements * Restart n-cpu after Ironic install * Move all cleanups to cleanup\_ironic * Keep backward compatibility for openstack port create * Revert "Run smoke tests after upgrade" * Add some docs about firmware security * Change HTTP\_SERVER's default value to TFTPSERVER\_IP * Update the api-ref documentation for Root and Nodes * Read the Sphinx html\_last\_updated\_fmt option correctly in py3 * devstack: Configure console device name * Updated from global requirements * Replace project clients calls with openstack client * Stop unit-testing processutils internals * Fix start order for Ironic during upgrade * Run smoke tests after upgrade * Add ironic to enabled\_services * Remove link to Liberty configs * Updated from global requirements * Fix shutdown.sh & upgrade.sh for grenade * add mitaka configuration reference link to the index page * Remove "periodic\_interval" config option * Remove verbose option * Updated from global requirements * 
Eliminate warnings about rm in api-ref build * Remove deprecated driver\_periodic\_task * Remove backward compat for Liberty cleaning * Remove [conductor]/clean\_nodes config option * Remove "message" attribute support from IronicException * Setup for using the Grenade 'early\_create' phase * Add support for dib based agent ramdisk in lib/ironic * Remove deprecated [pxe]/http\_\* options * Remove [agent]/manage\_tftp option * Remove "discoverd" configuration group * Regenerate sample config * Doc: Replace nova image-list * Migrate to os-api-ref library * Add require\_exclusive\_lock decorators to conductor methods * Fix syntax error in devstack create-node script * Updated from global requirements * Fix formatting error in releasenotes * Allow vendor drivers to acquire shared locks * Modify doc for RAID clean steps in manual cleaning * Make iPXE + TinyIPA the defaults for devstack * Only install DIB if going to use DIB * Add some docs/comments to devstack/plugin.sh * devstack: Fetch tarball images via https * DevStack: Support to install virtualbmc from source * Regenerate sample configuration * Allow configuring shred's final overwrite with zeros * Updated from global requirements * Deployment vmedia operations to run when cleaning * Extend IRONIC\_RAMDISK\_TYPE to support 'dib' * Cleanup unused conf variables * Adds RAID interface for 'iscsi\_ilo' * Pass environment through to create-node.sh * DevStack: Support to install pyghmi from source * RAID interface to support JBOD volumes * Remove ClusteredComputeManager docs * API: Check for reserved words when naming a node * File download fails with swift pseudo folder * Migrate api-ref into our tree * Updating dev-quickstart.rst file links * Devstack: allow extra PXE params * Updated from global requirements * Update resources only for specific node during deletion * Fix tox cover command * Fix VirtualBox cannot set boot device when powered on * Set root hints for disks less than 4Gb and IPA * Use Ironic node name 
for VM * Allow to sepecify VM disk format * Update compute\_driver in documentation * Replace logging constants with oslo.log * iscsi: wipe the disk before deployment * Joined 'tags' column while getting node * FIX: IPMI bmc\_reset() always executed as "warm" * Fix API node name updates * DevStack: Parametrize automated\_clean * Very important single character typo fix * Remove two DEPRECATED config options from [agent] * Allow to set Neutron port setup delay from config * Update ironic.config.sample * Fix usage of rest\_client expected\_success() in tests * Fixed nits in the new inspection doc page * Imported Translations from Zanata * Updated from global requirements * Document how to run the tempest tests * Update the inspection documentation * ipxe: retry on failure * Add note on prerequisite of 'rpm' file extraction * Follow-up patch of 0607226fc4b4bc3c9e1738dc3f78ed99e5d4f13d * Devstack: Change to use 'ovs-vsctl get port tag' * Restart consoles on conductor startup * Remove backwards compat for CLEANING * Make sure Cisco drivers are documented on IRONIC\_DEPLOY\_DRIVER * Remove two deprecated config option names from [agent] section * Updated from global requirements * Add support for Cisco drivers in Ironic devstack * Updated from global requirements * [docstring] Update ironic/api/controllers/v1/\_\_init\_\_.py comment * add new portal\_port option for iscsi module * Fix tinyipa initrd tarballs.openstack.org file name * Remove description of 'downgrade' for ironic-dbsync * In node\_power\_action() add node.UUID to log message * Rename juno name state modification method * Prepare for transition to oslo-config-generator * Updated from global requirements * Reduce amount of unhelpful debug logging in the API service * Correct api version check conditional for node.name * Updated from global requirements * Enable download of tinyipa prebuilt image * Follow-up to I244c3f31d0ad26194887cfb9b79f96b5111296c6 * Use get\_admin\_context() to create the context object 
* Updated from global requirements * Don't power off non-deploying iLO nodes in takeover * deployment vmedia ops should not be run when not deploying * Fix NamedTemporaryFile() OSError Exception * Updated from global requirements * Fix \_do\_next\_clean\_step\_fail\_in\_tear\_down\_cleaning() * Make tox respect upper-constraints.txt * Adopt Ironic's own context * Allow fetching IPA ramdisk with branch name * Tune interval for node provision state check * Fix typo in devstack script * Note on ilo firmware update swift url scheme * Force iRMC vmedia boot from remotely connected CD/DVD * Normalize MAC OctetString to fix InvalidMAC exception * Enable Grenade usage as a plugin * Readability fixes for cleaning\_reboot code * Support reboot\_requested bool on agent clean\_steps * Update tempest compute flavor\_ref/flavor\_ref\_alt * Move testcases related to parse\_instance\_info() * Improve check for ssh-key to include public and private files * Assign valid values to UUIDFields in unit tests * Fix typos in some source files * Follow up patch of 843ce0a16160f2e2710ef0901028453cd9a0357c * Clean up test node post data * Fix: Duplicated driver causes conductor to fail * Use trueorfalse function instead of specific value * Update reno for stable/mitaka * Doc update to enable HTTPS in Glance and Ironic comm * Fix race in hash ring refresh unit test * Addressing nits on I2984cd9d469622a65201fd9d50f964b144cce625 * Config to stop powering off nodes on failure 5.1.0 ----- * Documentation update for partition image support * Delete bridge "brbm" in devstack/unstack.sh * Remove unneeded use of task.release\_resources() * [Devstack]Add ability to enable shellinabox SSL certificate * Append 'Openstack-Request-Id' header to the response * Add disk\_label and node\_uuid for agent drivers * Fix sphinx docs build * Update authorized\_keys with new key only * Agent: Out-of-band power off on deploy * Document partition image support with agent\_ilo * Add support for partition images in 
agent drivers * Update the text in user guide of ironic * Translate requests exception to IronicException * Extend the Conductor RPC object * Make sure target state is cleared on stable states * Removes redundant "to" * Install apparmor b/c Docker.io has undeclared dep * Don't depend on existing file perm for qemu hook * Move \_normalize\_mac to driver utils * Devstack: add check of chassis creating * Allow user to specify cleaning network * Update ironic\_ssh\_check method * Adds doc - firmware update(iLO) manual clean step * Add ensure\_thread\_contain\_context() to task\_manager * [devstack] Do not die if neutron is disabled * Follow-up of firmware update(iLO) as manual cleaning step * Updating driver docs with DL hardwares requirements * Remove unneeded 'wait=False' to be more clean and consistent * Pass region\_name to SwiftAPI * Uses jsonschema library to verify clean steps * Fix important typo in the ipmitool documentation * DevStack: Allow configuring the authentication strategy * Add documentation for RAID 5.0.0 ----- * Add documentation about the disk\_label capability * SSH driver: Remove pipes from virsh's list\_{all, running} * Add documentation for the IPMITool driver * Fix error in cleaning docs * Replace depricated tempest-lib with tempest.lib * Add new 'disk\_label' capability * Fix JSON string in example of starting manual cleaning * Remove 'grub2' option in creating whole-disk-images * Update iRMC driver doc for inspection * Don't use token for glance & check for some unset vars * Use 'baremetal' flavor in devstack * [devstack] Fix IPA source build on Fedora * DevStack: Enable VirtualBMC logs * Support for passing CA certificate in Ironic Glance Communication * Updated from global requirements * Firmware update(iLO) as manual cleaning step * Updated from global requirements * Remove code duplication * Update iLO documentation for clean step 'reset\_ilo' * Refactor the management verbs check to utils * Updated from global requirements * Remove 
duplicate doc in ironic.conf.sample * Prep for 5.0 release * Fix unittests after new releases of libraries * Updating docs with support for DL class servers * Update CIMC driver docs to install ImcSdk from PyPi * Add returns to send\_raw() ipmitool function * Add function for dump SDR to ipmitool driver * Add clean step in iLO drivers to activate iLO license * Update proliantutils version to 2.1.7 for Mitaka release * ipxe: add --timeout parameter to kernel and initrd * Updated iLO driver documentation to recommend ipmitool version * Refactor driver loading to load a driver instance per node * Clean up driver loading in init\_host * add wipefs to ironic-lib.filters * Updated from global requirements * Use assertEqual/Greater/Less/IsNone * Follow up nits of 3429e3824c060071e59a117c19c95659c78e4c8b * API to list nodes using the same driver * [devstack] set ipa-debug=1 for greater debugability * Loose python-oneviewclient version requirement * Set node last\_error in TaskManager * Add possible values for config options * Follow up nits of irmc oob inspection * Enable removing name when updating node * Make some agent functions require exclusive lock * Add db api layer for CRUD operations on node tags * Update proliantutils version required for Mitaka release * Add deprecated\_for\_removal config info in ironic.conf.sample * Update ironic.conf.sample * Tolerate roles in context.RequestContext * Switch to Futurist library for asynchronous execution and periodic tasks * Move \_from\_db\_object() into base class * Add ironic\_tempest\_plugin to the list of packages in setup.cfg * Fix gate broken by sudden remove of SERVICE\_TENANT\_NAME variable * Add manual cleaning to documentation * Import host option in base test module * Fixes automated cleaning failure in iLO drivers * Updated from global requirements * DevStack: Add support for deploying nodes with pxe\_ipmitool * Change the libvirt NIC driver to virtio * DevStack: Support to install diskimage-builder from source * 
[Devstack]Add ability to enable ironic node pty console * Use 'node' directly in update\_port() * Add links to the standalone configdrive documentation * DevStack: Install squashfs-tools * [DevStack] fix restart of nova compute * Use http\_{root, url} config from "deploy" instead of "pxe" * During cleaning, store clean step index * Use oslo\_config.fixture in unit tests * Introduce driver\_internal\_info in code-contribution-guide * Updated from global requirements * Correct instance parameter description * Add node.uuid to InstanceDeploy error message * Set existing ports pxe\_enabled=True when adding pxe\_enabled column * Augmenting the hashing strategy * Add hardware inspection module for iRMC driver * Document possible access problems with custom IRONIC\_VM\_LOG\_DIR path * Add documentation for proxies usage with IPA * Updated from global requirements * Devstack: create endpoint in catalog unconditionally * Comment out test options that already exists on tempest's tree * Replace config 'clean\_nodes' with 'automated\_clean' * Remove 'zapping' from code * Cache agent clean steps on node * API to manually clean nodes * Replace ifconfig with ip * Updated iLO documentation for boot mode capability * Agent vendor handles manual cleaning * Remove downgrade support from migrations * Enable tinyipa for devstack Ironic * Disable clean step 'reset\_ilo' for iLO drivers by default * Add proxy related parameters to agent driver * Update ironic.conf.samle * Fix genconfig "tempdir" inconsistency * Update the home page * Follow-up on dracclient refactor * Log warning if ipmi\_username/ipmi\_password missing * Add portgroups to support LAG interfaces - net * Add portgroups to support LAG interfaces - RPC * Add portgroups to support LAG interfaces - objs * Add portgroups to support LAG interfaces - DB * Fix missing lookup() vendor method error for pxe\_drac * Refresh ssh verification mechanism * Refactor install-guide to configure API/Conductor seperately * Enable Ironic 
Inspector for Cisco Drivers * Fix doc8's "duplicated target names" (D000) error * Remove conditional checking the auth\_strategy values * Extend root device hints to support device name * Fix spawn error hook in "continue\_node\_clean" RPC method * Enable doc8 style checker for \*.rst files * Updated from global requirements * Show transitions initiated by API requests * Remove hard-coded DEPLOYWAIT timeout from Baremetal Scenario * Fix tiny format issue with install\_guide * Add priority to manual clean step example * Use node uuid in some exception log * Fix error message in devstack * Updated from global requirements * [devstack] Restart nova compute before checking hypervisor stats * Imported Translations from Zanata * Fix minor typo * DRAC: cleanup after switch to python-dracclient * API service logs access requests again * Updated from global requirements * Correct port\_id parameter description * Remove duplicate words in API version history * Remove unneeded enable\_service in dev-quickstart.rst * Clarify that size in root device hints and local\_gb are often different * Update ImcSdk requirement to use PyPi * Clean up 'no\_proxy' unit tests * Add more unit tests for NO\_PROXY validation * Add ability to cache swift temporary URLs * DRAC: switch to python-dracclient on vendor-passthru * Migrate Tempest tests into Ironic tree * Use Tempest plugin interface * Fix issues with uefi-ipxe booting * Update links to OpenStack manuals * Fix issue where system hostname can impact genconfig * Add choices option to several options * Add xinetd and its TFTP configuration in Install Guide * Reorganize the developer's main page * Document backwards compat for passthru methods * Drop MANIFEST.in - it's not needed pbr * Clean up unneeded deprecated\_group * Devstack: replace 'http' with SERVICE\_PROTOCOL * Clarify rejected status in RFE contribution docs * Bring UP baremetal bridge * Adjust ipminative.\_reboot to comply with pyghmi contract * Document the process of 
proposing new features * Updated from global requirements * Use assertTrue/False instead of assertEqual(T/F) * devstack 'cleanup-node' script should delete OVS bridges * Change default IRONIC\_VM\_SPECS\_RAM to 1024 * Remove release differences from flavor creation docs * Add documentation for standalone ilo drivers * Devstack: Make sure libvirt's hooks directory exists * Update the ironic.conf.sample file * Follow-up on refactor DRAC management interface * Allow user to set arch for the baremetal flavor and ironic node * tox: make it possible to run pep8 on current patch only * Devstack: Use [deploy] erase\_devices\_priority config option * Remove bashate from envlist * Use ironic-lib's util methods * Refactor objects into a magic registry * Don't return tracebacks in API response in debug mode * Updated from global requirements * Change assertTrue(isinstance()) by optimal assert * Remove \*/openstack/common\* in tox * Remove vim headers in source files * Trival: Remove unused logging import * Use ironic-lib's qemu\_img\_info() & convert\_image() * Update "Developer Quick-Start" guide for Fedora 23+ * Enable ironic devstack plugin in local.conf sample * Correct a tiny issue in install-guide * Install 'shellinabox' package for Ironic * Fix translations in driver base * Run flake8 against the python scripts under tools/ and devstack/tools * Add UEFI support for iPXE * Add console feature to ssh driver * Conductor handles manual cleaning * Add extensions to the scripts at devstack/tools/ironic/scripts * Fix "No closing quotation" error when building with tox * Devstack: Remove QEMU hook at ./unstack * Run bashate as part of the pep8 command * Fix bashate errors in grenade plugin * Fix syntax errors in the shell scripts under devstack/tools * Use the apache-ironic.template from our tree * Fix typo in ironic/conductor/manager.py * genconfig: Debug info for unknown config types * Keep the console logs for all boots * Use imageutils from oslo.utils * Add documentation 
for user inputs as HTTPS URLs * Add bashate tox command * Updated from global requirements * Add documentation for swiftless intermediate images * DRAC: switch to python-dracclient on management interface * DRAC: switch to python-dracclient on power interface * Follow up nits of Exception to str type conversion * Clean up variables in plugin.sh * Replace assertEqual(None, \*) with assertIsNone in tests * Add utility function to validate NO\_PROXY * Add bifrost as an option projects in Service overview * Sequence diagrams for iLo driver documentation * Refactor ilo documentation for duplicate information * Update swift HTTPs information in ilo documentation * Updated from global requirements * Deprecated tox -downloadcache option removed * Remove override-defaults * Use 'service\_type' of 'network'. Not 'neutron' * Update ironic.conf.sample by applying the bug fix #1522841 * Add grenade plugin * Follow up patch to correct code-contribute-guide * Fix iPXE template for whole disk image * Add devstack plugin * Copy devstack code to ironic tree * Add FSM.is\_stable() method * Explicitly depend on WebTest>=2.0 * Always pass keystone credentials to neutronclient * Remove extra space in 'host' config comment * Add oslo\_config.Opt support in Ironic config generator * Refactor disk partitioner code from ironic and use ironic-lib * Simplifies exception message assurance for oneview.common tests * Use node.uuid directly in stop\_console() * Correct NotImplemented to NotImplementedError in rpcapi.py * Adding oneview.common tests for some method not well tested * Add port option support for ipmitool * Numerous debug messages due to iso8601 log level * Handle deprecated opts' group correctly * Updated from global requirements * Clarify what changes need a release note * Remove wsgi reset\_pool\_size\_to\_default test * Add Mitaka release notes page * Update python-scciclient version number * Add release notes from Icehouse to Liberty * Add Code Contribution Guide for Ironic * 
Replace HTTP 'magic numbers' with constants * Documentation points to official release notes 4.3.0 ----- * Fix awake AMT unit test * Fix bug where clean steps do not run * Add reno for AMT wakeup patch * Updating OneView driver requirements and docs * Correct the db connection string in dev-quickstart * Split BaseConductorManager from ConductorManager * Validate arguments to clean\_step() decorator * test: Remove \_BaseTestCase * Wake up AMT interface before send request * Fall back to old boot.ipxe behaviour if inc command is not found * Only mention IPA in the quick start and user guides for DevStack * Improve options help for image caching * Add troubleshooting docs for "no valid host found" * change mysql url in dev-quickstart doc * Extend FAQ with answer of how to create a new release note * Sync ironic.conf sample * Comment spelling error in ironic-images.filters file * Updated from global requirements * Add a developer FAQ * Add tests for RequestContextSerializer * Add a test to enforce object version bump correctly * force releasenotes warnings to be treated as errors * Avoid RequestContextSerializer from oslo.messaging * Follow up patch for the first commit of iRMC new boot I/F * Move iso8601 as a test dependency only * Catch up release notes for Mitaka * Move common code from ironic.conductor.manager to ironic.conductor.utils * Add deprecated config info in ironic.conf.sample * Add switch to enable/disable streaming raw images for IPA * SwiftAPI constructor should read CONF variables at runtime * Take over console session if enabled * Drop some outdated information from our quick start guide * Refactor IRMCVirtualMediaAgentDeploy by applying new BootInterface * Refactor IRMCVirtualMediaIscsiDeploy by applying new BootInterface * Updated from global requirements * Fix: Next cleaning hangs if the previous cleaning was aborted * Add clean up method for the DHCP factory * Add missing packages to dev-quickstart * Support arguments for clean step methods * 
Validate all tcp/udp port numbers * Add manual cleaning to state machine * Specifying target provision states in fsm * Use server\_profile\_template\_uri at scheduling * Check shellinabox started successfully or not * Add SSL support to the Ironic API * Updated from global requirements * Use wsgi from oslo.service for Ironic API * Remove duplicated unit tests in test\_manager * Get mandatory patch attrs from WSME properties * Add and document two new root device hints: wwn\_{with, vendor}\_extension * Sort root device hints when parsing * add "unreleased" release notes page * Follow up patch for 39e40ef12b016a1aeb37a3fe755b9978d3f9934f * Document 'erase\_devices\_iterations' config option * Update iLO documentation * Adds test case for the iscsi\_ilo recreate boot iso * Refactor agent\_ilo driver to use new boot interface * Updated from global requirements * Refactor iLO driver console interface into new module * Add reno for release notes management * Add choices to temp\_url\_endpoint\_type config option * Fix oslo namespace in default log level * Remove \_\_name\_\_ attribute from WSME user types * refine the ironic installation guide * Revert "Add Pillow to test-requirements.txt" * Update etc/ironic/ironic.conf.sample * Make task parameter mandatory in get\_supported\_boot\_devices * Follow up patch for Ib8968418a1835a4131f2f22fb3e4df5ecb9b0dc5 * Check shellinabox process during stopping console * Add whole disk image creation command to Installation Guide * Fix docker.io bug in the Install Guide * Updated from global requirements * Node's last\_error to show the actual error from sync\_power\_state * Updated from global requirements * Rename test\_conductor\_utils.py to test\_utils.py * Follow up patch for 8c3e102fc5736bfcf98525ebab59b6598a69b428 * Add agent\_iboot entrypoint * Validate console port number in a valid range * iboot: add wait loop for pstate to activate * Don't reraise the exception in \_set\_console\_mode * Check seamicro terminal port as long 
as it specified * Add missing unit tests for some PXE drivers * Validate the input of properties of nodes * Add documentation for Ceph Object Gateway support * Refactor iscsi\_ilo driver to use new boot interface * Fix comments on DRAC BIOS vendor\_passthru * cautiously fail on unhandled heartbeat exception * Add "agent\_wol" (AgentAndWakeOnLanDriver) * Added unit tests for CORS middleware * Use oslo\_config new type PortOpt for port options * Fix markup error in deploy/drivers.rst * Update the Configuration Reference to Liberty in doc * Updated from global requirements * Use self.\_\_class\_\_.X instead of self.X * Rename utils.py to mgr\_utils.py to avoid namespace collision * XenAPI: Add support for XenServer VMs * Add PortOpt to config generator * Imported Translations from Zanata * Move hash\_ring refresh logic out of sync\_local\_state * Move ironic.tests.unit.base to ironic.tests.base * Change required version of ImcSdk to 0.7.2 * Add an iboot reboot\_delay setting * iPXE document about the existence of prebuilt images * Fix a typo * Switched order of CORS middleware * DRAC BIOS vendor\_passthru: enable rebooting the node * Replace deprecated LOG.warn with warning * Add db migration and model for tags table * Add OneView driver documentation * Fix snmp property descriptions * Updated from global requirements * Slightly reword README * Remove unused functions from agent driver * mocking syscalls to make the tests run on OS X * Enable cmd/api & cmd/conductor to be launched directly * Add reboot\_delay option to snmp driver * Add self.raid for iSCSI based drivers * Move test\_pxe.py inside unit/drivers/modules directory * Move pxe.\_parse\_instance\_info() to deploy\_utils * Add note about driver API breakage * Fix a missing detail in install guide * Enable radosgw support in ironic * Updated from global requirements * Add agent\_amt docs * Add release notes for 4.2.1 * Convert set() to list in ListType * remove lxml requirement * Update python-oneviewclient 
version * Fix an annoying detail in the developer quick-start * Updated from global requirements * Expose versioning information on GET / endpoint * Fixes logging of failure in deletion of swift temporary object * ucs\_hostname changed to ucs\_address * Updated from global requirements * Remove functions: \_cleanse\_dict & format\_message * Move FakeOneViewDriver to the fake.py module * Add testresources and testscenarios used by oslo.db fixture * Add agent\_amt driver * Imported Translations from Zanata * Stop adding translation function to builtins * Fix tests giving erroneous output during os-testr run * OneView Driver for Ironic * Fix agent\_ilo to remove temporary images * Updated from global requirements * iPXE: Fix assumption that ${mac} is the MAC of the NIC it's booting * Prevent iRMC unit test from potential failure at the gate * Add secret=True to password option * Fix a bug error by passwords only includes numbers * Add support for in-band cleaning in ISCSIDeploy * Fix typo in document * Remove unused import of oslo\_log * Use power manager to reboot in agent deployments * Add retries to ssh.\_get\_hosts\_name\_for\_node * Refactor deploy\_utils methods * Fix irmc driver unit test * PXE: Support Extra DHCP Options for IPv6 * Use standard locale when executing 'parted' command * Updated from global requirements * To run a specific unit test with ostestr use -r * Add .eggs to gitignore * Fix log formatting issue in agent base * Add notes to functions which are in ironic-lib * Allow empty password for ipmitool console * Update help string on tftp\_root option * Updated from global requirements * Fix conductor deregistration on non init conductor * Imported Translations from Zanata * Add Pillow to test-requirements.txt * Add agent inspection support for IPMI and SSH drivers * Python 3.4 unit tests fail with LANG=C * Fix ubuntu install command in install guide * Move unit tests to correct directory * Add 'whitelist\_externals = bash' for two testenvs * 
Rename 'message' attribute to '\_msg\_fmt' in IronicException * Follow up for: Prepare for functional testing patch * Fix documentation for installing mariaDB * Update help strings for DRAC configs * Switch tox unit test command to use ostestr * Use standard locale when executing 'dd' command * Imported Translations from Zanata * Fix typo: add a missing white space * Prepare for functional testing * Fix some iBoot strings * Replace six.iteritems() with .items() * Make generation of ironic.conf.sample deterministic * Cached file should not be deleted if time equal to master 4.2.0 ----- * Cleanup of Translations * Update architecture docs to mention new driver interfaces * Add 4.2.0 release notes * Update docs for Fedora 22 * Add i18n \_ import to cimc common * Update proliantutils version required for L release * Use of 'the Bare Metal service' in guide * Update install guide to reflect latest code * Implement indirection\_api * Add 'abort' to state machine diagram * Unit test environment setup clarification * Make end-points discoverable via Ironic API * Updated from global requirements * Allow unsetting node.target\_raid\_config * Allow abort for CLEANWAIT states * Clean up CIMC driver docs and comments * Add Cisco IMC PXE Driver * Fix final comments in RAID commits * Refactor agent {prepare,tear\_down}\_cleaning into deploy\_utils * Handle unquoted node names from virt types * Fix iRMC vmedia deploy failure due to already attached image * Implement take\_over for iscsi\_ilo driver * Fix typo in vendor method dev documentation * Fix incorrect urls * Check image size before provisioning for agent driver * Help patch authors to remember to update version docs * Add constraint target to tox.ini * Add IPMINative vendor methods to \*IPMINative drivers * Fix string formatting issues * Remove DictMatches custom matcher from unit tests * Imported Translations from Zanata * Remove unused object function * Use oslo.versionedobjects remotable decorators * Base IronicObject 
on VersionedObject * Update descriptions in RAID config schema * Document GET ...raid/logical\_disk\_properties * Convert functools.wraps() usage to six.wraps() * Remove comment about exception decorator * Replace metaclass registry with explicit opt-in registry from oslo * Add config option to override url for links * Fix iBoot test\_\_switch\_retries test to not waste time sleeping * Allow tftpd usage of '--secure' by using symlinks * Add support for inband raid configuration agent ramdisk * Agent supports post-clean-step operations * Update 'Installation Guide' for RHEL7/CentOS7/Fedora * Fix docs about --is-public parameter for glance image-create * Fix indentation of the console docs * Fix heading levels in the install-guide * Cache the description of RAID properties * Remove the hard dependency of swift from ilo drivers * Fix mistakes in comments * Updated from global requirements * Fix object field type calling conventions * Add version info for pyghmi in driver-requirements.txt 4.1.0 ----- * Add 4.1.0 release notes * Try to standardize retrieval of an Exception's description * Add description how to restart ironic services in Fedora/RHEL7/CentOS7 * Improve the ability to resolve capability value * Add supported environment 'VMware' to comments * Updated from global requirements * Remove policy 'admin' rule support * Handle missing is\_whole\_disk\_image in pxe.\_build\_pxe\_config\_options * Raise InvalidPrameterValue when ipmi\_terminal\_port is '' * Fix doc typo * Remove executable permission from irmc.py * Add APIs for RAID configuration * agent\_ilo fails to bring up instance * Updated from global requirements * Remove 'is\_valid\_event' method * Set boot device in PXE Boot interface method prepare\_instance() * Revert "Do not overwrite the iPXE boot script on every deployment" * Add vendor interface to ipminative driver * When boot option is not persisted, set boot on next power on * Document nodes in enroll state, in install guide * Added CORS support 
middleware to Ironic * Refactor map\_color() * Removes unused posix-ipc requirement * Add retry options to iBoot power driver * Trusted boot doc * Prevent ilo drivers powering off active nodes during take over * Add release notes for 4.0.0 * Clean up cleaning error handling on heartbeats * Use vendor mixin in IPMITool drivers * Use oslo.messaging serializers * Add RPC APIs for RAID configuration * Add new method validate\_raid\_config to RAIDInterface * Fix docker package name in Ubuntu 14.04 in Install Guide * Updated from global requirements * Do not overwrite the iPXE boot script on every deployment * Reset tempdir config option after NestedTempfile fixture applied * Remove unused dep discover from test reqs * Add deprecation warning to periodic tasks with parallel=False * Use six.text\_type in parse\_image\_ref * Ensure that pass\_deploy\_info() always calls boot.prepare\_instance() * Add minimum and maximum on port option * Update ironic.conf.sample with tox -egenconfig * Update documentation to install grub2 when creating the user image * Fix logging and exceptions messages in ipminative driver * Fix minor spelling/grammar errors * Put py34 first in the env order of tox * format links in the readme to work with the release notes tools * Periodically checks for nodes being cleaned * Add links for UEFI secure boot support to iLO driver documentation * Add cleanup in console utils tests * Follow up the nits in iRMC vmedia driver merged patch * Refactor agent driver with pxe boot interface * Update tests to reflect WSME 0.8 fixes * Remove ObjectListBase * Remove broken workaround code for old mock * Create a versions.py file * Improve comparison operators for api/controllers/base.py * Switch to post-versioning 4.0.0 ----- * Fix improper exception catching * Fix nits from 'HTTP constants' patch * Use JsonEncoded{Dict,List} from oslo\_db * Move tests into correct directories * Fix logging levels in do\_node\_deploy * Fix misspelling from "applicatin" to 
"application" * Updated from global requirements * Remove unneeded module variable '\_\_all\_\_' * Updated from global requirements * Change and edit of Ironic Installation Guide * Remove the --autofree option from boot.ipxe * Switch from deprecated timeutils.isotime * Fix "tox -egenconfig" by avoiding the MODULEPATH env variable * Improve logging for agent driver * Refactor the essential prop list of inspect driver * Reset clean\_step if error occurs in CLEANWAIT * Fix bug sending sensor data for drivers w/o management * Replace HTTP 'magic numbers' with constants * Address final comments on update image cache based on update time * 'updated\_at' field shows old value after resource is saved * Increase size of nodes.driver column * Add better dbapi support for querying reservation * Allow digits in IPA driver names * Updated from global requirements * Add documentation for iRMC virtual media driver * Add copyright notice to iRMC driver source code * Remove CONF.agent.agent\_pxe\_bootfile\_name * Update single letter release names to full names * Enforce flake8 E711 * Update docstring for agent deploy's take\_over * Update cached images based on update time * Updated from global requirements * Add RAIDInterface for RAID configuration * get\_supported\_boot\_devices() returns static device list * add ironic client and ironic inspector projects into contribution list * Updated from global requirements * Use the oslo\_utils.timeutils 'StopWatch' class * Update the documentation to use IPA as deploy ramdisk * Inspector inspection fails due to node locked error * Prevent power actions when the node is in CLENWAIT state * Imported Translations from Transifex * Remove unnecessary trailing backslash in Installation Guide * Refactor some minor issues to improve code readability * Fix misspelling in comment * Make app.wsgi more like ironic.cmd.api * Migrate IronicObjectSerializer to subclass from oslo * Updated from global requirements * Fix warnings on doc builds * Change 
vagrant.yml to vagrant.yaml * Developer quickstart documentation fixes * Document configuring ironic-api behind mod\_wsgi * Updated from global requirements * Add deprecation messages on the bash ramdisk endpoints * Document API versioning * Log configuration values as DEBUG, not INFO * Update ironic.conf.sample * Update ironic.conf.sample * Add information 'node\_uuid' in debug logs to facilitate the reader's life * Clean up instance\_uuid as part of the node's tear down * Fix a trusted boot test bug * Add more info level log to deploy\_utils.work\_on\_disk() method * Fix broken agent virtual media drivers * Updated from global requirements * Fix apache wsgi import * Add raises docstring tag into object.Ports methods * Only take exclusive lock in sync\_power\_state if node is updated * Secure boot support for pxe\_ilo driver * UCS: node-get-boot-device is failing for Cisco servers * grub2 bootloader support for uefi boot mode * Add Nova scheduler\_tracks\_instance\_changes config to docs * Use automaton's converters/pydot * enroll/verify/cleanwait in state machine diagram * Save and re-raise exception * Cache Keystone client instance * Refactor pxe - New PXEBoot and ISCSIDeploy interfaces * Don't prevent updates if power transition is in progress * Follow-on to b6ed09e297 to fix docstrings/comments * Make inspector driver test correctly * Allow inspector driver to work in standalone mode * Remove outdated TODO.rst file * Updated from global requirements * Introduce support for APC MasterSwitchPlus and Rack PDU * Allow agent lookup to directly accept node UUID * Add CLEANWAIT state * Allow updates in VERIFYING state * Allow deleting nodes in ENROLL state * Updated from global requirements * Fixes a testcase related to trusted boot in UEFI boot mode * Clarify inspection upgrade guide * Refactor refresh method in objects for reuse * Imported Translations from Transifex * Use utils.mkfs directly in deploy\_utils * Updated from global requirements * Migrate 
ObjectListBase to subclass from the Oslo one * Clean up tftp files if agent deployed disk image * Don't do a premature reservation check in the provision API * Move the http\_url and http\_root to deploy config * Allow upgrading shared lock to an exclusive one * Fix the DEPLOYWAIT check for agent\_\* drivers * Add a missing comma in Vendor Methods of Developer Guide * Replacing dict.iteritems() with dict.items() * Updated from global requirements * db: use new EngineFacade feature of oslo.db * Address minor comments on the ENROLL patch * Remove requirements.txt from tox.ini deps * Updated from global requirements * Replace common.fileutils with oslo\_utils.fileutils * Updated from global requirements * Switch to the oslo\_utils.fileutils * Start using new ENROLL state * Add .idea to .gitignore * Periodically checks the status of nodes in DEPLOYING state * Add IPA support for iscsi\_irmc driver * Updated from global requirements * Vagrant configuration generation now uses pymysql * Remove deprecated code for driver vendor passthru * Add DRAC BIOS config vendor passthru API * Use DEPLOYWAIT while waiting for agent to write image * Fix unittests due mock 1.1.0 release * Migrate RPC objects to oslo.versionedobjects Fields * Imported Translations from Transifex * Updated from global requirements * Mock the file creation for the GetConfigdriveTestCase tests * Address follow-up comments * Clear ilo\_boot\_iso before deploy for glance images * Enable translation for config option help messages * Replace is\_hostname\_safe with a better check * Initial oslo.versionedobjects conversion * Add whole disk image support for iscsi\_irmc driver * Add localboot support for iscsi\_irmc driver * Add iRMC Virtual Media Deploy module for iRMC Driver * add python-scciclient version number requirement * Remove db connection string env variable from tox.ini * Make use of tempdir configuration * Updated from global requirements * Fix failing unit tests under py34 * Allow vendor methods to 
serve static files * Allow updates when node is on ERROR provision state * Add sequence diagrams for pxe\_ipmi driver * Fix logging for soft power off failures * Mute ipmi debug log output * Validate IPMI protocol version for IPMIShellinaboxConsole * Image service should not be set in ImageCache constructor * Clean nodes stuck in DEPLOYING state when ir-cond restarts * Add ability to filter nodes by provision\_state via API * Refactor check\_allow\_management\_verbs * Add node fields for raid configuration * Switch to oslo.service * Fix "boot\_mode\_support" hyper link in Installation Guide * Log configuration options on ironic-conductor startup * Allow deleting even associated and active node in maintenance mode * Use oslo\_log * Replace self.assertEqual(None,\*) to self.assertIsNone() * Improve warning message in conductor.utils.node\_power\_action() * Add a new boot section 'trusted\_boot' for PXE * use versionutils from oslo\_utils * Make task\_manager logging more helpful * Add IPMI 1.5 support for the ipmitool power driver * Add iBoot driver documentation * Updated from global requirements * Add unit test for ilo\_deploy \_configure\_vmedia\_boot() * Do not use "private" attribute in AuthTokenMiddleware * API: Get a subset of fields from Ports and Chassis * Save disk layout information when deploying * Add ENROLL and related states to the state machine * Refactor method to add or update capability string * Use LOGDIR instead of SCREEN\_LOGDIR in docs * Always allow removing instance\_uuid from node in maintenance mode * API: Get a subset of fields from Nodes * Switch from MySQL-python to PyMySQL * Updated from global requirements * copy editing of ironic deploy docs * Transition state machine to use automaton oslo lib * Finish switch to inspector and inspector-client * Rename ilo\_power.\_attach\_boot\_iso to improve readability * Expose current clean step in the API * Fix broken ACL tests * Add option to configure passes in erase\_devices * Refactor node's 
and driver's vendor passthru to a common place * Change return value of [driver\_]vendor\_passthru to dict * Add Wake-On-Lan driver documentation * Fixes a bug on the iLO driver tutorial * Address follow-up comments on ucs drivers * Added documentation to Vagrantfile * Updated from global requirements * Addresses UcsSdk install issue * Don't raise exception from set\_failed\_state() * Add disk layout check on re-provisioning * Add boot interface in Ironic * Fix Cisco UCS slow tests * Validate capability in properties and instance\_info * Pass environment variables of proxy to tox * DRAC: fix set/get boot device for 11g * Enable flake8 checking of ironic/nova/\* * Remove tools/flakes.py * Wake-On-Lan Power interface * IPA: Do a soft power off at the end of deployment * Remove unnecessary validation in PXE * Add additional logging around cleaning * remove unneeded sqlalchemy-migrate requirement * Add vendor-passthru to attach and boot an ISO * Updated from global requirements * Sync with latest oslo-incubator * Add pxe\_ucs and agent\_ucs drivers to manage Cisco UCS servers * Doc: Use --notest for creating venv * Updated from global requirements * Fix DRAC driver job completion detection * Add additional required RPMs to dev instructions * Update docs for usage of python-ironicclient * Install guide reflects changes on master branch * Remove auth token saving from iLO driver * Don't support deprecated drivers' vendor\_passthru * Updated from global requirements * Enforce flake8 E123/6/7/8 in ironic * Change driver\_info to driver\_internal\_info in conductor * Use svg as it looks better/scales better than png * Updated from global requirements * Use oslo config import methods for Keystone options * Add documentation for getting a node's console * fix node-get-console returns url always start with http * Update the config drive doc to replace deprecated value * Updated from global requirements * Remove bogus conditional from node\_update * Prevent node delete based on 
provision, not power, state * Revert "Add simplegeneric to py34 requirements" * Do not save auth token on TFTP server in PXE driver * Updated from global requirements * Update iLO documentation for UEFI secure boot * ironic-discoverd is being renamed to ironic-inspector * Update doc "install from packages" section to include Red Hat * Improve strictness of iLO test cases error checking * Remove deprecated pxe\_deploy\_{kernel, ramdisk} * Get admin auth token for Glance client in image\_service * Fix: iSCSI iqn name RFC violation * Update documentation index.rst * Update AMT Driver doc * Refactor ilo.common.\_prepare\_floppy\_image() * Do not add auth token in context for noauth API mode * DRAC: config options for retry values * Disable meaningless sort keys in list command * Update pyremotevbox documentation * Fix drac implementation of set\_boot\_device * Update to hacking 0.10.x * Prepare for hacking 0.10.x * Rename gendocs tox environment * Add simplegeneric to py34 requirements * Reduce AMT Driver's dependence on new release of Openwsman * Fixes some docstring warnings * Slight changes to Vagrant developer configs * Delete neutron ports when the node cleaning fails * Update docstring DHCPNotFound -> DHCPLoadError * Wrap all DHCP provider load errors * Add partition number to list\_partitions() output fields * Added vagrant VM for developer use * Execute "parted" from root in list\_partitions() * Remove unused CONF variable in test\_ipminative.py * Ironic doesn't use cacert while talking to Swift * Fix chainloading iPXE (undionly.kpxe) * Updated from global requirements * Improve root partition size check in deploy\_partition\_image * ironic/tests/drivers: Add autospec=True and spec\_set= * Fix and enhance "Exercising the Services Locally" docs * Fix typos in Ironic docs * Fix spelling error in docstring * Remove deprecated exceptions * Check temp dir is usable for ipmitool driver * Improve strictness of AMT test cases error checking * Improve strictness of iRMC 
test cases error checking * Fix Python 3.4 test failure * Remove unneeded usage of '# noqa' * Drop use of 'oslo' namespace package * Updated from global requirements * Specify environment variables needed for a standalone usage * Adds OCS Power and Management interfaces * Run tests in py34 environment * Adds docstrings to some functions in ironic/conductor/manager.py * Add section header to state machines page * Update config generator to use oslo released libs * Use oslo\_log lib * Include graphviz in install prerequisites * Link to config reference in our docs * Adopt config generator * Remove cleanfail->cleaning from state diagram * Imported Translations from Transifex * Return HTTP 400 for invalid sort\_key * Update the Vendor Passthru documentation * Add maintenance mode example with reason * Add logical name example to install-guide * Improve strictness of DRAC test cases error checking * Add a venv that can generate/write/update the states diagram * Log attempts while trying to sync power state * Disable clean\_step if config option is set to 0 * Improve iSCSI deployment logs * supports alembic migration for db2 * Updated from global requirements * Update iLO documentation for capabilities 2015.1.0 -------- * ironic/tests/drivers/amt: Add autospec=True to mocks * ironic/tests/drivers/irmc: Add spec\_set & autospec=True * Updated from global requirements * ironic/tests/drivers/drac: Add spec\_set= or autospec=True * Create a 3rd party mock specs file * Release Import of Translations from Transifex * Document how to configure Neutron with iPXE * Remove state transition: CLEANFAIL -> CLEANING * Remove scripts for migrating nova baremetal * Add a missing comma and correct some typos * Remove API reboot from cleaning docs * Remove scripts for migrating nova baremetal * Fixed is\_glance\_image(image\_href) predicate logic * Rearrange some code in PXEDeploy.prepare * Fixes typo in ironic/api/hooks.py and removes unnecessary parenthesis * update .gitreview for 
stable/kilo * Add cleaning network docs * Remove ironic compute driver and sched manager * ironic/tests/drivers/ilo: Add spec= & autospec=True to mocks * Replace 'metrics' with 'meters' in option * Update some config option's help strings * document "scheduler\_use\_baremetal\_filters" option in nova.conf * Fix heartbeat when clean step in progress * Fix heartbeat when clean step in progress * Update ilo drivers documentation for inspection * Open Liberty development 2015.1.0rc1 ----------- * Local boot note about updated deploy ramdisk * Convert internal RPC continue\_node\_cleaning to a "cast" * iLO driver documentation for node cleaning * Fix typos in vendor-passthru.rst * Add Ceilometer to Ironic's Conceptual Architecture * Improve AMT driver doc * iLO driver documentation for UEFI secure boot * Fix for automated boot iso issue with IPA ramdisk * Update session headers during initialization of AgentClient * Agent driver fails without Ironic-managed TFTP * Add notes about upgrading juno->kilo to docs * Address comments on I5cc41932acd75cf5e9e5b626285331f97126932e * Use mock patch decorator for eventlet.greenthread.sleep * Cleanup DHCPFactory.\_dhcp\_provider after tests * Follow-up to "Add retry logic to \_exec\_ipmitool" * Nit fixes for boot\_mode being overwritten * Update installation service overview * Don't pass boot\_option: local for whole disk images * Fixup post-merge comments on cleaning document * Use hexhyp instead of hexraw iPXE type * Fix exception handling in Glance image service * Update proliantutils version required for K release * Fix type of value in error middleware response header * Imported Translations from Transifex * Fix mocks not being stopped as intended * Add maintenance check before call do\_node\_deploy * Fix VM stuck when deploying with pxe\_ssh + local boot * Fix bad quoting in quickstart guide * Set hash seed to 0 in gendocs environment * boot\_mode is overwritten in node properties * Add retry logic to \_exec\_ipmitool * Check 
status of bootloader installation for DIB ramdisk * Add missing mock for test\_create\_cleaning\_ports\_fail * Shorten time for unittest test\_download\_with\_retries * Disable XML now that we have WSME/Pecan support * tests/db: Add autospec=True to mocks * Sync with oslo.incubator * Enable cleaning by default * Improve error handling when JSON is not returned by agent * Fix help string for glance auth\_strategy option * Document ports creating configuration for in-band inspection * Remove DB tests workarounds * Fix formatting issue in install guide * Add missing test for DB migration 2fb93ffd2af1 * Regenerate states diagram after addition of CLEANING * Fix UnicodeEncodeError issue when the language is not en\_US * pxe deploy fails for whole disk images in UEFI * Remove setting language to en\_US for 'venv' * Add config drive documentation * Refactor test code to reduce duplication * Mock time.sleep() for two unittests * Clarify message for power action during cleaning * Add display-name option to example apache2 configuration * New field 'name' not supported in port REST API * Update doc for test database migrations * Add PXE-AMT driver's support of IPA ramdisk * Fix cleaning nits * Update docs: No power actions during cleaning * Prevent power actions on node in cleaning * Followup to comments on Cleaning Docs * Remove inspect\_ports from ilo inspection * Removed hardcoded IDs from "chassis" test resources * Fix is\_hostname\_safe for RFC compliance * Enable pxe\_amt driver with localboot * Improve backwards compat on API behaviour * Use node UUID in logs instead of node ID * Add IPA to enable drivers doc's page * Top level unit tests: Use autospec=True for mocks * DRAC: power on during reboot if powered off * Update pythonseamicroclient package version * A wrong variable format used in msg of ilo: * Add documentation for Cleaning * Explictly state that reboot is expected to work with powered off nodes * Prevent updating the node's driver if console is enabled * 
Agent driver: no-op heartbeat for maintenanced node * Deploys post whole disk image deploy fails * Allow node.instance\_uuid to be removed during cleaning * Attach ilo\_boot\_iso only if node is active * Ensure configdrive isn't mounted for ilo drivers * Ensure configdrive isn't mounted for ipxe/elilo * Correct update\_dhcp\_opts methods * Fix broken unittests usage of sort() * Add root device hints documentation * Ensure configdrive isn't mounted in CoreOS ramdisks * Add local boot with partition images documentation * Add a return after saving node power state * Fix formatting error in states\_to\_dot * pxe partition image deploy fails in UEFI boot mode * Updated from global requirements * Fix common misspellings * Ilo drivers sets capabilities:boot\_mode in node * Add whole disk image support for iscsi\_ilo using agent ramdisk * Fixed nits for secure boot support for iLO Drivers * Fix typos in ironic/ironic/drivers/modules * fix invalid asserts in tests * Fail deploy if root uuid or disk id isn't available * Hide new fields via single method * Update "Ironic as a standalone service" documentation * DRAC: add retry capability to wsman client operations * Secure boot support for agent\_ilo driver * Secure boot support for iscsi\_ilo driver * Changes for secure boot support for iLO drivers 2015.1.0b3 ---------- * follow up patch for ilo capabilities * Support agent\_ilo driver to perform cleaning * Implement cleaning/zapping for the agent driver * Add Cleaning Operations for iLO drivers * Automate uefi boot iso creation for iscsi\_ilo driver * Generate keystone\_authtoken options in sample config file * Use task.spawn\_after to maintain lock during cleaning * is\_whole\_disk\_image might not exist for previous instances * Hide inspection\_\*\_at fields if version < 1.6 * Disable cleaning by default * Suppress urllib3.connection INFO level logging * Allow periods (".") in hostnames * iscsi\_ilo driver do not validate boot\_option * Sync from oslo.incubator * Common 
changes for secure boot support * Add pxe\_irmc to the sending IPMI sensor data driver list * iLO driver updates node capabilities during inspection * iLO implementation for hardware inspection * Address nits in uefi agent iscsi deploy commit * Raise exception for Agent Deploy driver when using partition images * Add uefi support for agent iscsi deploy * Enable agent\_ilo for uefi-bios switching * Fixup log message for discoverd * Update unittests and use NamedTemporaryFile * Rename \_continue\_deploy() to pass\_deploy\_info() * Write documentation for hardware inspection * Start using in-band inspection * Log message is missing a blank space * Address comments on cleaning commit * IPA: Add support for root device hints * Use Mock.patch decorator to handle patching amt management module * iscsi\_ilo driver to support agent ramdisk * Enhance AMT driver documentation, pt 2 * Implement execute clean steps * Add missing exceptions to destroy\_node docstrings * Force LANGUAGE=en\_US in test runs * Add validations for root device hints * Add localboot support for uefi boot mode * ironic port deletion fails even if node is locked by same process * Add whole disk image support in iscsi\_ilo driver * Enhance AMT driver documentation * Use oslo\_policy package * Use oslo\_context package * Adds support for deploying whole disk images * Add AMT-PXE driver doc * Fix two typos * Add node UUID to deprecated log message * Fix wrong chown command in deployment guide * PXE driver: Deprecate pxe\_deploy\_{ramdisk, kernel} * Add label to virtual floppy image * Make sure we don't log the full content of the config drive * Update API doc to reflect node uuid or name * Fix typo agaist->against * Use strutils from oslo\_utils * Updated from global requirements * Add AMT-PXE-Driver Power&Management&Vendor Interface * Fix wrong log output in ironic/ironic/conductor/manager.py * Refactor agent iscsi deploy out of pxe driver * Tiny improvement of efficient * Make try block shorter for 
\_make\_password\_file * Add module for in-band inspection using ironic-discoverd * Fix take over for agent driver * Add server-supported min and max API version to HTTPNotAcceptable(406) * Updated from global requirements * Add tftp mapfile configuration in install-guide * Fix nits in cleaning * Fix nits for supporting non-glance images * Follow-up patch for generic node inspection * Add a note to dev-quickstart * Add iter\_nodes() helper to the conductor manager * Implement Cleaning in DriverInterfaces * Update install-guide for Ubuntu 14.10 package changes * Use mock instead of fixtures when appropriate * Generic changes for Node Inspection * Fix typo in "Enabling Drivers" * Support for non-Glance image references * Create new config for pecan debug mode * Local boot support for IPA * PXE drivers support for IPA * Update documentation on VirtualBox drivers * Add localboot support for iscsi\_ilo driver * Improve last\_error for async exceptions * Fix IPMI support documentation * Root partition should be bootable for localboot * Updated from global requirements * Add iRMC Management module for iRMC Driver * Spelling error in Comment * Remove unused code from agent vendor lookup() * Add documentation for VirtualBox drivers * Implement Cleaning States * Missing mock causing long tests * Add support for 'latest' in microversion header * Add tests for ilo\_deploy driver * Fix reboot logic of iRMC Power Driver * Update the states generator and regenerate the image * Ensure state values are 15 characters or less * Minor changes to InspectInterface * INSPECTFAIL value is more readable * Disable n-novnc, heat, cinder and horizon on devstack * Return required properties for agent deploy driver * Remove unused modules from ironic/openstack/common * Use functions from oslo.utils * Update Ilo drivers to use REST API interface to iLO * Add dhcp-all-interfaces to get IP to NIC other than eth0 * Log exception on tear\_down failure * Fix PEP8 E124 & E125 errors * Mock sleep 
function for OtherFunctionTestCase * Log node UUID rather than node object * Updated from global requirements * Add InspectInterface for node-introspection * Correctly rebuild the PXE file during takeover of ACTIVE nodes * Fix PEP8 E121 & E122 errors * Add documentation for the IPMI retry timeout option * Use oslo\_utils replace oslo.utils * Avoid deregistering conductor following SIGUSR1 * Add states required for node-inspection * For flake8 check, make the 'E12' ignore be more granular * add retry logic to is\_block\_device function * Imported Translations from Transifex * Move oslo.config references to oslo\_config * Add AMT-PXE-Driver Common Library * Fix typos in documentation: Capabilities * Removed unused image file * Address final comments of a4cf7149fb * Add concept of stable states to the state machine * Fix ml2\_conf.ini settings * Vendorpassthru doesn't get correct 'self' * Remove docs in proprietary formats * Fix file permissions in project * Imported Translations from Transifex * Updated from global requirements * Remove deploy\_is\_done() from AgentClient * AgentVendorInterface: Move to a common place * Stop console at first if console is enabled when destroy node * fixed typos from eligable to eligible and delition to deletion * Add logical name support to Ironic * Add support for local boot * Fix chown invalid option -- 'p' * ipmitool drivers fail with integer passwords * Add the subnet creation step to the install guide 2015.1.0b2 ---------- * improve iSCSI connection check * Remove min and max from base.Version * Add list of python driver packages * Add policy show\_password to mask passwords in driver\_info * Conductor errors if enabled\_drivers are not found * Add MANAGEABLE state and associated transitions * Raise minimum API version to 1.1 * Correct typo in agent\_client * Fix argument value for work\_on\_disk() in unit test * Documentation: Describe the 'spacing' argument * update docstring for driver\_periodic\_task's parallel param * Use 
prolianutils module for ilo driver tests * Add documentation on parallel argument for driver periodic tasks * Rename provision\_state to power\_state in test\_manager.py * Refactor ilo.deploy.\_get\_single\_nic\_with\_vif\_port\_id() * Update agent driver with new field driver\_internal\_info * Updated from global requirements * Add support for driver-specific periodic tasks * Partial revert of 4606716 until we debug further * Clean driver\_internal\_info when changes nodes' driver * Add Node.driver\_internal\_info * Move oslo.config references to oslo\_config * Move oslo.db references to oslo\_db * Revert "Do not pass PXE net config from bootloader to ramdisk" * Bump oslo.rootwrap to 1.5.0 * Drop deprecated namespace for oslo.rootwrap * Add VirtualBox drivers and its modules * region missing in endpoint selection * Add :raises: for Version constructor docstring * Improve testing of the Node's REST API * Rename NOSTATE to AVAILABLE * Add support for API microversions * Address final comments of edf532db91 * Add missing exceptions into function docstring * Fix typos in commit I68c9f9f86f5f113bb111c0f4fd83216ae0659d36 * Add logic to store the config drive passed by Nova * Do not POST conductor\_affinity in tests * Add 'irmc\_' prefix to optional properties * Actively check iSCSI connection after login * Updated from global requirements * Add iRMC Driver and its iRMC Power module * Fix drivers.rst doc format error * Improve test assertion for get\_glance\_image\_properties * Do not pass PXE net config from bootloader to ramdisk * Adds get\_glance\_image\_properties * Fix filter\_query in drac/power interface * Updated from global requirements * Simplify policy.json * Replace DIB installation step from git clone to pip * Add a TODO file * Updated from global requirements * Fix function docstring of \_get\_boot\_iso\_object\_name() * Improve ironic-dbsync help strings * Clear locks on conductor startup * Remove argparse from requirements * Use oslo\_serialization 
replace oslo.serialization * Agent driver fails with Swift Multiple Containers * Add ipmitool to quickstart guide for Ubuntu * Allow operations on DEPLOYFAIL'd nodes * Allow associate an instance independent of the node power state * Improve docstrings about TaskManager's spawning feature * DracClient to handle ReturnValue validation * Fix instance\_info parameters clearing * DRAC: Fix wsman host verification * Updated from global requirements * Clean up ilo's parse\_driver\_info() * Fix ssh \_get\_power\_status as it returned status for wrong node * Fix RPCService and Ironic Conductor so they shut down gracefully * Remove jsonutils from openstack.common * Remove lockfile from dependencies * Remove IloPXEDeploy.validate() * Force glance recheck for kernel/ramdisk on rebuild * iboot power driver: unbound variable error * Remove unused state transitions * PXE: Add configdrive support * Rename localrc for local.conf * DracClient to handle ClientOptions creation * Ensure we don't have stale power state in database after power action * Remove links autogenerated from module names * Make DD block size adjustable * Improve testing of state transitions * Convert drivers to use process\_event() * Update service.py to support graceful Service shutdown * Ensure that image link points to the correct image * Raise SSH failure messages to the error level * Make 'method' explicit for VendorInterface.validate() * Updated from global requirements * Provided backward compat for enforcing admin policy * Allow configuration of neutronclient retries * Convert check\_deploy\_timeout to use process\_event * Add requests to requirements.txt * Enable async callbacks from task.process\_event() * Document dependency on \`fuser\` for pxe driver * Distinguish between prepare + deploy errors * Avoid querying the power state twice * Add state machine to documentation * Updated from global requirements * Adjust the help strings to better reflect usage * Updated from global requirements * Updated 
from global requirements * Update etc/ironic/ironic.conf.sample * Fix policy enforcement to properly detect admin * Minor changes to state model * Add documentation to create in RegionOne * Delete unnecessary document files * Updated from global requirements * display error logging should be improved * Refactor async helper methods in conductor/manager.py * Hide oslo.messaging DEBUG logs by default * add comments for NodeStates fields * Stop conductor if no drivers were loaded * Fix typo in install-guide.rst * Reuse methods from netutils * Use get\_my\_ipv4 from oslo.utils * improve the neutron configuration in install-guide * Refactoring for Ironic policy * PXE: Pass root device hints via kernel cmdline * Extend API multivalue fields * Add a fsm state -> dot diagram generator * Updated from global requirements * Update command options in the Installation Guide 2015.1.0b1 ---------- * Improve Agent deploy driver validation * Add new enrollment and troubleshooting doc sections * Begin using the state machine for node deploy/teardown * Add base state machine * Updated from global requirements * Get rid of set\_failed\_state duplication * Remove Python 2.6 from setup.cfg * Updated from global requirements * Update dev quick-start for devstack * Updated from global requirements * Correct vmware ssh power manager * rename oslo.concurrency to oslo\_concurrency * Remove duplicate dependencies from dev-quickstart docs * Do not strip 'glance://' prefix from image hrefs * Updated from global requirements * Fix image\_info passed to IPA for image download * Use Literal Blocks to write code sample in docstring * Workflow documentation is now in infra-manual * Add tests to iscsi\_deploy.build\_deploy\_ramdisk\_options * Fix for broken deploy of iscsi\_ilo driver * Updated from global requirements * Add info on creating a tftp map file * Add documentation for SeaMicro driver * Fixed typo in Drac management driver test * boot\_devices.PXE value should match with pyghmi define * 
Add decorator that requires a lock for Drac management driver * Remove useless deprecation warning for node-update maintenance * Ilo tests refactoring * Change some exceptions from invalid to missing * Add decorator that requires a lock for Drac power driver * Change methods from classmethod to staticmethod * iLO Management Interface * Improve docs for running IPA in Devstack * Update 'Introduction to Ironic' document * Avoid calling \_parse\_driver\_info in every test * Updated from global requirements * Correct link in user guide * Minor fix to install guide for associating k&r to nodes * Add serial console feature to seamicro driver * Support configdrive in agent driver * Add driver\_validate() * Update drivers VendorInterface validate() method * Adds help for installing prerequisites on RHEL * Add documentation about Vendor Methods * Make vendor methods discoverable via the Ironic API * Fix PXEDeploy class docstring * Updated from global requirements * Vendor endpoints to support different HTTP methods * Add ipmitool as dependency on RHEL/Fedora systems * dev-quickstart.rst update to add required packages * Add gendocs tox job for generating the documentation * Add gettext to packages needed in dev quickstart * Convert qcow2 image to raw format when deploy * Update iLO driver documentation * Disable IPMI timeout before setting boot device * Updated from global requirements * ConductorManager catches Exceptions * Remove unused variable in agent.\_get\_interfaces() * Enable hacking rule E265 * Add sync and async support for passthru methods * Fix documentation on Standard driver interfaces * Add a mechanism to route vendor methods * Remove redundant FunctionalTest usage in API tests * Use wsme.Unset as default value for API objects * Fix traceback on rare agent error case * Make \_send\_sensor\_data more cooperative * Updated from global requirements * Add logging to driver vendor\_passthru functions * Support ipxe with Dnsmasq * Correct "returns" line in PXE 
deploy method * Remove all redundant setUp() methods * Update install guide to install tftp * Remove duplicated \_fetch\_images function * Change the force\_raw\_image config usage * Clear maintenance\_reason when setting maintenance=False * Removed hardcoded IDs from "port" test resources * Switch to oslo.concurrency * Updated from global requirements * Use docstrings for attributes in api/controllers * Put nodes-related API in same section * Fix get\_test\_node attributes set incorrectly * Get new auth token for ramdisk if old will expire soon * Delete unused 'use\_ipv6' config option * Updated from global requirements * Add maintenance to RESTful web API documentation * Updated from global requirements * Iterate over glance API servers * Add API endpoint to set/unset the node maintenance mode * Removed hardcoded IDs from "node" test resources * Add maintenance\_reason when setting maintenance mode * Add Node.maintenance\_reason * Fix F811 error in pep8 * Improve hash ring value conversion * Add SNMP driver for Aten PDU's * Update node-validate error messages * Store image disk\_format and container\_format * Continue heartbeating after DB connection failure * TestAgentVendor to use the fake\_agent driver * Put a cap on our cyclomatic complexity * More helpful failure for tests on noexec /tmp * Update doc headers at end of Juno * Fix E131 PEP8 errors 2014.2 ------ * Add the PXE VendorPassthru interface to PXEDracDriver * Add documentation for iLO driver(s) * Enable E111 PEP8 check * Updated from global requirements * Fix F812 PEP8 error * Enable H305 PEP8 check * Enable H307 PEP8 check * Updated from global requirements * Enable H405 PEP8 check * Enable H702 PEP8 check * Enable H904 PEP8 check * Migration to oslo.serialization * Add the PXE VendorPassthru interface to PXEDracDriver * Adds instructions for deploying instances on real hardware * Fix pep8 test * Add missing attributes to sample API objects * Fix markup-related issues in documentation * Add 
documentation for PXE UEFI setup 2014.2.rc2 ---------- * Clear hash ring cache in get\_topic\_for\* * Fix exceptions names and messages for Keystone errors * Remove unused change\_node\_maintenance\_mode from rpcapi * Imported Translations from Transifex * Clear hash ring cache in get\_topic\_for\* * Move database fixture to a separate test case * KeyError from AgentVendorInterface.\_heartbeat() * Validate the power interface before deployment * Cleans up some Sphinx rST warnings in Ironic * Remove kombu as a dependency for Ironic 2014.2.rc1 ---------- * Make hash ring mapping be more consistent * Add periodic task to rebuild conductor local state * Open Kilo development * Add "affinity" tracking to nodes and conductors * ilo\* drivers to use only ilo credentials * Update hacking version in test requirements * Add a call to management.validate(task) * Replace custom lazy loading by stevedore * Updated from global requirements * Remove useless variable in migration * Use DbTestCase as test base when context needed * For convention rename the first classmethod parameter to cls * Always reset target\_power\_state in node\_power\_action * Imported Translations from Transifex * Stop running check\_uptodate in the pep8 testenv * Add HashRingManager to wrap hash ring singleton * Fix typo in agent validation code * Conductor changes target\_power\_state before starting work * Adds openSUSE support for developer documentation * Updated from global requirements * Remove untranslated PO files * Update ironic.conf.sample * Remove unneeded context initialization in tests * Force the SSH commands to use their default language * Add parameter to override locale to utils.execute * Refactor PXE clean up tests * Updated from global requirements * Don't reraise Exceptions from agent driver * Add documentation for ironic-dbsync command * Do not return 'id' in REST API error messages * Separate the agent driver config from the base localrc config * pxe\_ilo driver to call iLO 
set\_boot\_device * Remove redundant context parameter * Update docs with new dbsync command * Update devstack docs, require Ubuntu 14.04 * Do not use the context parameter on refresh() * Pass ipa-driver-name to agent ramdisk * Do not set the context twice when forming RPC objects * Make context mandatory when instantiating a RPC object * Neutron DHCP implementation to raise exception if no ports have VIF * Do not cache auth token in Neutron DHCP provider * Imported Translations from Transifex * add\_node\_capability and rm\_node\_capability unable to save changes to db * Updated from global requirements * Handle SNMP exception error.PySnmpError * Use standard locale in list\_partitions * node\_uuid should not be used to create test port * Revert "Revert "Search line with awk itself and avoid grep"" * Fix code error in pxe\_ilo driver * Add unit tests for SNMPClient * Check whether specified FS is supported * Sync the doc with latest code * Add a doc note about the vendor\_passthru endpoint * Remove 'incubated' documentation theme * Import modules for fake IPMINative/iBoot drivers * Allow clean\_up with missing image ref * mock.called\_once\_with() is not a valid method * Fix Devstack docs for zsh users * Fix timestamp column migration * Update ironic states and documentation * Stop using intersphinx * Updated from global requirements * Remove the objectify decorator * Add reserve() and release() to Node object * Add uefi boot mode support in IloVirtualMediaIscsiDeploy * Don't write python bytecode while testing * Support for setting boot mode in pxe\_ilo driver * Remove bypassing of H302 for gettextutils markers * Revert "Search line with awk itself and avoid grep" * Search line with awk itself and avoid grep * Add list\_by\_node\_id() to Port object * Remove unused modules from openstack-common.conf * Sync the document with the current implementation * Unify the sensor data format * Updated from global requirements * Deprecate Ironic compute driver and sched 
manager * Log ERROR power state in node\_power\_action() * Fix compute\_driver and scheduler\_host\_manager in install-guide * Use oslo.utils instead of ironic.openstack.common * Use expected, actual order for PXE template test * Fix agent PXE template * Translator functions cleanup part 3 * Translator functions cleanup part 2 * Imported Translations from Transifex * Updated from global requirements * Remove XML from api doc samples * Update ironic.conf.sample * Fix race conditions running pxe\_utils tests in parallel * Switch to "incubating" doc theme * Minor fixes for ipminative console support * Translator functions cleanup part 4 * Translator functions cleanup part 1 * Remove unnecessary mapping from Agent drivers * mock.assert\_called\_once() is not valid method * Use models.TimestampMixin from oslo.db * Updated from global requirements 2014.2.b3 --------- * Driver merge review comments from 111425 * Nova review updates for \_node\_resource * Ignore backup files * IloVirtualMediaAgent deploy driver * IloVirtualMediaIscsi deploy driver * Unbreak debugging via testr * Interactive console support for ipminative driver * Add UEFI based deployment support in Ironic * Adds SNMP power driver * Control extra space for images conversion in image\_cache * Use metadata.create\_all() to initialise DB schema * Fix minor issues in the DRAC driver * Add send-data-to-ceilometer support for pxe\_ipminative driver * Reduce redundancy in conductor manager docstrings * Fix typo in PXE driver docstrings * Update installation guide for syslinux 6 * Updated from global requirements * Imported Translations from Transifex * Avoid deadlock when logging network\_info * Implements the DRAC ManagementInterface for get/set boot device * Rewrite images tests with mock * Add boot\_device support for vbox * Remove gettextutils \_ injection * Make DHCP provider pluggable * DRAC wsman\_{enumerate, invoke}() to return an ElementTree object * Remove futures from requirements * Script to migrate 
Nova BM data to Ironic * Imported Translations from Transifex * Updated from global requirements * Fix unit tests with keystoneclient master * Add support for interacting with swift * properly format user guide in RST * Updated from global requirements * Fix typo in user-guide.rst * Add console interface to agent\_ipmitool driver * Add support for creating vfat and iso images * Check ERROR state from driver in \_do\_sync\_power\_state * Set PYTHONHASHSEED for venv tox environment * Add iPXE Installation Guide documentation * Add management interface for agent drivers * Add driver name on driver load exception * Take iSCSI deploy out of pxe driver * Set ssh\_virt\_type to vmware * Update nova driver's power\_off() parameters * return power state ERROR instead of an exception * handle invalid seamicro\_api\_version * Imported Translations from Transifex * Nova ironic driver review update requests to p4 * Allow rebuild of node in ERROR and DEPLOYFAIL state * Use cache in node\_is\_available() * Query full node details and cache * Add in text for text mode on trusty * Add Parallels virtualisation type * IPMI double bridging functionality * Add DracDriver and its DracPower module * use MissingParameterValue exception in iboot * Update compute driver macs\_for\_instance per docs * Update DevStack guide when querying the image UUID * Updated from global requirements * Fix py3k-unsafe code in test\_get\_properties() * Fix tear\_down a node with missing info * Remove d\_info param from \_destroy\_images * Add docs for agent driver with devstack * Removes get\_port\_by\_vif * Update API document with BootDevice * Replace incomplete "ilo" driver with pxe\_ilo and fake\_ilo * Handle all exceptions from \_exec\_ipmitool * Remove objectify decorator from dbapi's {get, register}\_conductor() * Improve exception handling in console code * Use valid exception in start\_shellinabox\_console * Remove objectify decorator from dbapi.update\_\* methods * Add list() to Chassis, Node, 
Port objects * Raise MissingParameterValue when validating glance info * Mechanism to cleanup all ImageCaches * Driver merge review comments from 111425-2-3 * Raise MissingParameterValue instead of Invalid * Import fixes from the Nova driver reviews * Imported Translations from Transifex * Use auth\_token from keystonemiddleware * Make swift tempurl key secret * Add method for deallocating networks on reschedule * Reduce running time of test\_different\_sizes * Remove direct calls to dbapi's get\_node\_by\_instance * Add create() and destroy() to Port object * Correct \`op.drop\_constraint\` parameters * Use timeutils from one place * Add create() and destroy() to Chassis object * Add iPXE support for Ironic * Imported Translations from Transifex * Add posix\_ipc to requirements * backport reviewer comments on nova.virt.ironic.patcher * Move the 'instance\_info' fields to GenericDriverFields * Migration to oslo.utils library * Fix self.fields on API Port object * Fix self.fields on API Chassis object * Sync oslo.incubator modules * Updated from global requirements * Expose {set,get}\_boot\_device in the API * Check if boot device is persistent on ipminative * Sync oslo imageutils, strutils to Ironic * Add charset and engine settings to every table * Imported Translations from Transifex * Remove dbapi calls from agent driver * Fix not attribute '\_periodic\_last\_run' * Implements send-data-to-ceilometer * Port iBoot PDU driver from Nova * Log exception with translation * Add ironic-python-agent deploy driver * Updated from global requirements * Imported Translations from Transifex * Clean up calls to get\_port() * Clean up calls to get\_chassis() * Do not rely on hash ordering in tests * Update\_port should expect MACAlreadyExists * Imported Translations from Transifex * Adding swift temp url support * Push the image cache ttl way up * Imported Translations from Transifex * SSH virsh to use the new ManagementInterface * Split test case in 
ironic.tests.conductor.test\_manager * Tune down node\_locked\_retry\_{attempts,interval} config for tests * Add RPC version to test\_get\_driver\_properties 2014.2.b2 --------- * Import fixes from the Nova driver reviews * Generalize exception handling in Nova driver * Fix nodes left in an inconsistent state if no workers * IPMINative to use the new ManagementInterface * Backporting nova host manager changes into ironic * Catch oslo.db error instead of sqlalchemy error * Add a test case for DB schema comparison * remove ironic-manage-ipmi.filters * Implement API to get driver properties * Add drivers.base.BaseDriver.get\_properties() * Implement retry on NodeLocked exceptions * SeaMicro to use the new ManagementInterface * Import fixes from Nova scheduler reviews * Rename/update common/tftp.py to common/pxe\_utils.py * Imported Translations from Transifex * Factor out deploy info from PXE driver * IPMITool to use the new ManagementInterface * Use mock.assert\_called\_once\_with() * Add missing docstrings * Raise appropriate errors on duplicate Node, Port and Chassis creation * Add IloDriver and its IloPower module * Add methods to ipmitool driver * Use opportunistic approach for migration testing * Use oslo.db library * oslo.i18n migration * Import a few more fixes from the Nova driver * Set a more generous default image cache size * Fix wrong test fixture for Node.properties * Make ComputeCapabilitiesFilter work with Ironic * Add more INFO logging to ironic/common/service.py * Clean up nova virt driver test code * Fix node to chassis and port to node association * Allow Ironic URL from config file * Imported Translations from Transifex * Update webapi doc with link and console * REST API 'limit' parameter to only accept positive values * Update docstring for api...node.validate * Document 'POST /v1/.../vendor\_passthru' * ManagementInterface {set, get}\_boot\_device() to support 'persistent' * Use my\_ip for neutron URL * Updated from global requirements * Add 
more INFO logging to ironic/conductor * Specify rootfstype=ramfs deploy kernel parameter * Add set\_spawn\_error\_hook to TaskManager * Imported Translations from Transifex * Updates the Ironic on Devstack dev documentation * Simplify error handling * Add gettextutils.\_L\* to import\_exceptions * Fix workaround for the "device is busy" problem * Allow noauth for Neutron * Minor cleanups to nova virt driver and tests * Update nova rebuild to account for new image * Updated from global requirements * pep8 cleanup of Nova code * PEP fixes for the Nova driver * Fix glance endpoint tests * Update Nova's available resources at termination * Fix the section name in CONTRIBUTING.rst * Add/Update docstrings in the Nova Ironic Driver * Update Nova Ironic Driver destroy() method * Nova Ironic driver get\_info() to return memory stats in KBytes * Updates Ironic Guide with deployment information * Add the remaining unittests to the ClientWrapper class * Wait for Neutron port updates when using SSHPower * Fix 'fake' driver unable to finish a deploy * Update "Exercising the Services Locally" doc * Fixing hardcoded glance protocol * Remove from\_chassis/from\_nodes from the API doc * Prevent updating UUID of Node, Port and Chassis on DB API level * Imported Translations from Transifex * Do not delete pxe\_deploy\_{kernel, ramdisk} on tear down * Implement security groups and firewall filtering methods * Add genconfig tox job for sample config file generation * Mock pyghmi lib in unit tests if not present * PXE to pass hints to ImageCache on how much space to reclaim * Add some real-world testing on DiskPartitioner * Eliminate races in Conductor \_check\_deploy\_timeouts * Use temporary dir for image conversion * Updated from global requirements * Move PXE instance level parameters to instance\_info * Clarify doc: API is admin only * Mock time.sleep for the IPMI tests * Destroy instance to clear node state on failure * Add 'context' parameter to get\_console\_output() * Cleanup 
virt driver tests and verify final spawn * Test fake console driver * Allow overriding the log level for ironicclient * Virt driver logging improvements * ipmitool driver raises DriverLoadError * VendorPassthru.validate()s call \_parse\_driver\_info * Enforce a minimum time between all IPMI commands * Remove 'node' parameter from the validate() methods * Test for membership should be 'not in' * Replace mknod() with chmod() * Factoring out PXE and TFTP functions * Let ipmitool natively retry commands * Sync processutils from oslo code * Driver interface's validate should return nothing * Use .png instead of .gif images * Fix utils.execute() for consistency with Oslo code * remove default=None for config options 2014.2.b1 --------- * Stop ipmitool.validate from touching the BMC * Set instance default\_ephemeral\_device * Add unique constraint to instance\_uuid * Add node id to DEBUG messages in impitool * Remove 'node' parameter from the Console and Rescue interfaces * TaskManager: Only support single node locking * Allow more time for API requests to be completed * Add retry logic to iscsiadm commands * Wipe any metadata from a nodes disk * Rework make\_partitions logic when preserve\_ephemeral is set * Fix host manager node detection logic * Add missing stats to IronicNodeState * Update IronicHostManager tests to better match how code works * Update Nova driver's list\_instance\_uuids() * Remove 'fake' and 'ssh' drivers from default enabled list * Work around iscsiadm delete failures * Mock seamicroclient lib in unit tests if not present * Cleanup mock patch without \`with\` part 2 * Add \_\_init\_\_.py for nova scheduler filters * Skip migrations test\_walk\_versions instead of pass * Improving unit tests for \_do\_sync\_power\_state * Fix AttributeError when calling create\_engine() * Reuse validate\_instance\_and\_node() Nova ironic Driver * Fix the logging message to identify node by uuid * Fix concurrent deletes in virt driver * Log exceptions from deploy and 
tear\_down * PXE driver to validate the requested image in Glance * Return the HTTP Location for accepted requests * Return the HTTP Location for newly created resources * Fix tests with new keystoneclient * list\_instances() to return a list of instances names * Pass kwargs to ClientWrapper's call() method * Remove 'node' parameter from the Power interface * Set the correct target versions for the RPC methods * Consider free disk space before downloading images into cache * Change NodeLocked status code to a client-side error * Remove "node" parameter from methods handling power state in docs * Add parallel\_image\_downloads option * Synced jsonutils from oslo-incubator * Fix chassis bookmark link url * Remove 'node' parameter from the Deploy interface * Imported Translations from Transifex * Remove all mostly untranslated PO files * Cleanup images after deployment * Fix wrong usage of mock methods * Using system call for downloading files * Run keepalive in a dedicated thread * Don't translate debug level logs * Update dev quickstart guide for ephemeral testing * Speed up Nova Ironic driver tests * Renaming ironicclient exceptions in nova driver * Fix bad Mock calls to assert\_called\_once() * Cleanup mock patch without \`with\` part 1 * Corrects a typo in RESTful Web API (v1) document * Updated from global requirements * Clean up openstack-common.conf * Remove non-existent 'pxe\_default\_format' parameter from patcher * Remove explicit dependency on amqplib * Pin RPC client version min == max * Check requested image size * Fix 'pxe\_preserve\_ephemeral' parameter leakage * RPC\_API\_VERSION out of sync * Simplify calls to ImageCache in PXE module * Implement the reboot command on the Ironic Driver * Place root partition last so that it can always be expanded * Stop creating a swap partition when none was specified * Virt driver change to use API retry config value * Implement more robust caching for master images * Decouple state inspection and availability 
check * Updated from global requirements * Fix ironic node state comparison * Add create() and destroy() to Node * Fix typo in rpcapi.driver\_vendor\_passthru * Support serial console access * Remove 'node' parameter from the VendorPassthru interface * Updated from global requirements * Synced jsonutils from oslo-incubator * Fix chassis-node relationship * Implement instance rebuild in nova.virt.driver * Sync oslo logging * Add ManagementInterface * Clean oslo dependencies files * Return error immediately if set\_console\_mode is not supported * Fix bypassed reference to node state values * Updated from global requirements * Port to oslo.messaging * Drivers may expose a top-level passthru API * Overwrite instance\_exists in Nova Ironic Driver * Update Ironic User Guide post landing for 41af7d6b * Spawn support for TaskManager and 2 locking fixes * Document ClusteredComputeManager * Clean up calls to get\_node() * nova.virt.ironic passes ephemeral\_gb to ironic * Implement list\_instance\_uuids() in Nova driver * Modify the get console API * Complete wrapping ironic client calls * Add worker threads limit to \_check\_deploy\_timeouts task * Use DiskPartitioner * Better handling of missing drivers * Remove hardcoded node id value * cleanup docstring for drivers.utils.get\_node\_mac\_addresses * Update ironic.conf.sample * Make sync\_power\_states yield * Refactor sync\_power\_states tests to not use DB * Add DiskPartitioner * Some minor clean up of various doc pages * Fix message preventing overwrite the instance\_uuid * Install guide for Ironic * Refactor the driver fields mapping * Imported Translations from Transifex * Fix conductor.manager test assertion order * Overwriting node\_is\_available in IronicDriver * Sync oslo/common/excutils * Sync oslo/config/generator * Cherry pick oslo rpc HA fixes * Add Ironic User Guide * Remove a DB query for get\_ports\_by\_node() * Fix missed stopping of conductor service * Encapsulate Ironic client retry logic * Do not sync 
power state for new invalidated nodes * Make tests use Node object instead of dict * Sync object list stuff from Nova * Fix Node object version * Cleanup running conductor services in tests * Factor hash ring management out of the conductor * Replace sfdisk with parted * Handling validation in conductor consistently * JsonPatch add operation on existing property * Updated from global requirements * Remove usage of Glance from PXE clean\_up() * Fix hosts mapping for conductor's periodic tasks * Supports filtering port by address * Fix seamicro power.validate() method definition * Update tox.ini to also run nova tests * Updated from global requirements * Fix messages formatting for \_sync\_power\_states * Refactor nova.virt.ironic.driver get\_host\_stats * Use xargs -0 instead of --null * Change admin\_url help in ironic driver * Sync base object code with Nova's * Add Node.instance\_info field * Fix self.fields on API Node object * Show maintenance field in GET /nodes * Move duplicated \_get\_node(s)\_mac\_addresses() * Fix grammar in error string in pxe driver * Reduce logging output from non-Ironic libraries * Open Juno development 2014.1.rc1 ---------- * Fix spelling error in conductor/manager * Improved coverage for ironic API * Manually update all translated strings * Check that all po/pot files are valid * If no swap is specified default to 1MB * Fix Nova rescheduling tear down problem * Remove obsolete po entries - they break translation jobs * Add note to ssh about impact on ci testing * Adds exact match filters to nova scheduler * Clean up IronicNodeStates.update\_from\_compute\_node * ironic\_host\_manager was missing two stats * Imported Translations from Transifex * Fix seamicro validate() method definition * Remove some obsolete settings from DevStack doc * Raise unexpected exceptions during destroy() * Start using oslosphinx theme for docs * Provide a new ComputeManager for Ironic * Nova Ironic driver to set pxe\_swap\_mb in Ironic * Fix strings post 
landing for c63e1d9f6 * Run periodic\_task with a dynamic timer * Update SeaMicro to use MixinVendorInterface * Run ipmi power status less aggressively * Avoid API root controller dependency on v1 dir * Update Neutron if mac address of the port changed * Replace fixtures with mock in test\_keystone.py * Decrease running time of SeaMicro driver tests * Remove logging of exceptions from controller's methods * Imported Translations from Transifex * Fix missed exception raise in \_add\_driver\_fields * Speed up ironic tests * Pass no arguments to \_wait\_for\_provision\_state() * Adds max retry limit to sync\_power\_state task * Updated from global requirements * Imported Translations from Transifex * Stop incorrectly returning rescue: supported * Correct version.py and update current version string * Documentation for deploying DevStack /w Ironic * Hide rescue interface from validate() output * Change set\_console\_mode() to use greenthreads * Fix help string for a glance option * Expose API for fetching a single driver * Change JsonEncodedType.impl to TEXT * Fix traceback hook for avoid duplicate traces * Fix 'spacing' parameters for periodic tasks * Permit passing SSH keys into the Ironic API * Better instance-not-found handling within IronicDriver * Make sure auth\_url exists and is not versionless * Conductor de-registers on shutdown * Change deploy validation exception handling * Suppress conductor logging of expected exceptions * Remove unused method from timeutils * Add admin\_auth\_token option for nova driver * Remove redundant nova virt driver test * Process public API list as regular expressions * Enable pep8 tests for the Nova Ironic Driver * Fix typo tenet -> tenant * Stop logging paramiko's DEBUG and INFO messages * Set boot device to PXE when deploying * Driver utils should raise unsupported method * Delete node while waiting for deploy * Check BMC availability in ipmitool 'validate' method * SeaMicro use device parameter for set\_boot\_device * 
Make the Nova Ironic driver to wait for ACTIVE * Fix misspelled impi to ipmi * Do not use \_\_builtin\_\_ in python3 * Use range instead xrange to keep python 3.X compatibility * Set the database.connection option default value * PXE validate() to fail if no Ironic API URL * Improve Ironic Conductor threading & locks * Generic MixinVendorInterface using static mapping * Conductor logs better error if seamicroclient missing * Add TaskManager lock on change port data * Nova ironic driver to retry on HTTP 503 * Mark hash\_replicas as experimental * do\_node\_deploy() to use greenthreads * Move v1 API tests to separate v1 directory * Pin iso8601 logging to WARN * Only fetch node once for vif actions * Fix how nova ironic driver gets flavor information * Imported Translations from Transifex * API: Add sample() method to remaining models * Import Nova "ironic" driver * Remove errors from API documentation * Add libffi-dev(el) dependency to quickstart * Updated from global requirements * Remove redundant default value None for dict.get 2014.1.b3 --------- * Refactor vendor\_passthru to use conductor async workers * Fix wrong exception raised by conductor for node * Fix params order in assertEqual * Sync the log\_handler from oslo * Fix SeaMicro driver post landing for ba207b4aa0 * Implements SeaMicro VendorPassThru functionality * Implement the SeaMicro Power driver * Fix provision\_updated\_at deserialization * Remove jsonutils from test\_rpcapi * Do not delete a Node which is not powered off * Add provision\_updated\_at to node's resource * Prevent a node in maintenance from being deployed * Allow clients to mark a node as in maintenance * Support preserve\_ephemeral * Updated from global requirements * API: Expose a way to start/stop the console * Add option to sync node power state from DB * Make the PXE driver understand ephemeral disks * Log deploy\_utils.deploy() errors in the PXE driver * Removing get\_node\_power\_state, bumping RPC version * Add timeout for 
waiting callback from deploy ramdisk * Prevent GET /v1/nodes returning maintenance field * Suggested improvements to \_set\_boot\_device * Move ipminative \_set\_boot\_device to VendorPassthru * Sync common db code from Oslo * PXE clean\_up() to remove the pxe\_deploy\_key parameter * Add support for custom libvirt uri * Python 3: replace "im\_self" by "\_\_self\_\_" * Fix race condition when deleting a node * Remove extraneous vim configuration comments for ironic * Do not allow POST ports and chassis internal attributes * Do not allow POST node's internal attributes * Unused 'pxe\_key\_data' & 'pxe\_instance\_name' info * Add provision\_updated\_at field to nodes table * Exclude nodes in DEPLOYWAIT state from \_sync\_power\_states * Sync common config module from Oslo * Get rid object model \`dict\` methods part 4 * Sync Oslo rpc module to Ironic * Clarify and fix the dev-quickstart doc some more * Do not use CONF as a default parameter value * Simplify locking around acquiring Node resources * Improve help strings * Remove shebang lines from code * Use six.moves.urllib.parse instead of urlparse * Add string representation method to MultiType * Fix test migrations for alembic * Sync Oslo gettextutils module to Ironic * NodeLocked returns 503 error status * Supports OPERATOR priv level for ipmitool driver * Correct assertEqual order from patch e69e41c99fb * PXE and SSH validate() method to check for a port * Task object as paramater to validate() methods * Fix dev-quick-start.rst post landing for 9d81333fd0 * API validates driver name for both POST and PATCH * Sync Oslo service module to Ironic * Move ipmitool \_set\_boot\_device to VendorPassthru * Use six.StringIO/BytesIO instead of StringIO.StringIO * Add JSONEncodedType with enforced type checking * Correct PXEPrivateMethodsTestCase.setUp * Don't raise MySQL 2013 'Lost connection' errors * Use the custom wsme BooleanType on the nodes api * Add wsme custom BooleanType type * Fix task\_manager acquire post 
landing for c4f2f26ed * Add common.service config options to sample * Removes use of timeutils.set\_time\_override * Replace assertEqual(None, \*) with assertIsNone in tests * Replace nonexistent mock assert methods with real ones * Log IPMI power on/off timeouts * Remove None as default value for dict get() * Fix autodoc formatting in pxe.py * Fix race condition when changing node states * Use StringType from WSME * Add testing and doc sections to docs/dev-quickstart * Implement \_update\_neutron in PXE driver * Remove \_load\_one\_plugin fallback * SSHPower driver support VMware ESXi * Make ironic-api not single threaded * Remove POST calls in tests for resource creation * Add topic to the change\_node\_maintenance\_mode() RPC method * Fix API inconsistence when changing node's states * Add samples to serve API through Apache mod\_wsgi * Add git dependency to quickstart docs * Add get\_console() method * Remove unnecessary json dumps/loads from tests * Add parameter for filtering nodes by maintenance mode * Rename and update ironic-deploy-helper rootwrap * Remove tox locale overrides * Updated from global requirements * Move eventlent monkeypatch out of cmd/ * Fix misspellings in ironic * Ensure parameter order of assertEqual correct * Return correct HTTP response codes for create ops * Fix broken doc links on the index page * Allow to tear-down a node waiting to be deployed * Improve NodeLocked exception message * Expose 'reservation' field of a node via API * Implement a multiplexed VendorPassthru example * Fix log and test for NeutronAPI.update\_port\_dhcp\_opts * Fix 'run\_as\_root' parameter check in utils * Handle multiple exceptions raised by jsonpatch * API tests to check for the return codes * Imported Translations from Transifex * Move test\_\_get\_nodes\_mac\_addresses * Removed duplicated function to create a swap fs * Updated from global requirements * Add futures to requirements * Fix missing keystone option in ironic.conf.sample * Adds Neutron 
support to Ironic * Replace CONF.set\_default with self.config * Fix ssh\_port type in \_parse\_driver\_info() from ssh.py * Improve handling of invalid input in HashRing class * Sync db.sqlalchemy code from Oslo * Add lockfile>=0.8 to requirements.txt * Remove net\_config\_template options * Remove deploy kernel and ramdisk global config * Update docstrings in ssh.py * SSHPower driver raises IronicExceptions * mock's return value for processutils.ssh\_execute * API: Add sample() method on Node * Update method doc strings in pxe.py * Minor documentation update * Removed unused exceptions * Bump version of sphinxcontrib-pecanwsme * Add missing parameter in call to \_load\_one\_plugin * Docstrings for ipmitool * alembic with initial migration and tests * Update RPC version post-landing for 9bc5f92fb * ipmitool's \_power\_status raises IPMIFailure 2014.1.b2 --------- * Add [keystone\_authtoken] to ironic.conf.sample * Updated from global requirements * Add comment about node.instance\_uuid * Run mkfs as root * Remove the absolute paths from ironic-deploy-helper.filters * PXE instance\_name is no longer mandatory * Remove unused config option - pxe\_deploy\_timeout * Delete the iscsi target * Imported Translations from Transifex * Fix non-unique tftp dir instance\_uuid * Fix non-unique pxe driver 'instance\_name' * Add missing "Filters" section to the ironic-images.filters * Use oslo.rootwrap library instead of local copy * Replace assertTrue with explicit assertIsInstance * Disallow new provision for nodes in maintenance * Add RPC method for node maintenance mode * Fix keystone get\_service\_url filtering * Use same MANAGER\_TOPIC variable * Implement consistent hashing of nodes to conductors * PXEAndSSH driver lacked vendor\_passthru * Use correct auth context inside pxe driver * sync\_power\_states handles missing driver info * Enable $pybasedir value in pxe.py * Correct SSHPowerDriver validate() exceptions * API to check the requested power state * Improve the node 
driver interfaces validation output * Remove copyright from empty files * Make param descriptions more consistent in API * Imported Translations from Transifex * Fix wrong message of pxe validator * Remove unused dict BYTE\_MULTIPLIERS * Implement API for provisioning * API to validate UUID parameters * Make chassis\_uuid field of nodes optional * Add unit tests for get\_nodeinfo\_list * Improve error handling in PXE \_continue\_deploy * Make param names more consistent in API * Sync config module from oslo * Fix wrong message of MACAlreadyExists * Avoid a race when associating instance\_uuid * Move and rename ValidTypes * Convert trycmd() to oslo's processutils * Improve error handling in validate\_vendor\_action * Passing nodes more consistently * Add 'next' link when GET maximum number of items * Check connectivity in SSH driver 'validate' method * GET /drivers to show a list of active conductors * Improve method to get list of active conductors * Refactor /node//state * Reworks Chassis validations * Reworks Node validations * Developer doc index page points to correct API docs * Fix auto-generated REST API formatting * Method to generate PXE options for Neutron ports * Strip '/' from api\_url string for PXE driver * Add driver interfaces validation * Command call should log the stdout and stderr * Add prepare, clean\_up, take\_over methods to deploy * PEP8-ify imports in test\_ipmitool * API: Add sample() method on Port and PortCollection * API: Validate and normalize address * Handle DBDuplicateEntry on Ports with same address * Imported Translations from Transifex * removed wrap\_exception method from ironic/common/exception.py * Rework patch validation on Ports * Add JsonPatchType class * Change default API auth to keystone-based * Clean up duplicated change-building code in objects * Add -U to pip install command in tox.ini * Updated from global requirements * Add config option for # of conductor replicas * Port StringType class from WSME trunk * Add 
tools/conf/check\_uptodate to tox.ini 2014.1.b1 --------- * Correct error with unicode mac address * Expose created\_at/updated\_at properties in the REST API * Import heartbeat\_interval opt in API * Add power control to PXE driver * Implement sync\_power\_state periodic task * Set the provision\_state to DEPLOYFAIL * Save PKI token in a file for PXE deploy ramdisk * API ports update for WSME 0.5b6 compliance * Add heartbeat\_interval to new 'conductor' cfg group * Add missing hash\_partition\_exponent config option * If no block devices abort deployment * Add missing link for drivers resource * Apply comments to 58558/4 post-landing * Replace removed xrange in Python3 * Imported Translations from Transifex * Use addCleanup() in test\_deploy\_utils * Allow Pecan to use 'debuginfo' response field * Do not allow API to expose error stacktrace * Add port address unique constraint for sqlite * Implement consistent hashing common methods * Sync some db changes from Oslo * Bump required version of sqlalchemy-migrate * Update ironic.conf.sample * Import uuidutils unit tests from oslo * Allow FakePower to return node objects power\_state * Adds doc strings to API FunctionalTest class * Use oslo's execute() and ssh\_execute() methods * Remove openstack.common.uuidutils * Sync common.context changes from olso * Remove oslo uuidutils.is\_uuid\_like call * Remove oslo uuidutils.generate\_uuid() call * Add troubleshoot option to PXE template * Imported Translations from Transifex * Add tftp\_server pattern in ironic.conf * Import HasLength object * ipmitool SHOULD accept empty username/password * Imported Translations from Transifex * Add missing ConfigNotFound exception * Imported Translations from Transifex * Add hooks to auto-generate REST API docs * Imported Translations from Transifex * Redefined default value of allowed\_rpc\_exception\_modules * Add last\_error usage to deploy and teardown methods * Support building wheels (PEP-427) * Import missing gettext \_ to fix 
Sphinx error * sync common.service from oslo * sync common.periodic\_task from oslo * sync common.notifier.\* from oslo * sync common.log from oslo * sync common.local from oslo * Sync common utils from Oslo * Rename parameters * Accessing a subresource that parent does not exist * Imported Translations from Transifex * Changes power\_state and adds last\_error field * Update openstack/common/lockutils * sync common.context from oslo * sync common.config.generator from oslo * Remove sqlalchemy-migrate 0.7.3 patching * Fix integer division compatibility in middleware * Fix node lock in PXE driver * Imported Translations from Transifex * Register API options under the 'api' group * Supporting both Python 2 and Python 3 with six * Supports get node by instance uuid in API * Imported Translations from Transifex * Check invalid uuid for get-by-instance db api * Fix error handling in ssh driver * Replace \_\_metaclass\_\_ * Supporting both Python 2 and Python 3 with six * Pass Ironic API url to deploy ramdisk in PXE driver * Remove 'basestring' from objects utils * Allows unicode description for chassis * Fix a typo in the name of logger method exception * Don't use deprecated module commands * Comply with new hacking requirements * Improve the API doc spec for chassis * Improve the API doc spec for node * Updated from global requirements * Fix i18N compliance * Add wrapper for keystone service catalog * Fix test node manager * Expose /drivers on the API * Update mailmap for Joe Gordon * Add mailmap file * Implement /nodes/UUID/vendor\_passthru in the API * Add context to TaskManager * Regenerate the sample config file * Conductors maintan driver list in the DB * Group and unify ipmi configurations * Fix a few missing i18n * Fix status codes in node controller * Fix exceptions handling in controllers * Updated from global requirements * Support uniform MAC address with colons * Remove redundant test stubs from conductor/manager * Remove several old TODO messages * 
Supports paginate query for two get nodes DB APIs * Remove \_driver\_factory class attribute * Fixes RootController to allow URL without version tag * Don't allow deletion of associated node * Remove duplicated db\_api.get\_instance() from tests * Updated from global requirements * Do not use string concatenation for localized strings * Remove the NULL state * Add DriverFactory * Adjust native ipmi default wait time * Be more patient with IPMI and BMC * Implement db get\_[un]associated\_nodes * Remove unused nova specific files * Removes unwanted mox and fixture files * Removes stubs from unit tests * Remove unused class/file * Remove driver validation on node update * Consolidates TestCase and BaseTestCase * Fix policies * Improve error message for ssh * Fix datetime format in FakeCache * Fix power\_state set to python object repr * Updated from global requirements * Replaces mox with mock for test\_deploy\_utils * Replaces mox with mock in api's unit tests * Replaces mox with mock in objects' unit tests * Replaces mox with mock for conductor unit tests * fix ssh driver exec command issues * Fix exceptions error codes * Remove obsolete redhat-eventlet.patch * Replaces mox with mock for test\_utils * Replaces mox with mock for ssh driver unit tests * Remove nested 'ipmi' dict from driver\_info * Replace tearDown with addCleanup in unit tests * Remove nested 'ssh' dict from driver\_info * Remove nested 'pxe' dict from driver\_info * Save and validate deployment key in PXE driver * Implement deploy and tear\_down conductor methods * Use mock to do unit tests for pxe driver * Code clean in node controller * Use mock to do unit tests for ipminative driver * Replaces mox with mock for ipmitool driver unit tests * Fix parameter name in wsexpose * Rename start\_power\_state\_change to change\_node\_power\_state * Mount iSCSI target and 'dd' in PXE driver * Add tests for api/utils.py * Check for required fields on ports * Replace Cheetah with Jinja2 * Update from global 
requirements * Upgrade tox to 1.6 * Add API uuid <-> id mapping * Doc string and minor clean up for 41976 * Update error return code to match new Pecan release * Add vendor\_passthru method to RPC API * Integer types support in api * Add native ipmi driver * API GET to return only minimal data * Fix broken links * Collection named based on resource type * Remove nova specific tests * Changes documentation hyperlinks to be relative * Replace OpenStack LLC with OpenStack Foundation * Force textmode consoles * Implemented start\_power\_state\_change In Conductor * Updates documentation for tox use * Drop setuptools\_git dependency * Fix tests return codes * Fix misused assertTrue in unit tests * Prevent updates while state change is in progress * Use localisation where user visible strings are used * Update only the changed fields * Improve parameters validate in PXE driver * Rename ipmi driver to ipmitool * Remove jsonutils from PXE driver * Expose the vendor\_passthru resource * Driver's validation during node update process implemented * Public API * Remove references for the 'task\_state' property * Use 'provision\_state' in PXE driver * Updating resources with PATCH * Add missing unique constraint * Fix docstring typo * Removed templates directory in api config * Added upper version boundry for six * Sync models with migrations * Optimization reserve and release nodes db api methods * Add missing foreign key * Porting nova pxe driver to ironic * API Nodes states * Fix driver loading * Move glance image service client from nova and cinder into ironic * Implement the root and v1 entry points of the API * Expose subresources for Chassis and Node * Add checks locked nodes to db api * Update the dev docs with driver interface description * Add missing tests for chassis API * Delete controller to make code easy to read and understood * Disable deleting a chassis that contains nodes * Update API documentation * Add Pagination of collections across the API * Fix typo in 
conductor manager * Remove wsme validate decorator from API * Add missing tests for ports API * Modify is\_valid\_mac() for support unicode strings * Add DB and RPC method doc strings to hook.py * Delete unused templates * Use fixture from Oslo * Move "opportunistic" db migrations tests from Nova * Build unittests for nodes api * make api test code more readable * Add links to API Objects * Delete Ironic context * Add tests for existing db migrations * Add common code from Oslo for db migrations test * Remove extra pep8/flake8/pyflakes requirements * Sync requirements with OpenStack/requirements * Fix up API tests before updating hacking checks * Add RPC methods for updating nodes * Run extract\_messages * Keystone authentiation * Add serializer param to RPC service * Import serialization and nesting from Nova Objects * Implement chassis api actions * update requires to prevent version cap * Change validate() to raise instead of returning T/F * Add helpers for single-node tasks * Implement port api action * Modify gitignore to ignore sqlite * Update resource manager for fixed stevedore issue * Add dbapi functions * Remove suds requirement * Sync install\_venv\_common from oslo * Move mysql\_engine option to [database] group * Re-define 'extra' as dict\_or\_none * Added Python-2.6 to the classifier * Rename "manager" to "conductor" * Port from nova: Fix local variable 'root\_uuid' ref * Created a package for API controllers V1 * Sync requirements with OpenStack/requirements * Remove unused APICoverage class * Sync fileutils from oslo-incubator * Sync strutils from oslo-incubator * Add license header * Update get\_by\_uuid function doc in chassis * Fix various Python 2.x->3.x compat issues * Improve unit tests for API * Add Chassis object * Add Chassis DB model and DB-API * Delete associated ports after deleting a node * Virtual power driver is superceded by ssh driver * Add conf file generator * Refactored query filters * Add troubleshoot to baremetal PXE template * 
Add err\_msg param to baremetal\_deploy\_helper * Retry the sfdisk command up to 3 times * Updated API Spec for new Drivers * Improve IPMI's \_make\_password\_file method * Remove spurious print statement from update\_node * Port middleware error handler from ceilometer API * Add support for GET /v1/nodes to return a list * Add object support to API service * Remove the unused plugin framework * Improve tests for Node and Port DB objects * SSH driver doesn't need to query database * Create Port object * Add uuid to Port DB model * Delete Flask Dependence * Writing Error: nodess to nodes * Create the Node object * Restructuring driver API and inheritance * Remove explicit distribute depend * Bump version of PBR * Remove deleted[\_at] from base object * Make object actions pass positional arguments * Fix relative links in architecture doc * Reword architecture driver description * Remove duplication from README, add link to docs * Port base object from Nova * Fix ironic-rootwrap capability * Add ssh power manager * Prevent IPMI actions from colliding * Add TaskManager tests and fix decorator * Mocked NodeManager can load and mock real drivers * Add docs for task\_manager and tests/manager/utils * Fix one typo in index.rst * Add missing 'extra' field to models.nodes * More doc updates * Remove the old README * More doc updates * Minor fixes to sphinx docs * Added API v1 Specification * Add initial sphinx docs, based on README * Initial skeleton for an RPC layer * Log configuration values on API startup * Don't use pecan to configure logging * Move database.backend option import * Remove unused authentication CLI options * Rename TestCase.flags() to TestCase.config() * Copy the RHEL6 eventlet workaround from Oslo * Sync new database config group from oslo-incubator * Minor doc change for manager and resorce\_manager * Add support for Sphinx Docs * Update IPMI driver to work with resource manager * Add validate\_driver\_info to driver classes * Implement Task and 
Resource managers * Update [reserve|release]\_nodes to accept a tag * More updates to the README * Reimplement reserve\_nodes and release\_nodes * Rename the 'ifaces' table to 'ports' * Change 'nodes' to use more driver-specific JSON * Update driver names and base class * Stop creating a new db IMPL for every request * Fix double "host" option * Sync safe changes from oslo-incubator * Sync rpc changes from oslo-incubator * Sync log changes from oslo-incubator * Sync a rootwrap KillFilter fix from oslo-incubator * Sync oslo-incubator python3 changes * Add steps to README.rst * Fix fake bmc driver * move ironic docs to top level for ease of discovery * Update the README file development section * Add some API definitions to the README * Update the distribute dependency version * Add information to the project README * Fixes test\_update\_node by testing updated node * Fix pep8 errors and make it pass Jenkins tests * Update IPMI driver for new base class * Add new base and fake driver classes * Delete old base and fake classes * Add a few fixes for the API * Move strong nova depenencies into temporary dir * Update IPMI for new DB schema * Add unit tests for DB API * Remove tests for old DB * Add tests for ironic-dbsync * Remove ironic\_manage * Implement GET /node/ifaces/ in API * Update exception.py * Update db models and API * Implement skeleton for a new DB backend * Remove the old db implementation * Implement initial skeleton of a manager service * Implement initial draft of a Pecan-based API * Fix IPMI tests * Move common things to ironic.common * Fix failing db and deploy\_helper tests * un-split the db backend * Rename files and fix things * Import add'l files from Nova * update openstack-common.conf and import from oslo * Added .testr.conf * Renamed nova to ironic * Fixed hacking, pep8 and pyflakes errors * Added project infrastructure needs * Fix baremetal get\_available\_nodes * Improve Python 3.x compatibility * Import and convert to oslo loopingcall * 
baremetal: VirtualPowerDriver uses mac addresses in bm\_interfaces * baremetal: Change input for sfdisk * baremetal: Change node api related to prov\_mac\_address * Remove "undefined name" pyflake errors * Remove unnecessary LOG initialisation * Define LOG globally in baremetal\_deploy\_helper * Only call getLogger after configuring logging * baremetal: Integrate provisioning and non-provisioning interfaces * Move console scripts to entrypoints * baremetal: Drop unused columns in bm\_nodes * Remove print statements * Delete tests.baremetal.util.new\_bm\_deployment() * Adds Tilera back-end for baremetal * Change type of ssh\_port option from Str to Int * Virtual Power Driver list running vms quoting error * xenapi: Fix reboot with hung volumes * Make bm model's deleted column match database * Correct substring matching of baremetal VPD node names * Read baremetal images from extra\_specs namespace * Compute manager should remove dead resources * Add ssh port and key based auth to VPD * Add instance\_type\_get() to virt api * Don't blindly skip first migration * BM Migration 004: Actually drop column * Update OpenStack LLC to Foundation * Sync nova with oslo DB exception cleanup * Fix exception handling in baremetal API * BM Migrations 2 & 3: Fix drop\_column statements * Remove function redefinitions * Move some context checking code from sqlalchemy * Baremetal driver returns accurate list of instance * Identify baremetal nodes by UUID * Improve performance of baremetal list\_instances * Better error handling in baremetal spawn & destroy * Wait for baremetal deploy inside driver.spawn * Add better status to baremetal deployments * Use oslo-config-2013.1b4 * Delete baremetal interfaces when their parent node is deleted * VirtualPowerDriver catches ProcessExecutionError * Don't modify injected\_files inside PXE driver * Remove nova.db call from baremetal PXE driver * Add a virtual PowerDriver for Baremetal testing * Recache or rebuild missing images on hard\_reboot * 
Use oslo database code * Fixes 'not in' operator usage * Make sure there are no unused import * Enable N302: Import modules only * Correct a format string in virt/baremetal/ipmi.py * Add REST api to manage bare-metal nodes * Baremetal/utils should not log certain exceptions * PXE driver should rmtree directories it created * Add support for Option Groups in LazyPluggable * Remove obsolete baremetal override of MAC addresses * PXE driver should not accept empty kernel UUID * Correcting improper use of the word 'an' * Export the MAC addresses of nodes for bare-metal * Break out a helper function for working with bare metal nodes * Keep self and context out of error notification payload * Tests for PXE bare-metal provisioning helper server * Change ComputerDriver.legacy\_nwinfo to raise by default * fix new N402 errors * Remove unused baremetal PXE options * Move global service networking opts to new module * Fix N402 for nova/virt * Cope better with out of sync bm data * Fix baremetal VIFDriver * CLI for bare-metal database sync * attach/detach\_volume() take instance as a parameter * Convert short doc strings to be on one line * Check admin context in bm\_interface\_get\_all() * Provide a PXE NodeDriver for the Baremetal driver * Refactor periodic tasks * Add helper methods to nova.paths * Move global path opts in nova.paths * Removes unused imports * Improve baremetal driver error handling * baremetal power driver takes \*\*kwargs * Implement IPMI sub-driver for baremetal compute * Fix tests/baremetal/test\_driver.py * Move baremetal options to [BAREMETAL] OptGroup * Remove session.flush() and session.query() monkey patching * Remove unused imports * Removed unused imports * Parameterize database connection in test.py * Baremetal VIF and Volume sub-drivers * New Baremetal provisioning framework * Move baremetal database tests to fixtures * Add exceptions to baremetal/db/api * Add blank nova/virt/baremetal/\_\_init\_\_.py * Move sql options to 
nova.db.sqlalchemy.session * Use CONF.import\_opt() for nova.config opts * Remove nova.config.CONF * remove old baremetal driver * Remove nova.flags * Fix a couple uses of FLAGS * Added separate bare-metal MySQL DB * Switch from FLAGS to CONF in tests * Updated scheduler and compute for multiple capabilities * Switch from FLAGS to CONF in nova.virt * Make ComputeDrivers send hypervisor\_hostname * Introduce VirtAPI to nova/virt * Migrate to fileutils and lockutils * Remove ComputeDriver.update\_host\_status() * Rename imagebackend arguments * Move ensure\_tree to utils * Keep the ComputeNode model updated with usage * Don't stuff non-db data into instance dict * Making security group refresh more specific * Use dict style access for image\_ref * Remove unused InstanceInfo class * Remove list\_instances\_detail from compute drivers * maint: remove an unused import in libvirt.driver * Fixes bare-metal spawn error * Refactoring required for blueprint xenapi-live-migration * refactor baremetal/proxy => baremetal/driver * Switch to common logging * Make libvirt LoopingCalls actually wait() * Imports cleanup * Unused imports cleanup (folsom-2) * convert virt drivers to fully dynamic loading * cleanup power state (partially implements bp task-management) * clean-up of the bare-metal framework * Added a instance state update notification * Update pep8 dependency to v1.1 * Alphabetize imports in nova/tests/ * Make use of openstack.common.jsonutils * Alphabetize imports in nova/virt/ * Replaces exceptions.Error with NovaException * Log instance information for baremetal * Improved localization testing * remove unused flag: baremetal\_injected\_network\_template baremetal\_uri baremetal\_allow\_project\_net\_traffic * Add periodic\_fuzzy\_delay option * HACKING fixes, TODO authors * Add pybasedir and bindir options * Only raw string literals should be used with \_() * Remove unnecessary setting up and down of mox and stubout * Remove unnecessary variables from tests * Move 
get\_info to taking an instance * Exception cleanup * Backslash continuations (nova.tests) * Replace ApiError with new exceptions * Standardize logging delaration and use * remove unused and buggy function from baremetal proxy * Backslash continuations (nova.virt.baremetal) * Remove the last of the gflags shim layer * Implements blueprint heterogeneous-tilera-architecture-support * Deleting test dir from a pull from trunk * Updated to remove built docs * initial commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/LICENSE0000644000175000017500000002363700000000000015374 0ustar00coreycorey00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.3824005 ironic-14.0.1.dev163/PKG-INFO0000644000175000017500000000520000000000000015446 0ustar00coreycorey00000000000000Metadata-Version: 2.1 Name: ironic Version: 14.0.1.dev163 Summary: OpenStack Bare Metal Provisioning Home-page: https://docs.openstack.org/ironic/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ====== Ironic ====== Team and repository tags ------------------------ .. 
image:: https://governance.openstack.org/tc/badges/ironic.svg :target: https://governance.openstack.org/tc/reference/tags/index.html Overview -------- Ironic consists of an API and plug-ins for managing and provisioning physical machines in a security-aware and fault-tolerant manner. It can be used with nova as a hypervisor driver, or standalone service using bifrost. By default, it will use PXE and IPMI to interact with bare metal machines. Ironic also supports vendor-specific plug-ins which may implement additional functionality. Ironic is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. Project resources ~~~~~~~~~~~~~~~~~ * Documentation: https://docs.openstack.org/ironic/latest * Source: https://opendev.org/openstack/ironic * Bugs: https://storyboard.openstack.org/#!/project/943 * Wiki: https://wiki.openstack.org/wiki/Ironic * APIs: https://docs.openstack.org/api-ref/baremetal/index.html * Release Notes: https://docs.openstack.org/releasenotes/ironic/ * Design Specifications: https://specs.openstack.org/openstack/ironic-specs/ Project status, bugs, and requests for feature enhancements (RFEs) are tracked in StoryBoard: https://storyboard.openstack.org/#!/project/943 For information on how to contribute to ironic, see https://docs.openstack.org/ironic/latest/contributor Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Requires-Python: >=3.6 Provides-Extra: guru_meditation_reports Provides-Extra: i18n 
Provides-Extra: test ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/README.rst0000644000175000017500000000270400000000000016046 0ustar00coreycorey00000000000000====== Ironic ====== Team and repository tags ------------------------ .. image:: https://governance.openstack.org/tc/badges/ironic.svg :target: https://governance.openstack.org/tc/reference/tags/index.html Overview -------- Ironic consists of an API and plug-ins for managing and provisioning physical machines in a security-aware and fault-tolerant manner. It can be used with nova as a hypervisor driver, or standalone service using bifrost. By default, it will use PXE and IPMI to interact with bare metal machines. Ironic also supports vendor-specific plug-ins which may implement additional functionality. Ironic is distributed under the terms of the Apache License, Version 2.0. The full terms and conditions of this license are detailed in the LICENSE file. 
Project resources ~~~~~~~~~~~~~~~~~ * Documentation: https://docs.openstack.org/ironic/latest * Source: https://opendev.org/openstack/ironic * Bugs: https://storyboard.openstack.org/#!/project/943 * Wiki: https://wiki.openstack.org/wiki/Ironic * APIs: https://docs.openstack.org/api-ref/baremetal/index.html * Release Notes: https://docs.openstack.org/releasenotes/ironic/ * Design Specifications: https://specs.openstack.org/openstack/ironic-specs/ Project status, bugs, and requests for feature enhancements (RFEs) are tracked in StoryBoard: https://storyboard.openstack.org/#!/project/943 For information on how to contribute to ironic, see https://docs.openstack.org/ironic/latest/contributor ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538406.154399 ironic-14.0.1.dev163/api-ref/0000755000175000017500000000000000000000000015677 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/regenerate-samples.sh0000755000175000017500000003021200000000000022017 0ustar00coreycorey00000000000000#!/bin/bash set -e -x if [ ! -x /usr/bin/jq ]; then echo "This script relies on 'jq' to process JSON output." echo "Please install it before continuing." 
exit 1 fi OS_AUTH_TOKEN=$(openstack token issue | grep ' id ' | awk '{print $4}') IRONIC_URL="http://127.0.0.1:6385" IRONIC_API_VERSION="1.55" export OS_AUTH_TOKEN IRONIC_URL DOC_BIOS_UUID="dff29d23-1ded-43b4-8ae1-5eebb3e30de1" DOC_CHASSIS_UUID="dff29d23-1ded-43b4-8ae1-5eebb3e30de1" DOC_NODE_UUID="6d85703a-565d-469a-96ce-30b6de53079d" DOC_DYNAMIC_NODE_UUID="2b045129-a906-46af-bc1a-092b294b3428" DOC_PORT_UUID="d2b30520-907d-46c8-bfee-c5586e6fb3a1" DOC_PORTGROUP_UUID="e43c722c-248e-4c6e-8ce8-0d8ff129387a" DOC_VOL_CONNECTOR_UUID="9bf93e01-d728-47a3-ad4b-5e66a835037c" DOC_VOL_TARGET_UUID="bd4d008c-7d31-463d-abf9-6c23d9d55f7f" DOC_PROVISION_UPDATED_AT="2016-08-18T22:28:49.946416+00:00" DOC_CREATED_AT="2016-08-18T22:28:48.643434+11:11" DOC_UPDATED_AT="2016-08-18T22:28:49.653974+00:00" DOC_IRONIC_CONDUCTOR_HOSTNAME="897ab1dad809" DOC_ALLOCATION_UUID="3bf138ba-6d71-44e7-b6a1-ca9cac17103e" DOC_DEPLOY_TEMPLATE_UUID="bbb45f41-d4bc-4307-8d1d-32f95ce1e920" function GET { # GET $RESOURCE curl -s -H "X-Auth-Token: $OS_AUTH_TOKEN" \ -H "X-OpenStack-Ironic-API-Version: $IRONIC_API_VERSION" \ ${IRONIC_URL}/$1 | jq -S '.' } function POST { # POST $RESOURCE $FILENAME curl -s -H "X-Auth-Token: $OS_AUTH_TOKEN" \ -H "X-OpenStack-Ironic-API-Version: $IRONIC_API_VERSION" \ -H "Content-Type: application/json" \ -X POST --data @$2 \ ${IRONIC_URL}/$1 | jq -S '.' } function PATCH { # POST $RESOURCE $FILENAME curl -s -H "X-Auth-Token: $OS_AUTH_TOKEN" \ -H "X-OpenStack-Ironic-API-Version: $IRONIC_API_VERSION" \ -H "Content-Type: application/json" \ -X PATCH --data @$2 \ ${IRONIC_URL}/$1 | jq -S '.' 
} function PUT { # PUT $RESOURCE $FILENAME curl -s -H "X-Auth-Token: $OS_AUTH_TOKEN" \ -H "X-OpenStack-Ironic-API-Version: $IRONIC_API_VERSION" \ -H "Content-Type: application/json" \ -X PUT --data @$2 \ ${IRONIC_URL}/$1 } function wait_for_node_state { local node="$1" local field="$2" local target_state="$3" local attempt=10 while [[ $attempt -gt 0 ]]; do res=$(openstack baremetal node show "$node" -f value -c "$field") if [[ "$res" == "$target_state" ]]; then break fi sleep 1 attempt=$((attempt - 1)) echo "Failed to get node $field == $target_state in $attempt attempts." done if [[ $attempt == 0 ]]; then exit 1 fi } pushd source/samples ########### # ROOT APIs GET '' > api-root-response.json GET 'v1' > api-v1-root-response.json ########### # DRIVER APIs GET v1/drivers > drivers-list-response.json GET v1/drivers?detail=true > drivers-list-detail-response.json GET v1/drivers/ipmi > driver-get-response.json GET v1/drivers/agent_ipmitool/properties > driver-property-response.json GET v1/drivers/agent_ipmitool/raid/logical_disk_properties > driver-logical-disk-properties-response.json ######### # CHASSIS POST v1/chassis chassis-create-request.json > chassis-show-response.json CID=$(cat chassis-show-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$CID" == "" ]; then exit 1 else echo "Chassis created. UUID: $CID" fi GET v1/chassis > chassis-list-response.json GET v1/chassis/detail > chassis-list-details-response.json PATCH v1/chassis/$CID chassis-update-request.json > chassis-update-response.json # skip GET /v1/chassis/$UUID because the response is same as POST ####### # NODES # Create a node with a real driver, but missing ipmi_address, # then do basic commands with it POST v1/nodes node-create-request-classic.json > node-create-response.json NID=$(cat node-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$NID" == "" ]; then exit 1 else echo "Node created. 
UUID: $NID" fi # Also create a node with a dynamic driver for viewing in the node list # endpoint DNID=$(POST v1/nodes node-create-request-dynamic.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$DNID" == "" ]; then exit 1 else echo "Node created. UUID: $DNID" fi # get the list of passthru methods from agent* driver GET v1/nodes/$NID/vendor_passthru/methods > node-vendor-passthru-response.json # Change to the fake driver and then move the node into the AVAILABLE # state without saving any output. # NOTE that these three JSON files are not included in the docs PATCH v1/nodes/$NID node-update-driver.json PUT v1/nodes/$NID/states/provision node-set-manage-state.json PUT v1/nodes/$NID/states/provision node-set-available-state.json # Wait node to become available wait_for_node_state $NID provision_state available GET v1/nodes/$NID/validate > node-validate-response.json PUT v1/nodes/$NID/states/power node-set-power-off.json # Wait node to reach power off state wait_for_node_state $NID power_state "power off" GET v1/nodes/$NID/states > node-get-state-response.json GET v1/nodes > nodes-list-response.json GET v1/nodes/detail > nodes-list-details-response.json GET v1/nodes/$NID > node-show-response.json # Node traits PUT v1/nodes/$NID/traits node-set-traits-request.json GET v1/nodes/$NID/traits > node-traits-list-response.json ############ # ALLOCATIONS POST v1/allocations allocation-create-request.json > allocation-create-response.json AID=$(cat allocation-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$AID" == "" ]; then exit 1 else echo "Allocation created. 
UUID: $AID" fi # Create a failed allocation for listing POST v1/allocations allocation-create-request-2.json # Poor man's wait_for_allocation sleep 1 GET v1/allocations > allocations-list-response.json GET v1/allocations/$AID > allocation-show-response.json GET v1/nodes/$NID/allocation > node-allocation-show-response.json ############ # NODES - MAINTENANCE # Do this after allocation API to be able to create successful allocations PUT v1/nodes/$NID/maintenance node-maintenance-request.json ############ # PORTGROUPS # Before we can create a portgroup, we must # write NODE ID into the create request document body sed -i "s/.*node_uuid.*/ \"node_uuid\": \"$NID\",/" portgroup-create-request.json POST v1/portgroups portgroup-create-request.json > portgroup-create-response.json PGID=$(cat portgroup-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$PGID" == "" ]; then exit 1 else echo "Portgroup created. UUID: $PGID" fi GET v1/portgroups > portgroup-list-response.json GET v1/portgroups/detail > portgroup-list-detail-response.json PATCH v1/portgroups/$PGID portgroup-update-request.json > portgroup-update-response.json # skip GET $PGID because same result as POST # skip DELETE ########### # PORTS # Before we can create a port, we must # write NODE ID and PORTGROUP ID into the create request document body sed -i "s/.*node_uuid.*/ \"node_uuid\": \"$NID\",/" port-create-request.json sed -i "s/.*portgroup_uuid.*/ \"portgroup_uuid\": \"$PGID\",/" port-create-request.json POST v1/ports port-create-request.json > port-create-response.json PID=$(cat port-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$PID" == "" ]; then exit 1 else echo "Port created. 
UUID: $PID" fi GET v1/ports > port-list-response.json GET v1/ports/detail > port-list-detail-response.json PATCH v1/ports/$PID port-update-request.json > port-update-response.json # skip GET $PID because same result as POST # skip DELETE ################ # NODE PORT APIs GET v1/nodes/$NID/ports > node-port-list-response.json GET v1/nodes/$NID/ports/detail > node-port-detail-response.json ##################### # NODE PORTGROUP APIs GET v1/nodes/$NID/portgroups > node-portgroup-list-response.json GET v1/nodes/$NID/portgroups/detail > node-portgroup-detail-response.json ##################### # PORTGROUPS PORT APIs GET v1/portgroups/$PGID/ports > portgroup-port-list-response.json GET v1/portgroups/$PGID/ports/detail > portgroup-port-detail-response.json ############ # LOOKUP API GET v1/lookup?node_uuid=$NID > lookup-node-response.json ##################### # NODES MANAGEMENT API # These need to be done while the node is in maintenance mode, # and the node's driver is "fake", to avoid potential races # with internal processes that lock the Node # this corrects an intentional ommission in some of the samples PATCH v1/nodes/$NID node-update-driver-info-request.json > node-update-driver-info-response.json GET v1/nodes/$NID/management/boot_device/supported > node-get-supported-boot-devices-response.json PUT v1/nodes/$NID/management/boot_device node-set-boot-device.json GET v1/nodes/$NID/management/boot_device > node-get-boot-device-response.json PUT v1/nodes/$NID/management/inject_nmi node-inject-nmi.json ############################# # NODES VIF ATTACH/DETACH API POST v1/nodes/$NID/vifs node-vif-attach-request.json GET v1/nodes/$NID/vifs > node-vif-list-response.json ############# # VOLUME APIs GET v1/volume/ > volume-list-response.json sed -i "s/.*node_uuid.*/ \"node_uuid\": \"$NID\",/" volume-connector-create-request.json POST v1/volume/connectors volume-connector-create-request.json > volume-connector-create-response.json VCID=$(cat volume-connector-create-response.json 
| grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$VCID" == "" ]; then exit 1 else echo "Volume connector created. UUID: $VCID" fi GET v1/volume/connectors > volume-connector-list-response.json GET v1/volume/connectors?detail=True > volume-connector-list-detail-response.json PATCH v1/volume/connectors/$VCID volume-connector-update-request.json > volume-connector-update-response.json sed -i "s/.*node_uuid.*/ \"node_uuid\": \"$NID\",/" volume-target-create-request.json POST v1/volume/targets volume-target-create-request.json > volume-target-create-response.json VTID=$(cat volume-target-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$VTID" == "" ]; then exit 1 else echo "Volume target created. UUID: $VCID" fi GET v1/volume/targets > volume-target-list-response.json GET v1/volume/targets?detail=True > volume-target-list-detail-response.json PATCH v1/volume/targets/$VTID volume-target-update-request.json > volume-target-update-response.json ################## # NODE VOLUME APIs GET v1/nodes/$NID/volume > node-volume-list-response.json GET v1/nodes/$NID/volume/connectors > node-volume-connector-list-response.json GET v1/nodes/$NID/volume/connectors?detail=True > node-volume-connector-detail-response.json GET v1/nodes/$NID/volume/targets > node-volume-target-list-response.json GET v1/nodes/$NID/volume/targets?detail=True > node-volume-target-detail-response.json ################## # DEPLOY TEMPLATES POST v1/deploy_templates deploy-template-create-request.json > deploy-template-create-response.json DTID=$(cat deploy-template-create-response.json | grep '"uuid"' | sed 's/.*"\([0-9a-f\-]*\)",*/\1/') if [ "$DTID" == "" ]; then exit 1 else echo "Deploy template created. 
UUID: $DTID" fi GET v1/deploy_templates > deploy-template-list-response.json GET v1/deploy_templates?detail=True > deploy-template-detail-response.json GET v1/deploy_templates/$DTID > deploy-template-show-response.json PATCH v1/deploy_templates/$DTID deploy-template-update-request.json > deploy-template-update-response.json ##################### # Replace automatically generated UUIDs by already used in documentation sed -i "s/$BID/$DOC_BIOS_UUID/" *.json sed -i "s/$CID/$DOC_CHASSIS_UUID/" *.json sed -i "s/$NID/$DOC_NODE_UUID/" *.json sed -i "s/$DNID/$DOC_DYNAMIC_NODE_UUID/" *.json sed -i "s/$PID/$DOC_PORT_UUID/" *.json sed -i "s/$PGID/$DOC_PORTGROUP_UUID/" *.json sed -i "s/$VCID/$DOC_VOL_CONNECTOR_UUID/" *.json sed -i "s/$VTID/$DOC_VOL_TARGET_UUID/" *.json sed -i "s/$AID/$DOC_ALLOCATION_UUID/" *.json sed -i "s/$DTID/$DOC_DEPLOY_TEMPLATE_UUID/" *.json sed -i "s/$(hostname)/$DOC_IRONIC_CONDUCTOR_HOSTNAME/" *.json sed -i "s/created_at\": \".*\"/created_at\": \"$DOC_CREATED_AT\"/" *.json sed -i "s/updated_at\": \".*\"/updated_at\": \"$DOC_UPDATED_AT\"/" *.json sed -i "s/provision_updated_at\": \".*\"/provision_updated_at\": \"$DOC_PROVISION_UPDATED_AT\"/" *.json ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538406.158399 ironic-14.0.1.dev163/api-ref/source/0000755000175000017500000000000000000000000017177 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-allocation.inc0000644000175000017500000001502200000000000025044 0ustar00coreycorey00000000000000.. -*- rst -*- ========================= Allocations (allocations) ========================= The Allocation resource represents a request to find and allocate a Node for deployment. .. versionadded:: 1.52 Allocation API was introduced. Create Allocation ================= .. rest_method:: POST /v1/allocations Creates an allocation. 
A Node can be requested by its resource class and traits. Additionally, Nodes can be pre-filtered on the client side, and the resulting list of UUIDs and/or names can be submitted as ``candidate_nodes``. Otherwise all nodes are considered. A Node is suitable for an Allocation if all of the following holds: * ``provision_state`` is ``available`` * ``power_state`` is not ``null`` * ``maintenance`` is ``false`` * ``instance_uuid`` is ``null`` * ``resource_class`` matches requested one * ``traits`` list contains all of the requested ones The allocation process is asynchronous. The new Allocation is returned in the ``allocating`` state, and the process continues in the background. If it succeeds, the ``node_uuid`` field is populated with the Node's UUID, and the Node's ``instance_uuid`` field is set to the Allocation's UUID. If you want to backfill an allocation for an already deployed node, you can pass the UUID or name of this node to ``node``. In this case the allocation is created immediately, bypassing the normal allocation process. Other parameters must be missing or match the provided node. .. versionadded:: 1.52 Allocation API was introduced. .. versionadded:: 1.58 Added support for backfilling allocations. .. versionadded:: 1.60 Introduced the ``owner`` field. Normal response codes: 201 Error response codes: 400, 401, 403, 409, 503 Request ------- .. rest_parameters:: parameters.yaml - resource_class: req_allocation_resource_class - candidate_nodes: req_candidate_nodes - name: req_allocation_name - traits: req_allocation_traits - uuid: req_uuid - extra: req_extra - node: req_allocation_node - owner: owner Request Example --------------- .. literalinclude:: samples/allocation-create-request.json :language: javascript Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - candidate_nodes: candidate_nodes - last_error: allocation_last_error - name: allocation_name - node_uuid: allocation_node - resource_class: allocation_resource_class - state: allocation_state - traits: allocation_traits - owner: owner - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/allocation-create-response.json :language: javascript List Allocations ================ .. rest_method:: GET /v1/allocations Lists all Allocations. .. versionadded:: 1.52 Allocation API was introduced. .. versionadded:: 1.60 Introduced the ``owner`` field. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: parameters.yaml - node: r_allocation_node - resource_class: r_resource_class - state: r_allocation_state - owner: owner - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - candidate_nodes: candidate_nodes - last_error: allocation_last_error - name: allocation_name - node_uuid: allocation_node - resource_class: allocation_resource_class - state: allocation_state - traits: allocation_traits - owner: owner - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/allocations-list-response.json :language: javascript Show Allocation Details ======================= .. rest_method:: GET /v1/allocations/{allocation_id} Shows details for an Allocation. .. versionadded:: 1.52 Allocation API was introduced. .. versionadded:: 1.60 Introduced the ``owner`` field. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: parameters.yaml - fields: fields - allocation_id: allocation_ident Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - candidate_nodes: candidate_nodes - last_error: allocation_last_error - name: allocation_name - node_uuid: allocation_node - resource_class: allocation_resource_class - state: allocation_state - traits: allocation_traits - owner: owner - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/allocation-show-response.json :language: javascript Update Allocation ================= .. rest_method:: PATCH /v1/allocations/{allocation_id} Updates an allocation. Allows updating only name and extra fields. .. versionadded:: 1.57 Allocation update API was introduced. Normal response codes: 200 Error response codes: 400, 401, 403, 404, 409, 503 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 `_. .. rest_parameters:: parameters.yaml - allocation_id: allocation_ident - name: req_allocation_name - extra: req_extra Request Example --------------- .. literalinclude:: samples/allocation-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - candidate_nodes: candidate_nodes - last_error: allocation_last_error - name: allocation_name - node_uuid: allocation_node - resource_class: allocation_resource_class - state: allocation_state - traits: allocation_traits - owner: owner - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/allocation-update-response.json :language: javascript Delete Allocation ================= .. rest_method:: DELETE /v1/allocations/{allocation_id} Deletes an Allocation. If the Allocation has a Node associated, the Node's ``instance_uuid`` is reset. The deletion will fail if the Allocation has a Node assigned and the Node is ``active`` and not in the maintenance mode. .. 
versionadded:: 1.52 Allocation API was introduced. Normal response codes: 204 Error response codes: 400, 401, 403, 404, 409, 503 Request ------- .. rest_parameters:: parameters.yaml - allocation_id: allocation_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-chassis.inc0000644000175000017500000001026400000000000024357 0ustar00coreycorey00000000000000.. -*- rst -*- ================= Chassis (chassis) ================= The Chassis resource type was originally conceived as a means to group Node resources. Support for this continues to exist in the REST API, however, it is very minimal. The Chassis object does not provide any functionality today aside from a means to list a group of Nodes. Use of this resource is discouraged, and may be deprecated and removed in a future release. List chassis with details ========================= .. rest_method:: GET /v1/chassis/detail Lists all chassis with details. Normal response codes: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - chassis: chassis - description: description - extra: extra Response Example ---------------- .. literalinclude:: samples/chassis-list-details-response.json :language: javascript Show chassis details ==================== .. rest_method:: GET /v1/chassis/{chassis_id} Shows details for a chassis. Normal response codes: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - fields: fields - chassis_id: chassis_ident Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - chassis: chassis - description: description - extra: extra Response Example ---------------- .. 
literalinclude:: samples/chassis-show-response.json :language: javascript Update chassis ============== .. rest_method:: PATCH /v1/chassis/{chassis_id} Updates a chassis. Normal response codes: 200 .. TODO: add error codes Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 `_. .. rest_parameters:: parameters.yaml - chassis_id: chassis_ident - description: req_description - extra: req_extra Request Example --------------- .. literalinclude:: samples/chassis-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: description - links: links - extra: extra - created_at: created_at - updated_at: updated_at - chassis: chassis - nodes: nodes - uuid: uuid Response Example ---------------- .. literalinclude:: samples/chassis-update-response.json :language: javascript Delete chassis ============== .. rest_method:: DELETE /v1/chassis/{chassis_id} Deletes a chassis. .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - chassis_id: chassis_ident Create chassis ============== .. rest_method:: POST /v1/chassis Creates a chassis. Error response codes:201,413,415,405,404,403,401,400,503,409, Request ------- .. rest_parameters:: parameters.yaml - chassis: req_chassis - description: req_description - extra: req_extra Request Example --------------- .. literalinclude:: samples/chassis-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: description - links: links - extra: extra - created_at: created_at - updated_at: updated_at - nodes: nodes - uuid: uuid Response Example ---------------- .. literalinclude:: samples/chassis-show-response.json :language: javascript List chassis ============ .. rest_method:: GET /v1/chassis Lists all chassis. .. versionadded:: 1.43 Added the ``detail`` boolean request parameter. 
When specified ``True`` this causes the response to include complete details about each chassis. Normal response codes: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key - fields: fields - detail: detail Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - chassis: chassis - description: description - extra: extra Response Example ---------------- .. literalinclude:: samples/chassis-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-conductors.inc0000644000175000017500000000401200000000000025077 0ustar00coreycorey00000000000000.. -*- rst -*- ======================= Conductors (conductors) ======================= .. versionadded:: 1.49 Listing Conductor resources is done through the ``conductors`` resource. Conductor resources are read-only, they can not be created, updated, or removed. List Conductors =============== .. rest_method:: GET /v1/conductors Return a list of conductors known by the Bare Metal service. By default, this query will return the hostname, conductor group, and alive status for each Conductor. When ``detail`` is set to True in the query string, will return the full representation of the resource. Normal response code: 200 Request ------- .. rest_parameters:: parameters.yaml - fields: fields_for_conductor - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key - detail: detail Response -------- .. rest_parameters:: parameters.yaml - hostname: hostname - conductor_group: conductor_group - alive: alive - drivers: drivers - links: links **Example Conductor list response:** .. literalinclude:: samples/conductor-list-response.json :language: javascript **Example detailed Conductor list response:** .. 
literalinclude:: samples/conductor-list-details-response.json :language: javascript Show Conductor Details ====================== .. rest_method:: GET /v1/conductors/{hostname} Shows details for a conductor. By default, this will return the full representation of the resource; an optional ``fields`` parameter can be supplied to return only the specified set. Normal response codes: 200 Error codes: 400,403,404,406 Request ------- .. rest_parameters:: parameters.yaml - hostname: hostname_ident - fields: fields_for_conductor Response -------- .. rest_parameters:: parameters.yaml - hostname: hostname - conductor_group: conductor_group - alive: alive - drivers: drivers - links: links **Example JSON representation of a Conductor:** .. literalinclude:: samples/conductor-show-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-deploy-templates.inc0000644000175000017500000001061300000000000026210 0ustar00coreycorey00000000000000.. -*- rst -*- =================================== Deploy Templates (deploy_templates) =================================== The Deploy Template resource represents a collection of Deploy Steps that may be executed during deployment of a node. A deploy template is matched for a node if at the time of deployment, the template's name matches a trait in the node's ``instance_info.traits``. .. versionadded:: 1.55 Deploy Template API was introduced. Create Deploy Template ====================== .. rest_method:: POST /v1/deploy_templates Creates a deploy template. .. versionadded:: 1.55 Deploy Template API was introduced. Normal response codes: 201 Error response codes: 400, 401, 403, 409 Request ------- .. rest_parameters:: parameters.yaml - name: deploy_template_name - steps: deploy_template_steps - uuid: req_uuid - extra: req_extra Request Example --------------- .. 
literalinclude:: samples/deploy-template-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: deploy_template_name - steps: deploy_template_steps - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/deploy-template-create-response.json :language: javascript List Deploy Templates ===================== .. rest_method:: GET /v1/deploy_templates Lists all deploy templates. .. versionadded:: 1.55 Deploy Template API was introduced. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: parameters.yaml - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key - detail: detail Response Parameters ------------------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: deploy_template_name - steps: deploy_template_steps - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- **Example deploy template list response:** .. literalinclude:: samples/deploy-template-list-response.json :language: javascript **Example detailed deploy template list response:** .. literalinclude:: samples/deploy-template-detail-response.json :language: javascript Show Deploy Template Details ============================ .. rest_method:: GET /v1/deploy_templates/{deploy_template_id} Shows details for a deploy template. .. versionadded:: 1.55 Deploy Template API was introduced. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: parameters.yaml - fields: fields - deploy_template_id: deploy_template_ident Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - name: deploy_template_name - steps: deploy_template_steps - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/deploy-template-show-response.json :language: javascript Update a Deploy Template ======================== .. rest_method:: PATCH /v1/deploy_templates/{deploy_template_id} Update a deploy template. .. versionadded:: 1.55 Deploy Template API was introduced. Normal response code: 200 Error response codes: 400, 401, 403, 404, 409 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 `_. Request ------- .. rest_parameters:: parameters.yaml - deploy_template_id: deploy_template_ident .. literalinclude:: samples/deploy-template-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: deploy_template_name - steps: deploy_template_steps - extra: extra - created_at: created_at - updated_at: updated_at - links: links .. literalinclude:: samples/deploy-template-update-response.json :language: javascript Delete Deploy Template ====================== .. rest_method:: DELETE /v1/deploy_templates/{deploy_template_id} Deletes a deploy template. .. versionadded:: 1.55 Deploy Template API was introduced. Normal response codes: 204 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: parameters.yaml - deploy_template_id: deploy_template_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-driver-passthru.inc0000644000175000017500000000534400000000000026067 0ustar00coreycorey00000000000000.. -*- rst -*- ================================ Driver Vendor Passthru (drivers) ================================ Each driver MAY support vendor-specific extensions, called "passthru" methods. 
Internally, Ironic's driver API supports flexibly exposing functions via the common HTTP methods GET, PUT, POST, and DELETE. To call a passthru method, the query string must contain the name of the method. For example, if the method name was ``my_passthru_method``, the request would look like ``/vendor_passthru?method=my_passthru_method``. The contents of the HTTP request are forwarded to the driver and validated there. Ironic's REST API provides a means to discover these methods, but does not provide support, testing, or documentation for these endpoints. The Ironic development team does not guarantee any compatibility within these methods between releases, though we encourage driver authors to provide documentation and support for them. Besides the endpoints documented here, all other resources and endpoints under the heading ``vendor_passthru`` should be considered unsupported APIs, and could be changed without warning by the driver authors. List Methods ============ .. rest_method:: GET /v1/drivers/{driver_name}/vendor_passthru/methods Retrieve a list of the available vendor passthru methods for the given Driver. The response will indicate which HTTP method(s) each vendor passthru method allows, whether the method call will be synchronous or asynchronous, and whether the response will include any attachment. Normal response code: 200 Request ------- .. rest_parameters:: parameters.yaml - driver_name: driver_ident Response -------- The response BODY is a dictionary whose keys are the method names. The value of each item is itself a dictionary describing how to interact with that method. .. rest_parameters:: parameters.yaml - async: passthru_async - attach: passthru_attach - description: passthru_description - http_methods: passthru_http_methods Call a Method ============= .. rest_method:: METHOD /v1/drivers/{driver_name}/vendor_passthru?method={method_name} The HTTP METHOD may be one of GET, POST, PUT, DELETE, depending on the driver and method. 
This endpoint passes the request directly to the hardware driver. The HTTP BODY must be parseable JSON, which will be converted to parameters passed to that function. Unparseable JSON, missing parameters, or excess parameters will cause the request to be rejected with an HTTP 400 error. Normal response code: 200 202 Error codes: 400 Request ------- .. rest_parameters:: parameters.yaml - driver_name: driver_ident - method_name: method_name All other parameters should be passed in the BODY. Parameter list varies by method_name. Response -------- Varies. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-drivers.inc0000644000175000017500000001777400000000000024415 0ustar00coreycorey00000000000000.. -*- rst -*- ================= Drivers (drivers) ================= .. versionchanged:: 1.30 The REST API now also exposes information about *dynamic* drivers. Ironic has two types of drivers: *classic* drivers and *dynamic* drivers. A *classic* driver is a Python object containing all the logic to manage the bare metal nodes enrolled within Ironic. A driver may be loaded within one or more ``ironic-conductor`` services. Each driver contains a pre-determined set of instantiated interfaces. Each type of interface (eg, ``power`` or ``boot``) performs a specific hardware function. *Dynamic* drivers are supported via hardware types, which are Python classes enabled via entry points. Unlike *classic* drivers, which have pre-determined interfaces, a hardware type may support multiple types of interfaces. For example, the ``ipmi`` hardware type may support multiple methods for enabling node console. Which interface a node of a particular hardware type uses is determined at runtime. This collection of interfaces is called a *dynamic* driver. For more information about this, see the node API documentation. 
The REST API exposes the list of drivers and which ``ironic-conductor`` processes have loaded that driver via the Driver resource (``/v1/drivers`` endpoint). This can be useful for operators to validate their configuration in a heterogeneous hardware environment. Each ``ironic-conductor`` process may load one or more drivers, and does not necessarily need to load the same *classic* drivers as another ``ironic-conductor``. Each ``ironic-conductor`` with the same hardware types must have the same hardware interfaces enabled. The REST API also exposes details about each driver, such as what properties must be supplied to a node's ``driver_info`` for that driver to manage hardware. Lastly, some drivers may expose methods through a ``driver_vendor_passthru`` endpoint, allowing one to interact with the driver directly (i.e., without knowing a specific node identifier). For example, this is used by the ironic python agent ramdisk to get the UUID of the node being deployed/cleaned by using MAC addresses of the node's network interfaces the agent has discovered. List drivers ============ .. rest_method:: GET /v1/drivers Lists all drivers. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - type: driver_type - detail: driver_detail Response Parameters ------------------- The response BODY contains a single key, "drivers", whose value is a list of drivers supported by this Ironic service. .. rest_parameters:: parameters.yaml - drivers: drivers - name: driver_name - hosts: hosts - type: response_driver_type - links: links - properties: driver_property_links .. versionchanged:: 1.30 If the request has the "detail" URL parameter set to true, each driver will also include the following fields. .. 
rest_parameters:: parameters.yaml - default_bios_interface: default_bios_interface - default_boot_interface: default_boot_interface - default_console_interface: default_console_interface - default_deploy_interface: default_deploy_interface - default_inspect_interface: default_inspect_interface - default_management_interface: default_management_interface - default_network_interface: default_network_interface - default_power_interface: default_power_interface - default_raid_interface: default_raid_interface - default_rescue_interface: default_rescue_interface - default_storage_interface: default_storage_interface - default_vendor_interface: default_vendor_interface - enabled_bios_interfaces: enabled_bios_interfaces - enabled_boot_interfaces: enabled_boot_interfaces - enabled_console_interfaces: enabled_console_interfaces - enabled_deploy_interfaces: enabled_deploy_interfaces - enabled_inspect_interfaces: enabled_inspect_interfaces - enabled_management_interfaces: enabled_management_interfaces - enabled_network_interfaces: enabled_network_interfaces - enabled_power_interfaces: enabled_power_interfaces - enabled_rescue_interfaces: enabled_rescue_interfaces - enabled_raid_interfaces: enabled_raid_interfaces - enabled_storage_interfaces: enabled_storage_interfaces - enabled_vendor_interfaces: enabled_vendor_interfaces Response Example ---------------- Example for a request with detail=false (the default): .. literalinclude:: samples/drivers-list-response.json :language: javascript Example for a request with detail=true: .. literalinclude:: samples/drivers-list-detail-response.json :language: javascript Show driver details =================== .. rest_method:: GET /v1/drivers/{driver_name} Shows details for a driver. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - driver_name: driver_ident Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - name: driver_name - hosts: hosts - type: response_driver_type - default_bios_interface: default_bios_interface - default_boot_interface: default_boot_interface - default_console_interface: default_console_interface - default_deploy_interface: default_deploy_interface - default_inspect_interface: default_inspect_interface - default_management_interface: default_management_interface - default_network_interface: default_network_interface - default_power_interface: default_power_interface - default_raid_interface: default_raid_interface - default_rescue_interface: default_rescue_interface - default_storage_interface: default_storage_interface - default_vendor_interface: default_vendor_interface - enabled_bios_interfaces: enabled_bios_interfaces - enabled_boot_interfaces: enabled_boot_interfaces - enabled_console_interfaces: enabled_console_interfaces - enabled_deploy_interfaces: enabled_deploy_interfaces - enabled_inspect_interfaces: enabled_inspect_interfaces - enabled_management_interfaces: enabled_management_interfaces - enabled_network_interfaces: enabled_network_interfaces - enabled_power_interfaces: enabled_power_interfaces - enabled_raid_interfaces: enabled_raid_interfaces - enabled_rescue_interfaces: enabled_rescue_interfaces - enabled_storage_interfaces: enabled_storage_interfaces - enabled_vendor_interfaces: enabled_vendor_interfaces - links: links - properties: driver_property_links Response Example ---------------- .. literalinclude:: samples/driver-get-response.json :language: javascript Show driver properties ====================== .. rest_method:: GET /v1/drivers/{driver_name}/properties Shows the required and optional parameters that ``driver_name`` expects to be supplied in the ``driver_info`` field for every Node it manages. To check if all required parameters have been supplied to a Node, you should query the ``/v1/nodes/{node_ident}/validate`` endpoint. Normal response codes: 200 Request ------- .. 
rest_parameters:: parameters.yaml - driver_name: driver_ident Response Example ---------------- The response BODY is a dictionary, but the keys are unique to each driver. The structure of the response is ``property`` : ``description``. The following example is returned from the ``agent_ipmitool`` driver. .. literalinclude:: samples/driver-property-response.json :language: javascript Show driver logical disk properties =================================== .. versionadded:: 1.12 .. rest_method:: GET /v1/drivers/{driver_name}/raid/logical_disk_properties Show the required and optional parameters that ``driver_name`` expects to be supplied in the node's ``raid_config`` field, if a RAID configuration change is requested. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - driver_name: driver_ident Response Example ---------------- The response BODY is a dictionary, but the keys are unique to each driver. The structure of the response is ``property`` : ``description``. The following example is returned from the ``agent_ipmitool`` driver. .. literalinclude:: samples/driver-logical-disk-properties-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-misc.inc0000644000175000017500000000624300000000000023657 0ustar00coreycorey00000000000000.. -*- rst -*- ======= Utility ======= This section describes two API endpoints used by the ``ironic-python-agent`` ramdisk as it communicates with the Bare Metal service. These were previously exposed as vendor passthrough methods, however, as ironic-python-agent has become the standard ramdisk agent, these methods have been made a part of the official REST API. .. 
note:: **Operators are reminded not to expose the Bare Metal Service's API to unsecured networks.** Both API endpoints listed below are available to *unauthenticated* clients because the default method for booting the ``ironic-python-agent`` ramdisk does not provide the agent with keystone credentials. .. note:: It is possible to include keys in your ramdisk, or pass keys in via the boot method, if your driver supports it; if that is done, you may configure these endpoints to require authentication by changing the policy rules ``baremetal:driver:ipa_lookup`` and ``baremetal:node:ipa_heartbeat``. In light of that, operators are recommended to ensure that this endpoint is only available on the ``provisioning`` and ``cleaning`` networks. Agent Lookup ============ .. rest_method:: GET /v1/lookup .. versionadded:: 1.22 A ``/lookup`` method is exposed at the root of the REST API. This should only be used by the ``ironic-python-agent`` ramdisk to retrieve required configuration data from the Bare Metal service. By default, ``/v1/lookup`` will only match Nodes that are expected to be running the ``ironic-python-agent`` ramdisk (for instance, because the Bare Metal service has just initiated a deployment). It can not be used as a generic search mechanism, though this behaviour may be changed by setting the ``[api] restrict_lookup = false`` configuration option for the ironic-api service. The query string should include either or both a ``node_uuid`` or an ``addresses`` query parameter. If a matching Node is found, information about that Node shall be returned. Normal response codes: 200 Error response codes: 400 404 Request ------- .. rest_parameters:: parameters.yaml - node_uuid: r_node_uuid - addresses: r_addresses Response -------- Returns only the information about the corresponding Node that the ``ironic-python-agent`` process requires. .. rest_parameters:: parameters.yaml - node: agent_node - config: agent_config Response Example ---------------- .. 
literalinclude:: samples/lookup-node-response.json :language: javascript Agent Heartbeat =============== .. rest_method:: POST /v1/heartbeat/{node_ident} .. versionadded:: 1.22 A ``/heartbeat`` method is exposed at the root of the REST API. This is used as a callback from within the ``ironic-python-agent`` ramdisk, so that an active ramdisk may periodically contact the Bare Metal service and provide the current URL at which to contact the agent. Normal response codes: 202 Error response codes: 400 404 .. versionadded:: 1.36 ``agent_version`` parameter for passing the version of the Ironic Python Agent to Ironic during heartbeat Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - callback_url: callback_url - agent_version: agent_version ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-node-allocation.inc0000644000175000017500000000327600000000000025777 0ustar00coreycorey00000000000000.. -*- rst -*- ==================================== Node Allocation (allocations, nodes) ==================================== Given a Node identifier (``uuid`` or ``name``), the API allows to get and delete the associated allocation. .. versionadded:: 1.52 Allocation API was introduced. Show Allocation by Node ======================= .. rest_method:: GET /v1/nodes/{node_ident}/allocation Shows details for an allocation. .. versionadded:: 1.52 Allocation API was introduced. Normal response codes: 200 Error response codes: 400, 401, 403, 404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - candidate_nodes: candidate_nodes - last_error: allocation_last_error - name: allocation_name - node_uuid: allocation_node - resource_class: allocation_resource_class - state: allocation_state - traits: allocation_traits - extra: extra - created_at: created_at - updated_at: updated_at - links: links Response Example ---------------- .. literalinclude:: samples/allocation-show-response.json :language: javascript Delete Allocation by Node ========================= .. rest_method:: DELETE /v1/nodes/{node_ident}/allocation Deletes the allocation of this node and resets its ``instance_uuid``. The deletion will fail if the node is ``active`` and not in ``maintenance`` mode. .. versionadded:: 1.52 Allocation API was introduced. Normal response codes: 204 Error response codes: 400, 401, 403, 404, 409, 503 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-node-management.inc0000644000175000017500000002706000000000000025763 0ustar00coreycorey00000000000000.. -*- rst -*- ======================= Node Management (nodes) ======================= Nodes can be managed through several sub-resources. Maintenance mode can be set by the operator, with an optional "reason" stored by Ironic. The supplied ``driver_info`` can be validated to ensure that the selected ``driver`` has all the information it requires to manage the Node. A Node can be rebooted, turned on, or turned off by requesting a change to its power state. This is handled asynchronously and tracked in the ``target_power_state`` field after the request is received. A Node's boot device can be changed, and the set of supported boot devices can be queried. 
A request to change a Node's provision state is also tracked asynchronously; the ``target_provision_state`` represents the requested state. A Node may transition through several discrete ``provision_state`` steps before arriving at the requested state. This can vary between drivers and based on configuration. For example, a Node in the ``available`` state can have an instance deployed to it by requesting the provision state of ``active``. During this transition, the Node's ``provision_state`` will temporarily be set to ``deploying``, and depending on the driver, it may also be ``wait call-back``. When the transitions are complete, ``target_provision_state`` will be set to ``None`` and ``provision_state`` will be set to ``active``. To destroy the instance, request the provision state of ``delete``. During this transition, the Node may or may not go through a ``cleaning`` state, depending on the service configuration. Validate Node =============== .. rest_method:: GET /v1/nodes/{node_ident}/validate Request that Ironic validate whether the Node's ``driver`` has enough information to manage the Node. This polls each ``interface`` on the driver, and returns the status of that ``interface`` as an element in the response. Note that each ``driver`` may require different information to be supplied, and not all drivers support all interfaces. Normal response codes: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- Each element in the response will contain a "result" variable, which will have a value of "true" or "false", indicating that the interface either has or does not have sufficient information to function. A value of ``null`` indicates that the Node's driver does not support that interface. .. 
rest_parameters:: parameters.yaml - boot: v_boot - console: v_console - deploy: v_deploy - inspect: v_inspect - management: v_management - network: v_network - power: v_power - raid: v_raid - rescue: v_rescue - storage: v_storage **Example node validation response:** .. literalinclude:: samples/node-validate-response.json :language: javascript Set Maintenance Flag ============================= .. rest_method:: PUT /v1/nodes/{node_ident}/maintenance Request that Ironic set the maintenance flag on the Node. This will disable certain automatic actions that the Node's driver may take, and remove the Node from Nova's available resource pool. Normal response code: 202 .. TODO: Add link to user / operator documentation on the Maintenance flag Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - reason: reason **Example request: mark a node for maintenance:** .. literalinclude:: samples/node-maintenance-request.json Clear Maintenance Flag ============================== .. rest_method:: DELETE /v1/nodes/{node_ident}/maintenance The maintenance flag is unset by sending a DELETE request to this endpoint. If the request is accepted, Ironic will also clear the ``maintenance_reason`` field. Normal response code: 202 .. TODO: Add link to user / operator documentation on the Maintenance flag Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Set Boot Device =============== .. rest_method:: PUT /v1/nodes/{node_ident}/management/boot_device Set the boot device for the given Node, and set it persistently or for one-time boot. The exact behaviour of this depends on the hardware driver. .. note:: In some drivers, eg. the ``*_ipmitool`` family, this method initiates a synchronous call to the hardware management device (BMC). It should be used with caution! This is `a known bug `_. .. note:: Some drivers do not support one-time boot, and always set the boot device persistently. Normal response code: 204 .. 
TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - boot_device: req_boot_device - persistent: req_persistent **Example JSON request body to set boot device:** .. literalinclude:: samples/node-set-boot-device.json Get Boot Device =============== .. rest_method:: GET /v1/nodes/{node_ident}/management/boot_device Get the current boot device for the given Node. .. note:: In some drivers, eg. the ``*_ipmitool`` family, this method initiates a synchronous call to the hardware management device (BMC). It should be used with caution! This is `a known bug `_. Normal response code: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- .. rest_parameters:: parameters.yaml - boot_device: boot_device - persistent: persistent **Example JSON response to get boot device:** .. literalinclude:: samples/node-get-boot-device-response.json Get Supported Boot Devices =========================== .. rest_method:: GET /v1/nodes/{node_ident}/management/boot_device/supported Retrieve the acceptable set of supported boot devices for a specific Node. Normal response code: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- .. rest_parameters:: parameters.yaml - supported_boot_devices: supported_boot_devices **Example response listing supported boot devices:** .. literalinclude:: samples/node-get-supported-boot-devices-response.json Inject NMI (Non-Masking Interrupts) =================================== .. rest_method:: PUT /v1/nodes/{node_ident}/management/inject_nmi .. versionadded:: 1.29 Inject NMI (Non-Masking Interrupts) for the given Node. This feature can be used for hardware diagnostics, and actual support depends on a driver. 
Normal response code: 204 (No content) Error codes: - 400 (Invalid) - 403 (Forbidden) - 404 (NotFound) - 406 (NotAcceptable) - 409 (NodeLocked, ClientError) Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident **Request to inject NMI to a node has to be empty dictionary:** .. literalinclude:: samples/node-inject-nmi.json Node State Summary ================== .. rest_method:: GET /v1/nodes/{node_ident}/states Get a summary of the Node's current power, provision, raid, and console status. Normal response code: 200 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- .. rest_parameters:: parameters.yaml - power_state: power_state - target_power_state: target_power_state - provision_state: provision_state - target_provision_state: target_provision_state - provision_updated_at: provision_updated_at - last_error: last_error - console_enabled: console_enabled - raid_config: raid_config - target_raid_config: target_raid_config **Example node state:** .. literalinclude:: samples/node-get-state-response.json Change Node Power State ======================= .. rest_method:: PUT /v1/nodes/{node_ident}/states/power Request a change to the Node's power state. Normal response code: 202 (Accepted) .. versionadded:: 1.27 In the request, the ``target`` value can also be one of ``soft power off`` or ``soft rebooting``. .. versionadded:: 1.27 In the request, a ``timeout`` can be specified. Error codes: - 409 (NodeLocked, ClientError) - 400 (Invalid, InvalidStateRequested, InvalidParameterValue) - 406 (NotAcceptable) - 503 (NoFreeConductorWorkers) Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - target: req_target_power_state - timeout: power_timeout **Example request to power off a Node:** .. literalinclude:: samples/node-set-power-off.json **Example request to soft power off a Node with timeout:** .. 
literalinclude:: samples/node-set-soft-power-off.json Change Node Provision State =========================== .. rest_method:: PUT /v1/nodes/{node_ident}/states/provision Request a change to the Node's provision state. Acceptable target states depend on the Node's current provision state. More detailed documentation of the Ironic State Machine is available `in the developer docs `_. .. versionadded:: 1.35 A ``configdrive`` can be provided when setting the node's provision target state to ``rebuild``. .. versionadded:: 1.38 A node can be rescued or unrescued by setting the node's provision target state to ``rescue`` or ``unrescue`` respectively. .. versionadded:: 1.56 A ``configdrive`` can be a JSON object with ``meta_data``, ``network_data`` and ``user_data``. .. versionadded:: 1.59 A ``configdrive`` now accepts ``vendor_data``. Normal response code: 202 Error codes: - 409 (NodeLocked, ClientError) - 400 (InvalidState, NodeInMaintenance) - 406 (NotAcceptable) - 503 (NoFreeConductorWorkers) Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - target: req_provision_state - configdrive: configdrive - clean_steps: clean_steps - rescue_password: rescue_password **Example request to deploy a Node, using a configdrive served via local webserver:** .. literalinclude:: samples/node-set-active-state.json **Example request to clean a Node, with custom clean step:** .. literalinclude:: samples/node-set-clean-state.json Set RAID Config =============== .. rest_method:: PUT /v1/nodes/{node_ident}/states/raid .. versionadded:: 1.12 Store the supplied configuration on the Node's ``target_raid_config`` property. This property must be structured JSON, and will be validated by the driver upon receipt. The request schema is defined in the `documentation for the RAID feature `_ .. note:: Calling this API only stores the requested configuration; it will be applied the next time that the Node transitions through the ``cleaning`` phase. 
Normal response code: 204 .. TODO: add more description, response code, sample response Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - target_raid_config: req_target_raid_config **Example requested RAID config:** .. literalinclude:: samples/node-set-raid-request.json .. TODO: add more description, response code, sample response Get Console =========== .. rest_method:: GET /v1/nodes/{node_ident}/states/console Get connection information about the console. .. TODO: add more description, response code, sample response Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident .. TODO: add more description, response code, sample response Start/Stop Console =================== .. rest_method:: PUT /v1/nodes/{node_ident}/states/console Start or stop the serial console. .. TODO: add more description, response code, sample response Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - enabled: req_console_enabled ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-node-passthru.inc0000644000175000017500000000466500000000000025526 0ustar00coreycorey00000000000000.. -*- rst -*- ============================ Node Vendor Passthru (nodes) ============================ Each driver MAY support vendor-specific extensions, called "passthru" methods. Internally, Ironic's driver API supports flexibly exposing functions via the common HTTP methods GET, PUT, POST, and DELETE. To call a passthru method, the query string must contain the name of the method, eg. ``/vendor_passthru?method=reset_bmc``. The contents of the HTTP request are forwarded to the Node's driver and validated there. Ironic's REST API provides a means to discover these methods, but does not provide support, testing, or documentation for these endpoints. 
The Ironic development team does not guarantee any compatibility within these methods between releases, though we encourage driver authors to provide documentation and support for them. Besides the endpoints documented here, all other resources and endpoints under the heading ``vendor_passthru`` should be considered unsupported APIs, and could be changed without warning by the driver authors. List Methods ============ .. rest_method:: GET /v1/nodes/{node_ident}/vendor_passthru/methods Retrieve a list of the available vendor passthru methods for the given Node. The response will indicate which HTTP method(s) each vendor passthru method allows, whether the method call will be synchronous or asynchronous, and whether the response will include any attachment. Normal response code: 200 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- **Example passthru methods listing:** .. literalinclude:: samples/node-vendor-passthru-response.json Call a Method ============= .. rest_method:: METHOD /v1/nodes/{node_ident}/vendor_passthru?method={method_name} The HTTP METHOD may be one of GET, POST, PUT, DELETE, depending on the driver and method. This endpoint passes the request directly to the Node's hardware driver. The HTTP BODY must be parseable JSON, which will be converted to parameters passed to that function. Unparseable JSON, missing parameters, or excess parameters will cause the request to be rejected with an HTTP 400 error. Normal response code: 200 202 .. TODO: add error codes Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - method_name: method_name All other parameters should be passed in the BODY. Parameter list varies by method_name. 
Response -------- Varies.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-nodes-bios.inc0000644000175000017500000000323200000000000024761 0ustar00coreycorey00000000000000.. -*- rst -*- ================= Node Bios (nodes) ================= .. versionadded:: 1.40 Given a Node identifier (``uuid`` or ``name``), the API exposes the list of all Bios settings associated with that Node. These endpoints do not allow modification of the Bios settings; that should be done by using ``clean steps``. List all Bios settings by Node ============================== .. rest_method:: GET /v1/nodes/{node_ident}/bios Return a list of Bios settings associated with ``node_ident``. Normal response code: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- .. rest_parameters:: parameters.yaml - bios: bios_settings - created_at: created_at - updated_at: updated_at - links: links - name: bios_setting_name - value: bios_setting_value **Example list of a Node's Bios settings:** .. literalinclude:: samples/node-bios-list-response.json Show single Bios setting of a Node ================================== .. rest_method:: GET /v1/nodes/{node_ident}/bios/{bios_setting} Return the content of the specific bios ``bios_setting`` associated with ``node_ident``. Normal response code: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - bios_setting: bios_setting Response -------- .. rest_parameters:: parameters.yaml - : d_bios_setting - created_at: created_at - updated_at: updated_at - links: links - name: bios_setting_name - value: bios_setting_value **Example details of a Node's Bios setting details:** .. 
literalinclude:: samples/node-bios-detail-response.json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-nodes-portgroups.inc0000644000175000017500000000426400000000000026257 0ustar00coreycorey00000000000000.. -*- rst -*- ============================================== Listing Portgroups by Node (nodes, portgroups) ============================================== .. versionadded:: 1.24 Given a Node identifier (``uuid`` or ``name``), the API exposes the list of, and details of, all Portgroups associated with that Node. These endpoints do not allow modification of the Portgroups; that should be done by accessing the Portgroup resources under the ``/v1/portgroups`` endpoint. List Portgroups by Node ======================= .. rest_method:: GET /v1/nodes/{node_ident}/portgroups Return a list of bare metal Portgroups associated with ``node_ident``. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - portgroups: portgroups - uuid: uuid - address: portgroup_address - name: portgroup_name - links: links **Example list of a Node's Portgroups:** .. literalinclude:: samples/node-portgroup-list-response.json List detailed Portgroups by Node ================================ .. rest_method:: GET /v1/nodes/{node_ident}/portgroups/detail Return a detailed list of bare metal Portgroups associated with ``node_ident``. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. 
rest_parameters:: parameters.yaml - portgroups: portgroups - uuid: uuid - address: portgroup_address - name: portgroup_name - node_uuid: node_uuid - standalone_ports_supported: standalone_ports_supported - internal_info: portgroup_internal_info - extra: extra - mode: portgroup_mode - properties: portgroup_properties - ports: pg_ports - created_at: created_at - updated_at: updated_at - links: links **Example details of a Node's Portgroups:** .. literalinclude:: samples/node-portgroup-detail-response.json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-nodes-ports.inc0000644000175000017500000000531600000000000025201 0ustar00coreycorey00000000000000.. -*- rst -*- ==================================== Listing Ports by Node (nodes, ports) ==================================== Given a Node identifier (``uuid`` or ``name``), the API exposes the list of, and details of, all Ports associated with that Node. These endpoints do not allow modification of the Ports; that should be done by accessing the Port resources under the ``/v1/ports`` endpoint. List Ports by Node =================== .. rest_method:: GET /v1/nodes/{node_ident}/ports Return a list of bare metal Ports associated with ``node_ident``. .. versionadded:: 1.8 Added the ``fields`` request parameter. When specified, this causes the content of the response to include only the specified fields, rather than the default set. .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` fields. .. versionadded:: 1.24 Added the ``portgroup_uuid`` field. .. versionadded:: 1.34 Added the ``physical_network`` field. .. versionadded:: 1.53 Added the ``is_smartnic`` response fields. Normal response code: 200 Error codes: TBD Request ------- .. 
rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - ports: ports - uuid: uuid - address: port_address - links: links **Example list of a Node's Ports:** .. literalinclude:: samples/node-port-list-response.json List detailed Ports by Node =========================== .. rest_method:: GET /v1/nodes/{node_ident}/ports/detail Return a detailed list of bare metal Ports associated with ``node_ident``. .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` fields. .. versionadded:: 1.24 Added the ``portgroup_uuid`` field. .. versionadded:: 1.34 Added the ``physical_network`` field. .. versionadded:: 1.53 Added the ``is_smartnic`` response fields. Normal response code: 200 Error codes: TBD Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - ports: ports - uuid: uuid - address: port_address - node_uuid: node_uuid - local_link_connection: local_link_connection - pxe_enabled: pxe_enabled - physical_network: physical_network - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at - links: links - is_smartnic: is_smartnic **Example details of a Node's Ports:** .. literalinclude:: samples/node-port-detail-response.json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-nodes-traits.inc0000644000175000017500000000507200000000000025337 0ustar00coreycorey00000000000000.. -*- rst -*- =================== Node Traits (nodes) =================== .. 
versionadded:: 1.37 Node traits are used for scheduling in the Compute service, using qualitative attributes to influence the placement of instances to bare metal compute nodes. Traits specified for a node in the Bare Metal service will be registered on the corresponding resource provider in the Compute service's placement API. Traits can be either standard or custom. Standard traits are listed in the `os_traits library `_. Custom traits must meet the following requirements: * prefixed with ``CUSTOM_`` * contain only upper case characters A to Z, digits 0 to 9, or underscores * no longer than 255 characters in length A bare metal node can have a maximum of 50 traits. List Traits of a Node ===================== .. rest_method:: GET /v1/nodes/{node_ident}/traits Return a list of traits for the node. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- .. rest_parameters:: parameters.yaml - traits: n_traits **Example list of traits for the node:** .. literalinclude:: samples/node-traits-list-response.json :language: javascript Set all traits of a node ======================== .. rest_method:: PUT /v1/nodes/{node_ident}/traits Set all traits of a node, replacing any existing traits. Normal response code: 204 Error codes: 400,401,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - traits: n_traits **Example request to set all traits of a Node:** .. literalinclude:: samples/node-set-traits-request.json Add a trait to a node ===================== .. rest_method:: PUT /v1/nodes/{node_ident}/traits/{trait} Add a single trait to a node. Normal response code: 204 Error codes: 400,401,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - trait: trait Remove all traits from a node ============================= .. 
rest_method:: DELETE /v1/nodes/{node_ident}/traits Remove all traits from a node. Normal response code: 204
literalinclude:: samples/node-vif-attach-request.json Detach VIF from a node ====================== .. rest_method:: DELETE /v1/nodes/{node_ident}/vifs/{node_vif_ident} Detach VIF from a Node. Normal response code: 204 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - node_vif_ident: req_node_vif_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-nodes-volume.inc0000644000175000017500000000613000000000000025334 0ustar00coreycorey00000000000000.. -*- rst -*- ================================================ Listing Volume resources by Node (nodes, volume) ================================================ .. versionadded:: 1.32 Given a Node identifier (``uuid`` or ``name``), the API exposes the list of, and details of, all Volume resources associated with that Node. These endpoints do not allow modification of the Volume connectors and Volume targets; that should be done by accessing the Volume resources under the ``/v1/volume/connectors`` and ``/v1/volume/targets`` endpoint. List Links of Volume Resources by Node ====================================== .. rest_method:: GET /v1/nodes/{node_ident}/volume Return a list of links to all volume resources associated with ``node_ident``. Normal response code: 200 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident Response -------- .. rest_parameters:: parameters.yaml - connectors: volume_connectors_link - targets: volume_targets_link - links: links **Example Volume list response:** .. literalinclude:: samples/node-volume-list-response.json :language: javascript List Volume connectors by Node ============================== .. rest_method:: GET /v1/nodes/{node_ident}/volume/connectors Return a list of bare metal Volume connectors associated with ``node_ident``. 
Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - connectors: volume_connectors - uuid: uuid - type: volume_connector_type - connector_id: volume_connector_connector_id - node_uuid: node_uuid - extra: extra - links: links - next: next **Example list of Node's Volume connectors:** .. literalinclude:: samples/node-volume-connector-list-response.json **Example detailed list of Node's Volume connectors:** .. literalinclude:: samples/node-volume-connector-detail-response.json List Volume targets by Node =========================== .. rest_method:: GET /v1/nodes/{node_ident}/volume/targets Return a list of bare metal Volume targets associated with ``node_ident``. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - targets: volume_targets - uuid: uuid - volume_type: volume_target_volume_type - properties: volume_target_properties - boot_index: volume_target_boot_index - volume_id: volume_target_volume_id - extra: extra - node_uuid: node_uuid - links: links - next: next **Example list of Node's Volume targets:** .. literalinclude:: samples/node-volume-target-list-response.json **Example detailed list of Node's Volume targets:** .. literalinclude:: samples/node-volume-target-detail-response.json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-nodes.inc0000644000175000017500000004576600000000000024051 0ustar00coreycorey00000000000000.. 
-*- rst -*- ============= Nodes (nodes) ============= List, Searching, Creating, Updating, and Deleting of bare metal Node resources are done through the ``/v1/nodes`` resource. There are also several sub-resources, which allow further actions to be performed on a bare metal Node. A Node is the canonical representation of a discretely allocatable server, capable of running an Operating System. Each Node must be associated with a ``driver``; this informs Ironic what protocol to use when managing the Node. .. versionchanged:: 1.6 A Node may be referenced both by its UUID and by a unique human-readable "name" in any request. Throughout this documentation, this is referred to as the ``node_ident``. Responses clearly indicate whether a given field is a ``uuid`` or a ``name``. Depending on the Roles assigned to the authenticated OpenStack User, and upon the configuration of the Bare Metal service, API responses may change. For example, the default value of the "show_password" settings cause all API responses to mask passwords within ``driver_info`` with the literal string "\*\*\*\*\*\*". Create Node =========== .. rest_method:: POST /v1/nodes Creates a new Node resource. This method requires that a ``driver`` be supplied in the request body. Most subresources of a Node (eg, ``properties``, ``driver_info``, etc) may be supplied when the Node is created, or the resource may be updated later. .. versionadded:: 1.2 Added ``available`` state name, which replaced ``None`` as the status of an unprovisioned Node. All clients should be updated to use the new ``available`` state name. Nodes in the ``available`` state may have workloads provisioned on them; they are "available" for use. .. versionadded:: 1.5 Introduced the ``name`` field. .. versionadded:: 1.7 Introduced the ``clean_step`` field. .. versionchanged:: 1.11 The default initial state of newly-created Nodes from ``available`` to ``enroll``. 
This provides users a workflow to verify the manageability of a Node and perform necessary operational functions (eg, building a RAID array) before making the Node available for provisioning. .. versionadded:: 1.12 Introduced support for the ``raid_config`` and ``target_raid_config`` fields. .. versionadded:: 1.20 Introduced the ``network_interface`` field. If this field is not supplied when creating the Node, the default value will be used. .. versionadded:: 1.21 Introduced the ``resource_class`` field, which may be used to store a resource designation for the proposed OpenStack Placement Engine. This field has no effect within Ironic. .. versionadded:: 1.31 Introduced the ``boot_interface``, ``deploy_interface``, ``management_interface``, ``power_interface``, ``inspect_interface``, ``console_interface``, ``vendor_interface`` and ``raid_interface`` fields. If any of these fields are not supplied when creating the Node, their default value will be used. .. versionchanged:: 1.31 If the specified driver is a dynamic driver, then all the interfaces (boot_interface, deploy_interface, etc.) will be set to the default interface for that driver unless another enabled interface is specified in the creation request. .. versionadded:: 1.33 Introduced the ``storage_interface`` field. If this field is not supplied when creating the Node, the default value will be used. .. versionadded:: 1.38 Introduced the ``rescue_interface`` field. If this field is not supplied when creating the Node, the default value will be used. .. versionadded:: 1.44 Introduced the ``deploy_step`` field. .. versionadded:: 1.46 Introduced the ``conductor_group`` field. .. versionadded:: 1.50 Introduced the ``owner`` field. .. versionadded:: 1.51 Introduced the ``description`` field. .. versionadded:: 1.52 Introduced the ``allocation_uuid`` field. .. versionadded:: 1.65 Introduced the ``lessee`` field. Normal response codes: 201 Error codes: 400,403,406 Request ------- .. 
rest_parameters:: parameters.yaml - boot_interface: req_boot_interface - conductor_group: req_conductor_group - console_interface: req_console_interface - deploy_interface: req_deploy_interface - driver_info: req_driver_info - driver: req_driver_name - extra: req_extra - inspect_interface: req_inspect_interface - management_interface: req_management_interface - name: node_name - network_interface: req_network_interface - power_interface: req_power_interface - properties: req_properties - raid_interface: req_raid_interface - rescue_interface: req_rescue_interface - resource_class: req_resource_class_create - storage_interface: req_storage_interface - uuid: req_uuid - vendor_interface: req_vendor_interface - owner: owner - description: n_description - lessee: lessee **Example Node creation request with a dynamic driver:** .. literalinclude:: samples/node-create-request-dynamic.json :language: javascript **Example Node creation request with a classic driver:** .. literalinclude:: samples/node-create-request-classic.json :language: javascript Response -------- The response will contain the complete Node record, with the supplied data, and any defaults added for non-specified fields. Most fields default to "null" or "". The list and example below are representative of the response as of API microversion 1.48. .. 
rest_parameters:: parameters.yaml - uuid: uuid - name: node_name - power_state: power_state - target_power_state: target_power_state - provision_state: provision_state - target_provision_state: target_provision_state - maintenance: maintenance - maintenance_reason: maintenance_reason - fault: fault - last_error: last_error - reservation: reservation - driver: driver_name - driver_info: driver_info - driver_internal_info: driver_internal_info - properties: n_properties - instance_info: instance_info - instance_uuid: instance_uuid - chassis_uuid: chassis_uuid - extra: extra - console_enabled: console_enabled - raid_config: raid_config - target_raid_config: target_raid_config - clean_step: clean_step - deploy_step: deploy_step - links: links - ports: n_ports - portgroups: n_portgroups - states: n_states - resource_class: resource_class - boot_interface: boot_interface - console_interface: console_interface - deploy_interface: deploy_interface - inspect_interface: inspect_interface - management_interface: management_interface - network_interface: network_interface - power_interface: power_interface - raid_interface: raid_interface - rescue_interface: rescue_interface - storage_interface: storage_interface - traits: n_traits - vendor_interface: vendor_interface - volume: n_volume - conductor_group: conductor_group - protected: protected - protected_reason: protected_reason - conductor: conductor - owner: owner - lessee: lessee - description: n_description - allocation_uuid: allocation_uuid **Example JSON representation of a Node:** .. literalinclude:: samples/node-create-response.json :language: javascript List Nodes ========== .. rest_method:: GET /v1/nodes Return a list of bare metal Nodes, with some useful information about each Node. Some filtering is possible by passing in flags with the request. By default, this query will return the name, uuid, instance uuid, power state, provision state, and maintenance setting for each Node. .. 
versionadded:: 1.8 Added the ``fields`` Request parameter. When specified, this causes the content of the Response to include only the specified fields, rather than the default set. .. versionadded:: 1.9 Added the ``provision_state`` Request parameter, allowing the list of returned Nodes to be filtered by their current state. .. versionadded:: 1.16 Added the ``driver`` Request parameter, allowing the list of returned Nodes to be filtered by their driver name. .. versionadded:: 1.21 Added the ``resource_class`` Request parameter, allowing the list of returned Nodes to be filtered by this field. .. versionadded:: 1.42 Introduced the ``fault`` field. .. versionadded:: 1.43 Added the ``detail`` boolean request parameter. When specified ``True`` this causes the response to include complete details about each node, as shown in the "List Nodes Detailed" section below. .. versionadded:: 1.46 Introduced the ``conductor_group`` request parameter, to allow filtering the list of returned nodes by conductor group. .. versionadded:: 1.49 Introduced the ``conductor`` request parameter, to allow filtering the list of returned nodes by conductor. .. versionadded:: 1.50 Introduced the ``owner`` field. .. versionadded:: 1.51 Introduced the ``description`` field. .. versionadded:: 1.65 Introduced the ``lessee`` field. Normal response codes: 200 Error codes: 400,403,406 Request ------- .. rest_parameters:: parameters.yaml - instance_uuid: r_instance_uuid - maintenance: r_maintenance - associated: r_associated - provision_state: r_provision_state - driver: r_driver - resource_class: r_resource_class - conductor_group: r_conductor_group - conductor: r_conductor - fault: r_fault - owner: owner - lessee: lessee - description_contains: r_description_contains - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key - detail: detail Response -------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - name: node_name - instance_uuid: instance_uuid - power_state: power_state - provision_state: provision_state - maintenance: maintenance - links: links **Example list of Nodes:** .. literalinclude:: samples/nodes-list-response.json :language: javascript List Nodes Detailed =================== .. rest_method:: GET /v1/nodes/detail .. deprecated:: Use ?detail=True query string instead. Return a list of bare metal Nodes with complete details. Some filtering is possible by passing in flags with the request. This method is particularly useful to locate the Node associated to a given Nova instance, eg. with a request to ``v1/nodes/detail?instance_uuid={NOVA INSTANCE UUID}`` .. versionadded:: 1.37 Introduced the ``traits`` field. .. versionadded:: 1.38 Introduced the ``rescue_interface`` field. .. versionadded:: 1.42 Introduced the ``fault`` field. .. versionadded:: 1.46 Introduced the ``conductor_group`` field. .. versionadded:: 1.48 Introduced the ``protected`` and ``protected_reason`` fields. .. versionadded:: 1.49 Introduced the ``conductor`` request parameter and ``conductor`` field. .. versionadded:: 1.50 Introduced the ``owner`` field. .. versionadded:: 1.51 Introduced the ``description`` field. .. versionadded:: 1.52 Introduced the ``allocation_uuid`` field. .. versionadded:: 1.65 Introduced the ``lessee`` field. Normal response codes: 200 Error codes: 400,403,406 Request ------- .. rest_parameters:: parameters.yaml - instance_uuid: r_instance_uuid - maintenance: r_maintenance - fault: r_fault - associated: r_associated - provision_state: r_provision_state - driver: r_driver - resource_class: r_resource_class - conductor_group: r_conductor_group - conductor: r_conductor - owner: owner - lessee: lessee - description_contains: r_description_contains - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - name: node_name - power_state: power_state - target_power_state: target_power_state - provision_state: provision_state - target_provision_state: target_provision_state - maintenance: maintenance - maintenance_reason: maintenance_reason - fault: fault - last_error: last_error - reservation: reservation - driver: driver_name - driver_info: driver_info - driver_internal_info: driver_internal_info - properties: n_properties - instance_info: instance_info - instance_uuid: instance_uuid - chassis_uuid: chassis_uuid - extra: extra - console_enabled: console_enabled - raid_config: raid_config - target_raid_config: target_raid_config - clean_step: clean_step - deploy_step: deploy_step - links: links - ports: n_ports - portgroups: n_portgroups - states: n_states - resource_class: resource_class - boot_interface: boot_interface - console_interface: console_interface - deploy_interface: deploy_interface - inspect_interface: inspect_interface - management_interface: management_interface - network_interface: network_interface - power_interface: power_interface - raid_interface: raid_interface - rescue_interface: rescue_interface - storage_interface: storage_interface - traits: n_traits - vendor_interface: vendor_interface - volume: n_volume - conductor_group: conductor_group - protected: protected - protected_reason: protected_reason - owner: owner - lessee: lessee - description: n_description - conductor: conductor - allocation_uuid: allocation_uuid - retired: retired - retired_reason: retired_reason **Example detailed list of Nodes:** .. literalinclude:: samples/nodes-list-details-response.json :language: javascript Show Node Details ================= .. rest_method:: GET /v1/nodes/{node_ident} Shows details for a node. By default, this will return the full representation of the resource; an optional ``fields`` parameter can be supplied to return only the specified set. .. versionadded:: 1.37 Introduced the ``traits`` field. .. 
versionadded:: 1.38 Introduced the ``rescue_interface`` field. .. versionadded:: 1.42 Introduced the ``fault`` field. .. versionadded:: 1.46 Introduced the ``conductor_group`` field. .. versionadded:: 1.48 Introduced the ``protected`` and ``protected_reason`` fields. .. versionadded:: 1.49 Introduced the ``conductor`` field .. versionadded:: 1.50 Introduced the ``owner`` field. .. versionadded:: 1.51 Introduced the ``description`` field. .. versionadded:: 1.52 Introduced the ``allocation_uuid`` field. .. versionadded:: 1.61 Introduced the ``retired`` and ``retired_reason`` fields. .. versionadded:: 1.65 Introduced the ``lessee`` field. Normal response codes: 200 Error codes: 400,403,404,406 Request ------- .. rest_parameters:: parameters.yaml - node_ident: node_ident - fields: fields Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: node_name - power_state: power_state - target_power_state: target_power_state - provision_state: provision_state - target_provision_state: target_provision_state - maintenance: maintenance - maintenance_reason: maintenance_reason - fault: fault - last_error: last_error - reservation: reservation - driver: driver_name - driver_info: driver_info - driver_internal_info: driver_internal_info - properties: n_properties - instance_info: instance_info - instance_uuid: instance_uuid - chassis_uuid: chassis_uuid - extra: extra - console_enabled: console_enabled - raid_config: raid_config - target_raid_config: target_raid_config - clean_step: clean_step - deploy_step: deploy_step - links: links - ports: n_ports - portgroups: n_portgroups - states: n_states - resource_class: resource_class - boot_interface: boot_interface - console_interface: console_interface - deploy_interface: deploy_interface - inspect_interface: inspect_interface - management_interface: management_interface - network_interface: network_interface - power_interface: power_interface - raid_interface: raid_interface - rescue_interface: rescue_interface - 
storage_interface: storage_interface - traits: n_traits - vendor_interface: vendor_interface - volume: n_volume - conductor_group: conductor_group - protected: protected - protected_reason: protected_reason - owner: owner - lessee: lessee - description: n_description - conductor: conductor - allocation_uuid: allocation_uuid **Example JSON representation of a Node:** .. literalinclude:: samples/node-show-response.json :language: javascript Update Node =========== .. rest_method:: PATCH /v1/nodes/{node_ident} Updates the information stored about a Node. Note that this endpoint can not be used to request state changes, which are managed through sub-resources. .. versionadded:: 1.25 Introduced the ability to unset a node's chassis UUID. .. versionadded:: 1.51 Introduced the ability to set/unset a node's description. Normal response codes: 200 Error codes: 400,403,404,406,409 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 `_. .. rest_parameters:: parameters.yaml - node_ident: node_ident **Example PATCH document updating Node driver_info:** .. literalinclude:: samples/node-update-driver-info-request.json Response -------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - name: node_name - power_state: power_state - target_power_state: target_power_state - provision_state: provision_state - target_provision_state: target_provision_state - maintenance: maintenance - maintenance_reason: maintenance_reason - fault: fault - last_error: last_error - reservation: reservation - driver: driver_name - driver_info: driver_info - driver_internal_info: driver_internal_info - properties: n_properties - instance_info: instance_info - instance_uuid: instance_uuid - chassis_uuid: chassis_uuid - extra: extra - console_enabled: console_enabled - raid_config: raid_config - target_raid_config: target_raid_config - clean_step: clean_step - deploy_step: deploy_step - links: links - ports: n_ports - portgroups: n_portgroups - states: n_states - resource_class: resource_class - boot_interface: boot_interface - console_interface: console_interface - deploy_interface: deploy_interface - inspect_interface: inspect_interface - management_interface: management_interface - network_interface: network_interface - power_interface: power_interface - raid_interface: raid_interface - rescue_interface: rescue_interface - storage_interface: storage_interface - traits: n_traits - vendor_interface: vendor_interface - volume: n_volume - conductor_group: conductor_group - protected: protected - protected_reason: protected_reason - owner: owner - lessee: lessee - description: n_description - conductor: conductor - allocation_uuid: allocation_uuid **Example JSON representation of a Node:** .. literalinclude:: samples/node-update-driver-info-response.json :language: javascript Delete Node =========== .. rest_method:: DELETE /v1/nodes/{node_ident} Deletes a node. Normal response codes: 204 Error codes: 400,403,404,409 Request ------- .. 
rest_parameters:: parameters.yaml - node_ident: node_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-portgroups-ports.inc0000644000175000017500000000506300000000000026314 0ustar00coreycorey00000000000000.. -*- rst -*- ============================================= Listing Ports by Portgroup (portgroup, ports) ============================================= .. versionadded:: 1.24 Given a Portgroup identifier (``uuid`` or ``name``), the API exposes the list of, and details of, all Ports associated with that Portgroup. These endpoints do not allow modification of the Ports; that should be done by accessing the Port resources under the ``/v1/ports`` endpoint. List Ports by Portgroup ======================= .. rest_method:: GET /v1/portgroups/{portgroup_ident}/ports Return a list of bare metal Ports associated with ``portgroup_ident``. When specified, the ``fields`` request parameter causes the content of the Response to include only the specified fields, rather than the default set. .. versionadded:: 1.34 Added the ``physical_network`` field. .. versionadded:: 1.53 Added the ``is_smartnic`` response fields. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - portgroup_ident: portgroup_ident - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - ports: ports - uuid: uuid - address: port_address - links: links **Example list of a Portgroup's Ports:** .. literalinclude:: samples/portgroup-port-list-response.json List detailed Ports by Portgroup ================================ .. rest_method:: GET /v1/portgroups/{portgroup_ident}/ports/detail Return a detailed list of bare metal Ports associated with ``portgroup_ident``. .. versionadded:: 1.34 Added the ``physical_network`` field. .. 
versionadded:: 1.53 Added the ``is_smartnic`` response fields. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - portgroup_ident: portgroup_ident - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - ports: ports - uuid: uuid - address: port_address - node_uuid: node_uuid - local_link_connection: local_link_connection - pxe_enabled: pxe_enabled - physical_network: physical_network - internal_info: internal_info - extra: extra - portgroup_uuid: portgroup_uuid - created_at: created_at - updated_at: updated_at - links: links - is_smartnic: is_smartnic **Example details of a Portgroup's Ports:** .. literalinclude:: samples/portgroup-port-detail-response.json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-portgroups.inc0000644000175000017500000001420700000000000025147 0ustar00coreycorey00000000000000.. -*- rst -*- ======================= Portgroups (portgroups) ======================= .. versionadded:: 1.23 Ports can be combined into portgroups to support static link aggregation group (LAG) or multi-chassis link aggregation group (MLAG) configurations. Listing, Searching, Creating, Updating, and Deleting of bare metal Portgroup resources are done through the ``v1/portgroups`` resource. All Portgroups must be associated with a Node when created. This association can be changed, though the request may be rejected if either the current or destination Node are in a transitive state (for example, in the process of deploying) or are in a state that would be non-deterministically affected by such a change (for example, there is an active user instance on the Node). List Portgroups =============== .. rest_method:: GET /v1/portgroups Return a list of bare metal Portgroups. 
Some filtering is possible by passing in some parameters with the request. By default, this query will return the UUID, name and address for each Portgroup. .. versionadded:: 1.43 Added the ``detail`` boolean request parameter. When specified ``True`` this causes the response to include complete details about each portgroup. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node: r_portgroup_node_ident - address: r_portgroup_address - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key - detail: detail Response -------- .. rest_parameters:: parameters.yaml - portgroups: portgroups - uuid: uuid - address: portgroup_address - name: portgroup_name - links: links **Example Portgroup list response:** .. literalinclude:: samples/portgroup-list-response.json :language: javascript Create Portgroup ================ .. rest_method:: POST /v1/portgroups Creates a new Portgroup resource. This method requires a Node UUID and the physical hardware address for the Portgroup (MAC address in most cases). Normal response code: 201 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node_uuid: req_node_uuid - address: req_portgroup_address - name: portgroup_name **Example Portgroup creation request:** .. literalinclude:: samples/portgroup-create-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: portgroup_name - address: portgroup_address - node_uuid: node_uuid - standalone_ports_supported: standalone_ports_supported - internal_info: portgroup_internal_info - extra: extra - mode: portgroup_mode - properties: portgroup_properties - created_at: created_at - updated_at: updated_at - links: links - ports: pg_ports **Example Portgroup creation response:** .. literalinclude:: samples/portgroup-create-response.json :language: javascript List Detailed Portgroups ======================== .. 
rest_method:: GET /v1/portgroups/detail Return a list of bare metal Portgroups, with detailed information. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node: r_portgroup_node_ident - address: r_portgroup_address - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - portgroups: portgroups - name: portgroup_name - uuid: uuid - address: portgroup_address - node_uuid: node_uuid - standalone_ports_supported: standalone_ports_supported - internal_info: portgroup_internal_info - extra: extra - mode: portgroup_mode - properties: portgroup_properties - created_at: created_at - updated_at: updated_at - links: links - ports: pg_ports **Example detailed Portgroup list response:** .. literalinclude:: samples/portgroup-list-detail-response.json :language: javascript Show Portgroup Details ====================== .. rest_method:: GET /v1/portgroups/{portgroup_ident} Show details for the given Portgroup. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - portgroup_ident: portgroup_ident - fields: fields Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: portgroup_name - address: portgroup_address - node_uuid: node_uuid - standalone_ports_supported: standalone_ports_supported - internal_info: portgroup_internal_info - extra: extra - mode: portgroup_mode - properties: portgroup_properties - created_at: created_at - updated_at: updated_at - links: links - ports: pg_ports **Example Portgroup details:** .. literalinclude:: samples/portgroup-create-response.json :language: javascript Update a Portgroup ================== .. rest_method:: PATCH /v1/portgroups/{portgroup_ident} Update a Portgroup. Normal response code: 200 Error codes: 400,401,403,404 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 `_. .. 
rest_parameters:: parameters.yaml - portgroup_ident: portgroup_ident **Example Portgroup update request:** .. literalinclude:: samples/portgroup-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: portgroup_name - address: portgroup_address - node_uuid: node_uuid - standalone_ports_supported: standalone_ports_supported - internal_info: portgroup_internal_info - extra: extra - mode: portgroup_mode - properties: portgroup_properties - created_at: created_at - updated_at: updated_at - links: links - ports: pg_ports **Example Portgroup update response:** .. literalinclude:: samples/portgroup-update-response.json :language: javascript Delete Portgroup ================ .. rest_method:: DELETE /v1/portgroups/{portgroup_ident} Delete a Portgroup. Normal response code: 204 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - portgroup_ident: portgroup_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-ports.inc0000644000175000017500000001775600000000000024106 0ustar00coreycorey00000000000000.. -*- rst -*- ============= Ports (ports) ============= Listing, Searching, Creating, Updating, and Deleting of bare metal Port resources are done through the ``ports`` resource. All Ports must be associated to a Node when created. This association can be changed, though the request may be rejected if either the current or destination Node are in a transitive state (e.g., in the process of deploying) or are in a state that would be non-deterministically affected by such a change (e.g., there is an active user instance on the Node). List Ports ========== .. rest_method:: GET /v1/ports Return a list of bare metal Ports. Some filtering is possible by passing in some parameters with the request. By default, this query will return the uuid and address for each Port. .. 
versionadded:: 1.6 Added the ``node`` query parameter. If both ``node_uuid`` and ``node`` are specified in the request, ``node_uuid`` will be used to filter results. .. versionadded:: 1.8 Added the ``fields`` request parameter. When specified, this causes the content of the response to include only the specified fields, rather than the default set. .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` fields. .. versionadded:: 1.24 Added the ``portgroup_uuid`` field. .. versionadded:: 1.34 Added the ``physical_network`` field. .. versionadded:: 1.43 Added the ``detail`` boolean request parameter. When specified ``True`` this causes the response to include complete details about each port. .. versionadded:: 1.53 Added the ``is_smartnic`` field. Normal response code: 200 Request ------- .. rest_parameters:: parameters.yaml - node: r_port_node_ident - node_uuid: r_port_node_uuid - portgroup: r_port_portgroup_ident - address: r_port_address - fields: fields - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key - detail: detail Response -------- .. rest_parameters:: parameters.yaml - ports: ports - uuid: uuid - address: port_address - links: links **Example Port list response:** .. literalinclude:: samples/port-list-response.json :language: javascript Create Port =========== .. rest_method:: POST /v1/ports Creates a new Port resource. This method requires a Node UUID and the physical hardware address for the Port (MAC address in most cases). .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` request and response fields. .. versionadded:: 1.24 Added the ``portgroup_uuid`` request and response fields. .. versionadded:: 1.34 Added the ``physical_network`` request and response fields. .. versionadded:: 1.53 Added the ``is_smartnic`` request and response fields. Normal response code: 201 Request ------- .. 
rest_parameters:: parameters.yaml - node_uuid: req_node_uuid - address: req_port_address - portgroup_uuid: req_portgroup_uuid - local_link_connection: req_local_link_connection - pxe_enabled: req_pxe_enabled - physical_network: req_physical_network - extra: req_extra - is_smartnic: req_is_smartnic **Example Port creation request:** .. literalinclude:: samples/port-create-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - address: port_address - node_uuid: node_uuid - portgroup_uuid: portgroup_uuid - local_link_connection: local_link_connection - pxe_enabled: pxe_enabled - physical_network: physical_network - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at - links: links - is_smartnic: is_smartnic **Example Port creation response:** .. literalinclude:: samples/port-create-response.json :language: javascript List Detailed Ports =================== .. rest_method:: GET /v1/ports/detail Return a list of bare metal Ports, with detailed information. .. versionadded:: 1.6 Added the ``node`` query parameter. If both ``node_uuid`` and ``node`` are specified in the request, ``node_uuid`` will be used to filter results. .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` response fields. .. versionadded:: 1.24 Added the ``portgroup`` query parameter and ``portgroup_uuid`` response field. .. versionadded:: 1.34 Added the ``physical_network`` response field. .. versionadded:: 1.53 Added the ``is_smartnic`` response fields. Normal response code: 200 Request ------- .. rest_parameters:: parameters.yaml - node: r_port_node_ident - node_uuid: r_port_node_uuid - portgroup: r_port_portgroup_ident - address: r_port_address - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. 
rest_parameters:: parameters.yaml - ports: ports - uuid: uuid - address: port_address - node_uuid: node_uuid - portgroup_uuid: portgroup_uuid - local_link_connection: local_link_connection - pxe_enabled: pxe_enabled - physical_network: physical_network - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at - links: links - is_smartnic: is_smartnic **Example detailed Port list response:** .. literalinclude:: samples/port-list-detail-response.json :language: javascript Show Port Details ================= .. rest_method:: GET /v1/ports/{port_id} Show details for the given Port. .. versionadded:: 1.8 Added the ``fields`` request parameter. When specified, this causes the content of the response to include only the specified fields, rather than the default set. .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` response fields. .. versionadded:: 1.24 Added the ``portgroup_uuid`` response field. .. versionadded:: 1.34 Added the ``physical_network`` response field. .. versionadded:: 1.53 Added the ``is_smartnic`` response fields. Normal response code: 200 Request ------- .. rest_parameters:: parameters.yaml - port_id: port_ident - fields: fields Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - address: port_address - node_uuid: node_uuid - portgroup_uuid: portgroup_uuid - local_link_connection: local_link_connection - pxe_enabled: pxe_enabled - physical_network: physical_network - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at - links: links - is_smartnic: is_smartnic **Example Port details:** .. literalinclude:: samples/port-create-response.json :language: javascript Update a Port ============= .. rest_method:: PATCH /v1/ports/{port_id} Update a Port. .. versionadded:: 1.19 Added the ``pxe_enabled`` and ``local_link_connection`` fields. .. versionadded:: 1.24 Added the ``portgroup_uuid`` field. .. 
versionadded:: 1.34 Added the ``physical_network`` field. .. versionadded:: 1.53 Added the ``is_smartnic`` fields. Normal response code: 200 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 `_. .. rest_parameters:: parameters.yaml - port_id: port_ident **Example Port update request:** .. literalinclude:: samples/port-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - address: port_address - node_uuid: node_uuid - portgroup_uuid: portgroup_uuid - local_link_connection: local_link_connection - pxe_enabled: pxe_enabled - physical_network: physical_network - internal_info: internal_info - extra: extra - created_at: created_at - updated_at: updated_at - links: links - is_smartnic: is_smartnic **Example Port update response:** .. literalinclude:: samples/port-update-response.json :language: javascript Delete Port =========== .. rest_method:: DELETE /v1/ports/{port_id} Delete a Port. Normal response code: 204 Request ------- .. rest_parameters:: parameters.yaml - port_id: port_ident ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-v1-volume.inc0000644000175000017500000002315700000000000024236 0ustar00coreycorey00000000000000.. -*- rst -*- =============== Volume (volume) =============== .. versionadded:: 1.32 Information for connecting remote volumes to a node can be associated with a Node. There are two types of resources, Volume connectors and Volume targets. Volume connectors contain initiator information of Nodes. Volume targets contain target information of remote volumes. Listing, Searching, Creating, Updating, and Deleting of Volume connector resources are done through the ``v1/volume/connectors`` resource. The same operations for Volume targets are done through the ``v1/volume/targets`` resources. 
List Links of Volume Resources ============================== .. rest_method:: GET /v1/volume Return a list of links to all volume resources. Normal response code: 200 Request ------- Response -------- .. rest_parameters:: parameters.yaml - connectors: volume_connectors_link - targets: volume_targets_link - links: links **Example Volume list response:** .. literalinclude:: samples/volume-list-response.json :language: javascript List Volume Connectors ====================== .. rest_method:: GET /v1/volume/connectors Return a list of Volume connectors for all nodes. By default, this query will return the UUID, node UUID, type, and connector ID for each Volume connector. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node: r_volume_connector_node_ident - fields: fields - detail: detail - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - connectors: volume_connectors - uuid: uuid - type: volume_connector_type - connector_id: volume_connector_connector_id - node_uuid: node_uuid - extra: extra - links: links - next: next **Example Volume connector list response:** .. literalinclude:: samples/volume-connector-list-response.json :language: javascript **Example detailed Volume connector list response:** .. literalinclude:: samples/volume-connector-list-detail-response.json :language: javascript Create Volume Connector ======================= .. rest_method:: POST /v1/volume/connectors Creates a new Volume connector resource. This method requires a Node UUID, a connector type and a connector ID. Normal response code: 201 Error codes: 400,401,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - node_uuid: req_node_uuid - type: volume_connector_type - connector_id: volume_connector_connector_id - extra: req_extra **Example Volume connector creation request:** .. 
literalinclude:: samples/volume-connector-create-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - type: volume_connector_type - connector_id: volume_connector_connector_id - node_uuid: node_uuid - extra: extra - links: links **Example Volume connector creation response:** .. literalinclude:: samples/volume-connector-create-response.json :language: javascript Show Volume Connector Details ============================= .. rest_method:: GET /v1/volume/connectors/{volume_connector_id} Show details for the given Volume connector. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - volume_connector_id: volume_connector_id - fields: fields Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - type: volume_connector_type - connector_id: volume_connector_connector_id - node_uuid: node_uuid - extra: extra - links: links **Example Volume connector details:** .. literalinclude:: samples/volume-connector-create-response.json :language: javascript Update a Volume Connector ========================= .. rest_method:: PATCH /v1/volume/connectors/{volume_connector_id} Update a Volume connector. A Volume connector can be updated only while a node associated with the Volume connector is powered off. Normal response code: 200 Error codes: 400,401,403,404,409 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 `_. .. rest_parameters:: parameters.yaml - volume_connector_id: volume_connector_id **Example Volume connector update request:** .. literalinclude:: samples/volume-connector-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - type: volume_connector_type - connector_id: volume_connector_connector_id - node_uuid: node_uuid - extra: extra - links: links **Example Volume connector update response:** .. 
literalinclude:: samples/volume-connector-update-response.json :language: javascript Delete Volume Connector ======================= .. rest_method:: DELETE /v1/volume/connector/{volume_connector_id} Delete a Volume connector. A Volume connector can be deleted only while a node associated with the Volume connector is powered off. Normal response code: 204 Error codes: 400,401,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - volume_connector_id: volume_connector_id List Volume Targets =================== .. rest_method:: GET /v1/volume/targets Return a list of Volume targets for all nodes. By default, this query will return the UUID, node UUID, volume type, boot index, and volume ID for each Volume target. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - node: r_volume_target_node_ident - fields: fields - detail: detail - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - targets: volume_targets - uuid: uuid - volume_type: volume_target_volume_type - properties: volume_target_properties - boot_index: volume_target_boot_index - volume_id: volume_target_volume_id - extra: extra - node_uuid: node_uuid - links: links - next: next **Example Volume target list response:** .. literalinclude:: samples/volume-target-list-response.json :language: javascript **Example detailed Volume target list response:** .. literalinclude:: samples/volume-target-list-detail-response.json :language: javascript Create Volume Target ==================== .. rest_method:: POST /v1/volume/targets Creates a new Volume target resource. This method requires a Node UUID, volume type, volume ID, and boot index.. Normal response code: 201 Error codes: 400,401,403,404,409 Request ------- .. 
rest_parameters:: parameters.yaml - node_uuid: req_node_uuid - volume_type: volume_target_volume_type - properties: volume_target_properties - boot_index: volume_target_boot_index - volume_id: volume_target_volume_id - extra: req_extra **Example Volume target creation request:** .. literalinclude:: samples/volume-target-create-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - volume_type: volume_target_volume_type - properties: volume_target_properties - boot_index: volume_target_boot_index - volume_id: volume_target_volume_id - extra: extra - node_uuid: node_uuid - links: links **Example Volume target creation response:** .. literalinclude:: samples/volume-target-create-response.json :language: javascript Show Volume Target Details ========================== .. rest_method:: GET /v1/volume/targets/{volume_target_id} Show details for the given Volume target. Normal response code: 200 Error codes: 400,401,403,404 Request ------- .. rest_parameters:: parameters.yaml - volume_target_id: volume_target_id - fields: fields Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - volume_type: volume_target_volume_type - properties: volume_target_properties - boot_index: volume_target_boot_index - volume_id: volume_target_volume_id - extra: extra - node_uuid: node_uuid - links: links **Example Volume target details:** .. literalinclude:: samples/volume-target-create-response.json :language: javascript Update a Volume Target ====================== .. rest_method:: PATCH /v1/volume/targets/{volume_target_id} Update a Volume target. A Volume target can be updated only while a node associated with the Volume target is powered off. Normal response code: 200 Error codes: 400,401,403,404,409 Request ------- The BODY of the PATCH request must be a JSON PATCH document, adhering to `RFC 6902 `_. .. rest_parameters:: parameters.yaml - volume_target_id: volume_target_id **Example Volume target update request:** .. 
literalinclude:: samples/volume-target-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - volume_type: volume_target_volume_type - properties: volume_target_properties - boot_index: volume_target_boot_index - volume_id: volume_target_volume_id - extra: extra - node_uuid: node_uuid - links: links **Example Volume target update response:** .. literalinclude:: samples/volume-target-update-response.json :language: javascript Delete Volume Target ==================== .. rest_method:: DELETE /v1/volume/target/{volume_target_id} Delete a Volume target. A Volume target can be deleted only while a node associated with the Volume target is powered off. Normal response code: 204 Error codes: 400,401,403,404,409 Request ------- .. rest_parameters:: parameters.yaml - volume_target_id: volume_target_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/baremetal-api-versions.inc0000644000175000017500000000471400000000000024251 0ustar00coreycorey00000000000000.. -*- rst -*- ============ API versions ============ Concepts ======== In order to bring new features to users over time, the Ironic API supports versioning. There are two kinds of versions in Ironic. - ''major versions'', which have dedicated urls. - ''microversions'', which can be requested through the use of the ``X-OpenStack-Ironic-API-Version`` header. The Version APIs work differently from other APIs as they *do not* require authentication. Beginning with the Kilo release, all API requests support the ``X-OpenStack-Ironic-API-Version`` header. This header SHOULD be supplied with every request; in the absence of this header, each request is treated as though coming from an older pre-Kilo client. This was done to preserve backwards compatibility as we introduced new features in the server. 
If you try to use a feature with an API version older than when that feature was introduced the ironic service will respond as would before that feature existed. For example if a new API URL was added, and you try to make a request with an older API version, then you will get a ``Not Found (404)`` error, or if a new field was added to an existing API and you request an older API version then you will get an ``Invalid Parameter`` response. List API versions ================= .. rest_method:: GET / This fetches all the information about all known major API versions in the deployment. Links to more specific information will be provided for each major API version, as well as information about supported min and max microversions. Normal response codes: 200 Request ------- Response Example ---------------- .. rest_parameters:: parameters.yaml - description: description - versions: versions - version: version - id: id - links: links - min_version: x-openstack-ironic-api-min-version .. literalinclude:: samples/api-root-response.json :language: javascript Show v1 API =========== .. rest_method:: GET /v1/ Show all the resources within the Ironic v1 API. Normal response codes: 200 Request ------- Response Example ---------------- .. rest_parameters:: parameters.yaml - id: id - links: links - openstack-request-id: openstack-request-id - x-openstack-ironic-api-version: header_version - x-openstack-ironic-api-min-version: x-openstack-ironic-api-min-version - x-openstack-ironic-api-max-version: x-openstack-ironic-api-max-version .. 
literalinclude:: samples/api-v1-root-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/conf.py0000644000175000017500000001513400000000000020502 0ustar00coreycorey00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # ironic documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys html_theme = 'openstackdocs' html_theme_options = { "sidebar_mode": "toc", } extensions = [ 'os_api_ref', 'openstackdocstheme' ] repository_name = 'openstack/ironic' use_storyboard = True # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. 
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # # source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Ironic API Reference' copyright = u'OpenStack Foundation' # html_context allows us to pass arbitrary values into the html template html_context = {"bug_tag": "api-ref", "bug_project": "ironic"} # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # The reST default role (used for this markup: `text`) to use # for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 
# html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'ironicdoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Ironic.tex', u'OpenStack Bare Metal API Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_use_modindex = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/index.rst0000644000175000017500000000231300000000000021037 0ustar00coreycorey00000000000000:tocdepth: 2 ================ Bare Metal API ================ .. rest_expand_all:: .. include:: baremetal-api-versions.inc .. include:: baremetal-api-v1-nodes.inc .. include:: baremetal-api-v1-node-management.inc .. include:: baremetal-api-v1-node-passthru.inc .. include:: baremetal-api-v1-nodes-traits.inc .. include:: baremetal-api-v1-nodes-vifs.inc .. include:: baremetal-api-v1-portgroups.inc .. include:: baremetal-api-v1-nodes-portgroups.inc .. include:: baremetal-api-v1-ports.inc .. include:: baremetal-api-v1-nodes-ports.inc .. include:: baremetal-api-v1-portgroups-ports.inc .. include:: baremetal-api-v1-volume.inc .. include:: baremetal-api-v1-nodes-volume.inc .. include:: baremetal-api-v1-drivers.inc .. 
include:: baremetal-api-v1-driver-passthru.inc .. include:: baremetal-api-v1-nodes-bios.inc .. include:: baremetal-api-v1-conductors.inc .. include:: baremetal-api-v1-allocation.inc .. include:: baremetal-api-v1-node-allocation.inc .. include:: baremetal-api-v1-deploy-templates.inc .. NOTE(dtantsur): keep chassis close to the end since it's semi-deprecated .. include:: baremetal-api-v1-chassis.inc .. NOTE(dtantsur): keep misc last, since it covers internal API .. include:: baremetal-api-v1-misc.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/parameters.yaml0000644000175000017500000013427600000000000022243 0ustar00coreycorey00000000000000# variables in header header_version: description: | Specific API microversion used to generate this response. in: header required: true type: string openstack-request-id: description: > A unique ID for tracking the request. The request ID associated with the request appears in the log lines for that request. By default, the middleware configuration ensures that the request ID appears in the log files. in: header required: false type: string x-openstack-ironic-api-max-version: description: | Maximum API microversion supported by this endpoint, eg. "1.22" in: header required: true type: string x-openstack-ironic-api-min-version: description: | Minimum API microversion supported by this endpoint, eg. "1.1" in: header required: true type: string x-openstack-ironic-api-version: description: > A request SHOULD include this header to indicate to the Ironic API service what version the client supports. The server will transform the response object into compliance with the requested version, if it is supported, or return a 406 Not Supported error. If this header is not supplied, the server will default to ``min_version`` in all responses. 
in: header required: true type: string # variables in path allocation_ident: description: | The UUID or name of the allocation. in: path required: true type: string bios_setting: description: | The name of the Bios setting. in: path required: true type: string chassis_ident: description: | The UUID of the chassis. in: path required: true type: string deploy_template_ident: description: | The UUID or name of the deploy template. in: path required: true type: string driver_ident: description: | The name of the driver. in: path required: true type: string hostname_ident: description: | The hostname of the conductor. in: path required: true type: string node_id: description: | The UUID of the node. in: path required: false type: string node_ident: description: | The UUID or Name of the node. in: path required: true type: string port_ident: description: | The UUID of the port. in: path required: true type: string portgroup_ident: description: | The UUID or Name of the portgroup. in: path required: true type: string trait: description: | A single trait for this node. in: path required: true type: string volume_connector_id: description: | The UUID of the Volume connector. in: path required: true type: string volume_target_id: description: | The UUID of the Volume target. in: path required: true type: string agent_version: description: | The version of the ironic-python-agent ramdisk, sent back to the Bare Metal service and stored during provisioning. in: query required: true type: string callback_url: description: | The URL of an active ironic-python-agent ramdisk, sent back to the Bare Metal service and stored temporarily during a provisioning action. in: query required: true type: string detail: description: | Whether to show detailed information about the resource. This cannot be set to True if ``fields`` parameter is specified. 
in: query required: false type: boolean # variables in driver query string driver_detail: description: | Whether to show detailed information about the drivers (e.g. the "boot_interface" field). in: query required: false type: boolean driver_type: description: | Only list drivers of this type. Options are "classic" or "dynamic". in: query required: false type: string # variables common to all query strings fields: description: | One or more fields to be returned in the response. For example, the following request returns only the ``uuid`` and ``name`` fields for each node: :: GET /v1/nodes?fields=uuid,name in: query required: false type: array fields_for_conductor: description: | One or more fields to be returned in the response. For example, the following request returns only the ``hostname`` and ``alive`` fields for each conductor: :: GET /v1/conductors?fields=hostname,alive in: query required: false type: array limit: description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. This value cannot be larger than the ``max_limit`` option in the ``[api]`` section of the configuration. If it is higher than ``max_limit``, only ``max-limit`` resources will be returned. in: query required: false type: integer marker: description: | The ID of the last-seen item. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string # variables in the vendor_passthru query string method_name: description: | Driver specific method name. in: query required: true type: string # variable in the lookup query string r_addresses: description: | Optional list of one or more Port addresses. 
in: query required: false type: array # variables in the query string r_allocation_node: description: | Filter the list of allocations by the node UUID or name. in: query required: false type: string r_allocation_state: description: | Filter the list of allocations by the allocation state, one of ``active``, ``allocating`` or ``error``. in: query required: false type: string r_associated: description: | Filter the list of returned nodes and only return those which are, or are not, associated with an ``instance_uuid``. in: query required: false type: boolean r_conductor: description: | Filter the list of returned nodes, and only return those with the specified ``conductor``. in: query required: false type: string r_conductor_group: description: | Filter the list of returned nodes, and only return those with the specified ``conductor_group``. Case-insensitive string up to 255 characters, containing ``a-z``, ``0-9``, ``_``, ``-``, and ``.``. in: query required: false type: string r_description_contains: description: | Filter the list of returned nodes, and only return those containing substring specified by ``description_contains``. in: query requred: false type: string r_driver: description: | Filter the list of returned nodes, and only return those with the specified ``driver``. in: query required: false type: string r_fault: description: | Filter the list of returned nodes, and only return those with the specified ``fault``. Possible values are determined by faults supported by ironic, e.g., ``power failure``, ``clean failure`` or ``rescue abort failure``. in: query required: false type: string r_instance_uuid: description: | Filter the list of returned nodes, and only return the node with this specific instance UUID, or an empty set if not found. in: query required: false type: string r_maintenance: description: | Filter the list of returned nodes and only return those with ``maintenance`` set to ``True`` or ``False``. 
in: query required: false type: boolean # variable in the lookup query string r_node_uuid: description: | Optional Node UUID. in: query required: false type: string r_port_address: description: | Filter the list of returned Ports, and only return the ones with the specified physical hardware address, typically MAC, or an empty set if not found. in: query required: false type: string r_port_node_ident: description: | Filter the list of returned Ports, and only return the ones associated with this specific node (name or UUID), or an empty set if not found. in: query required: false type: string r_port_node_uuid: description: | Filter the list of returned Ports, and only return the ones associated with this specific node UUID, or an empty set if not found. in: query required: false type: string r_port_portgroup_ident: description: | Filter the list of returned Ports, and only return the ones associated with this specific Portgroup (name or UUID), or an empty set if not found. in: query required: false type: string r_portgroup_address: description: | Filter the list of returned Portgroups, and only return the ones with the specified physical hardware address, typically MAC, or an empty set if not found. in: query required: false type: string r_portgroup_node_ident: description: | Filter the list of returned Portgroups, and only return the ones associated with this specific node (name or UUID), or an empty set if not found. in: query required: false type: string r_provision_state: description: | Filter the list of returned nodes, and only return those with the specified ``provision_state``. in: query required: false type: string r_resource_class: description: | Filter the list of returned nodes, and only return the ones with the specified resource class. 
in: query required: false type: string r_volume_connector_node_ident: description: | Filter the list of returned Volume connectors, and only return the ones associated with this specific node (name or UUID), or an empty set if not found. in: query required: false type: string r_volume_target_node_ident: description: | Filter the list of returned Volume targets, and only return the ones associated with this specific node (name or UUID), or an empty set if not found. in: query required: false type: string sort_dir: description: | Sorts the response by the requested sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. in: query required: false type: string sort_key: description: | Sorts the response by the this attribute value. Default is ``id``. You can specify multiple pairs of sort key and sort direction query parameters. If you omit the sort direction in a pair, the API uses the natural sorting direction of the server attribute that is provided as the ``sort_key``. in: query required: false type: string # variable returned from /lookup agent_config: description: | JSON document of configuration data for the ironic-python-agent process. in: body required: true type: JSON agent_node: description: | JSON document containing the Node fields "uuid", "properties", "instance_info", and "driver_internal_info"; used by the ironic-python-agent process as it operates on the Node. in: body required: true type: JSON # variables in the API body alive: description: | The conductor status indicates whether a conductor is considered alive or not. in: body required: true type: boolean allocation_last_error: description: | The error message for the allocation if it is in the ``error`` state, ``null`` otherwise. 
in: body required: true type: string allocation_name: description: | The unique name of the allocation. in: body required: true type: string allocation_node: description: | The UUID of the node assigned to the allocation. Will be ``null`` if a node is not yet assigned. in: body required: true type: string allocation_resource_class: description: | The resource class requested for the allocation. Can be ``null`` if the allocation was created via backfilling and the target node did not have the resource class set. in: body required: true type: string allocation_state: description: | The current state of the allocation. One of: * ``allocating`` - allocation is in progress. * ``active`` - allocation is finished and ``node_uuid`` is assigned. * ``error`` - allocation has failed, see ``last_error`` for details. in: body required: true type: string allocation_traits: description: | The list of the traits requested for the allocation. in: body required: true type: array allocation_uuid: description: | The UUID of the allocation associated with the node. If not ``null``, will be the same as ``instance_uuid`` (the opposite is not always true). Unlike ``instance_uuid``, this field is read-only. Please use the Allocation API to remove allocations. in: body required: true type: string bios_setting_name: description: | The name of a Bios setting for a Node, eg. "virtualization". in: body required: true type: string bios_setting_value: description: | The value of a Bios setting for a Node, eg. "on". in: body required: true type: string bios_settings: description: | Optional list of one or more Bios settings. It includes following fields "created_at", "updated_at", "links", "name", "value". in: body required: true type: array boot_device: description: | The boot device for a Node, eg. "pxe" or "disk". in: body required: true type: string boot_interface: description: | The boot interface for a Node, e.g. "pxe". 
in: body required: true type: string candidate_nodes: description: | A list of UUIDs of the nodes that are candidates for this allocation. in: body required: true type: array chassis: description: | A ``chassis`` object. in: body required: true type: array chassis_uuid: description: | UUID of the chassis associated with this Node. May be empty or None. in: body required: true type: string clean_step: description: | The current clean step. Introduced with the cleaning feature. in: body required: false type: string clean_steps: description: | An ordered list of cleaning steps that will be performed on the node. A cleaning step is a dictionary with required keys 'interface' and 'step', and optional key 'args'. If specified, the value for 'args' is a keyword variable argument dictionary that is passed to the cleaning step method. in: body required: false type: array conductor: description: | The conductor currently servicing a node. This field is read-only. in: body required: false type: string conductor_group: description: | The conductor group for a node. Case-insensitive string up to 255 characters, containing ``a-z``, ``0-9``, ``_``, ``-``, and ``.``. in: body required: true type: string configdrive: description: | A config drive to be written to a partition on the Node's boot disk. Can be a full gzip'ed and base-64 encoded image or a JSON object with the keys: * ``meta_data`` (optional) - JSON object with the standard meta data. Ironic will provide the defaults for the ``uuid`` and ``name`` fields. * ``network_data`` (optional) - JSON object with networking configuration. * ``user_data`` (optional) - user data. May be a string (which will be UTF-8 encoded); a JSON object, or a JSON array. * ``vendor_data`` (optional) - JSON object with extra vendor data. This parameter is only accepted when setting the state to "active" or "rebuild". 
in: body required: false type: string or object console_enabled: description: | Indicates whether console access is enabled or disabled on this node. in: body required: true type: boolean console_interface: description: | The console interface for a node, e.g. "no-console". in: body required: true type: string created_at: description: | The UTC date and time when the resource was created, `ISO 8601 `_ format. in: body required: true type: string d_bios_setting: description: | Dictionary containing the definition of a Bios setting. It includes the following fields "created_at", "updated_at", "links", "name", "value". in: body required: true type: dictionary default_bios_interface: description: | The default bios interface used for a node with a dynamic driver, if no bios interface is specified for the node. in: body required: true type: string default_boot_interface: description: | The default boot interface used for a node with a dynamic driver, if no boot interface is specified for the node. in: body required: true type: string default_console_interface: description: | The default console interface used for a node with a dynamic driver, if no console interface is specified for the node. in: body required: true type: string default_deploy_interface: description: | The default deploy interface used for a node with a dynamic driver, if no deploy interface is specified for the node. in: body required: true type: string default_inspect_interface: description: | The default inspection interface used for a node with a dynamic driver, if no inspection interface is specified for the node. in: body required: true type: string default_management_interface: description: | The default management interface used for a node with a dynamic driver, if no management interface is specified for the node. 
in: body required: true type: string default_network_interface: description: | The default network interface used for a node with a dynamic driver, if no network interface is specified for the node. in: body required: true type: string default_power_interface: description: | The default power interface used for a node with a dynamic driver, if no power interface is specified for the node. in: body required: true type: string default_raid_interface: description: | The default RAID interface used for a node with a dynamic driver, if no RAID interface is specified for the node. in: body required: true type: string default_rescue_interface: description: | The default rescue interface used for a node with a dynamic driver, if no rescue interface is specified for the node. in: body required: true type: string default_storage_interface: description: | The default storage interface used for a node with a dynamic driver, if no storage interface is specified for the node. in: body required: true type: string default_vendor_interface: description: | The default vendor interface used for a node with a dynamic driver, if no vendor interface is specified for the node. in: body required: true type: string deploy_interface: description: | The deploy interface for a node, e.g. "iscsi". in: body required: true type: string deploy_step: description: | The current deploy step. in: body required: false type: string deploy_template_name: description: | The unique name of the deploy template. in: body required: true type: string deploy_template_steps: description: | The deploy steps of the deploy template. Must be a list containing at least one deploy step. A deploy step is a dictionary with required keys ``interface``, ``step``, ``args``, and ``priority``. The value for ``interface`` is the name of the driver interface. The value for ``step`` is the name of the deploy step method on the driver interface. 
The value for ``args`` is a dictionary of arguments that are passed to the deploy step method. The value for ``priority`` is a non-negative integer priority for the step. A value of ``0`` for ``priority`` will disable that step. in: body required: true type: array description: description: | Descriptive text about the Ironic service. in: body required: true type: string driver_info: description: | All the metadata required by the driver to manage this Node. List of fields varies between drivers, and can be retrieved from the ``/v1/drivers//properties`` resource. in: body required: true type: JSON driver_internal_info: description: | Internal metadata set and stored by the Node's driver. This field is read-only. in: body required: false type: JSON driver_name: description: | The name of the driver. in: body required: true type: string driver_property_links: description: | A list of links to driver properties. in: body required: true type: array drivers: description: | A list of driver objects. in: body required: true type: array enabled_bios_interfaces: description: | The enabled bios interfaces for this driver. in: body required: true type: list enabled_boot_interfaces: description: | The enabled boot interfaces for this driver. in: body required: true type: list enabled_console_interfaces: description: | The enabled console interfaces for this driver. in: body required: true type: list enabled_deploy_interfaces: description: | The enabled deploy interfaces for this driver. in: body required: true type: list enabled_inspect_interfaces: description: | The enabled inspection interfaces for this driver. in: body required: true type: list enabled_management_interfaces: description: | The enabled management interfaces for this driver. in: body required: true type: list enabled_network_interfaces: description: | The enabled network interfaces for this driver. 
in: body required: true type: list enabled_power_interfaces: description: | The enabled power interfaces for this driver. in: body required: true type: list enabled_raid_interfaces: description: | The enabled RAID interfaces for this driver. in: body required: true type: list enabled_rescue_interfaces: description: | The enabled rescue interfaces for this driver. in: body required: true type: list enabled_storage_interfaces: description: | The enabled storage interfaces for this driver. in: body required: true type: list enabled_vendor_interfaces: description: | The enabled vendor interfaces for this driver. in: body required: true type: list extra: description: | A set of one or more arbitrary metadata key and value pairs. in: body required: true type: object fault: description: | The fault indicates the active fault detected by ironic, typically the Node is in "maintenance mode". None means no fault has been detected by ironic. "power failure" indicates ironic failed to retrieve power state from this node. There are other possible types, e.g., "clean failure" and "rescue abort failure". in: body required: false type: string hostname: description: | The hostname of this conductor. in: body required: true type: array hosts: description: | A list of active hosts that support this driver. in: body required: true type: array id: description: | Major API version, eg, "v1" in: body required: true type: string inspect_interface: description: | The interface used for node inspection, e.g. "no-inspect". in: body required: true type: string inspection_finished_at: description: | The UTC date and time when the last hardware inspection finished successfully, `ISO 8601 `_ format. May be "null". in: body required: true type: string inspection_started_at: description: | The UTC date and time when the hardware inspection was started, `ISO 8601 `_ format. May be "null". 
in: body required: true type: string instance_info: description: | Information used to customize the deployed image. May include root partition size, a base 64 encoded config drive, and other metadata. Note that this field is erased automatically when the instance is deleted (this is done by requesting the Node provision state be changed to DELETED). in: body required: true type: JSON instance_uuid: description: | UUID of the Nova instance associated with this Node. in: body required: true type: string internal_info: description: | Internal metadata set and stored by the Port. This field is read-only. in: body required: true type: JSON is_smartnic: description: | Indicates whether the Port is a Smart NIC port. in: body required: false type: boolean last_error: description: | Any error from the most recent (last) transaction that started but failed to finish. in: body required: true type: string lessee: description: | A string or UUID of the tenant who is leasing the object. in: body required: false type: string links: description: | A list of relative links. Includes the self and bookmark links. in: body required: true type: array local_link_connection: description: | The Port binding profile. If specified, must contain ``switch_id`` (only a MAC address or an OpenFlow based datapath_id of the switch are accepted in this field) and ``port_id`` (identifier of the physical port on the switch to which node's port is connected to) fields. ``switch_info`` is an optional string field to be used to store any vendor-specific information. in: body required: true type: JSON maintenance: description: | Whether or not this Node is currently in "maintenance mode". Setting a Node into maintenance mode removes it from the available resource pool and halts some internal automation. This can happen manually (eg, via an API request) or automatically when Ironic detects a hardware fault that prevents communication with the machine. 
in: body required: true type: boolean maintenance_reason: description: | User-settable description of the reason why this Node was placed into maintenance mode in: body required: false type: string management_interface: description: | Interface for out-of-band node management, e.g. "ipmitool". in: body required: true type: string n_description: description: | Informational text about this node. in: body required: true type: string n_portgroups: description: | Links to the collection of portgroups on this node. in: body required: true type: array n_ports: description: | Links to the collection of ports on this node in: body required: true type: array n_properties: description: | Physical characteristics of this Node. Populated by ironic-inspector during inspection. May be edited via the REST API at any time. in: body required: true type: JSON n_states: description: | Links to the collection of states. Note that this resource is also used to request state transitions. in: body required: true type: array n_traits: description: | List of traits for this node. in: body required: true type: array n_vifs: description: | VIFs attached to this node. in: body required: true type: array n_volume: description: | Links to the volume resources. in: body required: true type: array name: description: | The name of the driver. in: body required: true type: string network_interface: description: | Which Network Interface provider to use when plumbing the network connections for this Node. in: body required: true type: string next: description: | A URL to request a next collection of the resource. This parameter is returned when ``limit`` is specified in a request and there remain items. in: body required: false type: string node_name: description: | Human-readable identifier for the Node resource. May be undefined. Certain words are reserved. in: body required: false type: string node_uuid: description: | UUID of the Node this resource belongs to. 
in: body required: true type: string node_vif_ident: description: | The UUID or name of the VIF. in: body required: true type: string nodes: description: | Links to the collection of nodes contained in this chassis. in: body required: true type: array owner: description: | A string or UUID of the tenant who owns the object. in: body required: false type: string passthru_async: description: | If True the passthru function is invoked asynchronously; if False, synchronously. in: body required: true type: boolean passthru_attach: description: | True if the return value will be attached to the response object, and False if the return value will be returned in the response body. in: body required: true type: boolean passthru_description: description: | A description of what the method does, including any method parameters. in: body required: true type: string passthru_http_methods: description: | A list of HTTP methods supported by the vendor function. in: body required: true type: array persistent: description: | Whether the boot device should be set only for the next reboot, or persistently. in: body required: true type: boolean pg_ports: description: | Links to the collection of ports belonging to this portgroup. in: body required: true type: array physical_network: description: | The name of the physical network to which a port is connected. May be empty. in: body required: true type: string port_address: description: | Physical hardware address of this network Port, typically the hardware MAC address. in: body required: true type: string portgroup_address: description: | Physical hardware address of this Portgroup, typically the hardware MAC address. in: body required: false type: string portgroup_internal_info: description: | Internal metadata set and stored by the Portgroup. This field is read-only. in: body required: true type: JSON portgroup_mode: description: | Mode of the port group. 
For possible values, refer to https://www.kernel.org/doc/Documentation/networking/bonding.txt. If not specified in a request to create a port group, it will be set to the value of the ``[DEFAULT]default_portgroup_mode`` configuration option. When set, can not be removed from the port group. in: body required: true type: string portgroup_name: description: | Human-readable identifier for the Portgroup resource. May be undefined. in: body required: false type: string portgroup_properties: description: | Key/value properties related to the port group's configuration. in: body required: true type: JSON portgroup_uuid: description: | UUID of the Portgroup this resource belongs to. in: body required: true type: string portgroups: description: | A collection of Portgroup resources. in: body required: true type: array ports: description: | A collection of Port resources. in: body required: true type: array power_interface: description: | Interface used for performing power actions on the node, e.g. "ipmitool". in: body required: true type: string power_state: description: | The current power state of this Node. Usually, "power on" or "power off", but may be "None" if Ironic is unable to determine the power state (eg, due to hardware failure). in: body required: true type: string power_timeout: description: | Timeout (in seconds) for a power state transition. in: body required: false type: integer properties: description: | A list of links to driver properties. in: body required: true type: array protected: description: | Whether the node is protected from undeploying, rebuilding and deletion. in: body required: false type: boolean protected_reason: description: | The reason the node is marked as protected. in: body required: false type: string provision_state: description: | The current provisioning state of this Node. in: body required: true type: string provision_updated_at: description: | The UTC date and time when the resource was created, `ISO 8601 `_ format. 
``null`` if the node is not being provisioned. in: body required: true type: string pxe_enabled: description: | Indicates whether PXE is enabled or disabled on the Port. in: body required: true type: boolean raid_config: description: | Represents the current RAID configuration of the node. Introduced with the cleaning feature. in: body required: false type: JSON raid_interface: description: | Interface used for configuring RAID on this node, e.g. "no-raid". in: body required: true type: string reason: description: | Specify the reason for setting the Node into maintenance mode. in: body required: false type: string req_allocation_name: description: | The unique name of the Allocation. in: body required: false type: string req_allocation_node: description: | The node UUID or name to create the allocation against, bypassing the normal allocation process. .. warning:: This field must not be used to request a normal allocation with one candidate node, use ``candidate_nodes`` instead. in: body required: false type: string req_allocation_resource_class: description: | The requested resource class for the allocation. Can only be missing when backfilling an allocation (will be set to the node's ``resource_class`` in such case). in: body required: true type: string req_allocation_traits: description: | The list of requested traits for the allocation. in: body required: false type: array req_boot_device: description: | The boot device for a Node, eg. "pxe" or "disk". in: body required: true type: string req_boot_interface: description: | The boot interface for a Node, e.g. "pxe". in: body required: false type: string req_candidate_nodes: description: | The list of nodes (names or UUIDs) that should be considered for this allocation. If not provided, all available nodes will be considered. in: body required: false type: array req_chassis: description: | A ``chassis`` object. 
in: body required: true type: array req_conductor_group: description: | The conductor group for a node. Case-insensitive string up to 255 characters, containing ``a-z``, ``0-9``, ``_``, ``-``, and ``.``. in: body required: false type: string req_console_enabled: description: | Indicates whether console access is enabled or disabled on this node. in: body required: true type: boolean req_console_interface: description: | The console interface for a node, e.g. "no-console". in: body required: false type: string req_deploy_interface: description: | The deploy interface for a node, e.g. "iscsi". in: body required: false type: string req_description: description: | Descriptive text about the Ironic service. in: body required: false type: string req_driver_info: description: | All the metadata required by the driver to manage this Node. List of fields varies between drivers, and can be retrieved from the ``/v1/drivers//properties`` resource. in: body required: false type: JSON req_driver_name: description: | The name of the driver used to manage this Node. in: body required: true type: string req_extra: description: | A set of one or more arbitrary metadata key and value pairs. in: body required: false type: object req_inspect_interface: description: | The interface used for node inspection, e.g. "no-inspect". in: body required: false type: string req_is_smartnic: description: | Indicates whether the Port is a Smart NIC port. in: body required: false type: boolean req_local_link_connection: description: | The Port binding profile. If specified, must contain ``switch_id`` (only a MAC address or an OpenFlow based datapath_id of the switch are accepted in this field) and ``port_id`` (identifier of the physical port on the switch to which node's port is connected to) fields. ``switch_info`` is an optional string field to be used to store any vendor-specific information. 
in: body required: false type: JSON req_management_interface: description: | Interface for out-of-band node management, e.g. "ipmitool". in: body required: false type: string req_network_interface: description: | Which Network Interface provider to use when plumbing the network connections for this Node. in: body required: false type: string req_node_uuid: description: | UUID of the Node this resource belongs to. in: body required: true type: string req_node_vif_ident: description: | The UUID or name of the VIF. in: body required: true type: string req_persistent: description: | Whether the boot device should be set only for the next reboot, or persistently. in: body required: false type: boolean req_physical_network: description: | The name of the physical network to which a port is connected. May be empty. in: body required: false type: string req_port_address: description: | Physical hardware address of this network Port, typically the hardware MAC address. in: body required: true type: string req_portgroup_address: description: | Physical hardware address of this Portgroup, typically the hardware MAC address. in: body required: false type: string req_portgroup_uuid: description: | UUID of the Portgroup this resource belongs to. in: body required: false type: string req_power_interface: description: | Interface used for performing power actions on the node, e.g. "ipmitool". in: body required: false type: string req_properties: description: | Physical characteristics of this Node. Populated during inspection, if performed. Can be edited via the REST API at any time. in: body required: false type: JSON req_provision_state: description: | The requested provisioning state of this Node. in: body required: true type: string req_pxe_enabled: description: | Indicates whether PXE is enabled or disabled on the Port. in: body required: false type: boolean req_raid_interface: description: | Interface used for configuring RAID on this node, e.g. "no-raid". 
in: body required: false type: string req_rescue_interface: description: | The interface used for node rescue, e.g. "no-rescue". in: body required: false type: string req_resource_class_create: description: | A string which can be used by external schedulers to identify this Node as a unit of a specific type of resource. in: body required: false type: string req_storage_interface: description: | Interface used for attaching and detaching volumes on this node, e.g. "cinder". in: body required: false type: string req_target_power_state: description: | If a power state transition has been requested, this field represents the requested (ie, "target") state either "power on", "power off", "rebooting", "soft power off" or "soft rebooting". in: body required: true type: string req_target_raid_config: description: | Represents the requested RAID configuration of the node, which will be applied when the Node next transitions through the CLEANING state. Introduced with the cleaning feature. in: body required: true type: JSON req_uuid: description: | The UUID for the resource. in: body required: false type: string req_vendor_interface: description: | Interface for vendor-specific functionality on this node, e.g. "no-vendor". in: body required: false type: string requested_provision_state: description: | One of the provisioning verbs: manage, provide, inspect, clean, active, rebuild, delete (deleted), abort, adopt, rescue, unrescue. in: body required: true type: string rescue_interface: description: | The interface used for node rescue, e.g. "no-rescue". in: body required: true type: string rescue_password: description: | Non-empty password used to configure rescue ramdisk during node rescue operation. in: body required: false type: string reservation: description: | The ``name`` of an Ironic Conductor host which is holding a lock on this node, if a lock is held. Usually "null", but this field can be useful for debugging. 
in: body required: true type: string resource_class: description: | A string which can be used by external schedulers to identify this Node as a unit of a specific type of resource. For more details, see: https://docs.openstack.org/ironic/latest/install/configure-nova-flavors.html in: body required: true type: string response_driver_type: description: | Type of this driver ("classic" or "dynamic"). in: body required: true type: string retired: description: | Whether the node is retired and can hence no longer be provided, i.e. move from ``manageable`` to ``available``, and will end up in ``manageable`` after cleaning (rather than ``available``). in: body required: false type: boolean retired_reason: description: | The reason the node is marked as retired. in: body required: false type: string standalone_ports_supported: description: | Indicates whether ports that are members of this portgroup can be used as stand-alone ports. in: body required: true type: boolean storage_interface: description: | Interface used for attaching and detaching volumes on this node, e.g. "cinder". in: body required: true type: string supported_boot_devices: description: | List of boot devices which this Node's driver supports. in: body required: true type: array target_power_state: description: | If a power state transition has been requested, this field represents the requested (ie, "target") state, either "power on" or "power off". in: body required: true type: string target_provision_state: description: | If a provisioning action has been requested, this field represents the requested (ie, "target") state. Note that a Node may go through several states during its transition to this target state. 
For instance, when requesting an instance be deployed to an AVAILABLE Node, the Node may go through the following state change progression: AVAILABLE -> DEPLOYING -> DEPLOYWAIT -> DEPLOYING -> ACTIVE in: body required: true type: string target_raid_config: description: | Represents the requested RAID configuration of the node, which will be applied when the Node next transitions through the CLEANING state. Introduced with the cleaning feature. in: body required: true type: JSON updated_at: description: | The UTC date and time when the resource was updated, `ISO 8601 `_ format. May be "null". in: body required: true type: string uuid: description: | The UUID for the resource. in: body required: true type: string # variables returned from node-validate v_boot: description: | Status of the "boot" interface in: body required: true type: object v_console: description: | Status of the "console" interface in: body required: true type: object v_deploy: description: | Status of the "deploy" interface in: body required: true type: object v_inspect: description: | Status of the "inspect" interface in: body required: true type: object v_management: description: | Status of the "management" interface in: body required: true type: object v_network: description: | Status of the "network" interface in: body required: true type: object v_power: description: | Status of the "power" interface in: body required: true type: object v_raid: description: | Status of the "raid" interface in: body required: true type: object v_rescue: description: | Status of the "rescue" interface in: body required: true type: object v_storage: description: | Status of the "storage" interface in: body required: true type: object vendor_interface: description: | Interface for vendor-specific functionality on this node, e.g. "no-vendor". in: body required: true type: string version: description: | Versioning of this API response, eg. "1.22". 
in: body required: true type: string versions: description: | Array of information about currently supported versions. in: body required: true type: array # variables returned from volume-connector volume_connector_connector_id: description: | The identifier of Volume connector. The identifier format depends on the ``type`` of the Volume connector, eg "iqn.2017-05.org.openstack:01:d9a51732c3f" if the ``type`` is "iqn", "192.168.1.2" if the ``type`` is "ip". in: body required: true type: string volume_connector_type: description: | The type of Volume connector such as "iqn", "ip", "wwnn" and "wwpn". in: body required: true type: string volume_connectors: description: | A collection of Volume connector resources. in: body required: true type: array volume_connectors_link: description: | Links to a collection of Volume connector resources. in: body required: true type: array # variables returned from volume-target volume_target_boot_index: description: | The boot index of the Volume target. "0" indicates that this volume is used as a boot volume. in: body required: true type: string volume_target_properties: description: | A set of physical information of the volume such as the identifier (eg. IQN) and LUN number of the volume. This information is used to connect the node to the volume by the storage interface. The contents depend on the volume type. in: body required: true type: object volume_target_volume_id: description: | The identifier of the volume. This ID is used by storage interface to distinguish volumes. in: body required: true type: string volume_target_volume_type: description: | The type of Volume target such as 'iscsi' and 'fibre_channel'. in: body required: true type: string volume_targets: description: | A collection of Volume target resources. in: body required: true type: array volume_targets_link: description: | Links to a collection of Volume target resources. 
in: body required: true type: array ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1703992 ironic-14.0.1.dev163/api-ref/source/samples/0000755000175000017500000000000000000000000020643 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/allocation-create-request-2.json0000644000175000017500000000014100000000000026745 0ustar00coreycorey00000000000000{ "name": "allocation-2", "resource_class": "bm-large", "traits": ["CUSTOM_GOLD"] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/allocation-create-request.json0000644000175000017500000000010100000000000026602 0ustar00coreycorey00000000000000{ "name": "allocation-1", "resource_class": "bm-large" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/allocation-create-response.json0000644000175000017500000000111100000000000026752 0ustar00coreycorey00000000000000{ "candidate_nodes": [], "created_at": "2019-02-20T09:43:58+00:00", "extra": {}, "last_error": null, "links": [ { "href": "http://127.0.0.1:6385/v1/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", "rel": "self" }, { "href": "http://127.0.0.1:6385/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", "rel": "bookmark" } ], "name": "allocation-1", "node_uuid": null, "owner": null, "resource_class": "bm-large", "state": "allocating", "traits": [], "updated_at": null, "uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/allocation-show-response.json0000644000175000017500000000117600000000000026502 0ustar00coreycorey00000000000000{ 
"candidate_nodes": [], "created_at": "2019-02-20T09:43:58+00:00", "extra": {}, "last_error": null, "links": [ { "href": "http://127.0.0.1:6385/v1/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", "rel": "self" }, { "href": "http://127.0.0.1:6385/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", "rel": "bookmark" } ], "name": "allocation-1", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "owner": null, "resource_class": "bm-large", "state": "active", "traits": [], "updated_at": "2019-02-20T09:43:58+00:00", "uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/allocation-update-request.json0000644000175000017500000000013200000000000026625 0ustar00coreycorey00000000000000[ { "op": "add", "path": "/extra/foo", "value": "bar" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/allocation-update-response.json0000644000175000017500000000120200000000000026772 0ustar00coreycorey00000000000000{ "node_uuid": null, "uuid": "241db410-7b04-4b1c-87ae-4e336435db08", "links": [ { "href": "http://10.66.169.122/v1/allocations/241db410-7b04-4b1c-87ae-4e336435db08", "rel": "self" }, { "href": "http://10.66.169.122/allocations/241db410-7b04-4b1c-87ae-4e336435db08", "rel": "bookmark" } ], "extra": { "foo": "bar" }, "last_error": null, "created_at": "2019-06-04T07:46:25+00:00", "owner": null, "resource_class": "CUSTOM_GOLD", "updated_at": "2019-06-06T03:28:19.496960+00:00", "traits": [], "state": "error", "candidate_nodes": [], "name": "test_allocation" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/allocations-list-response.json0000644000175000017500000000311000000000000026646 0ustar00coreycorey00000000000000{ "allocations": [ { 
"candidate_nodes": [], "created_at": "2019-02-20T09:43:58+00:00", "extra": {}, "last_error": null, "links": [ { "href": "http://127.0.0.1:6385/v1/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", "rel": "self" }, { "href": "http://127.0.0.1:6385/allocations/5344a3e2-978a-444e-990a-cbf47c62ef88", "rel": "bookmark" } ], "name": "allocation-1", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "owner": null, "resource_class": "bm-large", "state": "active", "traits": [], "updated_at": "2019-02-20T09:43:58+00:00", "uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88" }, { "candidate_nodes": [], "created_at": "2019-02-20T09:43:58+00:00", "extra": {}, "last_error": "Failed to process allocation eff80f47-75f0-4d41-b1aa-cf07c201adac: no available nodes match the resource class bm-large.", "links": [ { "href": "http://127.0.0.1:6385/v1/allocations/eff80f47-75f0-4d41-b1aa-cf07c201adac", "rel": "self" }, { "href": "http://127.0.0.1:6385/allocations/eff80f47-75f0-4d41-b1aa-cf07c201adac", "rel": "bookmark" } ], "name": "allocation-2", "node_uuid": null, "owner": null, "resource_class": "bm-large", "state": "error", "traits": [ "CUSTOM_GOLD" ], "updated_at": "2019-02-20T09:43:58+00:00", "uuid": "eff80f47-75f0-4d41-b1aa-cf07c201adac" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/api-root-response.json0000644000175000017500000000113100000000000025120 0ustar00coreycorey00000000000000{ "default_version": { "id": "v1", "links": [ { "href": "http://127.0.0.1:6385/v1/", "rel": "self" } ], "min_version": "1.1", "status": "CURRENT", "version": "1.37" }, "description": "Ironic is an OpenStack project which aims to provision baremetal machines.", "name": "OpenStack Ironic API", "versions": [ { "id": "v1", "links": [ { "href": "http://127.0.0.1:6385/v1/", "rel": "self" } ], "min_version": "1.1", "status": "CURRENT", "version": "1.37" } ] } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/api-v1-root-response.json0000644000175000017500000000353700000000000025460 0ustar00coreycorey00000000000000{ "chassis": [ { "href": "http://127.0.0.1:6385/v1/chassis/", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/", "rel": "bookmark" } ], "drivers": [ { "href": "http://127.0.0.1:6385/v1/drivers/", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/", "rel": "bookmark" } ], "heartbeat": [ { "href": "http://127.0.0.1:6385/v1/heartbeat/", "rel": "self" }, { "href": "http://127.0.0.1:6385/heartbeat/", "rel": "bookmark" } ], "id": "v1", "links": [ { "href": "http://127.0.0.1:6385/v1/", "rel": "self" }, { "href": "https://docs.openstack.org/ironic/latest/contributor/webapi.html", "rel": "describedby", "type": "text/html" } ], "lookup": [ { "href": "http://127.0.0.1:6385/v1/lookup/", "rel": "self" }, { "href": "http://127.0.0.1:6385/lookup/", "rel": "bookmark" } ], "media_types": [ { "base": "application/json", "type": "application/vnd.openstack.ironic.v1+json" } ], "nodes": [ { "href": "http://127.0.0.1:6385/v1/nodes/", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/", "rel": "bookmark" } ], "portgroups": [ { "href": "http://127.0.0.1:6385/v1/portgroups/", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/", "rel": "bookmark" } ], "ports": [ { "href": "http://127.0.0.1:6385/v1/ports/", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/", "rel": "bookmark" } ], "volume": [ { "href": "http://127.0.0.1:6385/v1/volume/", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/chassis-create-request.json0000644000175000017500000000005000000000000026115 0ustar00coreycorey00000000000000{ "description": 
"Sample chassis" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/chassis-list-details-response.json0000644000175000017500000000150400000000000027423 0ustar00coreycorey00000000000000{ "chassis": [ { "created_at": "2016-08-18T22:28:48.643434+11:11", "description": "Sample chassis", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "bookmark" } ], "nodes": [ { "href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1/nodes", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1/nodes", "rel": "bookmark" } ], "updated_at": null, "uuid": "dff29d23-1ded-43b4-8ae1-5eebb3e30de1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/chassis-list-response.json0000644000175000017500000000065100000000000026002 0ustar00coreycorey00000000000000{ "chassis": [ { "description": "Sample chassis", "links": [ { "href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "bookmark" } ], "uuid": "dff29d23-1ded-43b4-8ae1-5eebb3e30de1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/chassis-show-response.json0000644000175000017500000000130100000000000026000 0ustar00coreycorey00000000000000{ "created_at": "2016-08-18T22:28:48.643434+11:11", "description": "Sample chassis", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "self" }, { "href": 
"http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "bookmark" } ], "nodes": [ { "href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1/nodes", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1/nodes", "rel": "bookmark" } ], "updated_at": null, "uuid": "dff29d23-1ded-43b4-8ae1-5eebb3e30de1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/chassis-update-request.json0000644000175000017500000000015400000000000026141 0ustar00coreycorey00000000000000[ { "op": "replace", "path": "/description", "value": "Updated Chassis" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/chassis-update-response.json0000644000175000017500000000134000000000000026305 0ustar00coreycorey00000000000000{ "created_at": "2016-08-18T22:28:48.643434+11:11", "description": "Updated Chassis", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1", "rel": "bookmark" } ], "nodes": [ { "href": "http://127.0.0.1:6385/v1/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1/nodes", "rel": "self" }, { "href": "http://127.0.0.1:6385/chassis/dff29d23-1ded-43b4-8ae1-5eebb3e30de1/nodes", "rel": "bookmark" } ], "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "dff29d23-1ded-43b4-8ae1-5eebb3e30de1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/conductor-list-details-response.json0000644000175000017500000000204400000000000027766 0ustar00coreycorey00000000000000{ "conductors": [ { "links": [ { "href": "http://127.0.0.1:6385/v1/conductors/compute1.localdomain", 
"rel": "self" }, { "href": "http://127.0.0.1:6385/conductors/compute1.localdomain", "rel": "bookmark" } ], "created_at": "2018-08-07T08:39:21+00:00", "hostname": "compute1.localdomain", "conductor_group": "", "updated_at": "2018-11-30T07:07:23+00:00", "alive": false, "drivers": [ "ipmi" ] }, { "links": [ { "href": "http://127.0.0.1:6385/v1/conductors/compute2.localdomain", "rel": "self" }, { "href": "http://127.0.0.1:6385/conductors/compute2.localdomain", "rel": "bookmark" } ], "created_at": "2018-12-05T07:03:19+00:00", "hostname": "compute2.localdomain", "conductor_group": "", "updated_at": "2018-12-05T07:03:21+00:00", "alive": true, "drivers": [ "ipmi" ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/conductor-list-response.json0000644000175000017500000000141100000000000026340 0ustar00coreycorey00000000000000{ "conductors": [ { "hostname": "compute1.localdomain", "conductor_group": "", "links": [ { "href": "http://127.0.0.1:6385/v1/conductors/compute1.localdomain", "rel": "self" }, { "href": "http://127.0.0.1:6385/conductors/compute1.localdomain", "rel": "bookmark" } ], "alive": false }, { "hostname": "compute2.localdomain", "conductor_group": "", "links": [ { "href": "http://127.0.0.1:6385/v1/conductors/compute2.localdomain", "rel": "self" }, { "href": "http://127.0.0.1:6385/conductors/compute2.localdomain", "rel": "bookmark" } ], "alive": true } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/conductor-show-response.json0000644000175000017500000000066400000000000026356 0ustar00coreycorey00000000000000{ "links": [ { "href": "http://127.0.0.1:6385/v1/conductors/compute2.localdomain", "rel": "self" }, { "href": "http://127.0.0.1:6385/conductors/compute2.localdomain", "rel": "bookmark" } ], "created_at": "2018-12-05T07:03:19+00:00", "hostname": 
"compute2.localdomain", "conductor_group": "", "updated_at": "2018-12-05T07:03:21+00:00", "alive": true, "drivers": [ "ipmi" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/deploy-template-create-request.json0000644000175000017500000000065100000000000027574 0ustar00coreycorey00000000000000{ "extra": {}, "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "priority": 150 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/deploy-template-create-response.json0000644000175000017500000000132700000000000027743 0ustar00coreycorey00000000000000{ "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "self" }, { "href": "http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "bookmark" } ], "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "interface": "bios", "priority": 150, "step": "apply_configuration" } ], "updated_at": null, "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/deploy-template-detail-response.json0000644000175000017500000000156700000000000027750 0ustar00coreycorey00000000000000{ "deploy_templates": [ { "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "self" }, { "href": 
"http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "bookmark" } ], "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "interface": "bios", "priority": 150, "step": "apply_configuration" } ], "updated_at": null, "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/deploy-template-list-response.json0000644000175000017500000000071700000000000027455 0ustar00coreycorey00000000000000{ "deploy_templates": [ { "links": [ { "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "self" }, { "href": "http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "bookmark" } ], "name": "CUSTOM_HYPERTHREADING_ON", "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/deploy-template-show-response.json0000644000175000017500000000132700000000000027460 0ustar00coreycorey00000000000000{ "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "self" }, { "href": "http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "bookmark" } ], "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "interface": "bios", "priority": 150, "step": "apply_configuration" } ], "updated_at": null, "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 
ironic-14.0.1.dev163/api-ref/source/samples/deploy-template-update-request.json0000644000175000017500000000013500000000000027610 0ustar00coreycorey00000000000000[ { "path" : "/name", "value" : "CUSTOM_HT_ON", "op" : "replace" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/deploy-template-update-response.json0000644000175000017500000000135100000000000027757 0ustar00coreycorey00000000000000{ "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://10.60.253.180:6385/v1/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "self" }, { "href": "http://10.60.253.180:6385/deploy_templates/bbb45f41-d4bc-4307-8d1d-32f95ce1e920", "rel": "bookmark" } ], "name": "CUSTOM_HT_ON", "steps": [ { "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "interface": "bios", "priority": 150, "step": "apply_configuration" } ], "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/driver-get-response.json0000644000175000017500000000316300000000000025445 0ustar00coreycorey00000000000000{ "default_bios_interface": "no-bios", "default_boot_interface": "pxe", "default_console_interface": "no-console", "default_deploy_interface": "iscsi", "default_inspect_interface": "no-inspect", "default_management_interface": "ipmitool", "default_network_interface": "flat", "default_power_interface": "ipmitool", "default_raid_interface": "no-raid", "default_rescue_interface": "no-rescue", "default_storage_interface": "noop", "default_vendor_interface": "no-vendor", "enabled_bios_interfaces": [ "no-bios" ], "enabled_boot_interfaces": [ "pxe" ], "enabled_console_interfaces": [ "no-console" ], "enabled_deploy_interfaces": [ "iscsi", "direct" ], 
"enabled_inspect_interfaces": [ "no-inspect" ], "enabled_management_interfaces": [ "ipmitool" ], "enabled_network_interfaces": [ "flat", "noop" ], "enabled_power_interfaces": [ "ipmitool" ], "enabled_raid_interfaces": [ "no-raid", "agent" ], "enabled_rescue_interfaces": [ "no-rescue" ], "enabled_storage_interfaces": [ "noop" ], "enabled_vendor_interfaces": [ "no-vendor" ], "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/ipmi", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/ipmi", "rel": "bookmark" } ], "name": "ipmi", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/ipmi/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/ipmi/properties", "rel": "bookmark" } ], "type": "dynamic" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/driver-logical-disk-properties-response.json0000644000175000017500000000307100000000000031420 0ustar00coreycorey00000000000000{ "controller": "Controller to use for this logical disk. If not specified, the driver will choose a suitable RAID controller on the bare metal node. Optional.", "disk_type": "The type of disk preferred. Valid values are 'hdd' and 'ssd'. If this is not specified, disk type will not be a selection criterion for choosing backing physical disks. Optional.", "interface_type": "The interface type of disk. Valid values are 'sata', 'scsi' and 'sas'. If this is not specified, interface type will not be a selection criterion for choosing backing physical disks. Optional.", "is_root_volume": "Specifies whether this disk is a root volume. By default, this is False. Optional.", "number_of_physical_disks": "Number of physical disks to use for this logical disk. By default, the driver uses the minimum number of disks required for that RAID level. Optional.", "physical_disks": "The physical disks to use for this logical disk. 
If not specified, the driver will choose suitable physical disks to use. Optional.", "raid_level": "RAID level for the logical disk. Valid values are 'JBOD', '0', '1', '2', '5', '6', '1+0', '5+0' and '6+0'. Required.", "share_physical_disks": "Specifies whether other logical disks can share physical disks with this logical disk. By default, this is False. Optional.", "size_gb": "Size in GiB (Integer) for the logical disk. Use 'MAX' as size_gb if this logical disk is supposed to use the rest of the space available. Required.", "volume_name": "Name of the volume to be created. If this is not specified, it will be auto-generated. Optional." } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/driver-property-response.json0000644000175000017500000000513200000000000026550 0ustar00coreycorey00000000000000{ "deploy_forces_oob_reboot": "Whether Ironic should force a reboot of the Node via the out-of-band channel after deployment is complete. Provides compatibility with older deploy ramdisks. Defaults to False. Optional.", "deploy_kernel": "UUID (from Glance) of the deployment kernel. Required.", "deploy_ramdisk": "UUID (from Glance) of the ramdisk that is mounted at boot time. Required.", "image_http_proxy": "URL of a proxy server for HTTP connections. Optional.", "image_https_proxy": "URL of a proxy server for HTTPS connections. Optional.", "image_no_proxy": "A comma-separated list of host names, IP addresses and domain names (with optional :port) that will be excluded from proxying. To denote a domain name, use a dot to prefix the domain name. This value will be ignored if ``image_http_proxy`` and ``image_https_proxy`` are not specified. Optional.", "ipmi_address": "IP address or hostname of the node. Required.", "ipmi_bridging": "bridging_type; default is \"no\". One of \"single\", \"dual\", \"no\". 
Optional.", "ipmi_disable_boot_timeout": "By default ironic will send a raw IPMI command to disable the 60 second timeout for booting. Setting this option to False will NOT send that command; default value is True. Optional.", "ipmi_force_boot_device": "Whether Ironic should specify the boot device to the BMC each time the server is turned on, eg. because the BMC is not capable of remembering the selected boot device across power cycles; default value is False. Optional.", "ipmi_local_address": "local IPMB address for bridged requests. Used only if ipmi_bridging is set to \"single\" or \"dual\". Optional.", "ipmi_password": "password. Optional.", "ipmi_port": "remote IPMI RMCP port. Optional.", "ipmi_priv_level": "privilege level; default is ADMINISTRATOR. One of ADMINISTRATOR, CALLBACK, OPERATOR, USER. Optional.", "ipmi_protocol_version": "the version of the IPMI protocol; default is \"2.0\". One of \"1.5\", \"2.0\". Optional.", "ipmi_target_address": "destination address for bridged request. Required only if ipmi_bridging is set to \"single\" or \"dual\".", "ipmi_target_channel": "destination channel for bridged request. Required only if ipmi_bridging is set to \"single\" or \"dual\".", "ipmi_terminal_port": "node's UDP port to connect to. Only required for console access.", "ipmi_transit_address": "transit address for bridged request. Required only if ipmi_bridging is set to \"dual\".", "ipmi_transit_channel": "transit channel for bridged request. Required only if ipmi_bridging is set to \"dual\".", "ipmi_username": "username; default is NULL user. Optional." 
} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/drivers-list-detail-response.json0000644000175000017500000001210600000000000027261 0ustar00coreycorey00000000000000{ "drivers": [ { "default_bios_interface": null, "default_boot_interface": null, "default_console_interface": null, "default_deploy_interface": null, "default_inspect_interface": null, "default_management_interface": null, "default_network_interface": null, "default_power_interface": null, "default_raid_interface": null, "default_rescue_interface": null, "default_storage_interface": null, "default_vendor_interface": null, "enabled_bios_interfaces": null, "enabled_boot_interfaces": null, "enabled_console_interfaces": null, "enabled_deploy_interfaces": null, "enabled_inspect_interfaces": null, "enabled_management_interfaces": null, "enabled_network_interfaces": null, "enabled_power_interfaces": null, "enabled_raid_interfaces": null, "enabled_rescue_interfaces": null, "enabled_storage_interfaces": null, "enabled_vendor_interfaces": null, "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/agent_ipmitool", "rel": "bookmark" } ], "name": "agent_ipmitool", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/agent_ipmitool/properties", "rel": "bookmark" } ], "type": "classic" }, { "default_bios_interface": null, "default_boot_interface": null, "default_console_interface": null, "default_deploy_interface": null, "default_inspect_interface": null, "default_management_interface": null, "default_network_interface": null, "default_power_interface": null, "default_raid_interface": null, "default_rescue_interface": null, "default_storage_interface": null, "default_vendor_interface": null, "enabled_bios_interfaces": 
null, "enabled_boot_interfaces": null, "enabled_console_interfaces": null, "enabled_deploy_interfaces": null, "enabled_inspect_interfaces": null, "enabled_management_interfaces": null, "enabled_network_interfaces": null, "enabled_power_interfaces": null, "enabled_raid_interfaces": null, "enabled_rescue_interfaces": null, "enabled_storage_interfaces": null, "enabled_vendor_interfaces": null, "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/fake", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/fake", "rel": "bookmark" } ], "name": "fake", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/fake/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/fake/properties", "rel": "bookmark" } ], "type": "classic" }, { "default_bios_interface": "no-bios", "default_boot_interface": "pxe", "default_console_interface": "no-console", "default_deploy_interface": "iscsi", "default_inspect_interface": "no-inspect", "default_management_interface": "ipmitool", "default_network_interface": "flat", "default_power_interface": "ipmitool", "default_raid_interface": "no-raid", "default_rescue_interface": "no-rescue", "default_storage_interface": "noop", "default_vendor_interface": "no-vendor", "enabled_bios_interfaces": [ "no-bios" ], "enabled_boot_interfaces": [ "pxe" ], "enabled_console_interfaces": [ "no-console" ], "enabled_deploy_interfaces": [ "iscsi", "direct" ], "enabled_inspect_interfaces": [ "no-inspect" ], "enabled_management_interfaces": [ "ipmitool" ], "enabled_network_interfaces": [ "flat", "noop" ], "enabled_power_interfaces": [ "ipmitool" ], "enabled_raid_interfaces": [ "no-raid", "agent" ], "enabled_rescue_interfaces": [ "no-rescue" ], "enabled_storage_interfaces": [ "noop" ], "enabled_vendor_interfaces": [ "no-vendor" ], "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/ipmi", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/ipmi", "rel": "bookmark" } ], 
"name": "ipmi", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/ipmi/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/ipmi/properties", "rel": "bookmark" } ], "type": "dynamic" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/drivers-list-response.json0000644000175000017500000000353100000000000026023 0ustar00coreycorey00000000000000{ "drivers": [ { "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/agent_ipmitool", "rel": "bookmark" } ], "name": "agent_ipmitool", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/agent_ipmitool/properties", "rel": "bookmark" } ], "type": "classic" }, { "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/fake", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/fake", "rel": "bookmark" } ], "name": "fake", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/fake/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/fake/properties", "rel": "bookmark" } ], "type": "classic" }, { "hosts": [ "897ab1dad809" ], "links": [ { "href": "http://127.0.0.1:6385/v1/drivers/ipmi", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/ipmi", "rel": "bookmark" } ], "name": "ipmi", "properties": [ { "href": "http://127.0.0.1:6385/v1/drivers/ipmi/properties", "rel": "self" }, { "href": "http://127.0.0.1:6385/drivers/ipmi/properties", "rel": "bookmark" } ], "type": "dynamic" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/lookup-node-response.json0000644000175000017500000000137500000000000025634 0ustar00coreycorey00000000000000{ 
"config": { "heartbeat_timeout": 300, "metrics": { "backend": "noop", "global_prefix": null, "prepend_host": false, "prepend_host_reverse": true, "prepend_uuid": false }, "metrics_statsd": { "statsd_host": "localhost", "statsd_port": 8125 } }, "node": { "driver_internal_info": { "clean_steps": null }, "instance_info": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "bookmark" } ], "properties": {}, "uuid": "6d85703a-565d-469a-96ce-30b6de53079d" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-bios-detail-response.json0000644000175000017500000000076700000000000026523 0ustar00coreycorey00000000000000{ "virtualization": { "created_at": "2016-08-18T22:28:49.653974+00:00", "updated_at": "2016-08-18T22:28:49.653974+00:00", "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/bios/virtualization", "rel": "self" }, { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/bios/virtualization", "rel": "bookmark" } ], "name": "virtualization", "value": "on" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-bios-list-response.json0000644000175000017500000000102500000000000026220 0ustar00coreycorey00000000000000{ "bios": [ { "created_at": "2016-08-18T22:28:49.653974+00:00", "updated_at": "2016-08-18T22:28:49.653974+00:00", "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/bios/virtualization", "rel": "self" }, { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/bios/virtualization", "rel": "bookmark" } ], "name": "virtualization", "value": "on" } ] } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-create-request-classic.json0000644000175000017500000000031000000000000027023 0ustar00coreycorey00000000000000{ "name": "test_node_classic", "driver": "agent_ipmitool", "driver_info": { "ipmi_username": "ADMIN", "ipmi_password": "password" }, "resource_class": "bm-large" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-create-request-dynamic.json0000644000175000017500000000034100000000000027032 0ustar00coreycorey00000000000000{ "name": "test_node_dynamic", "driver": "ipmi", "driver_info": { "ipmi_username": "ADMIN", "ipmi_password": "password" }, "power_interface": "ipmitool", "resource_class": "bm-large" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-create-response.json0000644000175000017500000000530600000000000025564 0ustar00coreycorey00000000000000{ "allocation_uuid": null, "boot_interface": null, "chassis_uuid": null, "clean_step": {}, "conductor_group": "group-1", "console_enabled": false, "console_interface": null, "created_at": "2016-08-18T22:28:48.643434+11:11", "deploy_interface": null, "deploy_step": {}, "description": null, "driver": "agent_ipmitool", "driver_info": { "ipmi_password": "******", "ipmi_username": "ADMIN" }, "driver_internal_info": {}, "extra": {}, "inspect_interface": null, "inspection_finished_at": null, "inspection_started_at": null, "instance_info": {}, "instance_uuid": null, "last_error": null, "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "bookmark" } ], "maintenance": false, "maintenance_reason": 
null, "management_interface": null, "name": "test_node_classic", "network_interface": "flat", "owner": null, "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "bookmark" } ], "ports": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "bookmark" } ], "power_interface": null, "power_state": null, "properties": {}, "protected": false, "protected_reason": null, "provision_state": "enroll", "provision_updated_at": null, "raid_config": {}, "raid_interface": null, "rescue_interface": null, "reservation": null, "resource_class": "bm-large", "retired": false, "retired_reason": null, "states": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "bookmark" } ], "storage_interface": "noop", "target_power_state": null, "target_provision_state": null, "target_raid_config": {}, "traits": [], "updated_at": null, "uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "vendor_interface": null, "volume": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-get-boot-device-response.json0000644000175000017500000000006200000000000027270 0ustar00coreycorey00000000000000{ "boot_device": "pxe", "persistent": false } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 
ironic-14.0.1.dev163/api-ref/source/samples/node-get-state-response.json0000644000175000017500000000044400000000000026214 0ustar00coreycorey00000000000000{ "console_enabled": false, "last_error": null, "power_state": "power off", "provision_state": "available", "provision_updated_at": "2016-08-18T22:28:49.946416+00:00", "raid_config": {}, "target_power_state": null, "target_provision_state": null, "target_raid_config": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-get-supported-boot-devices-response.json0000644000175000017500000000006000000000000031474 0ustar00coreycorey00000000000000{ "supported_boot_devices": [ "pxe" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-inject-nmi.json0000644000175000017500000000000300000000000024507 0ustar00coreycorey00000000000000{} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-maintenance-request.json0000644000175000017500000000005400000000000026430 0ustar00coreycorey00000000000000{ "reason": "Replacing the hard drive" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-port-detail-response.json0000644000175000017500000000165600000000000026551 0ustar00coreycorey00000000000000{ "ports": [ { "address": "22:22:22:22:22:22", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "local_link_connection": { "port_id": "Ethernet3/1", "switch_id": 
"0a:1b:2c:3d:4e:5f", "switch_info": "switch1" }, "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "physical_network": "physnet1", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "pxe_enabled": true, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-port-list-response.json0000644000175000017500000000064200000000000026254 0ustar00coreycorey00000000000000{ "ports": [ { "address": "22:22:22:22:22:22", "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-portgroup-detail-response.json0000644000175000017500000000210700000000000027616 0ustar00coreycorey00000000000000{ "portgroups": [ { "address": "22:22:22:22:22:22", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "bookmark" } ], "mode": "active-backup", "name": "test_portgroup", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "ports": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "bookmark" } ], "properties": {}, "standalone_ports_supported": true, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a" 
} ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-portgroup-list-response.json0000644000175000017500000000072100000000000027327 0ustar00coreycorey00000000000000{ "portgroups": [ { "address": "22:22:22:22:22:22", "links": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "bookmark" } ], "name": "test_portgroup", "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-set-active-state.json0000644000175000017500000000014600000000000025644 0ustar00coreycorey00000000000000{ "target": "active", "configdrive": "http://127.0.0.1/images/test-node-config-drive.iso.gz" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-set-available-state.json0000644000175000017500000000003400000000000026305 0ustar00coreycorey00000000000000{ "target": "provide" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-set-boot-device.json0000644000175000017500000000006600000000000025454 0ustar00coreycorey00000000000000{ "boot_device": "pxe", "persistent": false } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-set-clean-state.json0000644000175000017500000000033100000000000025447 0ustar00coreycorey00000000000000{ "target": "clean", "clean_steps": [ { "interface": "deploy", "step": "upgrade_firmware", "args": { "force": "True" } } ] } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-set-manage-state.json0000644000175000017500000000003300000000000025614 0ustar00coreycorey00000000000000{ "target": "manage" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-set-power-off.json0000644000175000017500000000003500000000000025154 0ustar00coreycorey00000000000000{ "target": "power off" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-set-raid-request.json0000644000175000017500000000021000000000000025650 0ustar00coreycorey00000000000000{ "logical_disks" : [ { "size_gb" : 100, "is_root_volume" : true, "raid_level" : "1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-set-soft-power-off.json0000644000175000017500000000006700000000000026132 0ustar00coreycorey00000000000000{ "target": "soft power off", "timeout": 300 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-set-traits-request.json0000644000175000017500000000010000000000000026235 0ustar00coreycorey00000000000000{ "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-show-response.json0000644000175000017500000000550400000000000025301 0ustar00coreycorey00000000000000{ "allocation_uuid": null, "boot_interface": null, "chassis_uuid": null, "clean_step": {}, "conductor": "compute1.localdomain", "conductor_group": "group-1", "console_enabled": false, 
"console_interface": null, "created_at": "2016-08-18T22:28:48.643434+11:11", "deploy_interface": null, "deploy_step": {}, "description": null, "driver": "fake", "driver_info": { "ipmi_password": "******", "ipmi_username": "ADMIN" }, "driver_internal_info": { "clean_steps": null }, "extra": {}, "inspect_interface": null, "inspection_finished_at": null, "inspection_started_at": null, "instance_info": {}, "instance_uuid": null, "last_error": null, "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "bookmark" } ], "maintenance": false, "maintenance_reason": null, "management_interface": null, "name": "test_node_classic", "network_interface": "flat", "owner": null, "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "bookmark" } ], "ports": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "bookmark" } ], "power_interface": null, "power_state": "power off", "properties": {}, "protected": false, "protected_reason": null, "provision_state": "available", "provision_updated_at": "2016-08-18T22:28:49.946416+00:00", "raid_config": {}, "raid_interface": null, "rescue_interface": null, "reservation": null, "resource_class": "bm-large", "retired": false, "retired_reason": null, "states": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "bookmark" } ], "storage_interface": "noop", "target_power_state": null, "target_provision_state": null, "target_raid_config": {}, 
"traits": [], "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "vendor_interface": null, "volume": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-traits-list-response.json0000644000175000017500000000010000000000000026563 0ustar00coreycorey00000000000000{ "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-update-driver-info-request.json0000644000175000017500000000054400000000000027656 0ustar00coreycorey00000000000000[ { "op": "replace", "path": "/driver_info/ipmi_username", "value": "OPERATOR" }, { "op": "add", "path": "/driver_info/deploy_kernel", "value": "http://127.0.0.1/images/kernel" }, { "op": "add", "path": "/driver_info/deploy_ramdisk", "value": "http://127.0.0.1/images/ramdisk" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-update-driver-info-response.json0000644000175000017500000000573400000000000030032 0ustar00coreycorey00000000000000{ "allocation_uuid": null, "boot_interface": null, "chassis_uuid": null, "clean_step": {}, "conductor": "compute1.localdomain", "conductor_group": "group-1", "console_enabled": false, "console_interface": null, "created_at": "2016-08-18T22:28:48.643434+11:11", "deploy_interface": null, "deploy_step": {}, "driver": "fake", "driver_info": { "deploy_kernel": "http://127.0.0.1/images/kernel", "deploy_ramdisk": "http://127.0.0.1/images/ramdisk", "ipmi_password": "******", "ipmi_username": 
"OPERATOR" }, "driver_internal_info": { "clean_steps": null }, "extra": {}, "inspect_interface": null, "inspection_finished_at": null, "inspection_started_at": null, "instance_info": {}, "instance_uuid": null, "last_error": null, "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "bookmark" } ], "maintenance": true, "maintenance_reason": "Replacing the hard drive", "management_interface": null, "name": "test_node_classic", "network_interface": "flat", "owner": null, "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "bookmark" } ], "ports": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "bookmark" } ], "power_interface": null, "power_state": "power off", "properties": {}, "protected": false, "protected_reason": null, "provision_state": "available", "provision_updated_at": "2016-08-18T22:28:49.946416+00:00", "raid_config": {}, "raid_interface": null, "rescue_interface": null, "reservation": null, "resource_class": null, "retired": false, "retired_reason": null, "states": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "bookmark" } ], "storage_interface": "noop", "target_power_state": null, "target_provision_state": null, "target_raid_config": {}, "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX" ], "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "vendor_interface": null, "volume": [ { "href": 
"http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-update-driver.json0000644000175000017500000000012700000000000025234 0ustar00coreycorey00000000000000[ { "op" : "replace", "path" : "/driver", "value" : "fake" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-validate-response.json0000644000175000017500000000064100000000000026107 0ustar00coreycorey00000000000000{ "boot": { "result": true }, "console": { "result": true }, "deploy": { "result": true }, "inspect": { "result": true }, "management": { "result": true }, "network": { "result": true }, "power": { "result": true }, "raid": { "result": true }, "rescue": { "reason": "not supported", "result": null }, "storage": { "result": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-vendor-passthru-response.json0000644000175000017500000000050600000000000027462 0ustar00coreycorey00000000000000{ "bmc_reset": { "async": true, "attach": false, "description": "", "http_methods": [ "POST" ], "require_exclusive_lock": true }, "send_raw": { "async": true, "attach": false, "description": "", "http_methods": [ "POST" ], "require_exclusive_lock": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-vif-attach-request.json0000644000175000017500000000006500000000000026176 0ustar00coreycorey00000000000000{ "id": "1974dcfa-836f-41b2-b541-686c100900e5" } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-vif-list-response.json0000644000175000017500000000012300000000000026046 0ustar00coreycorey00000000000000{ "vifs": [ { "id": "1974dcfa-836f-41b2-b541-686c100900e5" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-volume-connector-detail-response.json0000644000175000017500000000125600000000000031060 0ustar00coreycorey00000000000000{ "connectors": [ { "connector_id": "iqn.2017-07.org.openstack:02:10190a4153e", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-volume-connector-list-response.json0000644000175000017500000000105300000000000030564 0ustar00coreycorey00000000000000{ "connectors": [ { "connector_id": "iqn.2017-07.org.openstack:02:10190a4153e", "links": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 
ironic-14.0.1.dev163/api-ref/source/samples/node-volume-list-response.json0000644000175000017500000000152600000000000026601 0ustar00coreycorey00000000000000{ "connectors": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume/connectors", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume/connectors", "rel": "bookmark" } ], "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume/", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume/", "rel": "bookmark" } ], "targets": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume/targets", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume/targets", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-volume-target-detail-response.json0000644000175000017500000000132600000000000030352 0ustar00coreycorey00000000000000{ "targets": [ { "boot_index": 0, "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "properties": {}, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "volume_id": "7211f7d3-3f32-4efc-b64e-9b8e92e64a8e", "volume_type": "iscsi" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/node-volume-target-list-response.json0000644000175000017500000000107300000000000030062 0ustar00coreycorey00000000000000{ 
"targets": [ { "boot_index": 0, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "volume_id": "7211f7d3-3f32-4efc-b64e-9b8e92e64a8e", "volume_type": "iscsi" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/nodes-list-details-response.json0000644000175000017500000001500600000000000027100 0ustar00coreycorey00000000000000{ "nodes": [ { "allocation_uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88", "boot_interface": null, "chassis_uuid": null, "clean_step": {}, "conductor": "compute1.localdomain", "conductor_group": "group-1", "console_enabled": false, "console_interface": null, "created_at": "2016-08-18T22:28:48.643434+11:11", "deploy_interface": null, "deploy_step": {}, "description": null, "driver": "fake", "driver_info": { "ipmi_password": "******", "ipmi_username": "ADMIN" }, "driver_internal_info": { "clean_steps": null }, "extra": {}, "inspect_interface": null, "inspection_finished_at": null, "inspection_started_at": null, "instance_info": {}, "instance_uuid": "5344a3e2-978a-444e-990a-cbf47c62ef88", "last_error": null, "lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "bookmark" } ], "maintenance": false, "maintenance_reason": null, "management_interface": null, "name": "test_node_classic", "network_interface": "flat", "owner": "john doe", "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "self" }, { "href": 
"http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/portgroups", "rel": "bookmark" } ], "ports": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/ports", "rel": "bookmark" } ], "power_interface": null, "power_state": "power off", "properties": {}, "protected": false, "protected_reason": null, "provision_state": "available", "provision_updated_at": "2016-08-18T22:28:49.946416+00:00", "raid_config": {}, "raid_interface": null, "rescue_interface": null, "reservation": null, "resource_class": null, "retired": false, "retired_reason": null, "states": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/states", "rel": "bookmark" } ], "storage_interface": "noop", "target_power_state": null, "target_provision_state": null, "target_raid_config": {}, "traits": [], "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "vendor_interface": null, "volume": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d/volume", "rel": "bookmark" } ] }, { "allocation_uuid": null, "boot_interface": "pxe", "chassis_uuid": null, "clean_step": {}, "conductor": "compute1.localdomain", "conductor_group": "", "console_enabled": false, "console_interface": "no-console", "created_at": "2016-08-18T22:28:48.643434+11:11", "deploy_interface": "iscsi", "deploy_step": {}, "driver": "ipmi", "driver_info": { "ipmi_password": "******", "ipmi_username": "ADMIN" }, "driver_internal_info": {}, "extra": {}, "inspect_interface": "no-inspect", "inspection_finished_at": null, "inspection_started_at": null, "instance_info": {}, "instance_uuid": null, "last_error": null, 
"lessee": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/2b045129-a906-46af-bc1a-092b294b3428", "rel": "bookmark" } ], "maintenance": false, "maintenance_reason": null, "management_interface": "ipmitool", "name": "test_node_dynamic", "network_interface": "flat", "owner": "43e61ec9-8e42-4dcb-bc45-30d66aa93e5b", "portgroups": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428/portgroups", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/2b045129-a906-46af-bc1a-092b294b3428/portgroups", "rel": "bookmark" } ], "ports": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/2b045129-a906-46af-bc1a-092b294b3428/ports", "rel": "bookmark" } ], "power_interface": "ipmitool", "power_state": null, "properties": {}, "protected": false, "protected_reason": null, "provision_state": "enroll", "provision_updated_at": null, "raid_config": {}, "raid_interface": "no-raid", "rescue_interface": "no-rescue", "reservation": null, "resource_class": null, "retired": false, "retired_reason": null, "states": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428/states", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/2b045129-a906-46af-bc1a-092b294b3428/states", "rel": "bookmark" } ], "storage_interface": "noop", "target_power_state": null, "target_provision_state": null, "target_raid_config": {}, "traits": [], "updated_at": null, "uuid": "2b045129-a906-46af-bc1a-092b294b3428", "vendor_interface": "no-vendor", "volume": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428/volume", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/2b045129-a906-46af-bc1a-092b294b3428/volume", "rel": "bookmark" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/nodes-list-response.json0000644000175000017500000000204200000000000025451 0ustar00coreycorey00000000000000{ "nodes": [ { "instance_uuid": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/6d85703a-565d-469a-96ce-30b6de53079d", "rel": "bookmark" } ], "maintenance": false, "name": "test_node_classic", "power_state": "power off", "provision_state": "available", "uuid": "6d85703a-565d-469a-96ce-30b6de53079d" }, { "instance_uuid": null, "links": [ { "href": "http://127.0.0.1:6385/v1/nodes/2b045129-a906-46af-bc1a-092b294b3428", "rel": "self" }, { "href": "http://127.0.0.1:6385/nodes/2b045129-a906-46af-bc1a-092b294b3428", "rel": "bookmark" } ], "maintenance": false, "name": "test_node_dynamic", "power_state": null, "provision_state": "enroll", "uuid": "2b045129-a906-46af-bc1a-092b294b3428" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/port-create-request.json0000644000175000017500000000055600000000000025457 0ustar00coreycorey00000000000000{ "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "address": "11:11:11:11:11:11", "is_smartnic": true, "local_link_connection": { "switch_id": "0a:1b:2c:3d:4e:5f", "port_id": "Ethernet3/1", "switch_info": "switch1" }, "physical_network": "physnet1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/port-create-response.json0000644000175000017500000000141300000000000025616 0ustar00coreycorey00000000000000{ "address": "11:11:11:11:11:11", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "is_smartnic": true, "links": [ { "href": 
"http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "local_link_connection": { "port_id": "Ethernet3/1", "switch_id": "0a:1b:2c:3d:4e:5f", "switch_info": "switch1" }, "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "physical_network": "physnet1", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "pxe_enabled": true, "updated_at": null, "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/port-list-detail-response.json0000644000175000017500000000162000000000000026566 0ustar00coreycorey00000000000000{ "ports": [ { "address": "11:11:11:11:11:11", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "local_link_connection": { "port_id": "Ethernet3/1", "switch_id": "0a:1b:2c:3d:4e:5f", "switch_info": "switch1" }, "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "physical_network": "physnet1", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "pxe_enabled": true, "updated_at": null, "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/port-list-response.json0000644000175000017500000000064200000000000025331 0ustar00coreycorey00000000000000{ "ports": [ { "address": "11:11:11:11:11:11", "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": 
"http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/port-update-request.json0000644000175000017500000000014500000000000025470 0ustar00coreycorey00000000000000[ { "path" : "/address", "value" : "22:22:22:22:22:22", "op" : "replace" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/port-update-response.json0000644000175000017500000000145100000000000025637 0ustar00coreycorey00000000000000{ "address": "22:22:22:22:22:22", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "local_link_connection": { "port_id": "Ethernet3/1", "switch_id": "0a:1b:2c:3d:4e:5f", "switch_info": "switch1" }, "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "physical_network": "physnet1", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "pxe_enabled": true, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/portgroup-create-request.json0000644000175000017500000000017600000000000026532 0ustar00coreycorey00000000000000{ "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "address": "11:11:11:11:11:11", "name": "test_portgroup" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 
ironic-14.0.1.dev163/api-ref/source/samples/portgroup-create-response.json0000644000175000017500000000161300000000000026675 0ustar00coreycorey00000000000000{ "address": "11:11:11:11:11:11", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "bookmark" } ], "mode": "active-backup", "name": "test_portgroup", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "ports": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "bookmark" } ], "properties": {}, "standalone_ports_supported": true, "updated_at": null, "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/portgroup-list-detail-response.json0000644000175000017500000000205100000000000027642 0ustar00coreycorey00000000000000{ "portgroups": [ { "address": "11:11:11:11:11:11", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "bookmark" } ], "mode": "active-backup", "name": "test_portgroup", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "ports": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "bookmark" } ], "properties": {}, "standalone_ports_supported": true, "updated_at": null, "uuid": 
"e43c722c-248e-4c6e-8ce8-0d8ff129387a" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/portgroup-list-response.json0000644000175000017500000000072100000000000026404 0ustar00coreycorey00000000000000{ "portgroups": [ { "address": "11:11:11:11:11:11", "links": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "bookmark" } ], "name": "test_portgroup", "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/portgroup-port-detail-response.json0000644000175000017500000000165600000000000027665 0ustar00coreycorey00000000000000{ "ports": [ { "address": "22:22:22:22:22:22", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "is_smartnic": true, "links": [ { "href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "local_link_connection": { "port_id": "Ethernet3/1", "switch_id": "0a:1b:2c:3d:4e:5f", "switch_info": "switch1" }, "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "physical_network": "physnet1", "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", "pxe_enabled": true, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/portgroup-port-list-response.json0000644000175000017500000000064200000000000027370 0ustar00coreycorey00000000000000{ "ports": [ { "address": "22:22:22:22:22:22", "links": [ { 
"href": "http://127.0.0.1:6385/v1/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "self" }, { "href": "http://127.0.0.1:6385/ports/d2b30520-907d-46c8-bfee-c5586e6fb3a1", "rel": "bookmark" } ], "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/portgroup-update-request.json0000644000175000017500000000014500000000000026545 0ustar00coreycorey00000000000000[ { "path" : "/address", "value" : "22:22:22:22:22:22", "op" : "replace" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/portgroup-update-response.json0000644000175000017500000000165100000000000026716 0ustar00coreycorey00000000000000{ "address": "22:22:22:22:22:22", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "internal_info": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a", "rel": "bookmark" } ], "mode": "active-backup", "name": "test_portgroup", "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "ports": [ { "href": "http://127.0.0.1:6385/v1/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "self" }, { "href": "http://127.0.0.1:6385/portgroups/e43c722c-248e-4c6e-8ce8-0d8ff129387a/ports", "rel": "bookmark" } ], "properties": {}, "standalone_ports_supported": true, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/volume-connector-create-request.json0000644000175000017500000000022000000000000027756 0ustar00coreycorey00000000000000{ "node_uuid": 
"6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "connector_id": "iqn.2017-07.org.openstack:01:d9a51732c3f" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/volume-connector-create-response.json0000644000175000017500000000105200000000000030130 0ustar00coreycorey00000000000000{ "connector_id": "iqn.2017-07.org.openstack:01:d9a51732c3f", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "updated_at": null, "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/volume-connector-list-detail-response.json0000644000175000017500000000122000000000000031075 0ustar00coreycorey00000000000000{ "connectors": [ { "connector_id": "iqn.2017-07.org.openstack:01:d9a51732c3f", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "updated_at": null, "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/volume-connector-list-response.json0000644000175000017500000000105300000000000027641 0ustar00coreycorey00000000000000{ "connectors": [ { "connector_id": 
"iqn.2017-07.org.openstack:01:d9a51732c3f", "links": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/volume-connector-update-request.json0000644000175000017500000000020100000000000027774 0ustar00coreycorey00000000000000[ { "path" : "/connector_id", "value" : "iqn.2017-07.org.openstack:02:10190a4153e", "op" : "replace" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/volume-connector-update-response.json0000644000175000017500000000111000000000000030142 0ustar00coreycorey00000000000000{ "connector_id": "iqn.2017-07.org.openstack:02:10190a4153e", "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/connectors/9bf93e01-d728-47a3-ad4b-5e66a835037c", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "type": "iqn", "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/volume-list-response.json0000644000175000017500000000112400000000000025650 0ustar00coreycorey00000000000000{ "connectors": [ { "href": "http://127.0.0.1:6385/v1/volume/connectors", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/connectors", "rel": "bookmark" } ], 
"links": [ { "href": "http://127.0.0.1:6385/v1/volume/", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/", "rel": "bookmark" } ], "targets": [ { "href": "http://127.0.0.1:6385/v1/volume/targets", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/targets", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/volume-target-create-request.json0000644000175000017500000000024700000000000027263 0ustar00coreycorey00000000000000{ "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "volume_type": "iscsi", "boot_index": 0, "volume_id": "04452bed-5367-4202-8bf5-de4335ac56d2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/volume-target-create-response.json0000644000175000017500000000111500000000000027424 0ustar00coreycorey00000000000000{ "boot_index": 0, "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "properties": {}, "updated_at": null, "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "volume_id": "04452bed-5367-4202-8bf5-de4335ac56d2", "volume_type": "iscsi" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/volume-target-list-detail-response.json0000644000175000017500000000127000000000000030376 0ustar00coreycorey00000000000000{ "targets": [ { "boot_index": 0, "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "self" }, { 
"href": "http://127.0.0.1:6385/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "properties": {}, "updated_at": null, "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "volume_id": "04452bed-5367-4202-8bf5-de4335ac56d2", "volume_type": "iscsi" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/volume-target-list-response.json0000644000175000017500000000107300000000000027137 0ustar00coreycorey00000000000000{ "targets": [ { "boot_index": 0, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "bookmark" } ], "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "volume_id": "04452bed-5367-4202-8bf5-de4335ac56d2", "volume_type": "iscsi" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/volume-target-update-request.json0000644000175000017500000000017200000000000027277 0ustar00coreycorey00000000000000[ { "path" : "/volume_id", "value" : "7211f7d3-3f32-4efc-b64e-9b8e92e64a8e", "op" : "replace" } ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/api-ref/source/samples/volume-target-update-response.json0000644000175000017500000000115300000000000027445 0ustar00coreycorey00000000000000{ "boot_index": 0, "created_at": "2016-08-18T22:28:48.643434+11:11", "extra": {}, "links": [ { "href": "http://127.0.0.1:6385/v1/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "self" }, { "href": "http://127.0.0.1:6385/volume/targets/bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "rel": "bookmark" } ], "node_uuid": 
"6d85703a-565d-469a-96ce-30b6de53079d", "properties": {}, "updated_at": "2016-08-18T22:28:49.653974+00:00", "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f", "volume_id": "7211f7d3-3f32-4efc-b64e-9b8e92e64a8e", "volume_type": "iscsi" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/babel.cfg0000644000175000017500000000002100000000000016073 0ustar00coreycorey00000000000000[python: **.py] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/bindep.txt0000644000175000017500000000710200000000000016356 0ustar00coreycorey00000000000000# these are needed to run ironic with default ipmitool and (i)PXE boot drivers ipmitool [default] ipxe [platform:dpkg default] ipxe-bootimgs [platform:rpm default] open-iscsi [platform:dpkg default] socat [default] xinetd [default] tftpd-hpa [platform:dpkg default] tftp-server [platform:rpm default] # Starting with Debian Jessie (and thus in Ubuntu Xenial too), # pxelinux package provides the pxelinux.0 boot loader, # but such package is absent from Debian Wheezy / Ubuntu Trusty. # Also, in Debian Wheezy / Ubuntu Trusty 'syslinux' depends on syslinux-common, # but only recommends it in Jessie/Xenial. # Make sure syslinux-common is installed for those distros as it provides # *.c32 modules for syslinux # TODO remove distro pinning when Wheezy / Trusty are EOLed (May 2019) # or DevStack stops supporting those. # In the mean time, new Debian-based release codenames will have to be added # as distros can not be pinned with 'if-later-than' specified. 
pxelinux [platform:ubuntu-xenial platform:debian-jessie default] syslinux [platform:rpm platform:ubuntu-trusty platform:debian-wheezy default] syslinux-common [platform:ubuntu-xenial platform:debian-jessie default] socat [default] # Grub2 files for boot loadingusing PXE/GRUB2 shim [platform:dpkg default] grub-efi-amd64-signed [platform:dpkg default] # these are needed to create and access VMs when testing with virtual hardware libvirt-bin [platform:dpkg devstack] libvirt [platform:rpm devstack] libvirt-dev [platform:dpkg devstack] libvirt-devel [platform:rpm devstack] qemu [platform:dpkg devstack build-image-dib] qemu-kvm [platform:dpkg devstack] qemu-utils [platform:dpkg devstack build-image-dib] sgabios [devstack] ipxe-qemu [platform:dpkg devstack] edk2-ovmf [platform:rpm devstack] ipxe-roms-qemu [platform:rpm devstack] openvswitch [platform:rpm devstack] iptables [devstack] net-tools [platform:rpm devstack] # these are needed to compile Python dependencies from sources python-dev [platform:dpkg test] python3-all-dev [platform:dpkg !platform:ubuntu-precise test] python-devel [platform:rpm test] python3-devel [platform:rpm test] build-essential [platform:dpkg test] libssl-dev [platform:dpkg test] # these are needed by infra for python-* jobs libpq-dev [platform:dpkg test] postgresql postgresql-client [platform:dpkg] # postgresql-devel [platform:rpm] postgresql-server [platform:rpm] mariadb [platform:rpm] mariadb-server [platform:rpm] # mariadb-devel [platform:rpm] dev-db/mariadb [platform:gentoo] mysql-client [platform:dpkg] mysql-server [platform:dpkg] # libmysqlclient-dev [platform:dpkg] # gettext and graphviz are needed by doc builds only. For transition, # have them in both doc and test. # TODO(jaegerandi): Remove test once infra scripts are updated. # this is needed for compiling translations gettext [test doc] # this is needed to build the FSM diagram graphviz [!platform:gentoo test doc] # libsrvg2 is needed for sphinxcontrib-svg2pdfconverter in docs builds. 
librsvg2-tools [doc platform:rpm] librsvg2-bin [doc platform:dpkg] # these are needed to build a deploy ramdisk # NOTE apparmor is an undeclared dependency for docker on ubuntu, # see https://github.com/docker/docker/issues/9745 apparmor [platform:dpkg imagebuild] docker.io [platform:dpkg imagebuild] docker-io [platform:rpm imagebuild] gnupg [imagebuild] squashfs-tools [platform:dpkg platform:redhat imagebuild] squashfs [platform:suse imagebuild] libguestfs0 [platform:dpkg imagebuild] libguestfs [platform:rpm imagebuild] python-guestfs [platform:dpkg imagebuild] # for TinyIPA build wget [imagebuild] python-pip [imagebuild] unzip [imagebuild] sudo [imagebuild] gawk [imagebuild] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1703992 ironic-14.0.1.dev163/devstack/0000755000175000017500000000000000000000000016160 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/common_settings0000644000175000017500000000543000000000000021315 0ustar00coreycorey00000000000000#!/bin/bash if [[ -f $TOP_DIR/../../old/devstack/.localrc.auto ]]; then source <(cat $TOP_DIR/../../old/devstack/.localrc.auto | grep -v 'enable_plugin') fi CIRROS_VERSION=0.4.0 # Whether configure the nodes to boot in Legacy BIOS or UEFI mode. Accepted # values are: "bios" or "uefi", defaults to "bios". # # WARNING: UEFI is EXPERIMENTAL. The CirrOS images uploaded by DevStack by # default WILL NOT WORK with UEFI. IRONIC_BOOT_MODE=${IRONIC_BOOT_MODE:-bios} IRONIC_DEFAULT_IMAGE_NAME=cirros-${CIRROS_VERSION}-x86_64-uec if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then IRONIC_DEFAULT_IMAGE_NAME=cirros-d160722-x86_64-uec fi IRONIC_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-$IRONIC_DEFAULT_IMAGE_NAME} # Add link to download queue, ignore if already exist. 
# TODO(vsaienko) Move to devstack https://review.opendev.org/420656 function add_image_link { local i_link="$1" if ! [[ "$IMAGE_URLS" =~ "$i_link" ]]; then if [[ -z "$IMAGE_URLS" || "${IMAGE_URLS: -1}" == "," ]]; then IMAGE_URLS+="$i_link" else IMAGE_URLS+=",$i_link" fi fi } if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then add_image_link http://download.cirros-cloud.net/daily/20160722/cirros-d160722-x86_64-uec.tar.gz add_image_link http://download.cirros-cloud.net/daily/20160722/cirros-d160722-x86_64-disk.img else # NOTE (vsaienko) We are going to test mixed drivers/partitions in single setup. # Do not restrict downloading image only for specific case. Download both disk and uec images. # NOTE (vdrok): Here the images are actually pre-cached by devstack, in # the files folder, so they won't be downloaded again. add_image_link http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz add_image_link http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img fi export IRONIC_WHOLEDISK_IMAGE_NAME=${IRONIC_WHOLEDISK_IMAGE_NAME:-${IRONIC_IMAGE_NAME/-uec/-disk}} export IRONIC_PARTITIONED_IMAGE_NAME=${IRONIC_PARTITIONED_IMAGE_NAME:-${IRONIC_IMAGE_NAME/-disk/-uec}} # These parameters describe which image will be used to provision a node in # tempest tests if [[ -z "$IRONIC_TEMPEST_WHOLE_DISK_IMAGE" && "$IRONIC_VM_EPHEMERAL_DISK" == 0 ]]; then IRONIC_TEMPEST_WHOLE_DISK_IMAGE=True fi IRONIC_TEMPEST_WHOLE_DISK_IMAGE=$(trueorfalse False IRONIC_TEMPEST_WHOLE_DISK_IMAGE) if [[ "$IRONIC_TEMPEST_WHOLE_DISK_IMAGE" == "True" ]]; then export IRONIC_IMAGE_NAME=$IRONIC_WHOLEDISK_IMAGE_NAME else export IRONIC_IMAGE_NAME=$IRONIC_PARTITIONED_IMAGE_NAME fi # NOTE(vsaienko) set DEFAULT_IMAGE_NAME here, as it is still used by grenade # https://github.com/openstack-dev/grenade/blob/90c4ead2f2a7ed48c873c51cef415b83d655752e/projects/60_nova/resources.sh#L31 export DEFAULT_IMAGE_NAME=$IRONIC_IMAGE_NAME 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1703992 ironic-14.0.1.dev163/devstack/files/0000755000175000017500000000000000000000000017262 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/files/apache-ipxe-ironic.template0000644000175000017500000000106500000000000024466 0ustar00coreycorey00000000000000Listen %PUBLICPORT% DocumentRoot "%HTTPROOT%" Options Indexes FollowSymLinks AllowOverride None Order allow,deny Allow from all Require all granted ErrorLog %APACHELOGDIR%/ipxe_error.log ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i" LogLevel info CustomLog %APACHELOGDIR%/ipxe_access.log "%{%Y-%m-%d}t %{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/files/apache-ironic-api-redirect.template0000644000175000017500000000144100000000000026067 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 1.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is an example Apache2 configuration file for using the # Ironic API through mod_wsgi. This version assumes you are # running devstack to configure the software. 
Redirect 307 /baremetal %IRONIC_SERVICE_PROTOCOL%://%IRONIC_SERVICE_HOST%/baremetal ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1703992 ironic-14.0.1.dev163/devstack/files/debs/0000755000175000017500000000000000000000000020177 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/files/debs/ironic0000644000175000017500000000223600000000000021410 0ustar00coreycorey00000000000000# apparmor is an undeclared dependency for docker on ubuntu: https://github.com/docker/docker/issues/9745 # Starting with Debian Jessie (and thus in Ubuntu Xenial too), # pxelinux package provides the pxelinux.0 boot loader, # but such package is absent from Debian Wheezy / Ubuntu Trusty. # Also, in Debian Wheezy / Ubuntu Trusty 'syslinux' depends on syslinux-common, # but only recommends it in Jessie/Xenial. # Make sure syslinux-common is installed for those distros as it provides # *.c32 modules for syslinux # TODO remove distro pinning when Wheezy / Trusty are EOLed (May 2019) # or DevStack stops supporting those. # In the mean time, new Debian-based release codenames will have to be added # as distros can not be pinned with 'if-later-than' specified. 
apparmor docker.io ipmitool iptables ipxe isolinux gnupg libguestfs0 libguestfs-tools libvirt-bin # dist:xenial,bionic NOPRIME open-iscsi openssh-client # TODO (etingof) pinning to older version in devstack/lib/ironic #ovmf pxelinux # dist:xenial,bionic python-libguestfs qemu qemu-kvm qemu-utils sgabios shellinabox syslinux-common # dist:xenial,bionic tftpd-hpa xinetd squashfs-tools libvirt-dev socat ipxe-qemu jq ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1703992 ironic-14.0.1.dev163/devstack/files/hooks/0000755000175000017500000000000000000000000020405 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/files/hooks/qemu.py0000755000175000017500000000642700000000000021742 0ustar00coreycorey00000000000000#!/usr/bin/python3 # Copyright (c) 2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import os import re import subprocess import sys # This script is run as a libvirt hook. # More information here: https://libvirt.org/hooks.html # The devstack/lib/ironic script in function setup_qemu_log_hook() will replace # LOG_DIR with the correct location. And will place the script into the correct # directory. 
VM_LOG_DIR = os.path.abspath("%LOG_DIR%") # Regular expression to find ANSI escape sequences at the beginning of a string ANSI_ESCAPE_RE = re.compile(r""" ^\x1b\[ # ANSI escape codes are ESC (0x1b) [ ?([\d;]*)(\w)""", re.VERBOSE) NOW = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") def main(): if len(sys.argv) < 3: return guest_name = sys.argv[1] action = sys.argv[2] if action == "started": interfaces = subprocess.check_output( ['ip', 'link', 'show', 'type', 'macvtap'] ).decode().split("\n") for iface_line in interfaces: if 'macvtap' in iface_line: iface_string = iface_line.split('@') ifaces = iface_string[0].split(' ') subprocess.call(['ip', 'link', 'set', 'dev', ifaces[1], 'multicast', 'on', 'allmulticast', 'on']) if action != "release": return if not console_log_exists(guest_name): return new_path = move_console_log(guest_name) if not new_path: return no_ansi_filename = "{}_no_ansi_{}.log".format(guest_name, NOW) no_ansi_path = os.path.join(VM_LOG_DIR, no_ansi_filename) create_no_ansi_file(new_path, no_ansi_path) def create_no_ansi_file(source_filename, dest_filename): with open(source_filename) as in_file: data = in_file.read() data = remove_ansi_codes(data) with open(dest_filename, 'w') as out_file: out_file.write(data) def get_console_log_path(guest_name): logfile_name = "{}_console.log".format(guest_name) return os.path.join(VM_LOG_DIR, logfile_name) def console_log_exists(guest_name): return os.path.isfile(get_console_log_path(guest_name)) def move_console_log(guest_name): new_logfile_name = "{}_console_{}.log".format(guest_name, NOW) new_path = os.path.join(VM_LOG_DIR, new_logfile_name) if os.path.exists(new_path): return False os.rename(get_console_log_path(guest_name), new_path) return new_path def remove_ansi_codes(data): """Remove any ansi codes from the provided string""" output = '' while data: result = ANSI_ESCAPE_RE.match(data) if not result: output += data[0] data = data[1:] else: data = data[result.end():] return output if '__main__' == 
__name__: sys.exit(main()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1703992 ironic-14.0.1.dev163/devstack/files/rpms/0000755000175000017500000000000000000000000020243 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/files/rpms/ironic0000644000175000017500000000046600000000000021457 0ustar00coreycorey00000000000000docker-io ipmitool iptables ipxe-bootimgs gnupg libguestfs libguestfs-tools libvirt libvirt-python qemu-system-x86 net-tools openssh-clients openvswitch sgabios shellinabox syslinux syslinux-nonlinux # dist:rhel8 tftp-server xinetd squashfs-tools libvirt-devel socat edk2-ovmf # dist:f24,f25 ipxe-roms-qemu jq ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1703992 ironic-14.0.1.dev163/devstack/lib/0000755000175000017500000000000000000000000016726 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/lib/ironic0000644000175000017500000035012000000000000020135 0ustar00coreycorey00000000000000#!/bin/bash # # lib/ironic # Functions to control the configuration and operation of the **Ironic** service # Dependencies: # # - ``functions`` file # - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # - ``SERVICE_HOST`` # - ``KEYSTONE_TOKEN_FORMAT`` must be defined # ``stack.sh`` calls the entry points in this order: # # - install_ironic # - install_ironicclient # - init_ironic # - start_ironic # - stop_ironic # - cleanup_ironic # ensure we don't re-source this in the same environment [[ -z "$_IRONIC_DEVSTACK_LIB" ]] || return 0 declare -r -g _IRONIC_DEVSTACK_LIB=1 # Save xtrace and pipefail settings _XTRACE_IRONIC=$(set +o | grep xtrace) 
_PIPEFAIL_IRONIC=$(set +o | grep pipefail) set -o xtrace set +o pipefail # Defaults # -------- # Set up default directories GITDIR["python-ironicclient"]=$DEST/python-ironicclient GITDIR["ironic-lib"]=$DEST/ironic-lib GITREPO["pyghmi"]=${PYGHMI_REPO:-${GIT_BASE}/x/pyghmi} GITBRANCH["pyghmi"]=${PYGHMI_BRANCH:-master} GITDIR["pyghmi"]=$DEST/pyghmi GITREPO["virtualbmc"]=${VIRTUALBMC_REPO:-${GIT_BASE}/openstack/virtualbmc.git} GITBRANCH["virtualbmc"]=${VIRTUALBMC_BRANCH:-master} GITDIR["virtualbmc"]=$DEST/virtualbmc GITREPO["virtualpdu"]=${VIRTUALPDU_REPO:-${GIT_BASE}/openstack/virtualpdu.git} GITBRANCH["virtualpdu"]=${VIRTUALPDU_BRANCH:-master} GITDIR["virtualpdu"]=$DEST/virtualpdu GITREPO["sushy"]=${SUSHY_REPO:-${GIT_BASE}/openstack/sushy.git} GITBRANCH["sushy"]=${SUSHY_BRANCH:-master} GITDIR["sushy"]=$DEST/sushy GITREPO["sushy-tools"]=${SUSHY_TOOLS_REPO:-${GIT_BASE}/openstack/sushy-tools.git} GITBRANCH["sushy-tools"]=${SUSHY_TOOLS_BRANCH:-master} GITDIR["sushy-tools"]=$DEST/sushy-tools IRONIC_DIR=$DEST/ironic IRONIC_DEVSTACK_DIR=$IRONIC_DIR/devstack IRONIC_DEVSTACK_FILES_DIR=$IRONIC_DEVSTACK_DIR/files # TODO(dtantsur): delete these three when we migrate image building to # ironic-python-agent-builder completely IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironic-python-agent.git} IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH} IRONIC_PYTHON_AGENT_DIR=$DEST/ironic-python-agent IRONIC_PYTHON_AGENT_BUILDER_REPO=${IRONIC_PYTHON_AGENT_BUILDER_REPO:-${GIT_BASE}/openstack/ironic-python-agent-builder.git} IRONIC_PYTHON_AGENT_BUILDER_BRANCH=${IRONIC_PYTHON_AGENT_BUILDER_BRANCH:-$TARGET_BRANCH} IRONIC_PYTHON_AGENT_BUILDER_DIR=$DEST/ironic-python-agent-builder IRONIC_DIB_BINDEP_FILE=https://opendev.org/openstack/diskimage-builder/raw/branch/master/bindep.txt IRONIC_DATA_DIR=$DATA_DIR/ironic IRONIC_STATE_PATH=/var/lib/ironic IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic} 
IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic} IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf # Deploy Ironic API under uwsgi (NOT mod_wsgi) server. # Devstack aims to remove mod_wsgi support, so ironic shouldn't use it too. # If set to False that will fall back to use the eventlet server that # can happen on grenade runs. # The (confusing) name IRONIC_USE_MOD_WSGI is left for backward compatibility, # for example during grenade runs # TODO(pas-ha) remove IRONIC_USE_MOD_WSGI var after oldest supported # stable branch is stable/rocky IRONIC_USE_MOD_WSGI=$(trueorfalse $ENABLE_HTTPD_MOD_WSGI_SERVICES IRONIC_USE_MOD_WSGI) # If True, will deploy Ironic API under WSGI server, currently supported one # is uwsgi. # Defaults to the (now confusingly named) IRONIC_USE_MOD_WSGI for backward compat IRONIC_USE_WSGI=$(trueorfalse $IRONIC_USE_MOD_WSGI IRONIC_USE_WSGI) # Whether DevStack will be setup for bare metal or VMs IRONIC_IS_HARDWARE=$(trueorfalse False IRONIC_IS_HARDWARE) # Deploy callback timeout can be changed from its default (1800), if required. IRONIC_CALLBACK_TIMEOUT=${IRONIC_CALLBACK_TIMEOUT:-} # Timeout before retrying PXE boot. Set low to help the CI. 
if [[ "$IRONIC_IS_HARDWARE" == False ]]; then IRONIC_PXE_BOOT_RETRY_TIMEOUT=${IRONIC_PXE_BOOT_RETRY_TIMEOUT:-600} else IRONIC_PXE_BOOT_RETRY_TIMEOUT=${IRONIC_PXE_BOOT_RETRY_TIMEOUT:-} fi # Ping timeout after the node becomes active IRONIC_PING_TIMEOUT=${IRONIC_PING_TIMEOUT:-} # Deploy to hardware platform IRONIC_HW_NODE_CPU=${IRONIC_HW_NODE_CPU:-1} IRONIC_HW_NODE_RAM=${IRONIC_HW_NODE_RAM:-512} IRONIC_HW_NODE_DISK=${IRONIC_HW_NODE_DISK:-10} IRONIC_HW_EPHEMERAL_DISK=${IRONIC_HW_EPHEMERAL_DISK:-0} IRONIC_HW_ARCH=${IRONIC_HW_ARCH:-x86_64} # The file is composed of multiple lines, each line includes fields # separated by white space, in the format: # # [] # # For example: # # 192.168.110.107 00:1e:67:57:50:4c root otc123 # # Supported IRONIC_DEPLOY_DRIVERs: # ipmi: # # # idrac: # # # irmc: # # IRONIC_HWINFO_FILE=${IRONIC_HWINFO_FILE:-$IRONIC_DATA_DIR/hardware_info} # Set up defaults for functional / integration testing IRONIC_NODE_UUID=${IRONIC_NODE_UUID:-`uuidgen`} IRONIC_SCRIPTS_DIR=${IRONIC_SCRIPTS_DIR:-$IRONIC_DEVSTACK_DIR/tools/ironic/scripts} IRONIC_TEMPLATES_DIR=${IRONIC_TEMPLATES_DIR:-$IRONIC_DEVSTACK_DIR/tools/ironic/templates} IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False IRONIC_BAREMETAL_BASIC_OPS) IRONIC_TFTPBOOT_DIR=${IRONIC_TFTPBOOT_DIR:-$IRONIC_DATA_DIR/tftpboot} IRONIC_TFTPSERVER_IP=${IRONIC_TFTPSERVER_IP:-$HOST_IP} IRONIC_TFTP_BLOCKSIZE=${IRONIC_TFTP_BLOCKSIZE:-$((PUBLIC_BRIDGE_MTU-50))} IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1} IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1} IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-1280} IRONIC_VM_SPECS_CPU_ARCH=${IRONIC_VM_SPECS_CPU_ARCH:-'x86_64'} IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10} IRONIC_VM_SPECS_DISK_FORMAT=${IRONIC_VM_SPECS_DISK_FORMAT:-qcow2} IRONIC_VM_EPHEMERAL_DISK=${IRONIC_VM_EPHEMERAL_DISK:-0} IRONIC_VM_EMULATOR=${IRONIC_VM_EMULATOR:-'/usr/bin/qemu-system-x86_64'} IRONIC_VM_ENGINE=${IRONIC_VM_ENGINE:-qemu} IRONIC_VM_NETWORK_BRIDGE=${IRONIC_VM_NETWORK_BRIDGE:-brbm} 
IRONIC_VM_INTERFACE_COUNT=${IRONIC_VM_INTERFACE_COUNT:-2} IRONIC_VM_VOLUME_COUNT=${IRONIC_VM_VOLUME_COUNT:-1} IRONIC_VM_MACS_CSV_FILE=${IRONIC_VM_MACS_CSV_FILE:-$IRONIC_DATA_DIR/ironic_macs.csv} IRONIC_CLEAN_NET_NAME=${IRONIC_CLEAN_NET_NAME:-${IRONIC_PROVISION_NETWORK_NAME:-${PRIVATE_NETWORK_NAME}}} IRONIC_RESCUE_NET_NAME=${IRONIC_RESCUE_NET_NAME:-${IRONIC_CLEAN_NET_NAME}} IRONIC_EXTRA_PXE_PARAMS=${IRONIC_EXTRA_PXE_PARAMS:-} IRONIC_TTY_DEV=${IRONIC_TTY_DEV:-ttyS0,115200} IRONIC_TEMPEST_BUILD_TIMEOUT=${IRONIC_TEMPEST_BUILD_TIMEOUT:-${BUILD_TIMEOUT:-}} if [[ -n "$BUILD_TIMEOUT" ]]; then echo "WARNING: BUILD_TIMEOUT variable is renamed to IRONIC_TEMPEST_BUILD_TIMEOUT and will be deprecated in Pike." fi IRONIC_DEFAULT_API_VERSION=${IRONIC_DEFAULT_API_VERSION:-} IRONIC_CMD="openstack baremetal" if [[ -n "$IRONIC_DEFAULT_API_VERSION" ]]; then IRONIC_CMD="$IRONIC_CMD --os-baremetal-api-version $IRONIC_DEFAULT_API_VERSION" fi IRONIC_ENABLED_HARDWARE_TYPES=${IRONIC_ENABLED_HARDWARE_TYPES:-"ipmi,fake-hardware"} # list of all available driver interfaces types IRONIC_DRIVER_INTERFACE_TYPES="bios boot power management deploy console inspect raid rescue storage network vendor" IRONIC_ENABLED_BIOS_INTERFACES=${IRONIC_ENABLED_BIOS_INTERFACES:-"fake,no-bios"} IRONIC_ENABLED_BOOT_INTERFACES=${IRONIC_ENABLED_BOOT_INTERFACES:-"fake,ipxe"} IRONIC_ENABLED_CONSOLE_INTERFACES=${IRONIC_ENABLED_CONSOLE_INTERFACES:-"fake,no-console"} IRONIC_ENABLED_DEPLOY_INTERFACES=${IRONIC_ENABLED_DEPLOY_INTERFACES:-"fake,iscsi,direct"} IRONIC_ENABLED_INSPECT_INTERFACES=${IRONIC_ENABLED_INSPECT_INTERFACES:-"fake,no-inspect"} IRONIC_ENABLED_MANAGEMENT_INTERFACES=${IRONIC_ENABLED_MANAGEMENT_INTERFACES:-"fake,ipmitool,noop"} IRONIC_ENABLED_NETWORK_INTERFACES=${IRONIC_ENABLED_NETWORK_INTERFACES:-"flat,noop"} IRONIC_ENABLED_POWER_INTERFACES=${IRONIC_ENABLED_POWER_INTERFACES:-"fake,ipmitool"} IRONIC_ENABLED_RAID_INTERFACES=${IRONIC_ENABLED_RAID_INTERFACES:-"fake,agent,no-raid"} 
IRONIC_ENABLED_RESCUE_INTERFACES=${IRONIC_ENABLED_RESCUE_INTERFACES:-"fake,no-rescue"} IRONIC_ENABLED_STORAGE_INTERFACES=${IRONIC_ENABLED_STORAGE_INTERFACES:-"fake,cinder,noop"} IRONIC_ENABLED_VENDOR_INTERFACES=${IRONIC_ENABLED_VENDOR_INTERFACES:-"fake,ipmitool,no-vendor"} # for usage with hardware types IRONIC_DEFAULT_BIOS_INTERFACE=${IRONIC_DEFAULT_BIOS_INTERFACE:-} IRONIC_DEFAULT_BOOT_INTERFACE=${IRONIC_DEFAULT_BOOT_INTERFACE:-} IRONIC_DEFAULT_CONSOLE_INTERFACE=${IRONIC_DEFAULT_CONSOLE_INTERFACE:-} IRONIC_DEFAULT_DEPLOY_INTERFACE=${IRONIC_DEFAULT_DEPLOY_INTERFACE:-} IRONIC_DEFAULT_INSPECT_INTERFACE=${IRONIC_DEFAULT_INSPECT_INTERFACE:-} IRONIC_DEFAULT_MANAGEMENT_INTERFACE=${IRONIC_DEFAULT_MANAGEMENT_INTERFACE:-} IRONIC_DEFAULT_NETWORK_INTERFACE=${IRONIC_DEFAULT_NETWORK_INTERFACE:-} IRONIC_DEFAULT_POWER_INTERFACE=${IRONIC_DEFAULT_POWER_INTERFACE:-} IRONIC_DEFAULT_RAID_INTERFACE=${IRONIC_DEFAULT_RAID_INTERFACE:-} IRONIC_DEFAULT_RESCUE_INTERFACE=${IRONIC_DEFAULT_RESCUE_INTERFACE:-} IRONIC_DEFAULT_STORAGE_INTERFACE=${IRONIC_DEFAULT_STORAGE_INTERFACE:-} IRONIC_DEFAULT_VENDOR_INTERFACE=${IRONIC_DEFAULT_VENDOR_INTERFACE:-} # If IRONIC_VM_ENGINE is explicitly set to "auto" or "kvm", # devstack will attempt to use hardware virtualization # (aka nested kvm). We do not enable it in the infra gates # because it is not consistently supported/working across # all gate infrastructure providers. if [[ "$IRONIC_VM_ENGINE" == "auto" ]]; then sudo modprobe kvm || true if [ ! -e /dev/kvm ]; then echo "WARNING: Switching to QEMU" IRONIC_VM_ENGINE=qemu if [[ -z "$IRONIC_VM_EMULATOR" ]]; then IRONIC_VM_EMULATOR='/usr/bin/qemu-system-x86_64' fi else IRONIC_VM_ENGINE=kvm fi fi if [[ "$IRONIC_VM_ENGINE" == "kvm" ]]; then # Set this to empty, so configure-vm.py can autodetect location # of KVM binary IRONIC_VM_EMULATOR="" fi # By default, baremetal VMs will console output to file. 
IRONIC_VM_LOG_CONSOLE=$(trueorfalse True IRONIC_VM_LOG_CONSOLE) IRONIC_VM_LOG_DIR=${IRONIC_VM_LOG_DIR:-$IRONIC_DATA_DIR/logs/} IRONIC_VM_LOG_ROTATE=$(trueorfalse True IRONIC_VM_LOG_ROTATE) # Set resource_classes for nodes to use Nova's placement engine IRONIC_DEFAULT_RESOURCE_CLASS=${IRONIC_DEFAULT_RESOURCE_CLASS:-baremetal} # Set traits for nodes. Traits should be separated by whitespace. IRONIC_DEFAULT_TRAITS=${IRONIC_DEFAULT_TRAITS-CUSTOM_GOLD} # Whether to build the ramdisk or download a prebuilt one. IRONIC_BUILD_DEPLOY_RAMDISK=$(trueorfalse True IRONIC_BUILD_DEPLOY_RAMDISK) # Ironic IPA ramdisk type, supported types are: IRONIC_SUPPORTED_RAMDISK_TYPES_RE="^(tinyipa|dib)$" IRONIC_RAMDISK_TYPE=${IRONIC_RAMDISK_TYPE:-dib} # Confirm we have a supported ramdisk type or fail early. if [[ ! "$IRONIC_RAMDISK_TYPE" =~ $IRONIC_SUPPORTED_RAMDISK_TYPES_RE ]]; then die $LINENO "Unrecognized IRONIC_RAMDISK_TYPE: $IRONIC_RAMDISK_TYPE. Expected 'tinyipa' or 'dib'" fi # If present, these files are used as deploy ramdisk/kernel. 
# (The value must be an absolute path) IRONIC_DEPLOY_RAMDISK=${IRONIC_DEPLOY_RAMDISK:-$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.initramfs} IRONIC_DEPLOY_KERNEL=${IRONIC_DEPLOY_KERNEL:-$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.kernel} IRONIC_DEPLOY_ISO=${IRONIC_DEPLOY_ISO:-$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.iso} # If present, this file is used to deploy/boot nodes over virtual media # (The value must be an absolute path) IRONIC_EFIBOOT=${IRONIC_EFIBOOT:-$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.efiboot} # NOTE(jroll) this needs to be updated when stable branches are cut IPA_DOWNLOAD_BRANCH=${IPA_DOWNLOAD_BRANCH:-master} IPA_DOWNLOAD_BRANCH=$(echo $IPA_DOWNLOAD_BRANCH | tr / -) # OS for using with DIB images IRONIC_DIB_RAMDISK_OS=${IRONIC_DIB_RAMDISK_OS:-centos8} IRONIC_DIB_RAMDISK_RELEASE=${IRONIC_DIB_RAMDISK_RELEASE:-} # Configure URLs required to download ramdisk if we're not building it, and # IRONIC_DEPLOY_RAMDISK/KERNEL or the RAMDISK/KERNEL_URLs have not been # preconfigured. if [[ "$IRONIC_BUILD_DEPLOY_RAMDISK" == "False" && \ ! (-e "$IRONIC_DEPLOY_RAMDISK" && -e "$IRONIC_DEPLOY_KERNEL") && \ (-z "$IRONIC_AGENT_KERNEL_URL" || -z "$IRONIC_AGENT_RAMDISK_URL") ]]; then case $IRONIC_RAMDISK_TYPE in tinyipa) IRONIC_AGENT_KERNEL_FILE=tinyipa-${IPA_DOWNLOAD_BRANCH}.vmlinuz IRONIC_AGENT_RAMDISK_FILE=tinyipa-${IPA_DOWNLOAD_BRANCH}.gz ;; dib) IRONIC_AGENT_KERNEL_FILE=ipa-${IRONIC_DIB_RAMDISK_OS}-${IPA_DOWNLOAD_BRANCH}.kernel IRONIC_AGENT_RAMDISK_FILE=ipa-${IRONIC_DIB_RAMDISK_OS}-${IPA_DOWNLOAD_BRANCH}.initramfs ;; esac IRONIC_AGENT_KERNEL_URL=https://tarballs.openstack.org/ironic-python-agent/${IRONIC_RAMDISK_TYPE}/files/${IRONIC_AGENT_KERNEL_FILE} IRONIC_AGENT_RAMDISK_URL=https://tarballs.openstack.org/ironic-python-agent/${IRONIC_RAMDISK_TYPE}/files/${IRONIC_AGENT_RAMDISK_FILE} fi # This refers the options for disk-image-create and the platform on which # to build the dib based ironic-python-agent ramdisk. 
IRONIC_DIB_RAMDISK_OPTIONS=${IRONIC_DIB_RAMDISK_OPTIONS:-} if [[ -z "$IRONIC_DIB_RAMDISK_OPTIONS" ]]; then if [[ "$IRONIC_DIB_RAMDISK_OS" == "centos8" ]]; then # Adapt for DIB naming change IRONIC_DIB_RAMDISK_OS=centos-minimal IRONIC_DIB_RAMDISK_RELEASE=8 fi IRONIC_DIB_RAMDISK_OPTIONS="$IRONIC_DIB_RAMDISK_OS" fi # DHCP timeout for the dhcp-all-interfaces element. IRONIC_DIB_DHCP_TIMEOUT=${IRONIC_DIB_DHCP_TIMEOUT:-60} # Some drivers in Ironic require deploy ramdisk in bootable ISO format. # Set this variable to "true" to build an ISO for deploy ramdisk and # upload to Glance. IRONIC_DEPLOY_ISO_REQUIRED=$(trueorfalse False IRONIC_DEPLOY_ISO_REQUIRED) if [[ "$IRONIC_DEPLOY_ISO_REQUIRED" = "True" \ && "$IRONIC_BUILD_DEPLOY_RAMDISK" = "False" \ && ! -e "$IRONIC_DEPLOY_ISO" ]]; then die "Prebuilt ISOs are not available, provide an ISO via IRONIC_DEPLOY_ISO \ or set IRONIC_BUILD_DEPLOY_RAMDISK=True to use ISOs" fi # Which deploy driver to use - valid choices right now # are ``ipmi``, ``snmp`` and ``redfish``. # # Additional valid choices if IRONIC_IS_HARDWARE == true are: # ``idrac`` and ``irmc``. IRONIC_DEPLOY_DRIVER=${IRONIC_DEPLOY_DRIVER:-ipmi} # If the requested driver is not yet enable, enable it, if it is not it will fail anyway if [[ -z "$(echo ${IRONIC_ENABLED_HARDWARE_TYPES} | grep -w ${IRONIC_DEPLOY_DRIVER})" ]]; then die "The deploy driver $IRONIC_DEPLOY_DRIVER is not in the list of enabled \ hardware types $IRONIC_ENABLED_HARDWARE_TYPES" fi # Support entry points installation of console scripts IRONIC_BIN_DIR=$(get_python_exec_prefix) IRONIC_UWSGI_CONF=$IRONIC_CONF_DIR/ironic-uwsgi.ini IRONIC_UWSGI=$IRONIC_BIN_DIR/ironic-api-wsgi # Ironic connection info. Note the port must be specified. 
if is_service_enabled tls-proxy; then IRONIC_SERVICE_PROTOCOL=https fi IRONIC_SERVICE_PROTOCOL=${IRONIC_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} IRONIC_SERVICE_PORT=${IRONIC_SERVICE_PORT:-6385} IRONIC_SERVICE_PORT_INT=${IRONIC_SERVICE_PORT_INT:-16385} # If ironic api running under apache or UWSGI we use the path rather than port if [[ "$IRONIC_USE_WSGI" == "True" ]]; then IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST/baremetal} else IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:$IRONIC_SERVICE_PORT} fi # Enable iPXE IRONIC_IPXE_ENABLED=$(trueorfalse True IRONIC_IPXE_ENABLED) # Options below are only applied when IRONIC_IPXE_ENABLED is True IRONIC_IPXE_USE_SWIFT=$(trueorfalse False IRONIC_IPXE_USE_SWIFT) IRONIC_HTTP_DIR=${IRONIC_HTTP_DIR:-$IRONIC_DATA_DIR/httpboot} IRONIC_HTTP_PORT=${IRONIC_HTTP_PORT:-3928} # Allow using JSON RPC instead of oslo.messaging IRONIC_RPC_TRANSPORT=${IRONIC_RPC_TRANSPORT:-oslo} IRONIC_JSON_RPC_PORT=${IRONIC_JSON_RPC_PORT:-8089} # The first port in the range to bind the Virtual BMCs. The number of # ports that will be used depends on $IRONIC_VM_COUNT variable, e.g if # $IRONIC_VM_COUNT=3 the ports 6230, 6231 and 6232 will be used for the # Virtual BMCs, one for each VM. 
IRONIC_VBMC_PORT_RANGE_START=${IRONIC_VBMC_PORT_RANGE_START:-6230} IRONIC_VBMC_CONFIG_FILE=${IRONIC_VBMC_CONFIG_FILE:-$IRONIC_CONF_DIR/virtualbmc/virtualbmc.conf} IRONIC_VBMC_LOGFILE=${IRONIC_VBMC_LOGFILE:-$IRONIC_VM_LOG_DIR/virtualbmc.log} IRONIC_VBMC_SYSTEMD_SERVICE=devstack@virtualbmc.service # Virtual PDU configs IRONIC_VPDU_CONFIG_FILE=${IRONIC_VPDU_CONFIG_FILE:-$IRONIC_CONF_DIR/virtualpdu/virtualpdu.conf} IRONIC_VPDU_PORT_RANGE_START=${IRONIC_VPDU_PORT_RANGE_START:-1} IRONIC_VPDU_LISTEN_PORT=${IRONIC_VPDU_LISTEN_PORT:-1161} IRONIC_VPDU_COMMUNITY=${IRONIC_VPDU_COMMUNITY:-private} IRONIC_VPDU_SNMPDRIVER=${IRONIC_VPDU_SNMPDRIVER:-apc_rackpdu} IRONIC_VPDU_SYSTEMD_SERVICE=devstack@virtualpdu.service # Redfish configs IRONIC_REDFISH_EMULATOR_PORT=${IRONIC_REDFISH_EMULATOR_PORT:-9132} IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE="devstack@redfish-emulator.service" IRONIC_REDFISH_EMULATOR_CONFIG=${IRONIC_REDFISH_EMULATOR_CONFIG:-$IRONIC_CONF_DIR/redfish/emulator.conf} # To explicitly enable configuration of Glance with Swift # (which is required by some vendor drivers), set this # variable to true. IRONIC_CONFIGURE_GLANCE_WITH_SWIFT=$(trueorfalse False IRONIC_CONFIGURE_GLANCE_WITH_SWIFT) # The path to the libvirt hooks directory, used if IRONIC_VM_LOG_ROTATE is True IRONIC_LIBVIRT_HOOKS_PATH=${IRONIC_LIBVIRT_HOOKS_PATH:-/etc/libvirt/hooks/} LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} LIBVIRT_STORAGE_POOL_PATH=${LIBVIRT_STORAGE_POOL_PATH:-/var/lib/libvirt/images} # The authentication strategy used by ironic-api. Valid values are: # keystone and noauth. IRONIC_AUTH_STRATEGY=${IRONIC_AUTH_STRATEGY:-keystone} # By default, terminal SSL certificate is disabled. IRONIC_TERMINAL_SSL=$(trueorfalse False IRONIC_TERMINAL_SSL) IRONIC_TERMINAL_CERT_DIR=${IRONIC_TERMINAL_CERT_DIR:-$IRONIC_DATA_DIR/terminal_cert/} # This flag is used to allow adding Link-Local-Connection info # to ironic port-create command. 
LLC info is obtained from # IRONIC_{VM,HW}_NODES_FILE IRONIC_USE_LINK_LOCAL=$(trueorfalse False IRONIC_USE_LINK_LOCAL) # Allow selecting dhcp provider IRONIC_DHCP_PROVIDER=${IRONIC_DHCP_PROVIDER:-neutron} # This is the network interface to use for a node IRONIC_NETWORK_INTERFACE=${IRONIC_NETWORK_INTERFACE:-} # Ironic provision network name, if this value is set it means we are using # multi-tenant networking. If not set, then we are not using multi-tenant # networking and are therefore using a 'flat' network. IRONIC_PROVISION_NETWORK_NAME=${IRONIC_PROVISION_NETWORK_NAME:-} # Provision network provider type. Can be flat or vlan. # This is only used if IRONIC_PROVISION_NETWORK_NAME has been set. IRONIC_PROVISION_PROVIDER_NETWORK_TYPE=${IRONIC_PROVISION_PROVIDER_NETWORK_TYPE:-'vlan'} # If IRONIC_PROVISION_PROVIDER_NETWORK_TYPE is vlan. VLAN_ID may be specified. If it is not set, # vlan will be allocated dynamically. # This is only used if IRONIC_PROVISION_NETWORK_NAME has been set. IRONIC_PROVISION_SEGMENTATION_ID=${IRONIC_PROVISION_SEGMENTATION_ID:-} # Allocation network pool for provision network # Example: IRONIC_PROVISION_ALLOCATION_POOL=start=10.0.5.10,end=10.0.5.100 # This is only used if IRONIC_PROVISION_NETWORK_NAME has been set. IRONIC_PROVISION_ALLOCATION_POOL=${IRONIC_PROVISION_ALLOCATION_POOL:-'start=10.0.5.10,end=10.0.5.100'} # Ironic provision subnet name. # This is only used if IRONIC_PROVISION_NETWORK_NAME has been set. IRONIC_PROVISION_PROVIDER_SUBNET_NAME=${IRONIC_PROVISION_PROVIDER_SUBNET_NAME:-${IRONIC_PROVISION_NETWORK_NAME}-subnet} # When enabled this will set the physical_network attribute for ironic ports # and subnet-to-segment association on provisioning network will be configured. # NOTE: The neutron segments service_plugin must be loaded for this. 
IRONIC_USE_NEUTRON_SEGMENTS=$(trueorfalse False IRONIC_USE_NEUTRON_SEGMENTS) # This is the storage interface to use for a node # Only 'cinder' can be set for testing boot from volume IRONIC_STORAGE_INTERFACE=${IRONIC_STORAGE_INTERFACE:-} # With multinode case all ironic-conductors should have IP from provisioning network. # IRONIC_PROVISION_SUBNET_GATEWAY - is configured on primary node. # Ironic provision subnet gateway. IRONIC_PROVISION_SUBNET_GATEWAY=${IRONIC_PROVISION_SUBNET_GATEWAY:-'10.0.5.1'} IRONIC_PROVISION_SUBNET_SUBNODE_IP=${IRONIC_PROVISION_SUBNET_SUBNODE_IP:-'10.0.5.2'} # Ironic provision subnet prefix # Example: IRONIC_PROVISION_SUBNET_PREFIX=10.0.5.0/24 IRONIC_PROVISION_SUBNET_PREFIX=${IRONIC_PROVISION_SUBNET_PREFIX:-'10.0.5.0/24'} if [[ "$HOST_TOPOLOGY_ROLE" == "primary" ]]; then IRONIC_TFTPSERVER_IP=$IRONIC_PROVISION_SUBNET_GATEWAY IRONIC_HTTP_SERVER=$IRONIC_PROVISION_SUBNET_GATEWAY fi if [[ "$HOST_TOPOLOGY_ROLE" == "subnode" ]]; then IRONIC_TFTPSERVER_IP=$IRONIC_PROVISION_SUBNET_SUBNODE_IP IRONIC_HTTP_SERVER=$IRONIC_PROVISION_SUBNET_SUBNODE_IP fi IRONIC_HTTP_SERVER=${IRONIC_HTTP_SERVER:-$IRONIC_TFTPSERVER_IP} # Port that must be permitted for iSCSI connections to be # established from the tenant network. ISCSI_SERVICE_PORT=${ISCSI_SERVICE_PORT:-3260} # Retrieving logs from the deploy ramdisk # # IRONIC_DEPLOY_LOGS_COLLECT possible values are: # * always: Collect the ramdisk logs from the deployment on success or # failure (Default in DevStack for debugging purpose). # * on_failure: Collect the ramdisk logs upon a deployment failure # (Default in Ironic). # * never: Never collect the ramdisk logs. IRONIC_DEPLOY_LOGS_COLLECT=${IRONIC_DEPLOY_LOGS_COLLECT:-always} # IRONIC_DEPLOY_LOGS_STORAGE_BACKEND possible values are: # * local: To store the logs in the local filesystem (Default in Ironic and DevStack). # * swift: To store the logs in Swift. 
IRONIC_DEPLOY_LOGS_STORAGE_BACKEND=${IRONIC_DEPLOY_LOGS_STORAGE_BACKEND:-local} # The path to the directory where Ironic should put the logs when IRONIC_DEPLOY_LOGS_STORAGE_BACKEND is set to "local" IRONIC_DEPLOY_LOGS_LOCAL_PATH=${IRONIC_DEPLOY_LOGS_LOCAL_PATH:-$IRONIC_VM_LOG_DIR/deploy_logs} # Fast track option IRONIC_DEPLOY_FAST_TRACK=${IRONIC_DEPLOY_FAST_TRACK:-False} # Agent Token requirement IRONIC_REQUIRE_AGENT_TOKEN=${IRONIC_REQUIRE_AGENT_TOKEN:-True} # Define baremetal min_microversion in tempest config. Default value None is picked from tempest. TEMPEST_BAREMETAL_MIN_MICROVERSION=${TEMPEST_BAREMETAL_MIN_MICROVERSION:-} # Define baremetal max_microversion in tempest config. No default value means that it is picked from tempest. TEMPEST_BAREMETAL_MAX_MICROVERSION=${TEMPEST_BAREMETAL_MAX_MICROVERSION:-} # get_pxe_boot_file() - Get the PXE/iPXE boot file path function get_pxe_boot_file { local pxe_boot_file if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then if is_ubuntu; then pxe_boot_file=/usr/lib/ipxe/undionly.kpxe elif is_fedora || is_suse; then pxe_boot_file=/usr/share/ipxe/undionly.kpxe fi else # Standard PXE if is_ubuntu; then # Ubuntu Xenial (16.04) places the file under /usr/lib/PXELINUX pxe_paths="/usr/lib/syslinux/pxelinux.0 /usr/lib/PXELINUX/pxelinux.0" for p in $pxe_paths; do if [[ -f $p ]]; then pxe_boot_file=$p fi done elif is_fedora || is_suse; then pxe_boot_file=/usr/share/syslinux/pxelinux.0 fi fi echo $pxe_boot_file } # PXE boot image IRONIC_PXE_BOOT_IMAGE=${IRONIC_PXE_BOOT_IMAGE:-$(get_pxe_boot_file)} IRONIC_AUTOMATED_CLEAN_ENABLED=$(trueorfalse True IRONIC_AUTOMATED_CLEAN_ENABLED) IRONIC_SECURE_BOOT=${IRONIC_SECURE_BOOT:-False} IRONIC_UEFI_BOOT_LOADER=${IRONIC_UEFI_BOOT_LOADER:-grub2} IRONIC_GRUB2_SHIM_FILE=${IRONIC_GRUB2_SHIM_FILE:-} IRONIC_GRUB2_FILE=${IRONIC_GRUB2_FILE:-} IRONIC_UEFI_FILES_DIR=${IRONIC_UEFI_FILES_DIR:-/var/lib/libvirt/images} UEFI_LOADER_PATH=$IRONIC_UEFI_FILES_DIR/OVMF_CODE.fd 
UEFI_NVRAM_PATH=$IRONIC_UEFI_FILES_DIR/OVMF_VARS.fd

# Handle architecture specific package installs
if [[ $IRONIC_HW_ARCH == "x86_64" ]]; then
    install_package shim
    if is_ubuntu; then
        install_package grub-efi-amd64-signed
    elif is_fedora; then
        install_package grub2-efi
    fi
fi

# Sanity checks
if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then
    if [[ "$IRONIC_IPXE_ENABLED" == "False" ]] && [[ "$IRONIC_UEFI_BOOT_LOADER" != "grub2" ]]; then
        die $LINENO "Boot mode UEFI is only supported with iPXE and grub2 bootloaders."
    fi

    if ! is_fedora && ! is_ubuntu; then
        die $LINENO "Boot mode UEFI only works in Ubuntu or Fedora for now."
    fi

    if is_arch "x86_64"; then
        if is_ubuntu; then
            install_package grub-efi
        elif is_fedora; then
            install_package grub2 grub2-efi
        fi
    fi

    # Default the shim/grub paths on Ubuntu when the operator did not set them.
    if is_ubuntu && [[ -z $IRONIC_GRUB2_FILE ]]; then
        IRONIC_GRUB2_SHIM_FILE=/usr/lib/shim/shimx64.efi
        IRONIC_GRUB2_FILE=/usr/lib/grub/x86_64-efi-signed/grubx64.efi.signed
    fi

    if [[ "$IRONIC_IPXE_ENABLED" == "False" ]]; then
        # NOTE(TheJulia): While we no longer directly copy the
        # IRONIC_GRUB2_FILE, we still check the existence as
        # without the bootloader package we would be unable to build
        # the netboot core image.
        if [[ -z $IRONIC_GRUB2_SHIM_FILE ]] || [[ -z $IRONIC_GRUB2_FILE ]] || [[ ! -f $IRONIC_GRUB2_SHIM_FILE ]] || [[ ! -f $IRONIC_GRUB2_FILE ]]; then
            die $LINENO "Grub2 Bootloader and Shim file missing."
        fi
    fi
fi

# TODO(dtantsur): change this when we change the default value.
IRONIC_DEFAULT_BOOT_OPTION=${IRONIC_DEFAULT_BOOT_OPTION:-netboot}
if [ $IRONIC_DEFAULT_BOOT_OPTION != "netboot" ] && [ $IRONIC_DEFAULT_BOOT_OPTION != "local" ]; then
    die $LINENO "Supported values for IRONIC_DEFAULT_BOOT_OPTION are 'netboot' and 'local' only."
fi

# TODO(pas-ha) find a way to (cross-)sign the custom CA bundle used by tls-proxy
# with default iPXE cert - for reference see http://ipxe.org/crypto
if is_service_enabled tls-proxy && [[ "$IRONIC_IPXE_USE_SWIFT" == "True" ]]; then
    die $LINENO "Ironic in DevStack does not yet support booting iPXE from HTTPS URLs"
fi

# Timeout for "manage" action. 2 minutes is more than enough.
IRONIC_MANAGE_TIMEOUT=${IRONIC_MANAGE_TIMEOUT:-120}

# Timeout for "provide" action. This involves cleaning. Generally, 15 minutes
# should be enough, but real hardware may need more.
IRONIC_CLEANING_TIMEOUT=${IRONIC_CLEANING_TIMEOUT:-1200}
IRONIC_CLEANING_DELAY=10
IRONIC_CLEANING_ATTEMPTS=$(( $IRONIC_CLEANING_TIMEOUT / $IRONIC_CLEANING_DELAY ))

# Timeout for ironic-neutron-agent to report state before providing nodes.
# The agent reports every 60 seconds, 2 minutes should do.
IRONIC_NEUTRON_AGENT_REPORT_STATE_DELAY=10
IRONIC_NEUTRON_AGENT_REPORT_STATE_TIMEOUT=${IRONIC_NEUTRON_AGENT_REPORT_STATE_TIMEOUT:-120}
IRONIC_NEUTRON_AGENT_REPORT_STATE_ATTEMPTS=$(( $IRONIC_NEUTRON_AGENT_REPORT_STATE_TIMEOUT / IRONIC_NEUTRON_AGENT_REPORT_STATE_DELAY ))

# Username to use by Ansible to access ramdisk,
# to be set as '[ansible]/default_username' option.
# If not set here (default), will be set to 'tc' for TinyIPA ramdisk,
# for other ramdisks it must be either provided here,
# or set manually per-node via ironic API
IRONIC_ANSIBLE_SSH_USER=${IRONIC_ANSIBLE_SSH_USER:-}

# Path to the private SSH key to use by ansible deploy interface
# that will be set as '[ansible]/default_key_file' option in config.
# The public key path is assumed to be ${IRONIC_ANSIBLE_SSH_KEY}.pub
# and will be used when rebuilding the image to include this public key
# in ~/.ssh/authorized_keys of a $IRONIC_ANSIBLE_SSH_USER in the ramdisk.
# Only the TinyIPA ramdisks are currently supported for such rebuild.
# For TinyIPA ramdisks, if the specified file doesn't exist, it will
# be created and will contain a new RSA passwordless key.
# We assume
# that the directories in the path to this file exist and are
# writable.
# For other ramdisk types, make sure the corresponding public key is baked into
# the ramdisk to be used by DevStack and provide the path to the private key here,
# or set it manually per node via ironic API.
# FIXME(pas-ha) auto-generated keys currently won't work for multi-node
# DevStack deployment, as we do not distribute this generated key to subnodes yet.
IRONIC_ANSIBLE_SSH_KEY=${IRONIC_ANSIBLE_SSH_KEY:-$IRONIC_DATA_DIR/ansible_ssh_key}

IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE=${IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE:-swift}

# Functions
# ---------

# UEFI related functions

# Echo the distro-specific path of the UEFI iPXE binary.
function get_uefi_ipxe_boot_file {
    if is_ubuntu; then
        echo /usr/lib/ipxe/ipxe.efi
    elif is_fedora; then
        echo /usr/share/ipxe/ipxe-x86_64.efi
    fi
}

# Echo the distro-specific path of the OVMF UEFI firmware (code) image.
function get_uefi_loader {
    if is_ubuntu; then
        echo /usr/share/OVMF/OVMF_CODE.fd
    elif is_fedora; then
        echo /usr/share/edk2/ovmf/OVMF_CODE.fd
    fi
}

# Echo the distro-specific path of the OVMF UEFI NVRAM (vars) image.
function get_uefi_nvram {
    if is_ubuntu; then
        echo /usr/share/OVMF/OVMF_VARS.fd
    elif is_fedora; then
        echo /usr/share/edk2/ovmf/OVMF_VARS.fd
    fi
}

# Misc

# Restart the libvirt daemon, accounting for the older Ubuntu service name.
function restart_libvirt {
    local libvirt_service_name="libvirtd"

    # NOTE: older Ubuntu releases ship the daemon as 'libvirt-bin' and have
    # no 'libvirtd' binary on PATH.
    if is_ubuntu && ! type libvirtd; then
        libvirt_service_name="libvirt-bin"
    fi

    restart_service $libvirt_service_name
}

# Test if any Ironic services are enabled
# is_ironic_enabled
function is_ironic_enabled {
    [[ ,${ENABLED_SERVICES} =~ ,"ir-" ]] && return 0
    return 1
}

# True when deployment uses an agent-family deploy driver or the
# 'direct' deploy interface.
function is_deployed_by_agent {
    [[ -z "${IRONIC_DEPLOY_DRIVER%%agent*}" || "$IRONIC_DEFAULT_DEPLOY_INTERFACE" == "direct" ]] && return 0
    return 1
}

function is_deployed_by_ipmi {
    [[ "$IRONIC_DEPLOY_DRIVER" == ipmi ]] && return 0
    return 1
}

function is_deployed_by_ilo {
    [[ "${IRONIC_DEPLOY_DRIVER}" == ilo ]] && return 0
    return 1
}

function is_deployed_by_drac {
    [[ "${IRONIC_DEPLOY_DRIVER}" == idrac ]] && return 0
    return 1
}

function is_deployed_by_snmp {
    [[ "${IRONIC_DEPLOY_DRIVER}" == snmp ]] && return 0
    return 1
}

function is_deployed_by_redfish {
    [[ "$IRONIC_DEPLOY_DRIVER" == redfish ]] && return 0
    return 1
}

function is_deployed_by_irmc {
    [[ "$IRONIC_DEPLOY_DRIVER" == irmc ]] && return 0
    return 1
}

function is_deployed_by_xclarity {
    [[ "$IRONIC_DEPLOY_DRIVER" == xclarity ]] && return 0
    return 1
}

# True when the idrac hardware type is in the enabled hardware types list.
function is_drac_enabled {
    [[ -z "${IRONIC_ENABLED_HARDWARE_TYPES%%*idrac*}" ]] && return 0
    return 1
}

# True when the ansible deploy interface is in the enabled interfaces list.
function is_ansible_deploy_enabled {
    [[ -z "${IRONIC_ENABLED_DEPLOY_INTERFACES%%*ansible*}" ]] && return 0
    return 1
}

# True when the redfish hardware type is in the enabled hardware types list.
function is_redfish_enabled {
    [[ -z "${IRONIC_ENABLED_HARDWARE_TYPES%%*redfish*}" ]] && return 0
    return 1
}

function is_ansible_with_tinyipa {
    # NOTE(pas-ha) we support rebuilding the ramdisk to include (generated) SSH keys
    # as needed for ansible deploy interface only for TinyIPA ramdisks for now
    is_ansible_deploy_enabled && [[ "$IRONIC_RAMDISK_TYPE" == "tinyipa" ]] && return 0
    return 1
}

function is_glance_configuration_required {
    is_deployed_by_agent || is_ansible_deploy_enabled || [[ "$IRONIC_CONFIGURE_GLANCE_WITH_SWIFT" == "True" ]] && return 0
    return 1
}

function is_deploy_iso_required {
    [[ "$IRONIC_IS_HARDWARE" == "True" && "$IRONIC_DEPLOY_ISO_REQUIRED" == "True" ]] && return 0
    return 1
}

# Assert that
# the redfish hardware type is enabled in case we are using
# the redfish driver
if is_deployed_by_redfish && [[ "$IRONIC_ENABLED_HARDWARE_TYPES" != *"redfish"* ]]; then
    die $LINENO "Please make sure that the redfish hardware" \
        "type is enabled. Take a look at the " \
        "IRONIC_ENABLED_HARDWARE_TYPES configuration option" \
        "for DevStack"
fi

# Assert that for non-TinyIPA ramdisks and Ansible, the private SSH key file to use exists.
if is_ansible_deploy_enabled && [[ "$IRONIC_RAMDISK_TYPE" != "tinyipa" ]]; then
    if [[ ! -f $IRONIC_ANSIBLE_SSH_KEY ]]; then
        die $LINENO "Using non-TinyIPA ramdisks with ansible deploy interface" \
            "requires setting IRONIC_ANSIBLE_SSH_KEY to existing"\
            "private SSH key file to be used by Ansible."
    fi
fi

# Syslinux >= 5.00 pxelinux.0 binary is not "stand-alone" anymore,
# it depends on some c32 modules to work correctly.
# More info: http://www.syslinux.org/wiki/index.php/Library_modules
function setup_syslinux_modules {
    # Ignore it for iPXE, it doesn't depend on syslinux modules
    [[ "$IRONIC_IPXE_ENABLED" == "True" ]] && return 0

    # Ubuntu Xenial doesn't ship pxelinux.0 as part of syslinux anymore
    if is_ubuntu && [[ -d /usr/lib/PXELINUX/ ]]; then
        # TODO(lucasagomes): Figure out whether its UEFI or BIOS once
        # we have UEFI support in DevStack
        cp -aR /usr/lib/syslinux/modules/bios/*.c32 $IRONIC_TFTPBOOT_DIR
    else
        cp -aR $(dirname $IRONIC_PXE_BOOT_IMAGE)/*.c32 $IRONIC_TFTPBOOT_DIR
    fi
}

function start_virtualbmc {
    start_service $IRONIC_VBMC_SYSTEMD_SERVICE
}

function stop_virtualbmc {
    stop_service $IRONIC_VBMC_SYSTEMD_SERVICE
}

# Stop the VirtualBMC service and remove its systemd unit file.
function cleanup_virtualbmc {
    stop_virtualbmc

    disable_service $IRONIC_VBMC_SYSTEMD_SERVICE

    local unitfile="$SYSTEMD_DIR/$IRONIC_VBMC_SYSTEMD_SERVICE"
    sudo rm -f $unitfile

    $SYSTEMCTL daemon-reload
}

# Install VirtualBMC (from git or PyPI) and register its systemd unit
# running 'vbmcd --foreground' as the stack user.
function install_virtualbmc {
    # Install pyghmi from source, if requested, otherwise it will be
    # downloaded as part of the virtualbmc installation
    if use_library_from_git "pyghmi"; then
        git_clone_by_name "pyghmi"
        setup_dev_lib "pyghmi"
    fi

    if use_library_from_git "virtualbmc"; then
        git_clone_by_name "virtualbmc"
        setup_dev_lib "virtualbmc"
    else
        pip_install_gr "virtualbmc"
    fi

    local cmd
    cmd=$(which vbmcd)
    cmd+=" --foreground"

    write_user_unit_file $IRONIC_VBMC_SYSTEMD_SERVICE "$cmd" "" "$STACK_USER"
    local unitfile="$SYSTEMD_DIR/$IRONIC_VBMC_SYSTEMD_SERVICE"
    iniset -sudo $unitfile "Service" "Environment" "VIRTUALBMC_CONFIG=$IRONIC_VBMC_CONFIG_FILE"

    enable_service $IRONIC_VBMC_SYSTEMD_SERVICE
}

# Create the VirtualBMC config directory and enable debug logging.
function configure_virtualbmc {
    if [[ ! -d $(dirname $IRONIC_VBMC_CONFIG_FILE) ]]; then
        mkdir -p $(dirname $IRONIC_VBMC_CONFIG_FILE)
    fi

    iniset -sudo $IRONIC_VBMC_CONFIG_FILE log debug True
}

function start_virtualpdu {
    start_service $IRONIC_VPDU_SYSTEMD_SERVICE
}

function stop_virtualpdu {
    stop_service $IRONIC_VPDU_SYSTEMD_SERVICE
}

# Stop the VirtualPDU service and remove its systemd unit file.
function cleanup_virtualpdu {
    stop_virtualpdu

    disable_service $IRONIC_VPDU_SYSTEMD_SERVICE

    local unitfile="$SYSTEMD_DIR/$IRONIC_VPDU_SYSTEMD_SERVICE"
    sudo rm -f $unitfile

    $SYSTEMCTL daemon-reload
}

# Install VirtualPDU (from git or PyPI) and register its systemd unit.
function install_virtualpdu {
    if use_library_from_git "virtualpdu"; then
        git_clone_by_name "virtualpdu"
        setup_dev_lib "virtualpdu"
    else
        pip_install "virtualpdu"
    fi

    local cmd
    cmd=$(which virtualpdu)
    cmd+=" $IRONIC_VPDU_CONFIG_FILE"

    write_user_unit_file $IRONIC_VPDU_SYSTEMD_SERVICE "$cmd" "" "$STACK_USER"

    enable_service $IRONIC_VPDU_SYSTEMD_SERVICE
}

# Write the VirtualPDU configuration: SNMP listener settings and the
# outlet->VM mapping produced by _generate_pdu_ports.
function configure_virtualpdu {
    mkdir -p $(dirname $IRONIC_VPDU_CONFIG_FILE)

    iniset -sudo $IRONIC_VPDU_CONFIG_FILE global debug True
    iniset -sudo $IRONIC_VPDU_CONFIG_FILE global libvirt_uri "qemu:///system"
    iniset -sudo $IRONIC_VPDU_CONFIG_FILE PDU listen_address ${HOST_IP}
    iniset -sudo $IRONIC_VPDU_CONFIG_FILE PDU listen_port ${IRONIC_VPDU_LISTEN_PORT}
    iniset -sudo $IRONIC_VPDU_CONFIG_FILE PDU community ${IRONIC_VPDU_COMMUNITY}
    iniset -sudo $IRONIC_VPDU_CONFIG_FILE PDU ports $(_generate_pdu_ports)
    iniset -sudo $IRONIC_VPDU_CONFIG_FILE PDU outlet_default_state "OFF"
}

# _generate_pdu_ports() - Generates list of
port:node_name. function _generate_pdu_ports { pdu_port_number=${IRONIC_VPDU_PORT_RANGE_START} port_config=() for vm_name in $(_ironic_bm_vm_names); do port_config+=("${pdu_port_number}:${vm_name}") pdu_port_number=$(( pdu_port_number + 1 )) done echo ${port_config[*]} | tr ' ' ',' } function start_redfish { start_service $IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE } function stop_redfish { stop_service $IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE } function cleanup_redfish { stop_redfish rm -f $IRONIC_REDFISH_EMULATOR_CONFIG disable_service $IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE local unitfile="$SYSTEMD_DIR/$IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE" sudo rm -f $unitfile $SYSTEMCTL daemon-reload } function install_redfish { # TODO(lucasagomes): Use Apache WSGI instead of gunicorn gunicorn=gunicorn if is_ubuntu; then if python3_enabled; then gunicorn=${gunicorn}3 fi install_package $gunicorn else pip_install_gr "gunicorn" fi if use_library_from_git "sushy-tools"; then git_clone_by_name "sushy-tools" setup_dev_lib "sushy-tools" else pip_install "sushy-tools" fi local cmd cmd=$(which $gunicorn) cmd+=" sushy_tools.emulator.main:app" cmd+=" --bind ${HOST_IP}:${IRONIC_REDFISH_EMULATOR_PORT}" cmd+=" --env FLASK_DEBUG=1" cmd+=" --env SUSHY_EMULATOR_CONFIG=${IRONIC_REDFISH_EMULATOR_CONFIG}" write_user_unit_file $IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE "$cmd" "" "$STACK_USER" enable_service $IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE } function configure_redfish { if [[ ! -d $(dirname $IRONIC_REDFISH_EMULATOR_CONFIG) ]]; then mkdir -p $(dirname $IRONIC_REDFISH_EMULATOR_CONFIG) fi cat - < $IRONIC_REDFISH_EMULATOR_CONFIG SUSHY_EMULATOR_BOOT_LOADER_MAP = { 'UEFI': { 'x86_64': '$UEFI_LOADER_PATH' }, 'Legacy': { 'x86_64': None } } EOF } function setup_sushy { if use_library_from_git "sushy"; then git_clone_by_name "sushy" setup_dev_lib "sushy" else pip_install_gr "sushy" fi } # install_ironic() - Install the things! 
# Install ironic from source, plus Apache/uWSGI, UEFI firmware fix-ups for
# virtual nodes, and the optional virtual BMC/PDU/Redfish backends.
function install_ironic {
    # NOTE(vsaienko) do not check required_services on subnode
    if [[ "$HOST_TOPOLOGY_ROLE" != "subnode" ]]; then
        # make sure all needed service were enabled
        local req_services="key"
        if is_service_enabled nova && [[ "$VIRT_DRIVER" == "ironic" ]]; then
            req_services+=" nova glance neutron"
        fi
        for srv in $req_services; do
            if ! is_service_enabled "$srv"; then
                die $LINENO "$srv should be enabled for Ironic."
            fi
        done
    fi

    if use_library_from_git "ironic-lib"; then
        git_clone_by_name "ironic-lib"
        setup_dev_lib "ironic-lib"
    fi

    setup_develop $IRONIC_DIR

    if [[ "$IRONIC_USE_WSGI" == "True" || "$IRONIC_IPXE_ENABLED" == "True" ]]; then
        install_apache_wsgi
    fi

    if [[ "$IRONIC_BOOT_MODE" == "uefi" && "$IRONIC_IS_HARDWARE" == "False" ]]; then
        # Append the nvram configuration to libvirt if it's not present already
        if ! sudo grep -q "^nvram" /etc/libvirt/qemu.conf; then
            echo "nvram=[\"$UEFI_LOADER_PATH:$UEFI_NVRAM_PATH\"]" | sudo tee -a /etc/libvirt/qemu.conf
        fi

        # Replace the default virtio PXE ROM in QEMU with an EFI capable
        # one. The EFI ROM should work on with both boot modes, Legacy
        # BIOS and UEFI.
        if is_ubuntu; then
            # (rpittau) in bionic the UEFI in the ovmf 0~20180205.c0d9813c-2
            # package is broken: EFI v2.70 by EDK II
            # As a workaround, here we download and install the old working
            # version from the multiverse repository: EFI v2.60 by EDK II
            # Bug reference:
            # https://bugs.launchpad.net/ubuntu/+source/edk2/+bug/1821729
            local temp_deb
            temp_deb="$(mktemp)"
            wget http://archive.ubuntu.com/ubuntu/pool/multiverse/e/edk2/ovmf_0~20160408.ffea0a2c-2_all.deb -O "$temp_deb"
            sudo dpkg -i "$temp_deb"
            rm -f "$temp_deb"
            # NOTE(TheJulia): This no longer seems required as the ovmf images
            # DO correctly network boot. The effect of this is making the
            # default boot loader iPXE, which is not always desired nor
            # realistic for hardware in the field.
            # If it is after Train, we should likely just delete the lines
            # below and consider the same for Fedora.
            # sudo rm /usr/share/qemu/pxe-virtio.rom
            # sudo ln -s /usr/lib/ipxe/qemu/efi-virtio.rom /usr/share/qemu/pxe-virtio.rom
        elif is_fedora; then
            sudo rm /usr/share/qemu/pxe-virtio.rom
            sudo ln -s /usr/share/ipxe.efi/1af41000.rom /usr/share/qemu/pxe-virtio.rom
        fi

        # Restart libvirt to the changes to take effect
        restart_libvirt
    fi

    if is_redfish_enabled || is_deployed_by_redfish; then
        setup_sushy
    fi

    # Virtual BMC/PDU/Redfish emulators are only needed for virtual nodes.
    if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then
        if is_deployed_by_ipmi; then
            install_virtualbmc
        fi
        if is_deployed_by_snmp; then
            install_virtualpdu
        fi
        if is_deployed_by_redfish; then
            install_redfish
        fi
    fi

    if is_drac_enabled; then
        pip_install python-dracclient
    fi

    if is_ansible_deploy_enabled; then
        # Install the pinned ansible version from ironic's driver-requirements.
        pip_install "$(grep '^ansible' $IRONIC_DIR/driver-requirements.txt | awk '{print $1}')"
    fi
}

# install_ironicclient() - Collect sources and prepare
function install_ironicclient {
    if use_library_from_git "python-ironicclient"; then
        git_clone_by_name "python-ironicclient"
        setup_dev_lib "python-ironicclient"
    else
        # nothing actually "requires" ironicclient, so force install from pypi
        pip_install_gr python-ironicclient
    fi
}

# _cleanup_ironic_apache_additions() - Remove uwsgi files, disable and remove apache vhost file
function _cleanup_ironic_apache_additions {
    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]]; then
        sudo rm -rf $IRONIC_HTTP_DIR
        disable_apache_site ipxe-ironic
        sudo rm -f $(apache_site_config_for ipxe-ironic)
    fi

    if [[ "$IRONIC_USE_WSGI" == "True" ]]; then
        remove_uwsgi_config "$IRONIC_UWSGI_CONF" "$IRONIC_UWSGI"
    fi

    restart_apache_server
}

# _config_ironic_apache_ipxe() - Configure ironic IPXE site
function _config_ironic_apache_ipxe {
    local ipxe_apache_conf
    ipxe_apache_conf=$(apache_site_config_for ipxe-ironic)
    sudo cp $IRONIC_DEVSTACK_FILES_DIR/apache-ipxe-ironic.template $ipxe_apache_conf
    sudo sed -e "
        s|%PUBLICPORT%|$IRONIC_HTTP_PORT|g;
        s|%HTTPROOT%|$IRONIC_HTTP_DIR|g;
        s|%APACHELOGDIR%|$APACHE_LOG_DIR|g;
    " -i $ipxe_apache_conf
    enable_apache_site ipxe-ironic
}

#
# cleanup_ironic_config_files() - Remove residual cache/config/log files,
# left over from previous runs that would need to clean up.
function cleanup_ironic_config_files {
    sudo rm -rf $IRONIC_AUTH_CACHE_DIR $IRONIC_CONF_DIR
    sudo rm -rf $IRONIC_VM_LOG_DIR/*
}

# cleanup_ironic() - Clean everything left from Ironic
function cleanup_ironic {
    cleanup_ironic_config_files

    # Cleanup additions made to Apache
    if [[ "$IRONIC_USE_WSGI" == "True" || "$IRONIC_IPXE_ENABLED" == "True" ]]; then
        _cleanup_ironic_apache_additions
    fi

    cleanup_virtualbmc
    cleanup_virtualpdu
    cleanup_redfish

    # Remove the hook to disable log rotate
    sudo rm -rf $IRONIC_LIBVIRT_HOOKS_PATH/qemu
}

# configure_ironic_dirs() - Create all directories required by Ironic and
# associated services.
function configure_ironic_dirs {
    # NOTE(review): the second bare $STACK_USER in the argument list below
    # looks like a stray token - 'install -d' would create a directory
    # literally named after the user in the CWD. Confirm against upstream
    # before changing.
    sudo install -d -o $STACK_USER $IRONIC_CONF_DIR $STACK_USER $IRONIC_DATA_DIR \
        $IRONIC_STATE_PATH $IRONIC_TFTPBOOT_DIR $IRONIC_TFTPBOOT_DIR/pxelinux.cfg
    sudo chown -R $STACK_USER:$STACK_USER $IRONIC_TFTPBOOT_DIR

    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]]; then
        sudo install -d -o $STACK_USER -g $STACK_USER $IRONIC_HTTP_DIR
    fi

    if [ ! -f "$IRONIC_PXE_BOOT_IMAGE" ]; then
        die $LINENO "PXE boot file $IRONIC_PXE_BOOT_IMAGE not found."
    fi

    # Copy PXE binary
    # NOTE(mjturek): The PXE binary is x86_64 specific. So it should only be copied when
    # deploying to an x86_64 node.
    if [[ $IRONIC_HW_ARCH == "x86_64" ]]; then
        cp $IRONIC_PXE_BOOT_IMAGE $IRONIC_TFTPBOOT_DIR
        setup_syslinux_modules
    fi

    if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then
        local uefi_boot_file
        uefi_boot_file=$(get_uefi_ipxe_boot_file)
        if [ ! -f $uefi_boot_file ]; then
            die $LINENO "UEFI boot file $uefi_boot_file not found."
        fi
        cp $uefi_boot_file $IRONIC_TFTPBOOT_DIR

        if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then
            local uefi_loader
            local uefi_nvram
            # Copy the OVMF images to libvirt's path
            uefi_loader=$(get_uefi_loader)
            uefi_nvram=$(get_uefi_nvram)
            sudo cp $uefi_loader $UEFI_LOADER_PATH
            sudo cp $uefi_nvram $UEFI_NVRAM_PATH
        fi
    fi

    # Create the logs directory when saving the deploy logs to the filesystem
    if [[ "$IRONIC_DEPLOY_LOGS_STORAGE_BACKEND" == "local" && "$IRONIC_DEPLOY_LOGS_COLLECT" != "never" ]]; then
        install -d -o $STACK_USER $IRONIC_DEPLOY_LOGS_LOCAL_PATH
    fi
}

# Configure the provision (multi-tenant only), cleaning and rescue networks.
function configure_ironic_networks {
    if [[ -n "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then
        echo_summary "Configuring Ironic provisioning network"
        configure_ironic_provision_network
    fi

    echo_summary "Configuring Ironic cleaning network"
    configure_ironic_cleaning_network

    echo_summary "Configuring Ironic rescue network"
    configure_ironic_rescue_network
}

function configure_ironic_cleaning_network {
    iniset $IRONIC_CONF_FILE neutron cleaning_network $IRONIC_CLEAN_NET_NAME
}

function configure_ironic_rescue_network {
    iniset $IRONIC_CONF_FILE neutron rescuing_network $IRONIC_RESCUE_NET_NAME
}

function configure_ironic_provision_network {
    # This is only called if IRONIC_PROVISION_NETWORK_NAME has been set and
    # means we are using multi-tenant networking.
    local net_id
    local ironic_provision_network_ip
    # NOTE(vsaienko) For multinode case there is no need to create a new provisioning
    # network on subnode, as it was created on primary node. Just get an existed network UUID.
    if [[ "$HOST_TOPOLOGY_ROLE" != "subnode" ]]; then
        die_if_not_set $LINENO IRONIC_PROVISION_SUBNET_PREFIX "You must specify the IRONIC_PROVISION_SUBNET_PREFIX"
        die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
        die_if_not_set $LINENO IRONIC_PROVISION_SUBNET_GATEWAY "You must specify the IRONIC_PROVISION_SUBNET_GATEWAY"
        net_id=$(openstack network create --provider-network-type $IRONIC_PROVISION_PROVIDER_NETWORK_TYPE \
            --provider-physical-network "$PHYSICAL_NETWORK" \
            ${IRONIC_PROVISION_SEGMENTATION_ID:+--provider-segment $IRONIC_PROVISION_SEGMENTATION_ID} \
            ${IRONIC_PROVISION_NETWORK_NAME} -f value -c id)
        die_if_not_set $LINENO net_id "Failure creating net_id for $IRONIC_PROVISION_NETWORK_NAME"

        if [[ "${IRONIC_USE_NEUTRON_SEGMENTS}" == "True" ]]; then
            local net_segment_id
            net_segment_id=$(openstack network segment list --network $net_id -f value -c ID)
            die_if_not_set $LINENO net_segment_id "Failure getting net_segment_id for $IRONIC_PROVISION_NETWORK_NAME"
        fi

        local subnet_id
        subnet_id="$(openstack subnet create --ip-version 4 \
            ${IRONIC_PROVISION_ALLOCATION_POOL:+--allocation-pool $IRONIC_PROVISION_ALLOCATION_POOL} \
            ${net_segment_id:+--network-segment $net_segment_id} \
            $IRONIC_PROVISION_PROVIDER_SUBNET_NAME \
            --gateway $IRONIC_PROVISION_SUBNET_GATEWAY --network $net_id \
            --subnet-range $IRONIC_PROVISION_SUBNET_PREFIX -f value -c id)"
        die_if_not_set $LINENO subnet_id "Failure creating SUBNET_ID for $IRONIC_PROVISION_NETWORK_NAME"

        ironic_provision_network_ip=$IRONIC_PROVISION_SUBNET_GATEWAY
    else
        net_id=$(openstack network show $IRONIC_PROVISION_NETWORK_NAME -f value -c id)
        ironic_provision_network_ip=$IRONIC_PROVISION_SUBNET_SUBNODE_IP
    fi

    IRONIC_PROVISION_SEGMENTATION_ID=${IRONIC_PROVISION_SEGMENTATION_ID:-`openstack network show ${net_id} -f value -c provider:segmentation_id`}
    provision_net_prefix=${IRONIC_PROVISION_SUBNET_PREFIX##*/}

    # Set provision network GW on physical interface
    # Add vlan on br interface in case of IRONIC_PROVISION_PROVIDER_NETWORK_TYPE==vlan
    # othervise assign ip to br interface directly.
    if [[ "$IRONIC_PROVISION_PROVIDER_NETWORK_TYPE" == "vlan" ]]; then
        sudo ip link add link $OVS_PHYSICAL_BRIDGE name $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID type vlan id $IRONIC_PROVISION_SEGMENTATION_ID
        sudo ip link set dev $OVS_PHYSICAL_BRIDGE up
        sudo ip link set dev $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID up
        sudo ip addr add dev $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID $ironic_provision_network_ip/$provision_net_prefix
    else
        sudo ip link set dev $OVS_PHYSICAL_BRIDGE up
        sudo ip addr add dev $OVS_PHYSICAL_BRIDGE $ironic_provision_network_ip/$provision_net_prefix
    fi

    iniset $IRONIC_CONF_FILE neutron provisioning_network $IRONIC_PROVISION_NETWORK_NAME
}

function cleanup_ironic_provision_network {
    # Cleanup OVS_PHYSICAL_BRIDGE subinterfaces
    local bridge_subint
    bridge_subint=$(cat /proc/net/dev | sed -n "s/^\(${OVS_PHYSICAL_BRIDGE}\.[0-9]*\).*/\1/p")
    for sub_int in $bridge_subint; do
        sudo ip link set dev $sub_int down
        sudo ip link del dev $sub_int
    done
}

# configure_ironic() - Set config files, create data dirs, etc
function configure_ironic {
    configure_ironic_dirs

    # (re)create ironic configuration file and configure common parameters.
    rm -f $IRONIC_CONF_FILE
    iniset $IRONIC_CONF_FILE DEFAULT debug True
    inicomment $IRONIC_CONF_FILE DEFAULT log_file
    iniset $IRONIC_CONF_FILE database connection `database_connection_url ironic`
    iniset $IRONIC_CONF_FILE DEFAULT state_path $IRONIC_STATE_PATH
    iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG
    # NOTE(vsaienko) with multinode each conductor should have its own host.
    iniset $IRONIC_CONF_FILE DEFAULT host $LOCAL_HOSTNAME

    # Retrieve deployment logs
    iniset $IRONIC_CONF_FILE agent deploy_logs_collect $IRONIC_DEPLOY_LOGS_COLLECT
    iniset $IRONIC_CONF_FILE agent deploy_logs_storage_backend $IRONIC_DEPLOY_LOGS_STORAGE_BACKEND
    iniset $IRONIC_CONF_FILE agent deploy_logs_local_path $IRONIC_DEPLOY_LOGS_LOCAL_PATH

    # Set image_download_source for direct interface
    iniset $IRONIC_CONF_FILE agent image_download_source $IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE

    # Configure JSON RPC backend
    iniset $IRONIC_CONF_FILE DEFAULT rpc_transport $IRONIC_RPC_TRANSPORT
    iniset $IRONIC_CONF_FILE json_rpc port $IRONIC_JSON_RPC_PORT

    # Set fast track options
    iniset $IRONIC_CONF_FILE deploy fast_track $IRONIC_DEPLOY_FAST_TRACK

    # Set requirement for agent tokens
    iniset $IRONIC_CONF_FILE DEFAULT require_agent_token $IRONIC_REQUIRE_AGENT_TOKEN

    # No need to check if RabbitMQ is enabled, this call does it in a smart way
    if [[ "$IRONIC_RPC_TRANSPORT" == "oslo" ]]; then
        iniset_rpc_backend ironic $IRONIC_CONF_FILE
    fi

    # Configure Ironic conductor, if it was enabled.
    if is_service_enabled ir-cond; then
        configure_ironic_conductor
    fi

    # Configure Ironic API, if it was enabled.
    if is_service_enabled ir-api; then
        configure_ironic_api
    fi

    # Format logging
    setup_logging $IRONIC_CONF_FILE

    # Adds ironic site for IPXE
    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]]; then
        _config_ironic_apache_ipxe
    fi

    # Adds uWSGI for Ironic API
    if [[ "$IRONIC_USE_WSGI" == "True" ]]; then
        write_uwsgi_config "$IRONIC_UWSGI_CONF" "$IRONIC_UWSGI" "/baremetal"
    fi

    if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then
        # The groups change with newer libvirt. Older Ubuntu used
        # 'libvirtd', but now uses libvirt like Debian. Do a quick check
        # to see if libvirtd group already exists to handle grenade's case.
        LIBVIRT_GROUP=$(cut -d ':' -f 1 /etc/group | grep 'libvirtd$' || true)
        LIBVIRT_GROUP=${LIBVIRT_GROUP:-libvirt}
    else
        LIBVIRT_GROUP=libvirtd
    fi
    if ! getent group $LIBVIRT_GROUP >/dev/null; then
        sudo groupadd $LIBVIRT_GROUP
    fi
    # NOTE(vsaienko) Add stack to libvirt group when installing without nova.
    if ! is_service_enabled nova; then
        # Disable power state change callbacks to nova.
        iniset $IRONIC_CONF_FILE nova send_power_notifications false

        add_user_to_group $STACK_USER $LIBVIRT_GROUP

        # This is the basic set of devices allowed / required by all virtual machines.
        # Add /dev/net/tun to cgroup_device_acl, needed for type=ethernet interfaces
        if ! sudo grep -q '^cgroup_device_acl' /etc/libvirt/qemu.conf; then
            # NOTE(review): the source this chunk was extracted from is garbled
            # here - the heredoc that appends cgroup_device_acl to qemu.conf,
            # the end of configure_ironic, and the header of the function that
            # writes the console-log README were lost; the text below resumes
            # mid-stream and is kept verbatim. Restore from upstream.
            cat <${IRONIC_VM_LOG_DIR}/README << EOF
This directory contains the serial console log files from the virtual
Ironic bare-metal nodes. The *_console_* log files are the original log
files and include ANSI control codes which can make the output difficult
to read. The *_no_ansi_* log files have had ANSI control codes removed
from the file and are easier to read.

On some occasions there won't be a corresponding *_no_ansi_* log file,
for example if the job failed due to a time-out.

You may see a log file without a date/time in the file name. In that
case you can display the logfile in your console by doing:
  $ curl URL_TO_LOGFILE
This will have your terminal process the ANSI escape codes.

Another option, if you have the 'pv' executable installed, is to
simulate a low-speed connection. In this example simulate a 300
Bytes/second connection.
  $ curl URL_TO_LOGFILE | pv -q -L 300
This can allow you to see some of the content before the screen is
cleared by an ANSI escape sequence.
EOF
}

# Make sure the libvirt storage pool used for VM disks exists, is
# autostarted and is running.
function initialize_libvirt_storage_pool {
    [ -d $LIBVIRT_STORAGE_POOL_PATH ] || sudo mkdir -p $LIBVIRT_STORAGE_POOL_PATH

    if ! sudo virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then
        sudo virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir \
            --target $LIBVIRT_STORAGE_POOL_PATH >&2
        sudo virsh pool-autostart $LIBVIRT_STORAGE_POOL >&2
        sudo virsh pool-start $LIBVIRT_STORAGE_POOL >&2
    fi

    pool_state=$(sudo virsh pool-info $LIBVIRT_STORAGE_POOL | grep State | awk '{ print $2 }')
    if [ "$pool_state" != "running" ] ; then
        sudo virsh pool-start $LIBVIRT_STORAGE_POOL >&2
    fi
}

# NOTE(review): this function continues past the end of the extracted chunk
# and its 'newgrp ... SUBSHELL' heredoc was mangled by extraction; kept
# verbatim below, do not treat as complete.
function create_bridge_and_vms {
    # Call libvirt setup scripts in a new shell to ensure any new group membership
    sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network.sh $IRONIC_VM_NETWORK_BRIDGE $PUBLIC_BRIDGE_MTU"

    if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
        local log_arg="-l $IRONIC_VM_LOG_DIR"
        if [[ "$IRONIC_VM_LOG_ROTATE" == "True" ]] ; then
            setup_qemu_log_hook
        fi
    else
        local log_arg=""
    fi

    local vbmc_port=$IRONIC_VBMC_PORT_RANGE_START
    local pdu_outlet=$IRONIC_VPDU_PORT_RANGE_START
    local vm_name
    local vm_opts=""
    if [[ -n "$IRONIC_VM_EMULATOR" ]]; then
        vm_opts+=" -e $IRONIC_VM_EMULATOR"
    fi
    vm_opts+=" -E $IRONIC_VM_ENGINE"
    if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then
        vm_opts+=" -L $UEFI_LOADER_PATH -N $UEFI_NVRAM_PATH"
    fi
    if [[ -n "$LIBVIRT_NIC_DRIVER" ]]; then
        vm_opts+=" -D $LIBVIRT_NIC_DRIVER"
    elif [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then
        # Note(derekh) UEFI for the moment doesn't work with the e1000 net driver
        vm_opts+=" -D virtio"
    fi

    initialize_libvirt_storage_pool

    local bridge_mac
    bridge_mac=$(ip link show dev $IRONIC_VM_NETWORK_BRIDGE | grep -Eo "ether [A-Za-z0-9:]+"|sed "s/ether\ //")

    for vm_name in $(_ironic_bm_vm_names); do
        # pick up the $LIBVIRT_GROUP we have possibly joint
        # NOTE(review): the VM-creation commands inside the 'newgrp'
        # heredoc were lost by extraction; the next line is garbled.
        newgrp $LIBVIRT_GROUP <> $IRONIC_VM_MACS_CSV_FILE SUBSHELL
        if is_deployed_by_ipmi; then
            vbmc --no-daemon add $vm_name --port $vbmc_port
            vbmc --no-daemon start $vm_name
        fi
        echo " ${bridge_mac} $IRONIC_VM_NETWORK_BRIDGE" >> $IRONIC_VM_MACS_CSV_FILE
        vbmc_port=$((vbmc_port+1))
        pdu_outlet=$((pdu_outlet+1))
        # It is sometimes useful to
dump out the VM configuration to validate it. sudo virsh dumpxml $vm_name done if [[ -z "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then local ironic_net_id ironic_net_id=$(openstack network show "$PRIVATE_NETWORK_NAME" -c id -f value) create_ovs_taps $ironic_net_id # NOTE(vsaienko) Neutron no longer setup routing to private network. # https://github.com/openstack-dev/devstack/commit/1493bdeba24674f6634160d51b8081c571df4017 # Add route here to have connection to VMs during provisioning. local pub_router_id local r_net_gateway pub_router_id=$(openstack router show $Q_ROUTER_NAME -f value -c id) r_net_gateway=$(sudo ip netns exec qrouter-$pub_router_id ip -4 route get 8.8.8.8 |grep dev | awk '{print $7}') local replace_range=${SUBNETPOOL_PREFIX_V4} if [[ -z "${SUBNETPOOL_V4_ID}" ]]; then replace_range=${FIXED_RANGE} fi sudo ip route replace $replace_range via $r_net_gateway fi # Here is a good place to restart tcpdump to begin capturing packets. # See: https://docs.openstack.org/devstack/latest/debugging.html # stop_tcpdump # start_tcpdump } function wait_for_nova_resources { # After nodes have been enrolled, we need to wait for both ironic and # nova's periodic tasks to populate the resource tracker with available # nodes and resources. Wait up to 2 minutes for a given resource before # timing out. 
local expected_count=$1 local resource_class=${IRONIC_DEFAULT_RESOURCE_CLASS^^} # TODO(dtantsur): switch to Placement OSC plugin, once it exists local token token=$(openstack token issue -f value -c id) local endpoint endpoint=$(openstack endpoint list --service placement --interface public -f value -c URL) die_if_not_set $LINENO endpoint "Cannot find Placement API endpoint" local i local count echo_summary "Waiting up to 3 minutes for placement to pick up $expected_count nodes" for i in $(seq 1 12); do # Fetch provider UUIDs from Placement local providers providers=$(curl -sH "X-Auth-Token: $token" $endpoint/resource_providers \ | jq -r '.resource_providers[].uuid') local p # Total count of the resource class, has to be equal to nodes count count=0 for p in $providers; do local amount # A resource class inventory record looks something like # {"max_unit": 1, "min_unit": 1, "step_size": 1, "reserved": 0, "total": 1, "allocation_ratio": 1} # Subtrack reserved from total (defaulting both to 0) amount=$(curl -sH "X-Auth-Token: $token" $endpoint/resource_providers/$p/inventories \ | jq ".inventories.CUSTOM_$resource_class as \$cls | (\$cls.total // 0) - (\$cls.reserved // 0)") # Check whether the resource provider has all expected traits # registered against it. 
rp_traits=$(curl -sH "X-Auth-Token: $token" \ -H "OpenStack-API-Version: placement 1.6" \ $endpoint/resource_providers/$p/traits) for trait in $IRONIC_DEFAULT_TRAITS; do if [[ $(echo "$rp_traits" | jq ".traits | contains([\"$trait\"])") == false ]]; then amount=0 fi done if [ $amount -gt 0 ]; then count=$(( count + $amount )) fi done if [ $count -ge $expected_count ]; then return 0 fi if is_service_enabled n-api; then $TOP_DIR/tools/discover_hosts.sh fi sleep 15 done die $LINENO "Timed out waiting for Nova to track $expected_count nodes" } function _clean_ncpu_failure { SCREEN_NAME=${SCREEN_NAME:-stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} n_cpu_failure="$SERVICE_DIR/$SCREEN_NAME/n-cpu.failure" if [ -f ${n_cpu_failure} ]; then mv ${n_cpu_failure} "${n_cpu_failure}.before-restart-by-ironic" fi } function provide_nodes { local nodes=$@ for node_id in $nodes; do $IRONIC_CMD node provide $node_id done local attempt for attempt in $(seq 1 $IRONIC_CLEANING_ATTEMPTS); do local available available=$(openstack baremetal node list --provision-state available -f value -c UUID) local nodes_not_finished= for node_id in $nodes; do if ! 
echo $available | grep -q $node_id; then nodes_not_finished+=" $node_id" fi done nodes=$nodes_not_finished if [[ "$nodes" == "" ]]; then break fi echo "Waiting for nodes to become available: $nodes" echo "Currently available: $available" sleep $IRONIC_CLEANING_DELAY done if [[ "$nodes" != "" ]]; then die $LINENO "Some nodes did not finish cleaning: $nodes" fi } function wait_for_ironic_neutron_agent_report_state_for_all_nodes { local nodes=$@ echo "Waiting for ironic-neutron-agent to report state for nodes: $nodes" local attempt for attempt in $(seq 1 $IRONIC_NEUTRON_AGENT_REPORT_STATE_ATTEMPTS); do local reported reported=$(openstack network agent list -f value -c Host -c Binary | grep ironic-neutron-agent | cut -d ' ' -f 1 | paste -s -d ' ') echo "Currently reported nodes: $reported" local can_break for node_id in $nodes; do if echo $reported | grep -q $node_id; then can_break="True" else can_break="False" break fi done if [[ $can_break == "True" ]]; then break fi sleep $IRONIC_NEUTRON_AGENT_REPORT_STATE_DELAY done if [[ "$can_break" == "False" ]]; then die $LINENO "ironic-neutron-agent did not report some nodes." 
fi } function enroll_nodes { local chassis_id chassis_id=$($IRONIC_CMD chassis create --description "ironic test chassis" -f value -c uuid) die_if_not_set $LINENO chassis_id "Failed to create chassis" local node_prefix node_prefix=$(get_ironic_node_prefix) local interface_info if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then local ironic_node_cpu=$IRONIC_VM_SPECS_CPU local ironic_node_ram=$IRONIC_VM_SPECS_RAM local ironic_node_disk=$IRONIC_VM_SPECS_DISK local ironic_ephemeral_disk=$IRONIC_VM_EPHEMERAL_DISK local ironic_node_arch=x86_64 local ironic_hwinfo_file=$IRONIC_VM_MACS_CSV_FILE if is_deployed_by_ipmi; then local node_options="\ --driver-info ipmi_address=${HOST_IP} \ --driver-info ipmi_username=admin \ --driver-info ipmi_password=password" elif is_deployed_by_snmp; then local node_options="\ --driver-info snmp_driver=${IRONIC_VPDU_SNMPDRIVER} \ --driver-info snmp_address=${HOST_IP} \ --driver-info snmp_port=${IRONIC_VPDU_LISTEN_PORT} \ --driver-info snmp_protocol=2c \ --driver-info snmp_community=${IRONIC_VPDU_COMMUNITY}" elif is_deployed_by_redfish; then local node_options="\ --driver-info redfish_address=http://${HOST_IP}:${IRONIC_REDFISH_EMULATOR_PORT} \ --driver-info redfish_username=admin \ --driver-info redfish_password=password" fi else local ironic_node_cpu=$IRONIC_HW_NODE_CPU local ironic_node_ram=$IRONIC_HW_NODE_RAM local ironic_node_disk=$IRONIC_HW_NODE_DISK local ironic_ephemeral_disk=$IRONIC_HW_EPHEMERAL_DISK local ironic_node_arch=$IRONIC_HW_ARCH local ironic_hwinfo_file=$IRONIC_HWINFO_FILE fi local total_nodes=0 local total_cpus=0 local node_uuids= local node_id while read hardware_info; do local node_name node_name=$node_prefix-$total_nodes local node_capabilities="" if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then node_capabilities+=" --property capabilities=boot_mode:uefi" fi if [[ "$IRONIC_SECURE_BOOT" == "True" ]]; then if [[ -n "$node_capabilities" ]]; then node_capabilities+=",secure_boot:true" else node_capabilities+=" --property 
capabilities=secure_boot:true" fi fi if [[ "$IRONIC_IS_HARDWARE" == "False" ]]; then interface_info=$(echo $hardware_info | awk '{print $1}') if is_deployed_by_ipmi; then local vbmc_port vbmc_port=$(echo $hardware_info | awk '{print $2}') node_options+=" --driver-info ipmi_port=$vbmc_port" elif is_deployed_by_snmp; then local pdu_outlet pdu_outlet=$(echo $hardware_info | awk '{print $3}') node_options+=" --driver-info snmp_outlet=$pdu_outlet" elif is_deployed_by_redfish; then node_options+=" --driver-info redfish_system_id=/redfish/v1/Systems/$node_name" fi # Local-link-connection options local llc_opts="" if [[ "${IRONIC_USE_LINK_LOCAL}" == "True" ]]; then local switch_info local switch_id switch_id=$(echo $hardware_info |awk '{print $4}') switch_info=$(echo $hardware_info |awk '{print $5}') # NOTE(vsaienko) we will add port_id later in the code. llc_opts="--local-link-connection switch_id=${switch_id} \ --local-link-connection switch_info=${switch_info} " fi if [[ "${IRONIC_STORAGE_INTERFACE}" == "cinder" ]]; then local connector_iqn="iqn.2017-05.org.openstack.$node_prefix-$total_nodes" if [[ -n "$node_capabilities" ]]; then node_capabilities+=",iscsi_boot:True" else node_capabilities+=" --property capabilities=iscsi_boot:True" fi fi else # Currently we require all hardware platform have same CPU/RAM/DISK info # in future, this can be enhanced to support different type, and then # we create the bare metal flavor with minimum value local bmc_address bmc_address=$(echo $hardware_info |awk '{print $1}') local mac_address mac_address=$(echo $hardware_info |awk '{print $2}') local bmc_username bmc_username=$(echo $hardware_info |awk '{print $3}') local bmc_passwd bmc_passwd=$(echo $hardware_info |awk '{print $4}') local node_options="" if is_deployed_by_ipmi; then node_options+=" --driver-info ipmi_address=$bmc_address \ --driver-info ipmi_password=$bmc_passwd \ --driver-info ipmi_username=$bmc_username" elif is_deployed_by_ilo; then node_options+=" --driver-info 
ilo_address=$bmc_address \ --driver-info ilo_password=$bmc_passwd \ --driver-info ilo_username=$bmc_username" if [[ $IRONIC_ENABLED_BOOT_INTERFACES == *"ilo-virtual-media"* ]]; then node_options+=" --driver-info ilo_deploy_iso=$IRONIC_DEPLOY_ISO_ID" fi elif is_deployed_by_drac; then node_options+=" --driver-info drac_address=$bmc_address \ --driver-info drac_password=$bmc_passwd \ --driver-info drac_username=$bmc_username" elif is_deployed_by_redfish; then local bmc_redfish_system_id bmc_redfish_system_id=$(echo $hardware_info |awk '{print $5}') node_options+=" --driver-info redfish_address=https://$bmc_address \ --driver-info redfish_system_id=$bmc_redfish_system_id \ --driver-info redfish_password=$bmc_passwd \ --driver-info redfish_username=$bmc_username \ --driver-info redfish_verify_ca=False" elif is_deployed_by_irmc; then node_options+=" --driver-info irmc_address=$bmc_address \ --driver-info irmc_password=$bmc_passwd \ --driver-info irmc_username=$bmc_username" if [[ -n "$IRONIC_DEPLOY_ISO_ID" ]]; then node_options+=" --driver-info irmc_deploy_iso=$IRONIC_DEPLOY_ISO_ID" fi elif is_deployed_by_xclarity; then local xclarity_hardware_id xclarity_hardware_id=$(echo $hardware_info |awk '{print $5}') node_options+=" --driver-info xclarity_manager_ip=$bmc_address \ --driver-info xclarity_password=$bmc_passwd \ --driver-info xclarity_username=$bmc_username \ --driver-info xclarity_hardware_id=$xclarity_hardware_id" fi interface_info="${mac_address}" fi # First node created will be used for testing in ironic w/o glance # scenario, so we need to know its UUID. local standalone_node_uuid="" if [ $total_nodes -eq 0 ]; then standalone_node_uuid="--uuid $IRONIC_NODE_UUID" fi # TODO(dtantsur): it would be cool to test with different resource # classes, but for now just use the same. 
node_id=$($IRONIC_CMD node create $standalone_node_uuid \ --chassis $chassis_id \ --driver $IRONIC_DEPLOY_DRIVER \ --name $node_name \ --resource-class $IRONIC_DEFAULT_RESOURCE_CLASS \ --property cpu_arch=$ironic_node_arch \ $node_capabilities \ $node_options \ -f value -c uuid) die_if_not_set $LINENO node_id "Failed to create node" node_uuids+=" $node_id" if [[ -n $IRONIC_DEFAULT_TRAITS ]]; then $IRONIC_CMD node add trait $node_id $IRONIC_DEFAULT_TRAITS fi $IRONIC_CMD node manage $node_id --wait $IRONIC_MANAGE_TIMEOUT || \ die $LINENO "Node did not reach manageable state in $IRONIC_MANAGE_TIMEOUT seconds" # NOTE(vsaienko) IPA didn't automatically recognize root devices less than 4Gb. # Setting root hint allows to install OS on such devices. # 0x1af4 is VirtIO vendor device ID. if [[ "$ironic_node_disk" -lt "4" && is_deployed_by_agent ]]; then $IRONIC_CMD node set $node_id --property \ root_device='{"vendor": "0x1af4"}' fi # In case we using portgroups, we should API version that support them. # Othervise API will return 406 ERROR # NOTE(vsaienko) interface_info is in the following format here: # mac1,tap-node0i1;mac2,tap-node0i2;...;macN,tap-node0iN for info in ${interface_info//;/ }; do local mac_address="" local port_id="" local llc_port_opt="" local physical_network="" mac_address=$(echo $info| awk -F ',' '{print $1}') port_id=$(echo $info| awk -F ',' '{print $2}') if [[ "${IRONIC_USE_LINK_LOCAL}" == "True" ]]; then llc_port_opt+=" --local-link-connection port_id=${port_id} " fi if [[ "${IRONIC_USE_NEUTRON_SEGMENTS}" == "True" ]]; then physical_network=" --physical-network ${PHYSICAL_NETWORK} " fi $IRONIC_CMD port create --node $node_id $llc_opts $llc_port_opt $mac_address $physical_network done # NOTE(vsaienko) use node-update instead of specifying network_interface # during node creation. If node is added with latest version of API it # will NOT go to available state automatically. 
if [[ -n "${IRONIC_NETWORK_INTERFACE}" ]]; then $IRONIC_CMD node set $node_id --network-interface $IRONIC_NETWORK_INTERFACE || \ die $LINENO "Failed to update network interface for node" fi if [[ -n "${IRONIC_STORAGE_INTERFACE}" ]]; then $IRONIC_CMD node set $node_id --storage-interface $IRONIC_STORAGE_INTERFACE || \ die $LINENO "Failed to update storage interface for node $node_id" if [[ -n "${connector_iqn}" ]]; then $IRONIC_CMD volume connector create --node $node_id --type iqn \ --connector-id $connector_iqn || \ die $LINENO "Failed to create volume connector for node $node_id" fi fi total_nodes=$((total_nodes+1)) done < $ironic_hwinfo_file # NOTE(hjensas): ensure ironic-neutron-agent has done report_state for all # nodes we attempt cleaning. if [[ "${IRONIC_USE_NEUTRON_SEGMENTS}" == "True" ]]; then wait_for_ironic_neutron_agent_report_state_for_all_nodes $node_uuids fi # NOTE(dtantsur): doing it outside of the loop, because of cleaning provide_nodes $node_uuids if is_service_enabled nova && [[ "$VIRT_DRIVER" == "ironic" ]]; then if [[ "$HOST_TOPOLOGY_ROLE" != "subnode" ]]; then local adjusted_disk adjusted_disk=$(($ironic_node_disk - $ironic_ephemeral_disk)) openstack flavor create --ephemeral $ironic_ephemeral_disk --ram $ironic_node_ram --disk $adjusted_disk --vcpus $ironic_node_cpu baremetal local resource_class=${IRONIC_DEFAULT_RESOURCE_CLASS^^} openstack flavor set baremetal --property "resources:CUSTOM_$resource_class"="1" openstack flavor set baremetal --property "resources:DISK_GB"="0" openstack flavor set baremetal --property "resources:MEMORY_MB"="0" openstack flavor set baremetal --property "resources:VCPU"="0" openstack flavor set baremetal --property "cpu_arch"="$ironic_node_arch" if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then openstack flavor set baremetal --property "capabilities:boot_mode"="uefi" fi for trait in $IRONIC_DEFAULT_TRAITS; do openstack flavor set baremetal --property "trait:$trait"="required" done if [[ "$IRONIC_SECURE_BOOT" == 
"True" ]]; then openstack flavor set baremetal --property "capabilities:secure_boot"="true" fi # NOTE(dtantsur): sometimes nova compute fails to start with ironic due # to keystone restarting and not being able to authenticate us. # Restart it just to be sure (and avoid gate problems like bug 1537076) stop_nova_compute || /bin/true # NOTE(pas-ha) if nova compute failed before restart, .failure file # that was created will fail the service_check in the end of the deployment _clean_ncpu_failure start_nova_compute else # NOTE(vsaienko) we enrolling IRONIC_VM_COUNT on each node. So on subnode # we expect to have 2 x total_cpus total_nodes=$(( total_nodes * 2 )) fi wait_for_nova_resources $total_nodes fi } function die_if_module_not_loaded { if ! grep -q $1 /proc/modules; then die $LINENO "$1 kernel module is not loaded" fi } function configure_iptables { # enable tftp natting for allowing connections to HOST_IP's tftp server if ! running_in_container; then sudo modprobe nf_conntrack_tftp sudo modprobe nf_nat_tftp else die_if_module_not_loaded nf_conntrack_tftp die_if_module_not_loaded nf_nat_tftp fi # explicitly allow DHCP - packets are occasionally being dropped here sudo iptables -I INPUT -p udp --dport 67:68 --sport 67:68 -j ACCEPT || true # nodes boot from TFTP and callback to the API server listening on $HOST_IP sudo iptables -I INPUT -d $IRONIC_TFTPSERVER_IP -p udp --dport 69 -j ACCEPT || true # To use named /baremetal endpoint we should open default apache port if [[ "$IRONIC_USE_WSGI" == "False" ]]; then sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true # open ironic API on baremetal network sudo iptables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true # allow IPA to connect to ironic API on subnode sudo iptables -I FORWARD -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true else sudo iptables -I INPUT -d $HOST_IP -p tcp --dport 80 -j ACCEPT || true sudo iptables -I INPUT -d $HOST_IP 
-p tcp --dport 443 -j ACCEPT || true # open ironic API on baremetal network sudo iptables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport 80 -j ACCEPT || true sudo iptables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport 443 -j ACCEPT || true fi if is_deployed_by_agent; then # agent ramdisk gets instance image from swift sudo iptables -I INPUT -d $HOST_IP -p tcp --dport ${SWIFT_DEFAULT_BIND_PORT:-8080} -j ACCEPT || true sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $GLANCE_SERVICE_PORT -j ACCEPT || true fi if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then sudo iptables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport $IRONIC_HTTP_PORT -j ACCEPT || true fi if [[ "${IRONIC_STORAGE_INTERFACE}" == "cinder" ]]; then sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $ISCSI_SERVICE_PORT -s $FLOATING_RANGE -j ACCEPT || true fi # (rpittau) workaround to allow TFTP traffic on ubuntu bionic with conntrack helper disabled local qrouter qrouter=$(sudo ip netns list | grep qrouter | awk '{print $1;}') if [[ ! 
-z "$qrouter" ]]; then sudo ip netns exec $qrouter /sbin/iptables -A PREROUTING -t raw -p udp --dport 69 -j CT --helper tftp fi } function configure_tftpd { # stop tftpd and setup serving via xinetd stop_service tftpd-hpa || true [ -f /etc/init/tftpd-hpa.conf ] && echo "manual" | sudo tee /etc/init/tftpd-hpa.override sudo cp $IRONIC_TEMPLATES_DIR/tftpd-xinetd.template /etc/xinetd.d/tftp sudo sed -e "s|%TFTPBOOT_DIR%|$IRONIC_TFTPBOOT_DIR|g" -i /etc/xinetd.d/tftp sudo sed -e "s|%MAX_BLOCKSIZE%|$IRONIC_TFTP_BLOCKSIZE|g" -i /etc/xinetd.d/tftp # setup tftp file mapping to satisfy requests at the root (booting) and # /tftpboot/ sub-dir (as per deploy-ironic elements) # this section is only for ubuntu and fedora if [[ "$IRONIC_IPXE_ENABLED" == "False" && \ ( "$IRONIC_BOOT_MODE" == "uefi" || "$IRONIC_SECURE_BOOT" == "True" ) && \ "$IRONIC_UEFI_BOOT_LOADER" == "grub2" ]]; then local grub_dir echo "re ^($IRONIC_TFTPBOOT_DIR/) $IRONIC_TFTPBOOT_DIR/\2" >$IRONIC_TFTPBOOT_DIR/map-file echo "re ^$IRONIC_TFTPBOOT_DIR/ $IRONIC_TFTPBOOT_DIR/" >>$IRONIC_TFTPBOOT_DIR/map-file echo "re ^(^/) $IRONIC_TFTPBOOT_DIR/\1" >>$IRONIC_TFTPBOOT_DIR/map-file echo "re ^([^/]) $IRONIC_TFTPBOOT_DIR/\1" >>$IRONIC_TFTPBOOT_DIR/map-file sudo cp $IRONIC_GRUB2_SHIM_FILE $IRONIC_TFTPBOOT_DIR/bootx64.efi if is_fedora; then grub_subdir="EFI/fedora" elif is_ubuntu; then grub_subdir="boot/grub" fi grub_dir=$IRONIC_TFTPBOOT_DIR/$grub_subdir mkdir -p $grub_dir # Grub looks for numerous files when the grubnetx.efi binary is used :\ # specifically .lst files which define module lists which we can't seem # to find on disk. That being said, the grub-mknetdir utility generates # these files for us. 
grub-mknetdir --net-directory="$IRONIC_TFTPBOOT_DIR" --subdir="$grub_subdir" sudo cp $grub_dir/x86_64-efi/core.efi $IRONIC_TFTPBOOT_DIR/grubx64.efi cat << EOF > $grub_dir/grub.cfg set default=master set timeout=1 set hidden_timeout_quiet=false menuentry "master" { configfile $IRONIC_TFTPBOOT_DIR/\$net_default_mac.conf } EOF chmod 644 $grub_dir/grub.cfg iniset $IRONIC_CONF_FILE pxe uefi_pxe_config_template '$pybasedir/drivers/modules/pxe_grub_config.template' iniset $IRONIC_CONF_FILE pxe uefi_pxe_bootfile_name "bootx64.efi" else echo "r ^([^/]) $IRONIC_TFTPBOOT_DIR/\1" >$IRONIC_TFTPBOOT_DIR/map-file echo "r ^(/tftpboot/) $IRONIC_TFTPBOOT_DIR/\2" >>$IRONIC_TFTPBOOT_DIR/map-file fi sudo chmod -R 0755 $IRONIC_TFTPBOOT_DIR restart_service xinetd } function build_ipa_ramdisk { local kernel_path=$1 local ramdisk_path=$2 local iso_path=$3 case $IRONIC_RAMDISK_TYPE in 'tinyipa') build_tinyipa_ramdisk $kernel_path $ramdisk_path $iso_path ;; 'dib') build_ipa_dib_ramdisk $kernel_path $ramdisk_path $iso_path ;; *) die $LINENO "Unrecognised IRONIC_RAMDISK_TYPE: $IRONIC_RAMDISK_TYPE. Expected either of 'dib' or 'tinyipa'." 
;; esac } function setup_ipa_builder { git_clone $IRONIC_PYTHON_AGENT_BUILDER_REPO $IRONIC_PYTHON_AGENT_BUILDER_DIR $IRONIC_PYTHON_AGENT_BUILDER_BRANCH } function build_tinyipa_ramdisk { echo "Building ironic-python-agent deploy ramdisk" local kernel_path=$1 local ramdisk_path=$2 local iso_path=$3 cd $IRONIC_PYTHON_AGENT_BUILDER_DIR/tinyipa export BUILD_AND_INSTALL_TINYIPA=true if is_ansible_deploy_enabled; then export AUTHORIZE_SSH=true export SSH_PUBLIC_KEY=$IRONIC_ANSIBLE_SSH_KEY.pub fi make cp tinyipa.gz $ramdisk_path cp tinyipa.vmlinuz $kernel_path if is_deploy_iso_required; then make iso cp tinyipa.iso $iso_path fi make clean cd - } function rebuild_tinyipa_for_ansible { local ansible_tinyipa_ramdisk_name pushd $IRONIC_PYTHON_AGENT_BUILDER_DIR/tinyipa export TINYIPA_RAMDISK_FILE=$IRONIC_DEPLOY_RAMDISK export SSH_PUBLIC_KEY=$IRONIC_ANSIBLE_SSH_KEY.pub make addssh ansible_tinyipa_ramdisk_name="ansible-$(basename $IRONIC_DEPLOY_RAMDISK)" mv $ansible_tinyipa_ramdisk_name $TOP_DIR/files make clean popd IRONIC_DEPLOY_RAMDISK=$TOP_DIR/files/$ansible_tinyipa_ramdisk_name } # install_diskimage_builder() - Collect source and prepare or install from pip function install_diskimage_builder { if use_library_from_git "diskimage-builder"; then git_clone_by_name "diskimage-builder" setup_dev_lib -bindep "diskimage-builder" else local bindep_file bindep_file=$(mktemp) curl -o "$bindep_file" "$IRONIC_DIB_BINDEP_FILE" install_bindep "$bindep_file" pip_install_gr "diskimage-builder" fi } function build_ipa_dib_ramdisk { local kernel_path=$1 local ramdisk_path=$2 local iso_path=$3 local tempdir tempdir=$(mktemp -d --tmpdir=${DEST}) # install diskimage-builder if not present if ! 
$(type -P disk-image-create > /dev/null); then install_diskimage_builder fi echo "Building IPA ramdisk with DIB options: $IRONIC_DIB_RAMDISK_OPTIONS" if is_deploy_iso_required; then IRONIC_DIB_RAMDISK_OPTIONS+=" iso" fi git_clone $IRONIC_PYTHON_AGENT_BUILDER_REPO $IRONIC_PYTHON_AGENT_BUILDER_DIR $IRONIC_PYTHON_AGENT_BUILDER_BRANCH ELEMENTS_PATH="$IRONIC_PYTHON_AGENT_BUILDER_DIR/dib" \ DIB_DHCP_TIMEOUT=$IRONIC_DIB_DHCP_TIMEOUT \ DIB_RELEASE=$IRONIC_DIB_RAMDISK_RELEASE \ DIB_REPOLOCATION_ironic_python_agent="$IRONIC_PYTHON_AGENT_DIR" \ DIB_REPOLOCATION_requirements="$DEST/requirements" \ disk-image-create "$IRONIC_DIB_RAMDISK_OPTIONS" \ -x -o "$tempdir/ironic-agent" \ ironic-python-agent-ramdisk chmod -R +r $tempdir mv "$tempdir/ironic-agent.kernel" "$kernel_path" mv "$tempdir/ironic-agent.initramfs" "$ramdisk_path" if is_deploy_iso_required; then mv "$tempdir/ironic-agent.iso" "$iso_path" fi rm -rf $tempdir } # download EFI boot loader image and upload it to glance # this function sets ``IRONIC_EFIBOOT_ID`` function upload_baremetal_ironic_efiboot { declare -g IRONIC_EFIBOOT_ID local efiboot_name efiboot_name=$(basename $IRONIC_EFIBOOT) echo_summary "Building and uploading EFI boot image for ironic" if [ ! 
-e "$IRONIC_EFIBOOT" ]; then local efiboot_path efiboot_path=$(mktemp -d --tmpdir=${DEST})/$efiboot_name local efiboot_mount efiboot_mount=$(mktemp -d --tmpdir=${DEST}) dd if=/dev/zero \ of=$efiboot_path \ bs=4096 count=1024 mkfs.fat -s 4 -r 512 -S 4096 $efiboot_path sudo mount $efiboot_path $efiboot_mount sudo mkdir -p $efiboot_mount/efi/boot sudo grub-mkimage \ -C xz \ -O x86_64-efi \ -p /boot/grub \ -o $efiboot_mount/efi/boot/bootx64.efi \ boot linux linuxefi search normal configfile \ part_gpt btrfs ext2 fat iso9660 loopback \ test keystatus gfxmenu regexp probe \ efi_gop efi_uga all_video gfxterm font \ echo read ls cat png jpeg halt reboot sudo umount $efiboot_mount mv $efiboot_path $IRONIC_EFIBOOT fi # load efiboot into glance IRONIC_EFIBOOT_ID=$(openstack \ image create \ $efiboot_name \ --public --disk-format=raw \ --container-format=bare \ -f value -c id \ < $IRONIC_EFIBOOT) die_if_not_set $LINENO IRONIC_EFIBOOT_ID "Failed to load EFI bootloader image into glance" iniset $IRONIC_CONF_FILE conductor bootloader $IRONIC_EFIBOOT_ID } # build deploy kernel+ramdisk, then upload them to glance # this function sets ``IRONIC_DEPLOY_KERNEL_ID``, ``IRONIC_DEPLOY_RAMDISK_ID`` function upload_baremetal_ironic_deploy { declare -g IRONIC_DEPLOY_KERNEL_ID IRONIC_DEPLOY_RAMDISK_ID local ironic_deploy_kernel_name local ironic_deploy_ramdisk_name ironic_deploy_kernel_name=$(basename $IRONIC_DEPLOY_KERNEL) ironic_deploy_ramdisk_name=$(basename $IRONIC_DEPLOY_RAMDISK) if [[ "$HOST_TOPOLOGY_ROLE" != "subnode" ]]; then echo_summary "Creating and uploading baremetal images for ironic" if [ ! -e "$IRONIC_DEPLOY_RAMDISK" ] || \ [ ! -e "$IRONIC_DEPLOY_KERNEL" ] || \ ( is_deploy_iso_required && [ ! 
-e "$IRONIC_DEPLOY_ISO" ] ); then # setup IRONIC_PYTHON_AGENT_BUILDER_DIR setup_ipa_builder # files don't exist, need to build them if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then # we can build them only if we're not offline if [ "$OFFLINE" != "True" ]; then build_ipa_ramdisk $IRONIC_DEPLOY_KERNEL $IRONIC_DEPLOY_RAMDISK $IRONIC_DEPLOY_ISO else die $LINENO "Deploy kernel+ramdisk or iso files don't exist and cannot be built in OFFLINE mode" fi else # Grab the agent image tarball, either from a local file or remote URL if [[ "$IRONIC_AGENT_KERNEL_URL" =~ "file://" ]]; then cp ${IRONIC_AGENT_KERNEL_URL:7} $IRONIC_DEPLOY_KERNEL else wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_DEPLOY_KERNEL fi if [[ "$IRONIC_AGENT_RAMDISK_URL" =~ "file://" ]]; then cp ${IRONIC_AGENT_RAMDISK_URL:7} $IRONIC_DEPLOY_RAMDISK else wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_DEPLOY_RAMDISK fi if is_ansible_with_tinyipa; then # NOTE(pas-ha) if using ansible-deploy and tinyipa, # this will rebuild ramdisk and override $IRONIC_DEPLOY_RAMDISK rebuild_tinyipa_for_ansible fi fi fi # load them into glance if ! 
is_deploy_iso_required; then IRONIC_DEPLOY_KERNEL_ID=$(openstack \ image create \ $ironic_deploy_kernel_name \ --public --disk-format=aki \ --container-format=aki \ < $IRONIC_DEPLOY_KERNEL | grep ' id ' | get_field 2) die_if_not_set $LINENO IRONIC_DEPLOY_KERNEL_ID "Failed to load kernel image into glance" IRONIC_DEPLOY_RAMDISK_ID=$(openstack \ image create \ $ironic_deploy_ramdisk_name \ --public --disk-format=ari \ --container-format=ari \ < $IRONIC_DEPLOY_RAMDISK | grep ' id ' | get_field 2) die_if_not_set $LINENO IRONIC_DEPLOY_RAMDISK_ID "Failed to load ramdisk image into glance" else IRONIC_DEPLOY_ISO_ID=$(openstack \ image create \ $(basename $IRONIC_DEPLOY_ISO) \ --public --disk-format=iso \ --container-format=bare \ < $IRONIC_DEPLOY_ISO -f value -c id) die_if_not_set $LINENO IRONIC_DEPLOY_ISO_ID "Failed to load deploy iso into glance" fi else if is_ansible_with_tinyipa; then ironic_deploy_ramdisk_name="ansible-$ironic_deploy_ramdisk_name" fi IRONIC_DEPLOY_KERNEL_ID=$(openstack image show $ironic_deploy_kernel_name -f value -c id) IRONIC_DEPLOY_RAMDISK_ID=$(openstack image show $ironic_deploy_ramdisk_name -f value -c id) fi iniset $IRONIC_CONF_FILE conductor deploy_kernel $IRONIC_DEPLOY_KERNEL_ID iniset $IRONIC_CONF_FILE conductor deploy_ramdisk $IRONIC_DEPLOY_RAMDISK_ID iniset $IRONIC_CONF_FILE conductor rescue_kernel $IRONIC_DEPLOY_KERNEL_ID iniset $IRONIC_CONF_FILE conductor rescue_ramdisk $IRONIC_DEPLOY_RAMDISK_ID } function prepare_baremetal_basic_ops { if [[ "$IRONIC_BAREMETAL_BASIC_OPS" != "True" ]]; then return 0 fi if ! 
is_service_enabled nova && [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then local image_file_path if [[ ${IRONIC_WHOLEDISK_IMAGE_NAME} =~ \.img$ ]]; then image_file_path=$FILES/${IRONIC_WHOLEDISK_IMAGE_NAME} else image_file_path=$FILES/${IRONIC_WHOLEDISK_IMAGE_NAME}.img fi sudo install -g $LIBVIRT_GROUP -o $STACK_USER -m 644 $image_file_path $IRONIC_HTTP_DIR fi upload_baremetal_ironic_deploy if [[ "$IRONIC_BOOT_MODE" == "uefi" && is_deployed_by_redfish ]]; then upload_baremetal_ironic_efiboot fi configure_tftpd configure_iptables } function cleanup_baremetal_basic_ops { if [[ "$IRONIC_BAREMETAL_BASIC_OPS" != "True" ]]; then return 0 fi rm -f $IRONIC_VM_MACS_CSV_FILE sudo rm -rf $IRONIC_DATA_DIR $IRONIC_STATE_PATH local vm_name for vm_name in $(_ironic_bm_vm_names); do # Delete the Virtual BMCs if is_deployed_by_ipmi; then vbmc --no-daemon list | grep -a $vm_name && vbmc --no-daemon delete $vm_name || /bin/true fi # pick up the $LIBVIRT_GROUP we have possibly joint newgrp $LIBVIRT_GROUP < """ CONSOLE_PTY = """ """ def main(): parser = argparse.ArgumentParser( description="Configure a kvm virtual machine for the seed image.") parser.add_argument('--name', default='seed', help='the name to give the machine in libvirt.') parser.add_argument('--image', action='append', default=[], help='Use a custom image file (must be qcow2).') parser.add_argument('--engine', default='qemu', help='The virtualization engine to use') parser.add_argument('--arch', default='i686', help='The architecture to use') parser.add_argument('--memory', default='2097152', help="Maximum memory for the VM in KB.") parser.add_argument('--cpus', default='1', help="CPU count for the VM.") parser.add_argument('--bootdev', default='hd', help="What boot device to use (hd/network).") parser.add_argument('--libvirt-nic-driver', default='virtio', help='The libvirt network driver to use') parser.add_argument('--interface-count', default=1, type=int, help='The number of interfaces to add to VM.'), 
parser.add_argument('--mac', default=None, help='The mac for the first interface on the vm') parser.add_argument('--console-log', help='File to log console') parser.add_argument('--emulator', default=None, help='Path to emulator bin for vm template') parser.add_argument('--disk-format', default='qcow2', help='Disk format to use.') parser.add_argument('--uefi-loader', default='', help='The absolute path of the UEFI firmware blob.') parser.add_argument('--uefi-nvram', default='', help=('The absolute path of the non-volatile memory ' 'to store the UEFI variables. Should be used ' 'only when --uefi-loader is also specified.')) args = parser.parse_args() env = jinja2.Environment(loader=jinja2.FileSystemLoader(templatedir)) template = env.get_template('vm.xml') images = list(zip(args.image, string.ascii_lowercase)) if not images or len(images) > 6: # 6 is an artificial limitation because of the way we generate PCI IDs sys.exit("Up to 6 images are required") params = { 'name': args.name, 'images': images, 'engine': args.engine, 'arch': args.arch, 'memory': args.memory, 'cpus': args.cpus, 'bootdev': args.bootdev, 'interface_count': args.interface_count, 'mac': args.mac, 'nicdriver': args.libvirt_nic_driver, 'emulator': args.emulator, 'disk_format': args.disk_format, 'uefi_loader': args.uefi_loader, 'uefi_nvram': args.uefi_nvram, } if args.emulator: params['emulator'] = args.emulator else: qemu_kvm_locations = ['/usr/bin/kvm', '/usr/bin/qemu-kvm', '/usr/libexec/qemu-kvm'] for location in qemu_kvm_locations: if os.path.exists(location): params['emulator'] = location break else: raise RuntimeError("Unable to find location of kvm executable") if args.console_log: params['console'] = CONSOLE_LOG % {'console_log': args.console_log} else: params['console'] = CONSOLE_PTY libvirt_template = template.render(**params) conn = libvirt.open("qemu:///system") a = conn.defineXML(libvirt_template) print("Created machine %s with UUID %s" % (args.name, a.UUIDString())) if __name__ == 
'__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/tools/ironic/scripts/create-node.sh0000755000175000017500000001025200000000000025017 0ustar00coreycorey00000000000000#!/usr/bin/env bash # **create-nodes** # Creates baremetal poseur nodes for ironic testing purposes set -ex # Make tracing more educational export PS4='+ ${BASH_SOURCE:-}:${FUNCNAME[0]:-}:L${LINENO:-}: ' # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) while getopts "n:c:i:m:M:d:a:b:e:E:p:o:f:l:L:N:A:D:v:P:" arg; do case $arg in n) NAME=$OPTARG;; c) CPU=$OPTARG;; i) INTERFACE_COUNT=$OPTARG;; M) INTERFACE_MTU=$OPTARG;; m) MEM=$(( 1024 * OPTARG ));; # Extra G to allow fuzz for partition table : flavor size and registered # size need to be different to actual size. d) DISK=$(( OPTARG + 1 ));; a) ARCH=$OPTARG;; b) BRIDGE=$OPTARG;; e) EMULATOR=$OPTARG;; E) ENGINE=$OPTARG;; p) VBMC_PORT=$OPTARG;; o) PDU_OUTLET=$OPTARG;; f) DISK_FORMAT=$OPTARG;; l) LOGDIR=$OPTARG;; L) UEFI_LOADER=$OPTARG;; N) UEFI_NVRAM=$OPTARG;; A) MAC_ADDRESS=$OPTARG;; D) NIC_DRIVER=$OPTARG;; v) VOLUME_COUNT=$OPTARG;; P) STORAGE_POOL=$OPTARG;; esac done shift $(( $OPTIND - 1 )) if [ -z "$UEFI_LOADER" ] && [ ! -z "$UEFI_NVRAM" ]; then echo "Parameter -N (UEFI NVRAM) cannot be used without -L (UEFI Loader)" exit 1 fi LIBVIRT_NIC_DRIVER=${NIC_DRIVER:-"e1000"} LIBVIRT_STORAGE_POOL=${STORAGE_POOL:-"default"} LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"} export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI if [ -n "$LOGDIR" ] ; then mkdir -p "$LOGDIR" fi PREALLOC= if [ -f /etc/debian_version -a "$DISK_FORMAT" == "qcow2" ]; then PREALLOC="--prealloc-metadata" fi if [ -n "$LOGDIR" ] ; then VM_LOGGING="--console-log $LOGDIR/${NAME}_console.log" else VM_LOGGING="" fi UEFI_OPTS="" if [ ! -z "$UEFI_LOADER" ]; then UEFI_OPTS="--uefi-loader $UEFI_LOADER" if [ ! 
-z "$UEFI_NVRAM" ]; then UEFI_OPTS+=" --uefi-nvram $UEFI_NVRAM" fi fi # Create bridge and add VM interface to it. # Additional interface will be added to this bridge and # it will be plugged to OVS. # This is needed in order to have interface in OVS even # when VM is in shutdown state INTERFACE_COUNT=${INTERFACE_COUNT:-1} for int in $(seq 1 $INTERFACE_COUNT); do tapif=tap-${NAME}i${int} ovsif=ovs-${NAME}i${int} # NOTE(vsaienko) use veth pair here to ensure that interface # exists in OVS even when VM is powered off. sudo ip link add dev $tapif type veth peer name $ovsif for l in $tapif $ovsif; do sudo ip link set dev $l up sudo ip link set $l mtu $INTERFACE_MTU done sudo ovs-vsctl add-port $BRIDGE $ovsif done if [ -n "$MAC_ADDRESS" ] ; then MAC_ADDRESS="--mac $MAC_ADDRESS" fi VOLUME_COUNT=${VOLUME_COUNT:-1} if ! virsh list --all | grep -q $NAME; then vm_opts="" for int in $(seq 1 $VOLUME_COUNT); do if [[ "$int" == "1" ]]; then # Compatibility with old naming vol_name="$NAME.$DISK_FORMAT" else vol_name="$NAME-$int.$DISK_FORMAT" fi virsh vol-list --pool $LIBVIRT_STORAGE_POOL | grep -q $vol_name && virsh vol-delete $vol_name --pool $LIBVIRT_STORAGE_POOL >&2 virsh vol-create-as $LIBVIRT_STORAGE_POOL ${vol_name} ${DISK}G --format $DISK_FORMAT $PREALLOC >&2 volume_path=$(virsh vol-path --pool $LIBVIRT_STORAGE_POOL $vol_name) # Pre-touch the VM to set +C, as it can only be set on empty files. 
sudo touch "$volume_path" sudo chattr +C "$volume_path" || true vm_opts+="--image $volume_path " done if [[ -n "$EMULATOR" ]]; then vm_opts+="--emulator $EMULATOR " fi $PYTHON $TOP_DIR/scripts/configure-vm.py \ --bootdev network --name $NAME \ --arch $ARCH --cpus $CPU --memory $MEM --libvirt-nic-driver $LIBVIRT_NIC_DRIVER \ --disk-format $DISK_FORMAT $VM_LOGGING --engine $ENGINE $UEFI_OPTS $vm_opts \ --interface-count $INTERFACE_COUNT $MAC_ADDRESS >&2 fi # echo mac in format mac1,ovs-node-0i1;mac2,ovs-node-0i2;...;macN,ovs-node0iN VM_MAC=$(echo -n $(virsh domiflist $NAME |awk '/tap-/{print $5","$3}')|tr ' ' ';' |sed s/tap-/ovs-/g) echo -n "$VM_MAC $VBMC_PORT $PDU_OUTLET" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/tools/ironic/scripts/setup-network.sh0000755000175000017500000000210200000000000025453 0ustar00coreycorey00000000000000#!/usr/bin/env bash # **setup-network** # Setups openvswitch libvirt network suitable for # running baremetal poseur nodes for ironic testing purposes set -exu # Make tracing more educational export PS4='+ ${BASH_SOURCE:-}:${FUNCNAME[0]:-}:L${LINENO:-}: ' LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"} # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) BRIDGE_NAME=${1:-brbm} PUBLIC_BRIDGE_MTU=${2:-1500} export VIRSH_DEFAULT_CONNECT_URI="$LIBVIRT_CONNECT_URI" # Only add bridge if missing. Bring it UP. (sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}) || sudo ovs-vsctl add-br ${BRIDGE_NAME} sudo ip link set dev ${BRIDGE_NAME} up # Remove bridge before replacing it. 
(virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME} (virsh net-list --inactive | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME} virsh net-define <(sed s/brbm/$BRIDGE_NAME/ $TOP_DIR/templates/brbm.xml) virsh net-autostart ${BRIDGE_NAME} virsh net-start ${BRIDGE_NAME} sudo ip link set dev ${BRIDGE_NAME} mtu $PUBLIC_BRIDGE_MTU ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1743991 ironic-14.0.1.dev163/devstack/tools/ironic/templates/0000755000175000017500000000000000000000000022601 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/tools/ironic/templates/brbm.xml0000644000175000017500000000020000000000000024235 0ustar00coreycorey00000000000000 brbm ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/tools/ironic/templates/tftpd-xinetd.template0000644000175000017500000000070400000000000026751 0ustar00coreycorey00000000000000service tftp { protocol = udp port = 69 socket_type = dgram wait = yes user = root server = /usr/sbin/in.tftpd server_args = -v -v -v -v -v --blocksize %MAX_BLOCKSIZE% --map-file %TFTPBOOT_DIR%/map-file %TFTPBOOT_DIR% disable = no # This is a workaround for Fedora, where TFTP will listen only on # IPv6 endpoint, if IPv4 flag is not used. 
flags = IPv4 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/tools/ironic/templates/vm.xml0000644000175000017500000000462600000000000023755 0ustar00coreycorey00000000000000 {{ name }} {{ memory }} {{ cpus }} hvm {% if bootdev == 'network' and not uefi_loader %} {% endif %} {% if uefi_loader %} {{ uefi_loader }} {% if uefi_nvram %} {{ uefi_nvram }}-{{ name }} {% endif %} {% endif %} {% if engine == 'kvm' %} {% endif %} destroy destroy restart {{ emulator }} {% for (imagefile, letter) in images %} {% if uefi_loader %}
{% else %}
{% endif %} {% endfor %}
{% for n in range(1, interface_count+1) %} {% if n == 1 and mac %} {% endif %}
{% if uefi_loader and bootdev == 'network' %} {% endif %} {% endfor %} {{ console }}
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1743991 ironic-14.0.1.dev163/devstack/upgrade/0000755000175000017500000000000000000000000017607 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1743991 ironic-14.0.1.dev163/devstack/upgrade/from-queens/0000755000175000017500000000000000000000000022050 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/upgrade/from-queens/upgrade-ironic0000644000175000017500000000035100000000000024702 0ustar00coreycorey00000000000000function configure_ironic_upgrade { # Remove the classic drivers from the configuration (forced by devstack-gate) # TODO(dtantsur): remove when classic drivers are removed sed -i '/^enabled_drivers/d' $IRONIC_CONF_FILE } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/upgrade/resources.sh0000755000175000017500000001421000000000000022156 0ustar00coreycorey00000000000000#!/bin/bash # # Copyright 2015 Hewlett-Packard Development Company, L.P. # Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions source $TOP_DIR/openrc admin admin IRONIC_DEVSTACK_DIR=$(cd $(dirname "$0")/.. 
&& pwd) source $IRONIC_DEVSTACK_DIR/lib/ironic RESOURCES_NETWORK_GATEWAY=${RESOURCES_NETWORK_GATEWAY:-10.2.0.1} RESOURCES_FIXED_RANGE=${RESOURCES_FIXED_RANGE:-10.2.0.0/20} NEUTRON_NET=ironic_grenade set -o xtrace # TODO(dtantsur): remove in Rocky, needed for parsing Placement API responses install_package jq function wait_for_ironic_resources { local i local nodes_count nodes_count=$(openstack baremetal node list -f value -c "Provisioning State" | wc -l) echo_summary "Waiting 5 minutes for Ironic resources become available again" for i in $(seq 1 30); do if openstack baremetal node list -f value -c "Provisioning State" | grep -qi failed; then die $LINENO "One of nodes is in failed state." fi if [[ $(openstack baremetal node list -f value -c "Provisioning State" | grep -ci available) == $nodes_count ]]; then return 0 fi sleep 10 done openstack baremetal node list die $LINENO "Timed out waiting for Ironic nodes are available again." } total_nodes=$IRONIC_VM_COUNT if [[ "${HOST_TOPOLOGY}" == "multinode" ]]; then total_nodes=$(( 2 * $total_nodes )) fi function early_create { # We need these steps only in case of flat-network if [[ -n "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then return fi # Ironic needs to have network access to the instance during deployment # from the control plane (ironic-conductor). This 'early_create' function # creates a new network with a unique CIDR, adds a route to this network # from ironic-conductor and creates taps between br-int and brbm. # ironic-conductor will be able to access the ironic nodes via this new # network. # TODO(vsaienko) use OSC when Neutron commands are supported in the stable # release. 
local net_id net_id=$(openstack network create --share $NEUTRON_NET -f value -c id) resource_save network net_id $net_id local subnet_params="" subnet_params+="--ip_version 4 " subnet_params+="--gateway $RESOURCES_NETWORK_GATEWAY " subnet_params+="--name $NEUTRON_NET " subnet_params+="$net_id $RESOURCES_FIXED_RANGE" local subnet_id subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2) resource_save network subnet_id $subnet_id local router_id router_id=$(openstack router create $NEUTRON_NET -f value -c id) resource_save network router_id $router_id neutron router-interface-add $NEUTRON_NET $subnet_id neutron router-gateway-set $NEUTRON_NET public # Add a route to the baremetal network via the Neutron public router. # ironic-conductor will be able to access the ironic nodes via this new # route. local r_net_gateway # Determine the IP address of the interface (ip -4 route get 8.8.8.8) that # will be used to access a public IP on the router we created ($router_id). # In this case we use the Google DNS server at 8.8.8.8 as the public IP # address. This does not actually attempt to contact 8.8.8.8, it just # determines the IP address of the interface that traffic to 8.8.8.8 would # use. We use the IP address of this interface to setup the route. 
test_with_retry "sudo ip netns exec qrouter-$router_id ip -4 route get 8.8.8.8 " "Route did not start" 60 r_net_gateway=$(sudo ip netns exec qrouter-$router_id ip -4 route get 8.8.8.8 |grep dev | awk '{print $7}') sudo ip route replace $RESOURCES_FIXED_RANGE via $r_net_gateway # NOTE(vsaienko) remove connection between br-int and brbm from old setup sudo ovs-vsctl -- --if-exists del-port ovs-1-tap1 sudo ovs-vsctl -- --if-exists del-port brbm-1-tap1 create_ovs_taps $net_id } function create { : } function verify { : } function verify_noapi { : } function destroy { # We need these steps only in case of flat-network if [[ -n "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then return fi # NOTE(vsaienko) move ironic VMs back to private network. local net_id net_id=$(openstack network show private -f value -c id) create_ovs_taps $net_id # NOTE(vsaienko) during early_create phase we update grenade resources neutron/subnet_id, # neutron/router_id, neutron/net_id. It was needed to instruct nova to boot instances # in ironic_grenade network instead of neutron_grenade during resources phase. As result # during neutron/resources.sh destroy phase ironic_grenade router|subnet|network were deleted. # Make sure that we removed neutron resources here. neutron router-gateway-clear neutron_grenade || /bin/true neutron router-interface-delete neutron_grenade neutron_grenade || /bin/true neutron router-delete neutron_grenade || /bin/true neutron net-delete neutron_grenade || /bin/true } # Dispatcher case $1 in "early_create") wait_for_ironic_resources wait_for_nova_resources $total_nodes early_create ;; "create") create ;; "verify_noapi") # NOTE(vdrok): our implementation of verify_noapi is a noop, but # grenade always passes the upgrade side (pre-upgrade or post-upgrade) # as an argument to it. Pass all the arguments grenade passes further. verify_noapi "${@:2}" ;; "verify") # NOTE(vdrok): pass all the arguments grenade passes further. 
verify "${@:2}" ;; "destroy") destroy ;; esac ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/upgrade/settings0000644000175000017500000000311000000000000021365 0ustar00coreycorey00000000000000# Grenade needs to know that Ironic has a Grenade plugin. This is done in the # gate by setting GRENADE_PLUGINRC when using openstack-infra/devstack-gate. # That means that in the project openstack-infra/project-config we will need to # update the Ironic grenade job(s) in jenkins/jobs/devstack-gate.yaml with # this: # export GRENADE_PLUGINRC="enable_grenade_plugin ironic https://opendev.org/openstack/ironic" # If openstack-infra/project-config is not updated then the Grenade tests will # never get run for Ironic register_project_for_upgrade ironic register_db_to_save ironic # Duplicate some settings from devstack. Use old devstack as we install base # environment from it. In common_settings we also source the old localrc # variables, so we need to do this before checking the HOST_TOPOLOGY value IRONIC_BASE_DEVSTACK_DIR=$TOP_DIR/../../old/ironic/devstack source $IRONIC_BASE_DEVSTACK_DIR/common_settings if [[ "${HOST_TOPOLOGY}" != "multinode" ]]; then # Disable automated cleaning on single node grenade to save a time and resources. 
export IRONIC_AUTOMATED_CLEAN_ENABLED=False fi # NOTE(jlvillal): For multi-node grenade jobs we do not want to upgrade Nova if [[ "${HOST_TOPOLOGY}" == "multinode" ]]; then # Remove 'nova' from the list of projects to upgrade UPGRADE_PROJECTS=$(echo $UPGRADE_PROJECTS | sed -e 's/\s*nova//g' ) fi # NOTE(vdrok): Do not setup multicell during upgrade export CELLSV2_SETUP="singleconductor" # https://storyboard.openstack.org/#!/story/2003808 # pxe booting with virtio broken in xenial-updates/queens/main export LIBVIRT_NIC_DRIVER=e1000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/upgrade/shutdown.sh0000755000175000017500000000070600000000000022024 0ustar00coreycorey00000000000000#!/bin/bash # # set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions # We need base DevStack functions for this source $BASE_DEVSTACK_DIR/functions source $BASE_DEVSTACK_DIR/stackrc # needed for status directory source $BASE_DEVSTACK_DIR/lib/tls source $BASE_DEVSTACK_DIR/lib/apache # Keep track of the DevStack directory IRONIC_DEVSTACK_DIR=$(dirname "$0")/.. 
source $IRONIC_DEVSTACK_DIR/lib/ironic set -o xtrace stop_ironic ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/devstack/upgrade/upgrade.sh0000755000175000017500000001137500000000000021604 0ustar00coreycorey00000000000000#!/usr/bin/env bash # ``upgrade-ironic`` echo "*********************************************************************" echo "Begin $0" echo "*********************************************************************" # Clean up any resources that may be in use cleanup() { set +o errexit echo "*********************************************************************" echo "ERROR: Abort $0" echo "*********************************************************************" # Kill ourselves to signal any calling process trap 2; kill -2 $$ } trap cleanup SIGHUP SIGINT SIGTERM # Keep track of the grenade directory RUN_DIR=$(cd $(dirname "$0") && pwd) # Source params source $GRENADE_DIR/grenaderc # Import common functions source $GRENADE_DIR/functions # This script exits on an error so that errors don't compound and you see # only the first error that occurred. set -o errexit # Upgrade Ironic # ============ # Duplicate some setup bits from target DevStack source $TARGET_DEVSTACK_DIR/stackrc source $TARGET_DEVSTACK_DIR/lib/tls source $TARGET_DEVSTACK_DIR/lib/nova source $TARGET_DEVSTACK_DIR/lib/neutron-legacy source $TARGET_DEVSTACK_DIR/lib/apache source $TARGET_DEVSTACK_DIR/lib/keystone source $TOP_DIR/openrc admin admin # Keep track of the DevStack directory IRONIC_DEVSTACK_DIR=$(dirname "$0")/.. source $IRONIC_DEVSTACK_DIR/lib/ironic # Print the commands being run so that we can see the command that triggers # an error. It is also useful for following allowing as the install occurs. set -o xtrace function wait_for_keystone { if ! 
wait_for_service $SERVICE_TIMEOUT ${KEYSTONE_AUTH_URI}/v$IDENTITY_API_VERSION/; then die $LINENO "keystone did not start" fi } # Save current config files for posterity if [[ -d $IRONIC_CONF_DIR ]] && [[ ! -d $SAVE_DIR/etc.ironic ]] ; then cp -pr $IRONIC_CONF_DIR $SAVE_DIR/etc.ironic fi stack_install_service ironic # calls upgrade-ironic for specific release upgrade_project ironic $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH # NOTE(rloo): make sure it is OK to do an upgrade. Except that we aren't # parsing/checking the output of this command because the output could change # based on the checks it makes. $IRONIC_BIN_DIR/ironic-status upgrade check $IRONIC_BIN_DIR/ironic-dbsync --config-file=$IRONIC_CONF_FILE # NOTE(vsaienko) pin_release only on multinode job, for cold upgrade (single node) # run online data migration instead. if [[ "${HOST_TOPOLOGY}" == "multinode" ]]; then iniset $IRONIC_CONF_FILE DEFAULT pin_release_version ${BASE_DEVSTACK_BRANCH#*/} else ironic-dbsync online_data_migrations fi ensure_started='ironic-conductor nova-compute ' ensure_stopped='' # Multinode grenade is designed to upgrade services only on primary node. And there is no way to manipulate # subnode during grenade phases. With this after upgrade we can have upgraded (new) services on primary # node and not upgraded (old) services on subnode. # According to Ironic upgrade procedure, we shouldn't have upgraded (new) ironic-api and not upgraded (old) # ironic-conductor. By setting redirect of API requests from primary node to subnode during upgrade # allow to satisfy ironic upgrade requirements. 
if [[ "$HOST_TOPOLOGY_ROLE" == "primary" ]]; then disable_service ir-api ensure_stopped+='ironic-api' ironic_wsgi_conf=$(apache_site_config_for ironic-api-wsgi) sudo cp $IRONIC_DEVSTACK_FILES_DIR/apache-ironic-api-redirect.template $ironic_wsgi_conf sudo sed -e " s|%IRONIC_SERVICE_PROTOCOL%|$IRONIC_SERVICE_PROTOCOL|g; s|%IRONIC_SERVICE_HOST%|$IRONIC_PROVISION_SUBNET_SUBNODE_IP|g; " -i $ironic_wsgi_conf enable_apache_site ipxe-ironic else ensure_started+='ironic-api ' fi start_ironic # NOTE(vsaienko) do not restart n-cpu on multinode as we didn't upgrade nova. if [[ "${HOST_TOPOLOGY}" != "multinode" ]]; then # NOTE(vsaienko) installing ironic service triggers apache restart, that # may cause nova-compute failure due to LP1537076 stop_nova_compute || true wait_for_keystone start_nova_compute fi if [[ -n "$ensure_stopped" ]]; then ensure_services_stopped $ensure_stopped fi ensure_services_started $ensure_started # We need these steps only in case of flat-network # NOTE(vsaienko) starting from Ocata when Neutron is restarted there is no guarantee that # internal tag, that was assigned to network will be the same. As result we need to update # tag on link between br-int and brbm to new value after restart. 
if [[ -z "${IRONIC_PROVISION_NETWORK_NAME}" ]]; then net_id=$(openstack network show ironic_grenade -f value -c id) create_ovs_taps $net_id fi set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1743991 ironic-14.0.1.dev163/doc/0000755000175000017500000000000000000000000015121 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/requirements.txt0000644000175000017500000000047500000000000020413 0ustar00coreycorey00000000000000mock>=3.0.0 # BSD openstackdocstheme>=1.31.2 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0 reno>=2.5.0 # Apache-2.0 sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2 # BSD sphinxcontrib-apidoc>=0.2.0 # BSD sphinxcontrib-pecanwsme>=0.10.0 # Apache-2.0 sphinxcontrib-seqdiag>=0.8.4 # BSD sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1743991 ironic-14.0.1.dev163/doc/source/0000755000175000017500000000000000000000000016421 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1743991 ironic-14.0.1.dev163/doc/source/_exts/0000755000175000017500000000000000000000000017543 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/_exts/automated_steps.py0000644000175000017500000001427200000000000023324 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import defaultdict import inspect import itertools import operator import os.path from docutils import nodes from docutils.parsers import rst from docutils.parsers.rst import directives from docutils.statemachine import ViewList from sphinx.util import logging from sphinx.util.nodes import nested_parse_with_titles import stevedore from ironic.common import driver_factory LOG = logging.getLogger(__name__) def _list_table(add, headers, data, title='', columns=None): """Build a list-table directive. :param add: Function to add one row to output. :param headers: List of header values. :param data: Iterable of row data, yielding lists or tuples with rows. """ add('.. list-table:: %s' % title) add(' :header-rows: 1') if columns: add(' :widths: %s' % (','.join(str(c) for c in columns))) add('') add(' - * %s' % headers[0]) for h in headers[1:]: add(' * %s' % h) for row in data: add(' - * %s' % row[0]) for r in row[1:]: lines = str(r).splitlines() if not lines: # empty string add(' * ') else: # potentially multi-line string add(' * %s' % lines[0]) for l in lines[1:]: add(' %s' % l) add('') def _format_doc(doc): "Format one method docstring to be shown in the step table." paras = doc.split('\n\n') if paras[-1].startswith(':'): # Remove the field table that commonly appears at the end of a # docstring. paras = paras[:-1] return '\n\n'.join(paras) _clean_steps = {} def _init_steps_by_driver(): "Load step information from drivers." 
# NOTE(dhellmann): This reproduces some of the logic of # ironic.drivers.base.BaseInterface.__new__ and # ironic.common.driver_factory but does so without # instantiating the interface classes, which means that if # some of the preconditions aren't met we can still inspect # the methods of the class. for interface_name in sorted(driver_factory.driver_base.ALL_INTERFACES): LOG.info('[{}] probing available plugins for interface {}'.format( __name__, interface_name)) loader = stevedore.ExtensionManager( 'ironic.hardware.interfaces.{}'.format(interface_name), invoke_on_load=False, ) for plugin in loader: if plugin.name == 'fake': continue steps = [] for method_name, method in inspect.getmembers(plugin.plugin): if not getattr(method, '_is_clean_step', False): continue step = { 'step': method.__name__, 'priority': method._clean_step_priority, 'abortable': method._clean_step_abortable, 'argsinfo': method._clean_step_argsinfo, 'interface': interface_name, 'doc': _format_doc(inspect.getdoc(method)), } LOG.info('[{}] interface {!r} driver {!r} STEP {}'.format( __name__, interface_name, plugin.name, step)) steps.append(step) if steps: if interface_name not in _clean_steps: _clean_steps[interface_name] = {} _clean_steps[interface_name][plugin.name] = steps def _format_args(argsinfo): argsinfo = argsinfo or {} return '\n\n'.join( '``{}``{}{} {}'.format( argname, ' (*required*)' if argdetail.get('required') else '', ' --' if argdetail.get('description') else '', argdetail.get('description', ''), ) for argname, argdetail in sorted(argsinfo.items()) ) class AutomatedStepsDirective(rst.Directive): option_spec = { 'phase': directives.unchanged, } def run(self): series = self.options.get('series', 'cleaning') if series != 'cleaning': raise NotImplementedError('Showing deploy steps not implemented') source_name = '<{}>'.format(__name__) result = ViewList() for interface_name in ['power', 'management', 'deploy', 'bios', 'raid']: interface_info = _clean_steps.get(interface_name, {}) if 
not interface_info: continue title = '{} Interface'.format(interface_name.capitalize()) result.append(title, source_name) result.append('~' * len(title), source_name) for driver_name, steps in sorted(interface_info.items()): _list_table( title='{} cleaning steps'.format(driver_name), add=lambda x: result.append(x, source_name), headers=['Name', 'Details', 'Priority', 'Stoppable', 'Arguments'], columns=[20, 30, 10, 10, 30], data=( ('``{}``'.format(s['step']), s['doc'], s['priority'], 'yes' if s['abortable'] else 'no', _format_args(s['argsinfo']), ) for s in steps ), ) # NOTE(dhellmann): Useful for debugging. # print('\n'.join(result)) node = nodes.section() node.document = self.state.document nested_parse_with_titles(self.state, result, node) return node.children def setup(app): app.add_directive('show-steps', AutomatedStepsDirective) _init_steps_by_driver() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538406.178399 ironic-14.0.1.dev163/doc/source/admin/0000755000175000017500000000000000000000000017511 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/adoption.rst0000644000175000017500000002002200000000000022054 0ustar00coreycorey00000000000000.. _adoption: ============= Node adoption ============= Overview ======== As part of hardware inventory lifecycle management, it is not an unreasonable need to have the capability to be able to add hardware that should be considered "in-use" by the Bare Metal service, that may have been deployed by another Bare Metal service installation or deployed via other means. As such, the node adoption feature allows a user to define a node as ``active`` while skipping the ``available`` and ``deploying`` states, which will prevent the node from being seen by the Compute service as ready for use. 
This feature is leveraged as part of the state machine workflow, where a node in ``manageable`` can be moved to ``active`` state via the provision_state verb ``adopt``. To view the state transition capabilities, please see :ref:`states`. How it works ============ A node initially enrolled begins in the ``enroll`` state. An operator must then move the node to ``manageable`` state, which causes the node's ``power`` interface to be validated. Once in ``manageable`` state, an operator can then explicitly choose to adopt a node. Adoption of a node results in the validation of its ``boot`` interface, and upon success the process leverages what is referred to as the "takeover" logic. The takeover process is intended for conductors to take over the management of nodes for a conductor that has failed. The takeover process involves the deploy interface's ``prepare`` and ``take_over`` methods being called. These steps take specific actions such as downloading and staging the deployment kernel and ramdisk, ISO image, any required boot image, or boot ISO image and then places any PXE or virtual media configuration necessary for the node should it be required. The adoption process makes no changes to the physical node, with the exception of operator supplied configurations where virtual media is used to boot the node under normal circumstances. An operator should ensure that any supplied configuration defining the node is sufficient for the continued operation of the node moving forward. Such as, if the node is configured to network boot via instance_info/boot_option="netboot", then appropriate driver specific node configuration should be set to support this capability. Possible Risk ============= The main risk with this feature is that supplied configuration may ultimately be incorrect or invalid which could result in potential operational issues: * ``rebuild`` verb - Rebuild is intended to allow a user to re-deploy the node to a fresh state. 
The risk with adoption is that the image defined when an operator adopts the node may not be the valid image for the pre-existing configuration. If this feature is utilized for a migration from one deployment to another, and pristine original images are loaded and provided, then ultimately the risk is the same with any normal use of the ``rebuild`` feature, the server is effectively wiped. * When deleting a node, the deletion or cleaning processes may fail if the incorrect deployment image is supplied in the configuration as the node may NOT have been deployed with the supplied image and driver or compatibility issues may exist as a result. Operators will need to be cognizant of that possibility and should plan accordingly to ensure that deployment images are known to be compatible with the hardware in their environment. * Networking - Adoption will assert no new networking configuration to the newly adopted node as that would be considered modifying the node. Operators will need to plan accordingly and have network configuration such that the nodes will be able to network boot. How to use ========== .. NOTE:: The power state that the ironic-conductor observes upon the first successful power state check, as part of the transition to the ``manageable`` state will be enforced with a node that has been adopted. This means a node that is in ``power off`` state will, by default, have the power state enforced as ``power off`` moving forward, unless an administrator actively changes the power state using the Bare Metal service. Requirements ------------ Requirements for use are essentially the same as to deploy a node: * Sufficient driver information to allow for a successful power management validation. * Sufficient instance_info to pass deploy interface preparation. Each driver may have additional requirements dependent upon the configuration that is supplied. 
An example of this would be defining a node to always boot from the network, which will cause the conductor to attempt to retrieve the pertinent files. Inability to do so will result in the adoption failing, and the node being placed in the ``adopt failed`` state. Example ------- This is an example to create a new node, named ``testnode``, with sufficient information to pass basic validation in order to be taken from the ``manageable`` state to ``active`` state:: # Explicitly set the client API version environment variable to # 1.17, which introduces the adoption capability. export OS_BAREMETAL_API_VERSION=1.17 openstack baremetal node create --name testnode \ --driver ipmi \ --driver-info ipmi_address= \ --driver-info ipmi_username= \ --driver-info ipmi_password= \ --driver-info deploy_kernel= \ --driver-info deploy_ramdisk= openstack baremetal port create --node openstack baremetal node set testnode \ --instance-info image_source="http://localhost:8080/blankimage" \ --instance-info capabilities="{\"boot_option\": \"local\"}" openstack baremetal node manage testnode --wait openstack baremetal node adopt testnode --wait .. NOTE:: In the above example, the image_source setting must reference a valid image or file, however that image or file can ultimately be empty. .. NOTE:: The above example utilizes a capability that defines the boot operation to be local. It is recommended to define the node as such unless network booting is desired. .. NOTE:: The above example will fail a re-deployment as a fake image is defined and no instance_info/image_checksum value is defined. As such any actual attempt to write the image out will fail as the image_checksum value is only validated at time of an actual deployment operation. .. NOTE:: A user may wish to assign an instance_uuid to a node, which could be used to match an instance in the Compute service. Doing so is not required for the proper operation of the Bare Metal service. openstack baremetal node set --instance-uuid .. 
NOTE:: In Newton, coupled with API version 1.20, the concept of a network_interface was introduced. A user of this feature may wish to add new nodes with a network_interface of ``noop`` and then change the interface at a later point and time. Troubleshooting =============== Should an adoption operation fail for a node, the error that caused the failure will be logged in the node's ``last_error`` field when viewing the node. This error, in the case of node adoption, will largely be due to failure of a validation step. Validation steps are dependent upon what driver is selected for the node. Any node that is in the ``adopt failed`` state can have the ``adopt`` verb re-attempted. Example:: openstack baremetal node adopt If a user wishes to abort their attempt at adopting, they can then move the node back to ``manageable`` from ``adopt failed`` state by issuing the ``manage`` verb. Example:: openstack baremetal node manage If all else fails the hardware node can be removed from the Bare Metal service. The ``node delete`` command, which is **not** the same as setting the provision state to ``deleted``, can be used while the node is in ``adopt failed`` state. This will delete the node without cleaning occurring to preserve the node's current state. Example:: openstack baremetal node delete ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/agent-token.rst0000644000175000017500000001071700000000000022465 0ustar00coreycorey00000000000000.. _agent_token: =========== Agent Token =========== Purpose ======= The concept of agent tokens is to provide a mechanism by which the relationship between an operating deployment of the Bare Metal Service and an instance of the ``ironic-python-agent`` is verified. In a sense, this token can be viewed as a session identifier or authentication token. .. 
warning:: This functionality does not remove the risk of a man-in-the-middle attack that could occur from connection intercept or when TLS is not used for all communication. This becomes useful in the case of deploying an "edge" node where intermediate networks are not trustworthy. How it works ============ These tokens are provided in one of two ways to the running agent. 1. A pre-generated token which is embedded into virtual media ISOs. 2. A one-time generated token that is provided upon the first "lookup" of the node. In both cases, the tokens are a randomly generated length of 128 characters. Once the token has been provided, the token cannot be retrieved or accessed. It remains available to the conductors, and is stored in memory of the ``ironic-python-agent``. .. note:: In the case of the token being embedded with virtual media, it is read from a configuration file within the image. Ideally this should be paired with Swift temporary URLs. With the token available in memory in the agent, the token is embedded with ``heartbeat`` operations to the ironic API endpoint. This enables the API to authenticate the heartbeat request, and refuse "heartbeat" requests from the ``ironic-python-agent``. With the ``Ussuri`` release, the configuration option ``[DEFAULT]require_agent_token`` can be set ``True`` to explicitly require token use. .. warning:: If the Bare Metal Service is updated, the version of ``ironic-python-agent`` should also be updated to enable this feature. In addition to heartbeats being verified, commands from the ``ironic-conductor`` service to the ``ironic-python-agent`` also include the token, allowing the agent to authenticate the caller. With Virtual Media ------------------ .. 
seqdiag:: :scale: 80 diagram { API; Conductor; Baremetal; Swift; IPA; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> Conductor [label = "Generates a random token"]; Conductor -> Conductor [label = "Generates configuration for IPA ramdisk"]; Conductor -> Swift [label = "IPA image, with configuration is uploaded"]; Conductor -> Baremetal [label = "Attach IPA virtual media in Swift as virtual CD"]; Conductor -> Baremetal [label = "Conductor turns power on"]; Baremetal -> Swift [label = "Baremetal reads virtual media"]; Baremetal -> Baremetal [label = "Boots IPA virtual media image"]; Baremetal -> Baremetal [label = "IPA is started"]; IPA -> Baremetal [label = "IPA loads configuration and agent token into memory"]; IPA -> API [label = "Lookup node"]; API -> IPA [label = "API responds with node UUID and token value of '******'"]; IPA -> API [label = "Heartbeat with agent token"]; } With PXE/iPXE/etc. ------------------ .. seqdiag:: :scale: 80 diagram { API; Conductor; Baremetal; iPXE; IPA; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> Baremetal [label = "Conductor turns power on"]; Baremetal -> iPXE [label = "Baremetal reads kernel/ramdisk and starts boot"]; Baremetal -> Baremetal [label = "Boots IPA virtual media image"]; Baremetal -> Baremetal [label = "IPA is started"]; IPA -> Baremetal [label = "IPA loads configuration"]; IPA -> API [label = "Lookup node"]; API -> Conductor [label = "API requests conductor to generate a random token"]; API -> IPA [label = "API responds with node UUID and token value"]; IPA -> API [label = "Heartbeat with agent token"]; } Agent Configuration =================== An additional setting which may be leveraged with the ``ironic-python-agent`` is an ``agent_token_required`` setting. 
Under normal circumstances, this setting can be asserted via the configuration supplied from the Bare Metal service deployment upon the ``lookup`` action, but can be asserted via the embedded configuration for the agent in the ramdisk. This setting is also available via kernel command line as ``ipa-agent-token-required``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/api-audit-support.rst0000644000175000017500000000720400000000000023635 0ustar00coreycorey00000000000000.. _api-audit-support: ================= API Audit Logging ================= Audit middleware supports delivery of CADF audit events via Oslo messaging notifier capability. Based on `notification_driver` configuration, audit events can be routed to messaging infrastructure (notification_driver = messagingv2) or can be routed to a log file (`[oslo_messaging_notifications]/driver = log`). Audit middleware creates two events per REST API interaction. First event has information extracted from request data and the second one has request outcome (response). Enabling API Audit Logging ========================== Audit middleware is available as part of `keystonemiddleware` (>= 1.6) library. For information regarding how audit middleware functions refer :keystonemiddleware-doc:`here `. Auditing can be enabled for the Bare Metal service by making the following changes to ``/etc/ironic/ironic.conf``. #. To enable audit logging of API requests:: [audit] ... enabled=true #. To customize auditing API requests, the audit middleware requires the audit_map_file setting to be defined. Update the value of configuration setting 'audit_map_file' to set its location. Audit map file configuration options for the Bare Metal service are included in the etc/ironic/ironic_api_audit_map.conf.sample file. To understand CADF format specified in ironic_api_audit_map.conf file refer to `CADF Format. `_:: [audit] ... 
audit_map_file=/etc/ironic/api_audit_map.conf #. Comma separated list of Ironic REST API HTTP methods to be ignored during audit. It is used only when API audit is enabled. For example:: [audit] ... ignore_req_list=GET,POST Sample Audit Event ================== Following is the sample of audit event for ironic node list request. .. code-block:: json { "event_type":"audit.http.request", "timestamp":"2016-06-15 06:04:30.904397", "payload":{ "typeURI":"http://schemas.dmtf.org/cloud/audit/1.0/event", "eventTime":"2016-06-15T06:04:30.903071+0000", "target":{ "id":"ironic", "typeURI":"unknown", "addresses":[ { "url":"http://{ironic_admin_host}:6385", "name":"admin" }, { "url":"http://{ironic_internal_host}:6385", "name":"private" }, { "url":"http://{ironic_public_host}:6385", "name":"public" } ], "name":"ironic" }, "observer":{ "id":"target" }, "tags":[ "correlation_id?value=685f1abb-620e-5d5d-b74a-b4135fb32373" ], "eventType":"activity", "initiator":{ "typeURI":"service/security/account/user", "name":"admin", "credential":{ "token":"***", "identity_status":"Confirmed" }, "host":{ "agent":"python-ironicclient", "address":"10.1.200.129" }, "project_id":"d8f52dd7d9e1475dbbf3ba47a4a83313", "id":"8c1a948bad3948929aa5d5b50627a174" }, "action":"read", "outcome":"pending", "id":"061b7aa7-5879-5225-a331-c002cf23cb6c", "requestPath":"/v1/nodes/?associated=True" }, "priority":"INFO", "publisher_id":"ironic-api", "message_id":"2f61ebaa-2d3e-4023-afba-f9fca6f21fc2" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/bios.rst0000644000175000017500000001000600000000000021174 0ustar00coreycorey00000000000000.. _bios: ================== BIOS Configuration ================== Overview ======== The Bare Metal service supports BIOS configuration for bare metal nodes. It allows administrators to retrieve and apply the desired BIOS settings via CLI or REST API. 
The desired BIOS settings are applied during manual cleaning. Prerequisites ============= Bare metal servers must be configured by the administrator to be managed via ironic hardware type that supports BIOS configuration. Enabling hardware types ----------------------- Enable a specific hardware type that supports BIOS configuration. Refer to :doc:`/install/enabling-drivers` for how to enable a hardware type. Enabling hardware interface --------------------------- To enable the bios interface: .. code-block:: ini [DEFAULT] enabled_bios_interfaces = no-bios Append the actual bios interface name supported by the enabled hardware type to ``enabled_bios_interfaces`` with comma separated values in ``ironic.conf``. All available in-tree bios interfaces are listed in setup.cfg file in the source code tree, for example: .. code-block:: ini ironic.hardware.interfaces.bios = fake = ironic.drivers.modules.fake:FakeBIOS no-bios = ironic.drivers.modules.noop:NoBIOS Retrieve BIOS settings ====================== To retrieve the cached BIOS configuration from a specified node:: $ openstack baremetal node bios setting list BIOS settings are cached on each node cleaning operation or when settings have been applied successfully via BIOS cleaning steps. The return of above command is a table of last cached BIOS settings from specified node. If ``-f json`` is added as suffix to above command, it returns BIOS settings as following:: [ { "setting name": { "name": "setting name", "value": "value" } }, { "setting name": { "name": "setting name", "value": "value" } }, ... 
] To get a specified BIOS setting for a node:: $ openstack baremetal node bios setting show If ``-f json`` is added as suffix to above command, it returns BIOS settings as following:: { "setting name": { "name": "setting name", "value": "value" } } Configure BIOS settings ======================= Two :ref:`manual_cleaning` steps are available for managing nodes' BIOS settings: Factory reset ------------- This cleaning step resets all BIOS settings to factory default for a given node:: { "target":"clean", "clean_steps": [ { "interface": "bios", "step": "factory_reset" } ] } The ``factory_reset`` cleaning step does not require any arguments, as it resets all BIOS settings to factory defaults. Apply BIOS configuration ------------------------ This cleaning step applies a set of BIOS settings for a node:: { "target":"clean", "clean_steps": [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [ { "name": "name", "value": "value" }, { "name": "name", "value": "value" } ] } } ] } The representation of ``apply_configuration`` cleaning step follows the same format of :ref:`manual_cleaning`. The desired BIOS settings can be provided via the ``settings`` argument which contains a list of BIOS options to be applied, each BIOS option is a dictionary with ``name`` and ``value`` keys. To check whether the desired BIOS configuration is set properly, use the command mentioned in the `Retrieve BIOS settings`_ section. .. note:: When applying BIOS settings to a node, vendor-specific driver may take the given BIOS settings from the argument and compare them with the current BIOS settings on the node and only apply when there is a difference. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/boot-from-volume.rst0000644000175000017500000002060300000000000023455 0ustar00coreycorey00000000000000.. 
_boot-from-volume: ================ Boot From Volume ================ Overview ======== The Bare Metal service supports booting from a Cinder iSCSI volume as of the Pike release. This guide will primarily deal with this use case, but will be updated as more paths for booting from a volume, such as FCoE, are introduced. The boot from volume is supported on both legacy BIOS and UEFI (iPXE binary for EFI booting) boot mode. We need to perform with suitable images which will be created by diskimage-builder tool. Prerequisites ============= Currently booting from a volume requires: - Bare Metal service version 9.0.0 - Bare Metal API microversion 1.33 or later - A driver that utilizes the :doc:`PXE boot mechanism `. Currently booting from a volume is supported by the reference drivers that utilize PXE boot mechanisms when iPXE is enabled. - iPXE is an explicit requirement, as it provides the mechanism that attaches and initiates booting from an iSCSI volume. - Metadata services need to be configured and available for the instance images to obtain configuration such as keys. Configuration drives are not supported due to minimum disk extension sizes. Conductor Configuration ======================= In ironic.conf, you can specify a list of enabled storage interfaces. Check ``[DEFAULT]enabled_storage_interfaces`` in your ironic.conf to ensure that your desired interface is enabled. For example, to enable the ``cinder`` and ``noop`` storage interfaces:: [DEFAULT] enabled_storage_interfaces = cinder,noop If you want to specify a default storage interface rather than setting the storage interface on a per node basis, set ``[DEFAULT]default_storage_interface`` in ironic.conf. The ``default_storage_interface`` will be used for any node that doesn't have a storage interface defined. Node Configuration ================== Storage Interface ----------------- You will need to specify what storage interface the node will use to handle storage operations. 
For example, to set the storage interface to ``cinder`` on an existing node:: openstack --os-baremetal-api-version 1.33 baremetal node set \ --storage-interface cinder $NODE_UUID A default storage interface can be specified in ironic.conf. See the `Conductor Configuration`_ section for details. iSCSI Configuration ------------------- In order for a bare metal node to boot from an iSCSI volume, the ``iscsi_boot`` capability for the node must be set to ``True``. For example, if you want to update an existing node to boot from volume:: openstack --os-baremetal-api-version 1.33 baremetal node set \ --property capabilities=iscsi_boot:True $NODE_UUID You will also need to create a volume connector for the node, so the storage interface will know how to communicate with the node for storage operation. In the case of iSCSI, you will need to provide an iSCSI Qualifying Name (IQN) that is unique to your SAN. For example, to create a volume connector for iSCSI:: openstack --os-baremetal-api-version 1.33 baremetal volume connector create \ --node $NODE_UUID --type iqn --connector-id iqn.2017-08.org.openstack.$NODE_UUID Image Creation ============== We use ``disk-image-create`` in diskimage-builder tool to create images for boot from volume feature. Some required elements for this mechanism for corresponding boot modes are as following: - Legacy BIOS boot mode: ``iscsi-boot`` element. - UEFI boot mode: ``iscsi-boot`` and ``block-device-efi`` elements. An example below:: export IMAGE_NAME= export DIB_CLOUD_INIT_DATASOURCES="ConfigDrive, OpenStack" disk-image-create centos7 vm cloud-init-datasources dhcp-all-interfaces iscsi-boot dracut-regenerate block-device-efi -o $IMAGE_NAME .. note:: * For CentOS images, we must add dependent element named ``dracut-regenerate`` during image creation. Otherwise, the image creation will fail with an error. * For Ubuntu images, we only support ``iscsi-boot`` element without ``dracut-regenerate`` element during image creation. 
Advanced Topics =============== Use without the Compute Service ------------------------------- As discussed in other sections, the Bare Metal service has a concept of a `connector` that is used to represent an interface that is intended to be utilized to attach the remote volume. In addition to the connectors, we have a concept of a `target` that can be defined via the API. While a user of this feature through the Compute service would automatically have a new target record created for them, it is not explicitly required, and can be performed manually. A target record can be created using a command similar to the example below:: openstack --os-baremetal-api-version 1.33 baremetal volume target create \ --node $NODE_UUID --type iscsi --boot-index 0 --volume $VOLUME_UUID .. Note:: A ``boot-index`` value of ``0`` represents the boot volume for a node. As the ``boot-index`` is per-node in sequential order, only one boot volume is permitted for each node. Use Without Cinder ------------------ In the Rocky release, an ``external`` storage interface is available that can be utilized without a Block Storage Service installation. Under normal circumstances the ``cinder`` storage interface interacts with the Block Storage Service to orchestrate and manage attachment and detachment of volumes from the underlying block service system. The ``external`` storage interface contains the logic to allow the Bare Metal service to determine if the Bare Metal node has been requested with a remote storage volume for booting. This is in contrast to the default ``noop`` storage interface which does not contain logic to determine if the node should or could boot from a remote volume. It must be noted that minimal configuration or value validation occurs with the ``external`` storage interface. The ``cinder`` storage interface contains more extensive validation, that is likely unnecessary in an ``external`` scenario. 
Setting the external storage interface:: openstack baremetal node set --storage-interface external $NODE_UUID Setting a volume:: openstack baremetal volume target create --node $NODE_UUID \ --type iscsi --boot-index 0 --volume-id $VOLUME_UUID \ --property target_iqn="iqn.2010-10.com.example:vol-X" \ --property target_lun="0" \ --property target_portal="192.168.0.123:3260" \ --property auth_method="CHAP" \ --property auth_username="ABC" \ --property auth_password="XYZ" \ Ensure that no image_source is defined:: openstack baremetal node unset \ --instance-info image_source $NODE_UUID Deploy the node:: openstack baremetal node deploy $NODE_UUID Upon deploy, the boot interface for the baremetal node will attempt to either create iPXE configuration OR set boot parameters out-of-band via the management controller. Such action is boot interface specific and may not support all forms of volume target configuration. As of the Rocky release, the bare metal service does not support writing an Operating System image to a remote boot from volume target, so that also must be ensured by the user in advance. Records of volume targets are removed upon the node being undeployed, and as such are not persistent across deployments. Cinder Multi-attach ------------------- Volume multi-attach is a function that is commonly performed in computing clusters where dedicated storage subsystems are utilized. For some time now, the Block Storage service has supported the concept of multi-attach. However, the Compute service, as of the Pike release, does not yet have support to leverage multi-attach. Concurrently, multi-attach requires the backend volume driver running as part of the Block Storage service to contain support for multi-attach volumes. 
When support for storage interfaces was added to the Bare Metal service, specifically for the ``cinder`` storage interface, the concept of volume multi-attach was accounted for, however has not been fully tested, and is unlikely to be fully tested until there is Compute service integration as well as volume driver support. The data model for storage of volume targets in the Bare Metal service has no constraints on the same target volume from being utilized. When interacting with the Block Storage service, the Bare Metal service will prevent the use of volumes that are being reported as ``in-use`` if they do not explicitly support multi-attach. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/building-windows-images.rst0000644000175000017500000000724100000000000024777 0ustar00coreycorey00000000000000.. _building_image_windows: Building images for Windows --------------------------- We can use ``New-WindowsOnlineImage`` in `windows-openstack-imaging-tools`_ tool as an option to create Windows images (whole disk images) corresponding boot modes which will support for Windows NIC Teaming. And allow the utilization of link aggregation when the instance is spawned on hardware servers (Bare metals). Requirements: ~~~~~~~~~~~~~ * A Microsoft Windows Server Operating System along with ``Hyper-V virtualization`` enabled, ``PowerShell`` version >=4 supported, ``Windows Assessment and Deployment Kit``, in short ``Windows ADK``. * The windows Server compatible drivers. * Working git environment. Preparation: ~~~~~~~~~~~~ * Download a Windows Server 2012R2/ 2016 installation ISO. * Install Windows Server 2012R2/ 2016 OS on workstation PC along with following feature: - Enable Hyper-V virtualization. - Install PowerShell 4.0. - Install Git environment & import git proxy (if have). 
- Create new ``Path`` in Microsoft Windows Server Operating System which supports submodule update via ``git submodule update --init`` command:: - Variable name: Path - Variable value: C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files\Git\bin - Rename virtual switch name in Windows Server 2012R2/ 2016 in ``Virtual Switch Manager`` into `external`. Implementation: ~~~~~~~~~~~~~~~ * ``Step 1``: Create folders: ``C:\`` where output images will be located, ``C:\`` where you need to place the necessary hardware drivers. * ``Step 2``: Copy and extract necessary hardware drivers in ``C:\``. * ``Step 3``: Insert or burn Windows Server 2016 ISO to ``D:\``. * ``Step 4``: Download ``windows-openstack-imaging-tools`` tools. .. code-block:: console git clone https://github.com/cloudbase/windows-openstack-imaging-tools.git * ``Step 5``: Create and run the script `create-windows-cloud-image.ps1`: .. code-block:: console git submodule update --init Import-Module WinImageBuilder.psm1 $windowsImagePath = "C:\\.qcow2" $VirtIOISOPath = "C:\\virtio.iso" $virtIODownloadLink = "https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/archive-virtio/virtio-win-0.1.133-2/virtio-win.iso" (New-Object System.Net.WebClient).DownloadFile($virtIODownloadLink, $VirtIOISOPath) $wimFilePath = "D:\sources\install.wim" $extraDriversPath = "C:\\" $image = (Get-WimFileImagesInfo -WimFilePath $wimFilePath)[1] $switchName = 'external' New-WindowsOnlineImage -WimFilePath $wimFilePath -ImageName $image.ImageName ` -WindowsImagePath $windowsImagePath -Type 'KVM' -ExtraFeatures @() ` -SizeBytes 20GB -CpuCores 2 -Memory 2GB -SwitchName $switchName ` -ProductKey $productKey -DiskLayout 'BIOS' ` -ExtraDriversPath $extraDriversPath ` -InstallUpdates:$false -AdministratorPassword 'Pa$$w0rd' ` -PurgeUpdates:$true -DisableSwap:$true After executing this command you will get two output files, first one being "C:\\.qcow2", which is the resulting windows whole disk image and "C:\\virtio.iso", which 
is virtio iso contains all the synthetic drivers for the KVM hypervisor. See `example_windows_images`_ for more details and examples. .. note:: We can change ``SizeBytes``, ``CpuCores`` and ``Memory`` depending on requirements. .. _`example_windows_images`: https://github.com/cloudbase/windows-openstack-imaging-tools/blob/master/Examples .. _`windows-openstack-imaging-tools`: https://github.com/cloudbase/windows-openstack-imaging-tools ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/cleaning.rst0000644000175000017500000003053700000000000022033 0ustar00coreycorey00000000000000.. _cleaning: ============= Node cleaning ============= Overview ======== Ironic provides two modes for node cleaning: ``automated`` and ``manual``. ``Automated cleaning`` is automatically performed before the first workload has been assigned to a node and when hardware is recycled from one workload to another. ``Manual cleaning`` must be invoked by the operator. .. _automated_cleaning: Automated cleaning ================== When hardware is recycled from one workload to another, ironic performs automated cleaning on the node to ensure it's ready for another workload. This ensures the tenant will get a consistent bare metal node deployed every time. Ironic implements automated cleaning by collecting a list of cleaning steps to perform on a node from the Power, Deploy, Management, BIOS, and RAID interfaces of the driver assigned to the node. These steps are then ordered by priority and executed on the node when the node is moved to ``cleaning`` state, if automated cleaning is enabled. With automated cleaning, nodes move to ``cleaning`` state when moving from ``active`` -> ``available`` state (when the hardware is recycled from one workload to another). Nodes also traverse cleaning when going from ``manageable`` -> ``available`` state (before the first workload is assigned to the nodes). 
For a full understanding of all state transitions into cleaning, please see :ref:`states`. Ironic added support for automated cleaning in the Kilo release. .. _enabling-cleaning: Enabling automated cleaning --------------------------- To enable automated cleaning, ensure that your ironic.conf is set as follows: .. code-block:: ini [conductor] automated_clean=true This will enable the default set of cleaning steps, based on your hardware and ironic hardware types used for nodes. This includes, by default, erasing all of the previous tenant's data. You may also need to configure a `Cleaning Network`_. Cleaning steps -------------- Cleaning steps used for automated cleaning are ordered from higher to lower priority, where a larger integer is a higher priority. In case of a conflict between priorities across interfaces, the following resolution order is used: Power, Management, Deploy, BIOS, and RAID interfaces. You can skip a cleaning step by setting the priority for that cleaning step to zero or 'None'. You can reorder the cleaning steps by modifying the integer priorities of the cleaning steps. See `How do I change the priority of a cleaning step?`_ for more information. .. show-steps:: :phase: cleaning .. _manual_cleaning: Manual cleaning =============== ``Manual cleaning`` is typically used to handle long running, manual, or destructive tasks that an operator wishes to perform either before the first workload has been assigned to a node or between workloads. When initiating a manual clean, the operator specifies the cleaning steps to be performed. Manual cleaning can only be performed when a node is in the ``manageable`` state. Once the manual cleaning is finished, the node will be put in the ``manageable`` state again. Ironic added support for manual cleaning in the 4.4 (Mitaka series) release. Setup ----- In order for manual cleaning to work, you may need to configure a `Cleaning Network`_. 
Starting manual cleaning via API -------------------------------- Manual cleaning can only be performed when a node is in the ``manageable`` state. The REST API request to initiate it is available in API version 1.15 and higher:: PUT /v1/nodes//states/provision (Additional information is available `here `_.) This API will allow operators to put a node directly into ``cleaning`` provision state from ``manageable`` state via 'target': 'clean'. The PUT will also require the argument 'clean_steps' to be specified. This is an ordered list of cleaning steps. A cleaning step is represented by a dictionary (JSON), in the form:: { "interface": "", "step": "", "args": {"": "", ..., "": } } The 'interface' and 'step' keys are required for all steps. If a cleaning step method takes keyword arguments, the 'args' key may be specified. It is a dictionary of keyword variable arguments, with each keyword-argument entry being : . If any step is missing a required keyword argument, manual cleaning will not be performed and the node will be put in ``clean failed`` provision state with an appropriate error message. If, during the cleaning process, a cleaning step determines that it has incorrect keyword arguments, all earlier steps will be performed and then the node will be put in ``clean failed`` provision state with an appropriate error message. An example of the request body for this API:: { "target":"clean", "clean_steps": [{ "interface": "raid", "step": "create_configuration", "args": {"create_nonroot_volumes": false} }, { "interface": "deploy", "step": "erase_devices" }] } In the above example, the node's RAID interface would configure hardware RAID without non-root volumes, and then all devices would be erased (in that order). Starting manual cleaning via "openstack baremetal" CLI ------------------------------------------------------ Manual cleaning is available via the ``openstack baremetal node clean`` command, starting with Bare Metal API version 1.15. 
The argument ``--clean-steps`` must be specified. Its value is one of: - a JSON string - path to a JSON file whose contents are passed to the API - '-', to read from stdin. This allows piping in the clean steps. Using '-' to signify stdin is common in Unix utilities. The following examples assume that the Bare Metal API version was set via the ``OS_BAREMETAL_API_VERSION`` environment variable. (The alternative is to add ``--os-baremetal-api-version 1.15`` to the command.):: export OS_BAREMETAL_API_VERSION=1.15 Examples of doing this with a JSON string:: openstack baremetal node clean \ --clean-steps '[{"interface": "deploy", "step": "erase_devices_metadata"}]' openstack baremetal node clean \ --clean-steps '[{"interface": "deploy", "step": "erase_devices"}]' Or with a file:: openstack baremetal node clean \ --clean-steps my-clean-steps.txt Or with stdin:: cat my-clean-steps.txt | openstack baremetal node clean \ --clean-steps - Cleaning Network ================ If you are using the Neutron DHCP provider (the default) you will also need to ensure you have configured a cleaning network. This network will be used to boot the ramdisk for in-band cleaning. You can use the same network as your tenant network. For steps to set up the cleaning network, please see :ref:`configure-cleaning`. .. _InbandvsOutOfBandCleaning: In-band vs out-of-band ====================== Ironic uses two main methods to perform actions on a node: in-band and out-of-band. Ironic supports using both methods to clean a node. In-band ------- In-band steps are performed by ironic making API calls to a ramdisk running on the node using a deploy interface. Currently, all the deploy interfaces support in-band cleaning. By default, ironic-python-agent ships with a minimal cleaning configuration, only erasing disks. However, you can add your own cleaning steps and/or override default cleaning steps with a custom Hardware Manager. 
Out-of-band ----------- Out-of-band are actions performed by your management controller, such as IPMI, iLO, or DRAC. Out-of-band steps will be performed by ironic using a power or management interface. Which steps are performed depends on the hardware type and hardware itself. For Out-of-Band cleaning operations supported by iLO hardware types, refer to :ref:`ilo_node_cleaning`. FAQ === How are cleaning steps ordered? ------------------------------- For automated cleaning, cleaning steps are ordered by integer priority, where a larger integer is a higher priority. In case of a conflict between priorities across hardware interfaces, the following resolution order is used: #. Power interface #. Management interface #. Deploy interface #. BIOS interface #. RAID interface For manual cleaning, the cleaning steps should be specified in the desired order. How do I skip a cleaning step? ------------------------------ For automated cleaning, cleaning steps with a priority of 0 or None are skipped. How do I change the priority of a cleaning step? ------------------------------------------------ For manual cleaning, specify the cleaning steps in the desired order. For automated cleaning, it depends on whether the cleaning steps are out-of-band or in-band. Most out-of-band cleaning steps have an explicit configuration option for priority. Changing the priority of an in-band (ironic-python-agent) cleaning step requires use of a custom HardwareManager. The only exception is ``erase_devices``, which can have its priority set in ironic.conf. For instance, to disable erase_devices, you'd set the following configuration option:: [deploy] erase_devices_priority=0 To enable/disable the in-band disk erase using ``ilo`` hardware type, use the following configuration option:: [ilo] clean_priority_erase_devices=0 The generic hardware manager first tries to perform ATA disk erase by using ``hdparm`` utility. 
If ATA disk erase is not supported, it performs software based disk erase using ``shred`` utility. By default, the number of iterations performed by ``shred`` for software based disk erase is 1. To configure the number of iterations, use the following configuration option:: [deploy] erase_devices_iterations=1 What cleaning step is running? ------------------------------ To check what cleaning step the node is performing or attempted to perform and failed, run the following command; it will return the value in the node's ``driver_internal_info`` field:: openstack baremetal node show $node_ident -f value -c driver_internal_info The ``clean_steps`` field will contain a list of all remaining steps with their priorities, and the first one listed is the step currently in progress or that the node failed before going into ``clean failed`` state. Should I disable automated cleaning? ------------------------------------ Automated cleaning is recommended for ironic deployments, however, there are some tradeoffs to having it enabled. For instance, ironic cannot deploy a new instance to a node that is currently cleaning, and cleaning can be a time consuming process. To mitigate this, we suggest using disks with support for cryptographic ATA Security Erase, as typically the erase_devices step in the deploy interface takes the longest time to complete of all cleaning steps. Why can't I power on/off a node while it's cleaning? ---------------------------------------------------- During cleaning, nodes may be performing actions that shouldn't be interrupted, such as BIOS or Firmware updates. As a result, operators are forbidden from changing power state via the ironic API while a node is cleaning. Troubleshooting =============== If cleaning fails on a node, the node will be put into ``clean failed`` state and placed in maintenance mode, to prevent ironic from taking actions on the node. 
Nodes in ``clean failed`` will not be powered off, as the node might be in a state such that powering it off could damage the node or remove useful information about the nature of the cleaning failure. A ``clean failed`` node can be moved to ``manageable`` state, where it cannot be scheduled by nova and you can safely attempt to fix the node. To move a node from ``clean failed`` to ``manageable``:: openstack baremetal node manage $node_ident You can now take actions on the node, such as replacing a bad disk drive. Strategies for determining why a cleaning step failed include checking the ironic conductor logs, viewing logs on the still-running ironic-python-agent (if an in-band step failed), or performing general hardware troubleshooting on the node. When the node is repaired, you can move the node back to ``available`` state, to allow it to be scheduled by nova. :: # First, move it out of maintenance mode openstack baremetal node maintenance unset $node_ident # Now, make the node available for scheduling by nova openstack baremetal node provide $node_ident The node will begin automated cleaning from the start, and move to ``available`` state when complete. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/conductor-groups.rst0000644000175000017500000000464000000000000023564 0ustar00coreycorey00000000000000.. _conductor-groups: ================ Conductor Groups ================ Overview ======== Large scale operators tend to have needs that involve creating well defined and delinated resources. In some cases, these systems may reside close by or in far away locations. Reasoning may be simple or complex, and yet is only known to the deployer and operator of the infrastructure. 
A common case is the need for delineated high availability domains where it would be much more efficient to manage a datacenter in Antarctica with a conductor in Antarctica, as opposed to a conductor in New York City. How it works ============ Starting in ironic 11.1, each node has a ``conductor_group`` field which influences how the ironic conductor calculates (and thus allocates) baremetal nodes under ironic's management. This calculation is performed independently by each operating conductor and as such if a conductor has a ``[conductor]conductor_group`` configuration option defined in its `ironic.conf` configuration file, the conductor will then be limited to only managing nodes with a matching ``conductor_group`` string. .. note:: Any conductor without a ``[conductor]conductor_group`` setting will only manage baremetal nodes without a ``conductor_group`` value set upon node creation. If no such conductor is present when conductor groups are configured, node creation will fail unless a ``conductor_group`` is specified upon node creation. .. warning:: Nodes without a ``conductor_group`` setting can only be managed when a conductor exists that does not have a ``[conductor]conductor_group`` defined. If all conductors have been migrated to use a conductor group, such nodes are effectively "orphaned". How to use ========== A conductor group value may be any case insensitive string up to 255 characters long which matches the ``^[a-zA-Z0-9_\-\.]*$`` regular expression. #. Set the ``[conductor]conductor_group`` option in ironic.conf on one or more, but not all conductors:: [conductor] conductor_group = OperatorDefinedString #. Restart the ironic-conductor service. #. Set the conductor group on one or more nodes:: openstack baremetal node set \ --conductor-group "OperatorDefinedString" #. As desired and as needed, remaining conductors can be updated with the first two steps. 
Please be mindful of the constraints covered earlier in the document related to ability to manage nodes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/console.rst0000644000175000017500000002312300000000000021706 0ustar00coreycorey00000000000000.. _console: ================================= Configuring Web or Serial Console ================================= Overview -------- There are two types of console which are available in Bare Metal service, one is web console (`Node web console`_) which is available directly from web browser, another is serial console (`Node serial console`_). Node web console ---------------- The web console can be configured in Bare Metal service in the following way: * Install shellinabox in ironic conductor node. For RHEL/CentOS, shellinabox package is not present in base repositories, user must enable EPEL repository, you can find more from `FedoraProject page`_. .. note:: shellinabox is no longer maintained by the authorized author. `This `_ is a fork of the project on GitHub that aims to continue with maintenance of the shellinabox project. Installation example: Ubuntu:: sudo apt-get install shellinabox RHEL7/CentOS7:: sudo yum install shellinabox Fedora:: sudo dnf install shellinabox You can find more about shellinabox on the `shellinabox page`_. You can optionally use the SSL certificate in shellinabox. If you want to use the SSL certificate in shellinabox, you should install openssl and generate the SSL certificate. 1. Install openssl, for example: Ubuntu:: sudo apt-get install openssl RHEL7/CentOS7:: sudo yum install openssl Fedora:: sudo dnf install openssl 2. 
Generate the SSL certificate, here is an example, you can find more about openssl on the `openssl page`_:: cd /tmp/ca openssl genrsa -des3 -out my.key 1024 openssl req -new -key my.key -out my.csr cp my.key my.key.org openssl rsa -in my.key.org -out my.key openssl x509 -req -days 3650 -in my.csr -signkey my.key -out my.crt cat my.crt my.key > certificate.pem * Customize the console section in the Bare Metal service configuration file (/etc/ironic/ironic.conf), if you want to use SSL certificate in shellinabox, you should specify ``terminal_cert_dir``. for example:: [console] # # Options defined in ironic.drivers.modules.console_utils # # Path to serial console terminal program. Used only by Shell # In A Box console. (string value) #terminal=shellinaboxd # Directory containing the terminal SSL cert (PEM) for serial # console access. Used only by Shell In A Box console. (string # value) terminal_cert_dir=/tmp/ca # Directory for holding terminal pid files. If not specified, # the temporary directory will be used. (string value) #terminal_pid_dir= # Time interval (in seconds) for checking the status of # console subprocess. (integer value) #subprocess_checking_interval=1 # Time (in seconds) to wait for the console subprocess to # start. (integer value) #subprocess_timeout=10 * Append console parameters for bare metal PXE boot in the Bare Metal service configuration file (/etc/ironic/ironic.conf). See the reference for configuration in :ref:`kernel-boot-parameters`. * Enable the ``ipmitool-shellinabox`` console interface, for example: .. code-block:: ini [DEFAULT] enabled_console_interfaces = ipmitool-shellinabox,no-console * Configure node web console. 
If the node uses a hardware type, for example ``ipmi``, set the node's console interface to ``ipmitool-shellinabox``:: openstack --os-baremetal-api-version 1.31 baremetal node set \ --console-interface ipmitool-shellinabox Enable the web console, for example:: openstack baremetal node set \ --driver-info = openstack baremetal node console enable Check whether the console is enabled, for example:: openstack baremetal node validate Disable the web console, for example:: openstack baremetal node console disable openstack baremetal node unset --driver-info The ```` is driver dependent. The actual name of this field can be checked in driver properties, for example:: openstack baremetal driver property list For the ``ipmi`` hardware type, this option is ``ipmi_terminal_port``. Give a customized port number to ````, for example ``8023``, this customized port is used in web console url. Get web console information for a node as follows:: openstack baremetal node console show +-----------------+----------------------------------------------------------------------+ | Property | Value | +-----------------+----------------------------------------------------------------------+ | console_enabled | True | | console_info | {u'url': u'http://:', u'type': u'shellinabox'} | +-----------------+----------------------------------------------------------------------+ You can open web console using above ``url`` through web browser. If ``console_enabled`` is ``false``, ``console_info`` is ``None``, web console is disabled. If you want to launch web console, see the ``Configure node web console`` part. .. _`shellinabox page`: https://code.google.com/archive/p/shellinabox/ .. _`openssl page`: https://www.openssl.org/ .. _`FedoraProject page`: https://fedoraproject.org/wiki/Infrastructure/Mirroring Node serial console ------------------- Serial consoles for nodes are implemented using `socat`_. It is supported by the ``ipmi`` and ``irmc`` hardware types. 
Serial consoles can be configured in the Bare Metal service as follows: * Install socat on the ironic conductor node. Also, ``socat`` needs to be in the $PATH environment variable that the ironic-conductor service uses. Installation example: Ubuntu:: sudo apt-get install socat RHEL7/CentOS7:: sudo yum install socat Fedora:: sudo dnf install socat * Append console parameters for bare metal PXE boot in the Bare Metal service configuration file. See the reference on how to configure them in :ref:`kernel-boot-parameters`. * Enable the ``ipmitool-socat`` console interface, for example: .. code-block:: ini [DEFAULT] enabled_console_interfaces = ipmitool-socat,no-console * Configure node console. If the node uses a hardware type, for example ``ipmi``, set the node's console interface to ``ipmitool-socat``:: openstack --os-baremetal-api-version 1.31 baremetal node set \ --console-interface ipmitool-socat Enable the serial console, for example:: openstack baremetal node set --driver-info ipmi_terminal_port= openstack baremetal node console enable Check whether the serial console is enabled, for example:: openstack baremetal node validate Disable the serial console, for example:: openstack baremetal node console disable openstack baremetal node unset --driver-info Serial console information is available from the Bare Metal service. Get serial console information for a node from the Bare Metal service as follows:: openstack baremetal node console show +-----------------+----------------------------------------------------------------------+ | Property | Value | +-----------------+----------------------------------------------------------------------+ | console_enabled | True | | console_info | {u'url': u'tcp://:', u'type': u'socat'} | +-----------------+----------------------------------------------------------------------+ If ``console_enabled`` is ``false`` or ``console_info`` is ``None`` then the serial console is disabled. 
If you want to launch the serial console, see the ``Configure node console`` section.
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1823993 ironic-14.0.1.dev163/doc/source/admin/drivers/0000755000175000017500000000000000000000000021167 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/drivers/ansible.rst0000644000175000017500000004271000000000000023342 0ustar00coreycorey00000000000000======================== Ansible deploy interface ======================== `Ansible`_ is a mature and popular automation tool, written in Python and requiring no agents running on the node being configured. All communications with the node are by default performed over secure SSH transport. The ``ansible`` deploy interface uses Ansible playbooks to define the deployment logic. It is not based on :ironic-python-agent-doc:`Ironic Python Agent (IPA) <>` and does not generally need IPA to be running in the deploy ramdisk. Overview ======== The main advantage of this deploy interface is extended flexibility in regards to changing and adapting node deployment logic for specific use cases, via Ansible tooling that is already familiar to operators. It can be used to shorten the usual feature development cycle of * implementing logic in ironic, * implementing logic in IPA, * rebuilding deploy ramdisk, * uploading deploy ramdisk to Glance/HTTP storage, * reassigning deploy ramdisk to nodes, * restarting ironic-conductor service(s) and * running a test deployment by using a "stable" deploy ramdisk and not requiring ironic-conductor restarts (see `Extending playbooks`_). The main disadvantage of this deploy interface is the synchronous manner of performing deployment/cleaning tasks. 
A separate ``ansible-playbook`` process is spawned for each node being provisioned or cleaned, which consumes one thread from the thread pool available to the ``ironic-conductor`` process and blocks this thread until the node provisioning or cleaning step is finished or fails. This has to be taken into account when planning an ironic deployment that enables this deploy interface. Each action (deploy, clean) is described by a single playbook with roles, which is run whole during deployment, or tag-wise during cleaning. Control of cleaning steps is through tags and auxiliary clean steps file. The playbooks for actions can be set per-node, as can the clean steps file. Features -------- Similar to deploy interfaces relying on :ironic-python-agent-doc:`Ironic Python Agent (IPA) <>`, this deploy interface also depends on the deploy ramdisk calling back to ironic API's ``heartbeat`` endpoint. However, the driver is currently synchronous, so only the first heartbeat is processed and is used as a signal to start ``ansible-playbook`` process. User images ~~~~~~~~~~~ Supports whole-disk images and partition images: - compressed images are downloaded to RAM and converted to disk device; - raw images are streamed to disk directly. For partition images the driver will create root partition, and, if requested, ephemeral and swap partitions as set in node's ``instance_info`` by the Compute service or operator. The create partition table will be of ``msdos`` type by default, the node's ``disk_label`` capability is honored if set in node's ``instance_info`` (see also :ref:`choosing_the_disk_label`). Configdrive partition ~~~~~~~~~~~~~~~~~~~~~ Creating a configdrive partition is supported for both whole disk and partition images, on both ``msdos`` and ``GPT`` labeled disks. Root device hints ~~~~~~~~~~~~~~~~~ Root device hints are currently supported in their basic form only, with exact matches (see :ref:`root-device-hints` for more details). 
If no root device hint is provided for the node, the first device returned as part of ``ansible_devices`` fact is used as root device to create partitions on or write the whole disk image to. Node cleaning ~~~~~~~~~~~~~ Cleaning is supported, both automated and manual. The driver has two default clean steps: - wiping device metadata - disk shredding Their priority can be overridden via ``[deploy]\erase_devices_metadata_priority`` and ``[deploy]\erase_devices_priority`` options, respectively, in the ironic configuration file. As in the case of this driver all cleaning steps are known to the ironic-conductor service, booting the deploy ramdisk is completely skipped when there are no cleaning steps to perform. .. note:: Aborting cleaning steps is not supported. Logging ~~~~~~~ Logging is implemented as custom Ansible callback module, that makes use of ``oslo.log`` and ``oslo.config`` libraries and can re-use logging configuration defined in the main ironic configuration file to set logging for Ansible events, or use a separate file for this purpose. It works best when ``journald`` support for logging is enabled. Requirements ============ Ansible Tested with, and targets, Ansible 2.5.x Bootstrap image requirements ---------------------------- - password-less sudo permissions for the user used by Ansible - python 2.7.x - openssh-server - GNU coreutils - utils-linux - parted - gdisk - qemu-utils - python-requests (for ironic callback and streaming image download) - python-netifaces (for ironic callback) A set of scripts to build a suitable deploy ramdisk based on TinyCore Linux and ``tinyipa`` ramdisk, and an element for ``diskimage-builder`` can be found in ironic-staging-drivers_ project but will be eventually migrated to the new ironic-python-agent-builder_ project. Setting up your environment =========================== #. 
Install ironic (either as part of OpenStack or standalone) - If using ironic as part of OpenStack, ensure that the Image service is configured to use the Object Storage service as backend, and the Bare Metal service is configured accordingly, see :doc:`Configure the Image service for temporary URLs <../../install/configure-glance-swift>`. #. Install Ansible version as specified in ``ironic/driver-requirements.txt`` file #. Edit ironic configuration file A. Add ``ansible`` to the list of deploy interfaces defined in ``[DEFAULT]\enabled_deploy_interfaces`` option. B. Ensure that a hardware type supporting ``ansible`` deploy interface is enabled in ``[DEFAULT]\enabled_hardware_types`` option. C. Modify options in the ``[ansible]`` section of ironic's configuration file if needed (see `Configuration file`_). #. (Re)start ironic-conductor service #. Build suitable deploy kernel and ramdisk images #. Upload them to Glance or put in your HTTP storage #. Create new or update existing nodes to use the enabled driver of your choice and populate `Driver properties for the Node`_ when different from defaults. #. Deploy the node as usual. Ansible-deploy options ---------------------- Configuration file ~~~~~~~~~~~~~~~~~~~ Driver options are configured in ``[ansible]`` section of ironic configuration file, for their descriptions and default values please see `configuration file sample <../../configuration/config.html#ansible>`_. Driver properties for the Node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Set them per-node via ``openstack baremetal node set`` command, for example: .. code-block:: shell openstack baremetal node set \ --deploy-interface ansible \ --driver-info ansible_username=stack \ --driver-info ansible_key_file=/etc/ironic/id_rsa ansible_username User name to use for Ansible to access the node. Default is taken from ``[ansible]/default_username`` option of the ironic configuration file (defaults to ``ansible``). ansible_key_file Private SSH key used to access the node. 
Default is taken from ``[ansible]/default_key_file`` option of the ironic configuration file. If neither is set, the default private SSH keys of the user running the ``ironic-conductor`` process will be used. ansible_deploy_playbook Playbook to use when deploying this node. Default is taken from ``[ansible]/default_deploy_playbook`` option of the ironic configuration file (defaults to ``deploy.yaml``). ansible_shutdown_playbook Playbook to use to gracefully shutdown the node in-band. Default is taken from ``[ansible]/default_shutdown_playbook`` option of the ironic configuration file (defaults to ``shutdown.yaml``). ansible_clean_playbook Playbook to use when cleaning the node. Default is taken from ``[ansible]/default_clean_playbook`` option of the ironic configuration file (defaults to ``clean.yaml``). ansible_clean_steps_config Auxiliary YAML file that holds description of cleaning steps used by this node, and defines playbook tags in ``ansible_clean_playbook`` file corresponding to each cleaning step. Default is taken from ``[ansible]/default_clean_steps_config`` option of the ironic configuration file (defaults to ``clean_steps.yaml``). ansible_python_interpreter Absolute path to the python interpreter on the managed machine. Default is taken from ``[ansible]/default_python_interpreter`` option of the ironic configuration file. Ansible uses ``/usr/bin/python`` by default. Customizing the deployment logic ================================ Expected playbooks directory layout ----------------------------------- The ``[ansible]\playbooks_path`` option in the ironic configuration file is expected to have a standard layout for an Ansible project with some additions:: | \_ inventory \_ add-ironic-nodes.yaml \_ roles \_ role1 \_ role2 \_ ... | \_callback_plugins \_ ... | \_ library \_ ... The extra files relied by this driver are: inventory Ansible inventory file containing a single entry of ``conductor ansible_connection=local``. 
This basically defines an alias to ``localhost``. Its purpose is to make logging for tasks performed by Ansible locally and referencing the localhost in playbooks more intuitive. This also suppresses warnings produced by Ansible about ``hosts`` file being empty. add-ironic-nodes.yaml This file contains an Ansible play that populates in-memory Ansible inventory with access information received from the ansible-deploy interface, as well as some per-node variables. Include it in all your custom playbooks as the first play. The default ``deploy.yaml`` playbook is using several smaller roles that correspond to particular stages of deployment process: - ``discover`` - e.g. set root device and image target - ``prepare`` - if needed, prepare system, for example create partitions - ``deploy`` - download/convert/write user image and configdrive - ``configure`` - post-deployment steps, e.g. installing the bootloader Some more included roles are: - ``shutdown`` - used to gracefully power the node off in-band - ``clean`` - defines cleaning procedure, with each clean step defined as separate playbook tag. Extending playbooks ------------------- Most probably you'd start experimenting like this: #. Create a copy of ``deploy.yaml`` playbook *in the same folder*, name it distinctively. #. Create Ansible roles with your customized logic in ``roles`` folder. A. In your custom deploy playbook, replace the ``prepare`` role with your own one that defines steps to be run *before* image download/writing. This is a good place to set facts overriding those provided/omitted by the driver, like ``ironic_partitions`` or ``ironic_root_device``, and create custom partitions or (software) RAIDs. B. In your custom deploy playbook, replace the ``configure`` role with your own one that defines steps to be run *after* image is written to disk. This is a good place for example to configure the bootloader and add kernel options to avoid additional reboots. C. Use those new roles in your new playbook. 
#. Assign the custom deploy playbook you've created to the node's ``driver_info/ansible_deploy_playbook`` field. #. Run deployment. A. No ironic-conductor restart is necessary. B. A new deploy ramdisk must be built and assigned to nodes only when you want to use a command/script/package not present in the current deploy ramdisk and you can not or do not want to install those at runtime. Variables you have access to ---------------------------- This driver will pass the single JSON-ified extra var argument to Ansible (as in ``ansible-playbook -e ..``). Those values are then accessible in your plays as well (some of them are optional and might not be defined): .. code-block:: yaml ironic: nodes: - ip: "" name: "" user: "" extra: "" image: url: "" disk_format: "" container_format: "" checksum: "" mem_req: "" tags: "" properties: "" configdrive: type: "" location: "" partition_info: label: "" preserve_ephemeral: "" ephemeral_format: "" partitions: "" raid_config: "" ``ironic.nodes`` List of dictionaries (currently of only one element) that will be used by ``add-ironic-nodes.yaml`` play to populate in-memory inventory. It also contains a copy of node's ``extra`` field so you can access it in the playbooks. The Ansible's host is set to node's UUID. ``ironic.image`` All fields of node's ``instance_info`` that start with ``image_`` are passed inside this variable. Some extra notes and fields: - ``mem_req`` is calculated from image size (if available) and config option ``[ansible]extra_memory``. - if ``checksum`` is not in the form ``:``, hashing algorithm is assumed to be ``md5`` (default in Glance). - ``validate_certs`` - boolean (``yes/no``) flag that turns validating image store SSL certificate on or off (default is 'yes'). Governed by ``[ansible]image_store_insecure`` option in ironic configuration file. - ``cafile`` - custom CA bundle to use for validating image store SSL certificate. Takes value of ``[ansible]image_store_cafile`` if that is defined. 
Currently is not used by default playbooks, as Ansible has no way to specify the custom CA bundle to use for single HTTPS actions, however you can use this value in your custom playbooks to for example upload and register this CA in the ramdisk at deploy time. - ``client_cert`` - cert file for client-side SSL authentication. Takes value of ``[ansible]image_store_certfile`` option if defined. Currently is not used by default playbooks, however you can use this value in your custom playbooks. - ``client_key`` - private key file for client-side SSL authentication. Takes value of ``[ansible]image_store_keyfile`` option if defined. Currently is not used by default playbooks, however you can use this value in your custom playbooks. ``ironic.partition_info.partitions`` Optional. List of dictionaries defining partitions to create on the node in the form: .. code-block:: yaml partitions: - name: "" unit: "" size: "" type: "" align: "" format: "" flags: flag_name: "" The driver will populate this list from ``root_gb``, ``swap_mb`` and ``ephemeral_gb`` fields of ``instance_info``. The driver will also prepend the ``bios_grub``-labeled partition when deploying on GPT-labeled disk, and pre-create a 64 MiB partition for configdrive if it is set in ``instance_info``. Please read the documentation included in the ``ironic_parted`` module's source for more info on the module and its arguments. ``ironic.partition_info.ephemeral_format`` Optional. Taken from ``instance_info``, it defines file system to be created on the ephemeral partition. Defaults to the value of ``[pxe]default_ephemeral_format`` option in ironic configuration file. ``ironic.partition_info.preserve_ephemeral`` Optional. Taken from the ``instance_info``, it specifies if the ephemeral partition must be preserved or rebuilt. Defaults to ``no``. ``ironic.raid_config`` Taken from the ``target_raid_config`` if not empty, it specifies the RAID configuration to apply. 
As usual for Ansible playbooks, you also have access to standard Ansible facts discovered by ``setup`` module. Included custom Ansible modules ------------------------------- The provided ``playbooks_path/library`` folder includes several custom Ansible modules used by default implementation of ``deploy`` and ``prepare`` roles. You can use these modules in your playbooks as well. ``stream_url`` Streaming download from HTTP(S) source to the disk device directly, tries to be compatible with Ansible's ``get_url`` module in terms of module arguments. Due to the low level of such operation it is not idempotent. ``ironic_parted`` creates partition tables and partitions with ``parted`` utility. Due to the low level of such operation it is not idempotent. Please read the documentation included in the module's source for more information about this module and its arguments. The name is chosen so that the ``parted`` module included in Ansible is not shadowed. .. _Ansible: https://docs.ansible.com/ansible/latest/index.html .. _ironic-staging-drivers: https://opendev.org/x/ironic-staging-drivers/src/branch/stable/pike/imagebuild .. _ironic-python-agent-builder: https://opendev.org/openstack/ironic-python-agent-builder ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/drivers/ibmc.rst0000644000175000017500000000712200000000000022635 0ustar00coreycorey00000000000000=============== iBMC driver =============== .. warning:: The ``ibmc`` driver has been deprecated due to a lack of a functioning third party CI and will be removed in the Victoria development cycle. Overview ======== The ``ibmc`` driver is targeted for Huawei V5 series rack server such as 2288H V5, CH121 V5. The iBMC hardware type enables the user to take advantage of features of `Huawei iBMC`_ to control Huawei server. 
Prerequisites ============= The `HUAWEI iBMC Client library`_ should be installed on the ironic conductor node(s). For example, it can be installed with ``pip``:: sudo pip install python-ibmcclient Enabling the iBMC driver ============================ #. Add ``ibmc`` to the list of ``enabled_hardware_types``, ``enabled_power_interfaces``, ``enabled_vendor_interfaces`` and ``enabled_management_interfaces`` in ``/etc/ironic/ironic.conf``. For example:: [DEFAULT] ... enabled_hardware_types = ibmc,ipmi enabled_power_interfaces = ibmc,ipmitool enabled_management_interfaces = ibmc,ipmitool enabled_vendor_interfaces = ibmc #. Restart the ironic conductor service:: sudo service ironic-conductor restart # Or, for RDO: sudo systemctl restart openstack-ironic-conductor Registering a node with the iBMC driver =========================================== Nodes configured to use the driver should have the ``driver`` property set to ``ibmc``. The following properties are specified in the node's ``driver_info`` field: - ``ibmc_address``: The URL address to the ibmc controller. It must include the authority portion of the URL, and can optionally include the scheme. If the scheme is missing, https is assumed. For example: https://ibmc.example.com. This is required. - ``ibmc_username``: User account with admin/server-profile access privilege. This is required. - ``ibmc_password``: User account password. This is required. - ``ibmc_verify_ca``: If ibmc_address has the **https** scheme, the driver will use a secure (TLS_) connection when talking to the ibmc controller. By default (if this is set to True), the driver will try to verify the host certificates. This can be set to the path of a certificate file or directory with trusted certificates that the driver will use for verification. To disable verifying TLS_, set this to False. This is optional. The ``openstack baremetal node create`` command can be used to enroll a node with the ``ibmc`` driver. For example: .. 
code-block:: bash openstack baremetal node create --driver ibmc --driver-info ibmc_address=https://example.com \ --driver-info ibmc_username=admin \ --driver-info ibmc_password=password For more information about enrolling nodes see :ref:`enrollment` in the install guide. Features of the ``ibmc`` hardware type ========================================= Query boot up sequence ^^^^^^^^^^^^^^^^^^^^^^ The ``ibmc`` hardware type can query current boot up sequence from the bare metal node .. code-block:: bash openstack baremetal node passthru call --http-method GET \ boot_up_seq PXE Boot and iSCSI Deploy Process with Ironic Standalone Environment ==================================================================== .. figure:: ../../images/ironic_standalone_with_ibmc_driver.svg :width: 960px :align: left :alt: Ironic standalone with iBMC driver node .. _Huawei iBMC: https://e.huawei.com/en/products/cloud-computing-dc/servers/accessories/ibmc .. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security .. _HUAWEI iBMC Client library: https://pypi.org/project/python-ibmcclient/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/drivers/idrac.rst0000644000175000017500000005162500000000000023014 0ustar00coreycorey00000000000000============ iDRAC driver ============ Overview ======== The integrated Dell Remote Access Controller (iDRAC_) is an out-of-band management platform on Dell EMC servers, and is supported directly by the ``idrac`` hardware type. This driver uses the Dell Web Services for Management (WSMAN) protocol and the standard Distributed Management Task Force (DMTF) Redfish protocol to perform all of its functions. iDRAC_ hardware is also supported by the generic ``ipmi`` and ``redfish`` hardware types, though with smaller feature sets. 
Key features of the Dell iDRAC driver include: * Out-of-band node inspection * Boot device management * Power management * RAID controller management and RAID volume configuration * BIOS settings configuration Ironic Features --------------- The ``idrac`` hardware type supports the following Ironic interfaces: * `Inspect Interface`_: Hardware inspection * Management Interface: Boot device management * Power Interface: Power management * `RAID Interface`_: RAID controller and disk management * `Vendor Interface`_: BIOS management Prerequisites ------------- The ``idrac`` hardware type requires the ``python-dracclient`` library to be installed on the ironic conductor node(s) if an Ironic node is configured to use an ``idrac-wsman`` interface implementation, for example:: sudo pip install 'python-dracclient>=3.1.0' Additionally, the ``idrac`` hardware type requires the ``sushy`` library to be installed on the ironic conductor node(s) if an Ironic node is configured to use an ``idrac-redfish`` interface implementation, for example:: sudo pip install 'python-dracclient>=3.1.0' 'sushy>=2.0.0' Enabling -------- The iDRAC driver supports WSMAN for the inspect, management, power, raid, and vendor interfaces. In addition, it supports Redfish for the inspect, management, and power interfaces. The iDRAC driver allows you to mix and match WSMAN and Redfish interfaces. The ``idrac-wsman`` implementation must be enabled to use WSMAN for an interface. The ``idrac-redfish`` implementation must be enabled to use Redfish for an interface. .. NOTE:: Redfish is supported for only the inspect, management, and power interfaces at the present time. To enable the ``idrac`` hardware type with the minimum interfaces, all using WSMAN, add the following to your ``/etc/ironic/ironic.conf``: .. 
code-block:: ini [DEFAULT] enabled_hardware_types=idrac enabled_management_interfaces=idrac-wsman enabled_power_interfaces=idrac-wsman To enable all optional features (inspection, RAID, and vendor passthru) using Redfish where it is supported and WSMAN where not, use the following configuration: .. code-block:: ini [DEFAULT] enabled_hardware_types=idrac enabled_inspect_interfaces=idrac-redfish enabled_management_interfaces=idrac-redfish enabled_power_interfaces=idrac-redfish enabled_raid_interfaces=idrac-wsman enabled_vendor_interfaces=idrac-wsman Below is the list of supported interface implementations in priority order: ================ =================================================== Interface Supported Implementations ================ =================================================== ``bios`` ``no-bios`` ``boot`` ``ipxe``, ``pxe`` ``console`` ``no-console`` ``deploy`` ``iscsi``, ``direct``, ``ansible``, ``ramdisk`` ``inspect`` ``idrac-wsman``, ``idrac``, ``idrac-redfish``, ``inspector``, ``no-inspect`` ``management`` ``idrac-wsman``, ``idrac``, ``idrac-redfish`` ``network`` ``flat``, ``neutron``, ``noop`` ``power`` ``idrac-wsman``, ``idrac``, ``idrac-redfish`` ``raid`` ``idrac-wsman``, ``idrac``, ``no-raid`` ``rescue`` ``no-rescue``, ``agent`` ``storage`` ``noop``, ``cinder``, ``external`` ``vendor`` ``idrac-wsman``, ``idrac``, ``no-vendor`` ================ =================================================== .. NOTE:: ``idrac`` is the legacy name of the WSMAN interface. It has been deprecated in favor of ``idrac-wsman`` and may be removed in a future release. Protocol-specific Properties ---------------------------- The WSMAN and Redfish protocols require different properties to be specified in the Ironic node's ``driver_info`` field to communicate with the bare metal system's iDRAC. The WSMAN protocol requires the following properties: * ``drac_username``: The WSMAN user name to use when communicating with the iDRAC. Usually ``root``. 
* ``drac_password``: The password for the WSMAN user to use when communicating with the iDRAC. * ``drac_address``: The IP address of the iDRAC. The Redfish protocol requires the following properties: * ``redfish_username``: The Redfish user name to use when communicating with the iDRAC. Usually ``root``. * ``redfish_password``: The password for the Redfish user to use when communicating with the iDRAC. * ``redfish_address``: The URL address of the iDRAC. It must include the authority portion of the URL, and can optionally include the scheme. If the scheme is missing, https is assumed. * ``redfish_system_id``: The Redfish ID of the server to be managed. This should always be: ``/redfish/v1/Systems/System.Embedded.1``. If using only interfaces which use WSMAN (``idrac-wsman``), then only the WSMAN properties must be supplied. If using only interfaces which use Redfish (``idrac-redfish``), then only the Redfish properties must be supplied. If using a mix of interfaces, where some use WSMAN and others use Redfish, both the WSMAN and Redfish properties must be supplied. Enrolling --------- The following command enrolls a bare metal node with the ``idrac`` hardware type using WSMAN for all interfaces: .. code-block:: bash openstack baremetal node create --driver idrac \ --driver-info drac_username=user \ --driver-info drac_password=pa$$w0rd \ --driver-info drac_address=drac.host The following command enrolls a bare metal node with the ``idrac`` hardware type using Redfish for all interfaces: .. 
code-block:: bash openstack baremetal node create --driver idrac \ --driver-info redfish_username=user \ --driver-info redfish_password=pa$$w0rd \ --driver-info redfish_address=drac.host \ --driver-info redfish_system_id=/redfish/v1/Systems/System.Embedded.1 \ --inspect-interface idrac-redfish \ --management-interface idrac-redfish \ --power-interface idrac-redfish \ --raid-interface no-raid \ --vendor-interface no-vendor The following command enrolls a bare metal node with the ``idrac`` hardware type assuming a mix of Redfish and WSMAN interfaces are used: .. code-block:: bash openstack baremetal node create --driver idrac \ --driver-info drac_username=user \ --driver-info drac_password=pa$$w0rd --driver-info drac_address=drac.host \ --driver-info redfish_username=user \ --driver-info redfish_password=pa$$w0rd \ --driver-info redfish_address=drac.host \ --driver-info redfish_system_id=/redfish/v1/Systems/System.Embedded.1 \ --inspect-interface idrac-redfish \ --management-interface idrac-redfish \ --power-interface idrac-redfish .. NOTE:: If using WSMAN for the management interface, then WSMAN must be used for the power interface. The same applies to Redfish. It is currently not possible to use Redfish for one and WSMAN for the other. Inspect Interface ================= The Dell iDRAC out-of-band inspection process catalogs all the same attributes of the server as the IPMI driver. Unlike IPMI, it does this without requiring the system to be rebooted, or even to be powered on. Inspection is performed using the Dell WSMAN or Redfish protocol directly without affecting the operation of the system being inspected. The inspection discovers the following properties: * ``cpu_arch``: cpu architecture * ``cpus``: number of cpus * ``local_gb``: disk size in gigabytes * ``memory_mb``: memory size in megabytes Extra capabilities: * ``boot_mode``: UEFI or BIOS boot mode. It also creates baremetal ports for each NIC port detected in the system. 
The ``idrac-wsman`` inspect interface discovers which NIC ports are configured to PXE boot and sets ``pxe_enabled`` to ``True`` on those ports. The ``idrac-redfish`` inspect interface does not currently set ``pxe_enabled`` on the ports. The user should ensure that ``pxe_enabled`` is set correctly on the ports following inspection with the ``idrac-redfish`` inspect interface. RAID Interface ============== See :doc:`/admin/raid` for more information on Ironic RAID support. The following properties are supported by the iDRAC WSMAN raid interface implementation, ``idrac-wsman``: Mandatory properties -------------------- * ``size_gb``: Size in gigabytes (integer) for the logical disk. Use ``MAX`` as ``size_gb`` if this logical disk is supposed to use the rest of the space available. * ``raid_level``: RAID level for the logical disk. Valid values are ``0``, ``1``, ``5``, ``6``, ``1+0``, ``5+0`` and ``6+0``. .. NOTE:: ``JBOD`` and ``2`` are not supported, and will fail with reason: 'Cannot calculate spans for RAID level.' Optional properties ------------------- * ``is_root_volume``: Optional. Specifies whether this disk is a root volume. By default, this is ``False``. * ``volume_name``: Optional. Name of the volume to be created. If this is not specified, it will be auto-generated. Backing physical disk hints --------------------------- See :doc:`/admin/raid` for more information on backing disk hints. These are machine-independent information. The hints are specified for each logical disk to help Ironic find the desired disks for RAID configuration. * ``disk_type`` * ``interface_type`` * ``share_physical_disks`` * ``number_of_physical_disks`` Backing physical disks ---------------------- These are Dell RAID controller-specific values and must match the names provided by the iDRAC. * ``controller``: Mandatory. The name of the controller to use. * ``physical_disks``: Optional. The names of the physical disks to use. .. 
NOTE:: ``physical_disks`` is a mandatory parameter if the property ``size_gb`` is set to ``MAX``. Examples -------- Creation of RAID ``1+0`` logical disk with six disks on one controller: .. code-block:: json { "logical_disks": [ { "controller": "RAID.Integrated.1-1", "is_root_volume": "True", "physical_disks": [ "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1", "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1", "Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1", "Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1", "Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1", "Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1"], "raid_level": "1+0", "size_gb": "MAX"}]} Manual RAID Invocation ---------------------- The following command can be used to delete any existing RAID configuration. It deletes all virtual disks/RAID volumes, unassigns all global and dedicated hot spare physical disks, and clears foreign configuration: .. code-block:: bash openstack baremetal node clean --clean-steps \ '[{"interface": "raid", "step": "delete_configuration"}]' ${node_uuid} The following command shows an example of how to set the target RAID configuration: .. code-block:: bash openstack baremetal node set --target-raid-config '{ "logical_disks": [ { "controller": "RAID.Integrated.1-1", "is_root_volume": true, "physical_disks": [ "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1", "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1"], "raid_level": "0", "size_gb": "MAX"}]}' ${node_uuid} The following command can be used to create a RAID configuration: .. code-block:: bash openstack baremetal node clean --clean-steps \ '[{"interface": "raid", "step": "create_configuration"}]' ${node_uuid} When the physical disk names or controller names are not known, the following Python code example shows how the ``python-dracclient`` can be used to fetch the information directly from the Dell bare metal: .. 
code-block:: python import dracclient.client client = dracclient.client.DRACClient( host="192.168.1.1", username="root", password="calvin") controllers = client.list_raid_controllers() print(controllers) physical_disks = client.list_physical_disks() print(physical_disks) Vendor Interface ================ Dell iDRAC BIOS management is available through the Ironic vendor passthru interface. ======================== ============ ====================================== Method Name HTTP Method Description ======================== ============ ====================================== ``abandon_bios_config`` ``DELETE`` Abandon a BIOS configuration job. ``commit_bios_config`` ``POST`` Commit a BIOS configuration job submitted through ``set_bios_config``. Required argument: ``reboot`` - indicates whether a reboot job should be automatically created with the config job. Returns a dictionary containing the ``job_id`` key with the ID of the newly created config job, and the ``reboot_required`` key indicating whether the node needs to be rebooted to execute the config job. ``get_bios_config`` ``GET`` Returns a dictionary containing the node's BIOS settings. ``list_unfinished_jobs`` ``GET`` Returns a dictionary containing the key ``unfinished_jobs``; its value is a list of dictionaries. Each dictionary represents an unfinished config job object. ``set_bios_config`` ``POST`` Change the BIOS configuration on a node. Required argument: a dictionary of {``AttributeName``: ``NewValue``}. Returns a dictionary containing the ``is_commit_required`` key indicating whether ``commit_bios_config`` needs to be called to apply the changes and the ``is_reboot_required`` value indicating whether the server must also be rebooted. Possible values are ``true`` and ``false``. ======================== ============ ====================================== Examples -------- Get BIOS Config ~~~~~~~~~~~~~~~ .. 
code-block:: bash openstack baremetal node passthru call --http-method GET ${node_uuid} get_bios_config Snippet of output showing virtualization enabled: .. code-block:: json {"ProcVirtualization": { "current_value": "Enabled", "instance_id": "BIOS.Setup.1-1:ProcVirtualization", "name": "ProcVirtualization", "pending_value": null, "possible_values": [ "Enabled", "Disabled"], "read_only": false }} There are a number of items to note from the above snippet: * ``name``: this is the name to use in a call to ``set_bios_config``. * ``current_value``: the current state of the setting. * ``pending_value``: if the value has been set, but not yet committed, the new value is shown here. The change can either be committed or abandoned. * ``possible_values``: shows a list of valid values which can be used in a call to ``set_bios_config``. * ``read_only``: indicates if the value is capable of being changed. Set BIOS Config ~~~~~~~~~~~~~~~ .. code-block:: bash openstack baremetal node passthru call ${node_uuid} set_bios_config --arg "name=value" Walkthrough of performing a BIOS configuration change: The following section demonstrates how to change BIOS configuration settings, detect that a commit and reboot are required, and act on them accordingly. The two properties that are being changed are: * Enable virtualization technology of the processor * Globally enable SR-IOV .. code-block:: bash openstack baremetal node passthru call ${node_uuid} set_bios_config \ --arg "ProcVirtualization=Enabled" \ --arg "SriovGlobalEnable=Enabled" This returns a dictionary indicating what actions are required next: .. code-block:: json { "is_reboot_required": true, "is_commit_required": true } Commit BIOS Changes ~~~~~~~~~~~~~~~~~~~ The next step is to commit the pending change to the BIOS. Note that in this example, the ``reboot`` argument is set to ``true``. The response indicates that a reboot is no longer required as it has been scheduled automatically by the ``commit_bios_config`` call. 
If the reboot argument is not supplied, the job is still created, however it remains in the ``scheduled`` state until a reboot is performed. The reboot can be initiated through the Ironic power API. .. code-block:: bash openstack baremetal node passthru call ${node_uuid} commit_bios_config \ --arg "reboot=true" .. code-block:: json { "job_id": "JID_499377293428", "reboot_required": false } The state of any executing job can be queried: .. code-block:: bash openstack baremetal node passthru call --http-method GET ${node_uuid} list_unfinished_jobs .. code-block:: json {"unfinished_jobs": [{"status": "Scheduled", "name": "ConfigBIOS:BIOS.Setup.1-1", "until_time": "TIME_NA", "start_time": "TIME_NOW", "message": "Task successfully scheduled.", "percent_complete": "0", "id": "JID_499377293428"}]} Abandon BIOS Changes ~~~~~~~~~~~~~~~~~~~~ Instead of committing, a pending change can be abandoned: .. code-block:: bash openstack baremetal node passthru call --http-method DELETE ${node_uuid} abandon_bios_config The abandon command does not provide a response body. Change Boot Mode ---------------- The boot mode of the iDRAC can be changed to: * BIOS - Also called legacy or traditional boot mode. The BIOS initializes the system’s processors, memory, bus controllers, and I/O devices. After initialization is complete, the BIOS passes control to operating system (OS) software. The OS loader uses basic services provided by the system BIOS to locate and load OS modules into system memory. After booting the system, the BIOS and embedded management controllers execute system management algorithms, which monitor and optimize the condition of the underlying hardware. BIOS configuration settings enable fine-tuning of the performance, power management, and reliability features of the system. * UEFI - The Unified Extensible Firmware Interface does not change the traditional purposes of the system BIOS. 
To a large extent, a UEFI-compliant BIOS performs the same initialization, boot, configuration, and management tasks as a traditional BIOS. However, UEFI does change the interfaces and data structures the BIOS uses to interact with I/O device firmware and operating system software. The primary intent of UEFI is to eliminate shortcomings in the traditional BIOS environment, enabling system firmware to continue scaling with industry trends. The UEFI boot mode offers: * Improved partitioning scheme for boot media * Support for media larger than 2 TB * Redundant partition tables * Flexible handoff from BIOS to OS * Consolidated firmware user interface * Enhanced resource allocation for boot device firmware The boot mode can be changed via the vendor passthru interface as follows: .. code-block:: bash openstack baremetal node passthru call ${node_uuid} set_bios_config \ --arg "BootMode=Uefi" openstack baremetal node passthru call ${node_uuid} commit_bios_config \ --arg "reboot=true" .. code-block:: bash openstack baremetal node passthru call ${node_uuid} set_bios_config \ --arg "BootMode=Bios" openstack baremetal node passthru call ${node_uuid} commit_bios_config \ --arg "reboot=true" Known Issues ============ Nodes go into maintenance mode ------------------------------ After some period of time, nodes managed by the ``idrac`` hardware type may go into maintenance mode in Ironic. This issue can be worked around by changing the Ironic power state poll interval to 70 seconds. See ``[conductor]sync_power_state_interval`` in ``/etc/ironic/ironic.conf``. .. _Ironic_RAID: https://docs.openstack.org/ironic/latest/admin/raid.html .. _iDRAC: https://www.dell.com/idracmanuals ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/drivers/ilo.rst0000644000175000017500000024525200000000000022516 0ustar00coreycorey00000000000000.. 
_ilo: ========== iLO driver ========== Overview ======== The iLO driver enables taking advantage of features of the iLO management engine in HPE ProLiant servers. The ``ilo`` hardware type is targeted for HPE ProLiant Gen8 and Gen9 systems which have `iLO 4 management engine`_. From **Pike** release ``ilo`` hardware type supports ProLiant Gen10 systems which have `iLO 5 management engine`_. iLO5 conforms to `Redfish`_ API and hence hardware type ``redfish`` (see :doc:`redfish`) is also an option for this kind of hardware but it lacks the iLO specific features. For more details and for up-to-date information (like tested platforms, known issues, etc), please check the `iLO driver wiki page `_. For enabling Gen10 systems and getting detailed information on Gen10 feature support in Ironic please check this `Gen10 wiki section`_. Hardware type ============= ProLiant hardware is primarily supported by the ``ilo`` hardware type. ``ilo5`` hardware type is only supported on ProLiant Gen10 and later systems. Both hardware can be used with reference hardware type ``ipmi`` (see :doc:`ipmitool`) and ``redfish`` (see :doc:`redfish`). For information on how to enable the ``ilo`` and ``ilo5`` hardware type, see :ref:`enable-hardware-types`. .. note:: Only HPE ProLiant Gen10 servers support hardware type ``redfish``. 
The hardware type ``ilo`` supports following HPE server features: * `Boot mode support`_ * `UEFI Secure Boot Support`_ * `Node Cleaning Support`_ * `Node Deployment Customization`_ * `Hardware Inspection Support`_ * `Swiftless deploy for intermediate images`_ * `HTTP(S) Based Deploy Support`_ * `Support for iLO driver with Standalone Ironic`_ * `RAID Support`_ * `Disk Erase Support`_ * `Initiating firmware update as manual clean step`_ * `Smart Update Manager (SUM) based firmware update`_ * `Activating iLO Advanced license as manual clean step`_ * `Firmware based UEFI iSCSI boot from volume support`_ * `Certificate based validation in iLO`_ * `Rescue mode support`_ * `Inject NMI support`_ * `Soft power operation support`_ * `BIOS configuration support`_ * `IPv6 support`_ Apart from above features hardware type ``ilo5`` also supports following features: * `Out of Band RAID Support`_ * `Out of Band Sanitize Disk Erase Support`_ Hardware interfaces ^^^^^^^^^^^^^^^^^^^ The ``ilo`` hardware type supports following hardware interfaces: * bios Supports ``ilo`` and ``no-bios``. The default is ``ilo``. They can be enabled by using the ``[DEFAULT]enabled_bios_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_bios_interfaces = ilo,no-bios * boot Supports ``ilo-virtual-media``, ``ilo-pxe`` and ``ilo-ipxe``. The default is ``ilo-virtual-media``. The ``ilo-virtual-media`` interface provides security enhanced PXE-less deployment by using iLO virtual media to boot up the bare metal node. The ``ilo-pxe`` and ``ilo-ipxe`` interfaces use PXE and iPXE respectively for deployment(just like :ref:`pxe-boot`). These interfaces do not require iLO Advanced license. They can be enabled by using the ``[DEFAULT]enabled_boot_interfaces`` option in ``ironic.conf`` as given below: .. 
code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_boot_interfaces = ilo-virtual-media,ilo-pxe,ilo-ipxe * console Supports ``ilo`` and ``no-console``. The default is ``ilo``. They can be enabled by using the ``[DEFAULT]enabled_console_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_console_interfaces = ilo,no-console .. note:: To use ``ilo`` console interface you need to enable iLO feature 'IPMI/DCMI over LAN Access' on `iLO4 `_ and `iLO5 `_ management engine. * inspect Supports ``ilo`` and ``inspector``. The default is ``ilo``. They can be enabled by using the ``[DEFAULT]enabled_inspect_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_inspect_interfaces = ilo,inspector .. note:: :ironic-inspector-doc:`Ironic Inspector <>` needs to be configured to use ``inspector`` as the inspect interface. * management Supports only ``ilo``. It can be enabled by using the ``[DEFAULT]enabled_management_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_management_interfaces = ilo * power Supports only ``ilo``. It can be enabled by using the ``[DEFAULT]enabled_power_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_power_interfaces = ilo * raid Supports ``agent`` and ``no-raid``. The default is ``no-raid``. They can be enabled by using the ``[DEFAULT]enabled_raid_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_raid_interfaces = agent,no-raid * storage Supports ``cinder`` and ``noop``. The default is ``noop``. They can be enabled by using the ``[DEFAULT]enabled_storage_interfaces`` option in ``ironic.conf`` as given below: .. 
code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_storage_interfaces = cinder,noop .. note:: The storage interface ``cinder`` is supported only when corresponding boot interface of the ``ilo`` hardware type based node is ``ilo-pxe`` or ``ilo-ipxe``. Please refer to :doc:`/admin/boot-from-volume` for configuring ``cinder`` as a storage interface. * rescue Supports ``agent`` and ``no-rescue``. The default is ``no-rescue``. They can be enabled by using the ``[DEFAULT]enabled_rescue_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo enabled_rescue_interfaces = agent,no-rescue The ``ilo5`` hardware type supports all the ``ilo`` interfaces described above, except for ``raid`` interface. The details of ``raid`` interface is as under: * raid Supports ``ilo5`` and ``no-raid``. The default is ``ilo5``. They can be enabled by using the ``[DEFAULT]enabled_raid_interfaces`` option in ``ironic.conf`` as given below: .. code-block:: ini [DEFAULT] enabled_hardware_types = ilo5 enabled_raid_interfaces = ilo5,no-raid The ``ilo`` and ``ilo5`` hardware type support all standard ``deploy`` and ``network`` interface implementations, see :ref:`enable-hardware-interfaces` for details. The following command can be used to enroll a ProLiant node with ``ilo`` hardware type: .. code-block:: console openstack baremetal node create --os-baremetal-api-version=1.38 \ --driver ilo \ --deploy-interface direct \ --raid-interface agent \ --rescue-interface agent \ --driver-info ilo_address= \ --driver-info ilo_username= \ --driver-info ilo_password= \ --driver-info ilo_deploy_iso= \ --driver-info ilo_rescue_iso= The following command can be used to enroll a ProLiant node with ``ilo5`` hardware type: .. 
code-block:: console openstack baremetal node create \ --driver ilo5 \ --deploy-interface direct \ --raid-interface ilo5 \ --rescue-interface agent \ --driver-info ilo_address= \ --driver-info ilo_username= \ --driver-info ilo_password= \ --driver-info ilo_deploy_iso= \ --driver-info ilo_rescue_iso= Please refer to :doc:`/install/enabling-drivers` for detailed explanation of hardware type. Node configuration ^^^^^^^^^^^^^^^^^^ * Each node is configured for ``ilo`` and ``ilo5`` hardware type by setting the following ironic node object's properties in ``driver_info``: - ``ilo_address``: IP address or hostname of the iLO. - ``ilo_username``: Username for the iLO with administrator privileges. - ``ilo_password``: Password for the above iLO user. - ``client_port``: (optional) Port to be used for iLO operations if you are using a custom port on the iLO. Default port used is 443. - ``client_timeout``: (optional) Timeout for iLO operations. Default timeout is 60 seconds. - ``ca_file``: (optional) CA certificate file to validate iLO. - ``console_port``: (optional) Node's UDP port for console access. Any unused port on the ironic conductor node may be used. This is required only when ``ilo-console`` interface is used. * The following properties are also required in node object's ``driver_info`` if ``ilo-virtual-media`` boot interface is used: - ``ilo_deploy_iso``: The glance UUID of the deploy ramdisk ISO image. - ``instance_info/ilo_boot_iso`` property to be either boot iso Glance UUID or a HTTP(S) URL. This is an optional property and is used when ``boot_option`` is set to ``netboot`` or ``ramdisk``. .. note:: When ``boot_option`` is set to ``ramdisk``, the ironic node must be configured to use ``ramdisk`` deploy interface. See :ref:`ramdisk-deploy` for details. - ``ilo_rescue_iso``: The glance UUID of the rescue ISO image. This is an optional property and is used when ``rescue`` interface is set to ``agent``. 
* The following properties are also required in node object's ``driver_info`` if ``ilo-pxe`` or ``ilo-ipxe`` boot interface is used: - ``deploy_kernel``: The glance UUID or a HTTP(S) URL of the deployment kernel. - ``deploy_ramdisk``: The glance UUID or a HTTP(S) URL of the deployment ramdisk. - ``rescue_kernel``: The glance UUID or a HTTP(S) URL of the rescue kernel. This is an optional property and is used when ``rescue`` interface is set to ``agent``. - ``rescue_ramdisk``: The glance UUID or a HTTP(S) URL of the rescue ramdisk. This is an optional property and is used when ``rescue`` interface is set to ``agent``. * The following parameters are mandatory in ``driver_info`` if ``ilo-inspect`` inspect interface is used and SNMPv3 inspection (`SNMPv3 Authentication` in `HPE iLO4 User Guide`_) is desired: * ``snmp_auth_user`` : The SNMPv3 user. * ``snmp_auth_prot_password`` : The auth protocol pass phrase. * ``snmp_auth_priv_password`` : The privacy protocol pass phrase. The following parameters are optional for SNMPv3 inspection: * ``snmp_auth_protocol`` : The Auth Protocol. The valid values are "MD5" and "SHA". The iLO default value is "MD5". * ``snmp_auth_priv_protocol`` : The Privacy protocol. The valid values are "AES" and "DES". The iLO default value is "DES". .. note:: If configuration values for ``ca_file``, ``client_port`` and ``client_timeout`` are not provided in the ``driver_info`` of the node, the corresponding config variables defined under ``[ilo]`` section in ironic.conf will be used. Prerequisites ============= * `proliantutils `_ is a python package which contains a set of modules for managing HPE ProLiant hardware. Install ``proliantutils`` module on the ironic conductor node. Minimum version required is 2.8.0:: $ pip install "proliantutils>=2.8.0" * ``ipmitool`` command must be present on the service node(s) where ``ironic-conductor`` is running. On most distros, this is provided as part of the ``ipmitool`` package. 
Please refer to `Hardware Inspection Support`_ for more information on recommended version. Different configuration for ilo hardware type ============================================= Glance Configuration ^^^^^^^^^^^^^^^^^^^^ 1. :glance-doc:`Configure Glance image service with its storage backend as Swift `. 2. Set a temp-url key for Glance user in Swift. For example, if you have configured Glance with user ``glance-swift`` and tenant as ``service``, then run the below command:: swift --os-username=service:glance-swift post -m temp-url-key:mysecretkeyforglance 3. Fill the required parameters in the ``[glance]`` section in ``/etc/ironic/ironic.conf``. Normally you would be required to fill in the following details:: [glance] swift_temp_url_key=mysecretkeyforglance swift_endpoint_url=https://10.10.1.10:8080 swift_api_version=v1 swift_account=AUTH_51ea2fb400c34c9eb005ca945c0dc9e1 swift_container=glance The details can be retrieved by running the below command: .. code-block:: bash $ swift --os-username=service:glance-swift stat -v | grep -i url StorageURL: http://10.10.1.10:8080/v1/AUTH_51ea2fb400c34c9eb005ca945c0dc9e1 Meta Temp-Url-Key: mysecretkeyforglance 4. Swift must be accessible with the same admin credentials configured in Ironic. For example, if Ironic is configured with the below credentials in ``/etc/ironic/ironic.conf``:: [keystone_authtoken] admin_password = password admin_user = ironic admin_tenant_name = service Ensure ``auth_version`` in ``keystone_authtoken`` is set to 2. Then, the below command should work: .. 
code-block:: bash $ swift --os-username ironic --os-password password --os-tenant-name service --auth-version 2 stat Account: AUTH_22af34365a104e4689c46400297f00cb Containers: 2 Objects: 18 Bytes: 1728346241 Objects in policy "policy-0": 18 Bytes in policy "policy-0": 1728346241 Meta Temp-Url-Key: mysecretkeyforglance X-Timestamp: 1409763763.84427 X-Trans-Id: tx51de96a28f27401eb2833-005433924b Content-Type: text/plain; charset=utf-8 Accept-Ranges: bytes 5. Restart the Ironic conductor service:: $ service ironic-conductor restart Web server configuration on conductor ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * The HTTP(S) web server can be configured in many ways. For apache web server on Ubuntu, refer `here `_ * Following config variables need to be set in ``/etc/ironic/ironic.conf``: * ``use_web_server_for_images`` in ``[ilo]`` section:: [ilo] use_web_server_for_images = True * ``http_url`` and ``http_root`` in ``[deploy]`` section:: [deploy] # Ironic compute node's http root path. (string value) http_root=/httpboot # Ironic compute node's HTTP server URL. Example: # http://192.1.2.3:8080 (string value) http_url=http://192.168.0.2:8080 ``use_web_server_for_images``: If the variable is set to ``false``, the ``ilo-virtual-media`` boot interface uses swift containers to host the intermediate floppy image and the boot ISO. If the variable is set to ``true``, it uses the local web server for hosting the intermediate files. The default value for ``use_web_server_for_images`` is False. ``http_url``: The value for this variable is prefixed with the generated intermediate files to generate a URL which is attached in the virtual media. ``http_root``: It is the directory location to which ironic conductor copies the intermediate floppy image and the boot ISO. .. note:: HTTPS is strongly recommended over HTTP web server configuration for security enhancement. 
The ``ilo-virtual-media`` boot interface will send the instance's configdrive over an encrypted channel if web server is HTTPS enabled. Enable driver ============= 1. Build a deploy ISO (and kernel and ramdisk) image, see :ref:`deploy-ramdisk` 2. See `Glance Configuration`_ for configuring glance image service with its storage backend as ``swift``. 3. Upload this image to Glance:: glance image-create --name deploy-ramdisk.iso --disk-format iso --container-format bare < deploy-ramdisk.iso 4. Enable hardware type and hardware interfaces in ``/etc/ironic/ironic.conf``:: [DEFAULT] enabled_hardware_types = ilo enabled_bios_interfaces = ilo enabled_boot_interfaces = ilo-virtual-media,ilo-pxe,ilo-ipxe enabled_power_interfaces = ilo enabled_console_interfaces = ilo enabled_raid_interfaces = agent enabled_management_interfaces = ilo enabled_inspect_interfaces = ilo enabled_rescue_interfaces = agent 5. Restart the ironic conductor service:: $ service ironic-conductor restart Optional functionalities for the ``ilo`` hardware type ====================================================== Boot mode support ^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` supports automatic detection and setting of boot mode (Legacy BIOS or UEFI). * When boot mode capability is not configured: - If config variable ``default_boot_mode`` in ``[ilo]`` section of ironic configuration file is set to either 'bios' or 'uefi', then iLO driver uses that boot mode for provisioning the baremetal ProLiant servers. - If the pending boot mode is set on the node then iLO driver uses that boot mode for provisioning the baremetal ProLiant servers. - If the pending boot mode is not set on the node then iLO driver uses 'uefi' boot mode for UEFI capable servers and "bios" when UEFI is not supported. * When boot mode capability is configured, the driver sets the pending boot mode to the configured value. * Only one boot mode (either ``uefi`` or ``bios``) can be configured for the node. 
* If the operator wants a node to boot always in ``uefi`` mode or ``bios`` mode, then they may use ``capabilities`` parameter within ``properties`` field of an ironic node. To configure a node in ``uefi`` mode, then set ``capabilities`` as below:: openstack baremetal node set --property capabilities='boot_mode:uefi' Nodes having ``boot_mode`` set to ``uefi`` may be requested by adding an ``extra_spec`` to the nova flavor:: nova flavor-key ironic-test-3 set capabilities:boot_mode="uefi" nova boot --flavor ironic-test-3 --image test-image instance-1 If ``capabilities`` is used in ``extra_spec`` as above, nova scheduler (``ComputeCapabilitiesFilter``) will match only ironic nodes which have the ``boot_mode`` set appropriately in ``properties/capabilities``. It will filter out rest of the nodes. The above facility for matching in nova can be used in heterogeneous environments where there is a mix of ``uefi`` and ``bios`` machines, and operator wants to provide a choice to the user regarding boot modes. If the flavor doesn't contain ``boot_mode`` then nova scheduler will not consider boot mode as a placement criteria, hence user may get either a BIOS or UEFI machine that matches with user specified flavors. The automatic boot ISO creation for UEFI boot mode has been enabled in Kilo. The manual creation of boot ISO for UEFI boot mode is also supported. For the latter, the boot ISO for the deploy image needs to be built separately and the deploy image's ``boot_iso`` property in glance should contain the glance UUID of the boot ISO. For building boot ISO, add ``iso`` element to the diskimage-builder command to build the image. For example:: disk-image-create ubuntu baremetal iso .. _`iLO UEFI Secure Boot Support`: UEFI Secure Boot Support ^^^^^^^^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` supports secure boot deploy. 
The UEFI secure boot can be configured in ironic by adding ``secure_boot`` parameter in the ``capabilities`` parameter within ``properties`` field of an ironic node. ``secure_boot`` is a boolean parameter and takes value as ``true`` or ``false``. To enable ``secure_boot`` on a node add it to ``capabilities`` as below:: openstack baremetal node set --property capabilities='secure_boot:true' Alternatively see `Hardware Inspection Support`_ to know how to automatically populate the secure boot capability. Nodes having ``secure_boot`` set to ``true`` may be requested by adding an ``extra_spec`` to the nova flavor:: nova flavor-key ironic-test-3 set capabilities:secure_boot="true" nova boot --flavor ironic-test-3 --image test-image instance-1 If ``capabilities`` is used in ``extra_spec`` as above, nova scheduler (``ComputeCapabilitiesFilter``) will match only ironic nodes which have the ``secure_boot`` set appropriately in ``properties/capabilities``. It will filter out rest of the nodes. The above facility for matching in nova can be used in heterogeneous environments where there is a mix of machines supporting and not supporting UEFI secure boot, and operator wants to provide a choice to the user regarding secure boot. If the flavor doesn't contain ``secure_boot`` then nova scheduler will not consider secure boot mode as a placement criteria, hence user may get a secure boot capable machine that matches with user specified flavors but deployment would not use its secure boot capability. Secure boot deploy would happen only when it is explicitly specified through flavor. Use element ``ubuntu-signed`` or ``fedora`` to build signed deploy iso and user images from `diskimage-builder `_. Please refer to :ref:`deploy-ramdisk` for more information on building deploy ramdisk. 
The below command creates files named cloud-image-boot.iso, cloud-image.initrd, cloud-image.vmlinuz and cloud-image.qcow2 in the current working directory:: cd ./bin/disk-image-create -o cloud-image ubuntu-signed baremetal iso .. note:: In UEFI secure boot, digitally signed bootloader should be able to validate digital signatures of kernel during boot process. This requires that the bootloader contains the digital signatures of the kernel. For the ``ilo-virtual-media`` boot interface, it is recommended that ``boot_iso`` property for user image contains the glance UUID of the boot ISO. If ``boot_iso`` property is not updated in glance for the user image, it would create the ``boot_iso`` using bootloader from the deploy iso. This ``boot_iso`` will be able to boot the user image in UEFI secure boot environment only if the bootloader is signed and can validate digital signatures of user image kernel. Ensure the public key of the signed image is loaded into bare metal to deploy signed images. For HPE ProLiant Gen9 servers, one can enroll public key using iLO System Utilities UI. Please refer to section ``Accessing Secure Boot options`` in `HP UEFI System Utilities User Guide `_. One can also refer to white paper on `Secure Boot for Linux on HP ProLiant servers `_ for additional details. For more up-to-date information, refer `iLO driver wiki page `_ .. _ilo_node_cleaning: Node Cleaning Support ^^^^^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` supports node cleaning. For more information on node cleaning, see :ref:`cleaning` Supported **Automated** Cleaning Operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * The automated cleaning operations supported are: * ``reset_bios_to_default``: Resets system ROM settings to default. By default, enabled with priority 10. This clean step is supported only on Gen9 and above servers. * ``reset_secure_boot_keys_to_default``: Resets secure boot keys to manufacturer's defaults. This step is supported only on Gen9 and above servers. 
By default, enabled with priority 20 . * ``reset_ilo_credential``: Resets the iLO password, if ``ilo_change_password`` is specified as part of node's driver_info. By default, enabled with priority 30. * ``clear_secure_boot_keys``: Clears all secure boot keys. This step is supported only on Gen9 and above servers. By default, this step is disabled. * ``reset_ilo``: Resets the iLO. By default, this step is disabled. * ``erase_devices``: An inband clean step that performs disk erase on all the disks including the disks visible to OS as well as the raw disks visible to Smart Storage Administrator (SSA). This step supports erasing of the raw disks visible to SSA in Proliant servers only with the ramdisk created using diskimage-builder from Ocata release. By default, this step is disabled. See `Disk Erase Support`_ for more details. * For supported in-band cleaning operations, see :ref:`InbandvsOutOfBandCleaning`. * All the automated cleaning steps have an explicit configuration option for priority. In order to disable or change the priority of the automated clean steps, respective configuration option for priority should be updated in ironic.conf. * Updating clean step priority to 0, will disable that particular clean step and will not run during automated cleaning. * Configuration Options for the automated clean steps are listed under ``[ilo]`` and ``[deploy]`` section in ironic.conf :: [ilo] clean_priority_reset_ilo=0 clean_priority_reset_bios_to_default=10 clean_priority_reset_secure_boot_keys_to_default=20 clean_priority_clear_secure_boot_keys=0 clean_priority_reset_ilo_credential=30 [deploy] erase_devices_priority=0 For more information on node automated cleaning, see :ref:`automated_cleaning` Supported **Manual** Cleaning Operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * The manual cleaning operations supported are: ``activate_license``: Activates the iLO Advanced license. 
This is an out-of-band manual cleaning step associated with the ``management`` interface. See `Activating iLO Advanced license as manual clean step`_ for user guidance on usage. Please note that this operation cannot be performed using the ``ilo-virtual-media`` boot interface as it needs this type of advanced license already active to use virtual media to boot into to start cleaning operation. Virtual media is an advanced feature. If an advanced license is already active and the user wants to overwrite the current license key, for example in case of a multi-server activation key delivered with a flexible-quantity kit or after completing an Activation Key Agreement (AKA), then the driver can still be used for executing this cleaning step. ``update_firmware``: Updates the firmware of the devices. Also an out-of-band step associated with the ``management`` interface. See `Initiating firmware update as manual clean step`_ for user guidance on usage. The supported devices for firmware update are: ``ilo``, ``cpld``, ``power_pic``, ``bios`` and ``chassis``. Please refer to below table for their commonly used descriptions. .. csv-table:: :header: "Device", "Description" :widths: 30, 80 "``ilo``", "BMC for HPE ProLiant servers" "``cpld``", "System programmable logic device" "``power_pic``", "Power management controller" "``bios``", "HPE ProLiant System ROM" "``chassis``", "System chassis device" Some devices firmware cannot be updated via this method, such as: storage controllers, host bus adapters, disk drive firmware, network interfaces and Onboard Administrator (OA). ``update_firmware_sum``: Updates all or list of user specified firmware components on the node using Smart Update Manager (SUM). It is an inband step associated with the ``management`` interface. See `Smart Update Manager (SUM) based firmware update`_ for more information on usage. * iLO with firmware version 1.5 is minimally required to support all the operations. 
For more information on node manual cleaning, see :ref:`manual_cleaning` Node Deployment Customization ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` supports customization of node deployment via deploy templates, see :ref:`node-deployment-deploy-steps` The supported deploy steps are: * ``apply_configuration``: Applies given BIOS settings on the node. See `BIOS configuration support`_. This step is part of the ``bios`` interface. * ``factory_reset``: Resets the BIOS settings on the node to factory defaults. See `BIOS configuration support`_. This step is part of the ``bios`` interface. * ``reset_bios_to_default``: Resets system ROM settings to default. This step is supported only on Gen9 and above servers. This step is part of the ``management`` interface. * ``reset_secure_boot_keys_to_default``: Resets secure boot keys to manufacturer's defaults. This step is supported only on Gen9 and above servers. This step is part of the ``management`` interface. * ``reset_ilo_credential``: Resets the iLO password. The password needs to be specified in ``ilo_password`` argument of the step. This step is part of the ``management`` interface. * ``clear_secure_boot_keys``: Clears all secure boot keys. This step is supported only on Gen9 and above servers. This step is part of the ``management`` interface. * ``reset_ilo``: Resets the iLO. This step is part of the ``management`` interface. * ``update_firmware``: Updates the firmware of the devices. This step is part of the ``management`` interface. See `Initiating firmware update as manual clean step`_ for user guidance on usage. The supported devices for firmware update are: ``ilo``, ``cpld``, ``power_pic``, ``bios`` and ``chassis``. This step is part of ``management`` interface. Please refer to below table for their commonly used descriptions. .. 
csv-table:: :header: "Device", "Description" :widths: 30, 80 "``ilo``", "BMC for HPE ProLiant servers" "``cpld``", "System programmable logic device" "``power_pic``", "Power management controller" "``bios``", "HPE ProLiant System ROM" "``chassis``", "System chassis device" Some devices firmware cannot be updated via this method, such as: storage controllers, host bus adapters, disk drive firmware, network interfaces and Onboard Administrator (OA). * ``apply_configuration``: Applies RAID configuration on the node. See :ref:`raid` for more information. This step is part of the ``raid`` interface. * ``delete_configuration``: Deletes RAID configuration on the node. See :ref:`raid` for more information. This step is part of the ``raid`` interface. Example of using deploy template with the Compute service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create a deploy template with a single step: .. code-block:: console openstack baremetal deploy template create \ CUSTOM_HYPERTHREADING_ON \ --steps '[{"interface": "bios", "step": "apply_configuration", "args": {"settings": [{"name": "ProcHyperthreading", "value": "Enabled"}]}, "priority": 150}]' Add the trait ``CUSTOM_HYPERTHREADING_ON`` to the node represented by ``$node_ident``: .. code-block:: console openstack baremetal node add trait $node_ident CUSTOM_HYPERTHREADING_ON Update the flavor ``bm-hyperthreading-on`` in the Compute service with the following property: .. code-block:: console openstack flavor set --property trait:CUSTOM_HYPERTHREADING_ON=required bm-hyperthreading-on Creating a Compute instance with this flavor will ensure that the instance is scheduled only to Bare Metal nodes with the ``CUSTOM_HYPERTHREADING_ON`` trait. When an instance is created using the ``bm-hyperthreading-on`` flavor, then the deploy steps of deploy template ``CUSTOM_HYPERTHREADING_ON`` will be executed during the deployment of the scheduled node, causing Hyperthreading to be enabled in the node's BIOS configuration. .. 
_ilo-inspection: Hardware Inspection Support ^^^^^^^^^^^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` supports hardware inspection. .. note:: * The disk size is returned by RIBCL/RIS only when RAID is preconfigured on the storage. If the storage is Direct Attached Storage, then RIBCL/RIS fails to get the disk size. * The SNMPv3 inspection gets disk size for all types of storage. If RIBCL/RIS is unable to get disk size and SNMPv3 inspection is requested, the proliantutils does SNMPv3 inspection to get the disk size. If proliantutils is unable to get the disk size, it raises an error. This feature is available in proliantutils release version >= 2.2.0. * The iLO must be updated with SNMPv3 authentication details. Please refer to the section `SNMPv3 Authentication` in `HPE iLO4 User Guide`_ for setting up authentication details on iLO. The following parameters are mandatory to be given in driver_info for SNMPv3 inspection: * ``snmp_auth_user`` : The SNMPv3 user. * ``snmp_auth_prot_password`` : The auth protocol pass phrase. * ``snmp_auth_priv_password`` : The privacy protocol pass phrase. The following parameters are optional for SNMPv3 inspection: * ``snmp_auth_protocol`` : The Auth Protocol. The valid values are "MD5" and "SHA". The iLO default value is "MD5". * ``snmp_auth_priv_protocol`` : The Privacy protocol. The valid values are "AES" and "DES". The iLO default value is "DES". The inspection process will discover the following essential properties (properties required for scheduling deployment): * ``memory_mb``: memory size * ``cpus``: number of cpus * ``cpu_arch``: cpu architecture * ``local_gb``: disk size Inspection can also discover the following extra capabilities for iLO driver: * ``ilo_firmware_version``: iLO firmware version * ``rom_firmware_version``: ROM firmware version * ``secure_boot``: secure boot is supported or not. The possible values are 'true' or 'false'. The value is returned as 'true' if secure boot is supported by the server. 
* ``server_model``: server model * ``pci_gpu_devices``: number of gpu devices connected to the bare metal. * ``nic_capacity``: the max speed of the embedded NIC adapter. * ``sriov_enabled``: true, if server has the SRIOV supporting NIC. * ``has_rotational``: true, if server has HDD disk. * ``has_ssd``: true, if server has SSD disk. * ``has_nvme_ssd``: true, if server has NVME SSD disk. * ``cpu_vt``: true, if server supports cpu virtualization. * ``hardware_supports_raid``: true, if RAID can be configured on the server using RAID controller. * ``nvdimm_n``: true, if server has NVDIMM_N type of persistent memory. * ``persistent_memory``: true, if server has persistent memory. * ``logical_nvdimm_n``: true, if server has logical NVDIMM_N configured. * ``rotational_drive__rpm``: The capabilities ``rotational_drive_4800_rpm``, ``rotational_drive_5400_rpm``, ``rotational_drive_7200_rpm``, ``rotational_drive_10000_rpm`` and ``rotational_drive_15000_rpm`` are set to true if the server has HDD drives with speed of 4800, 5400, 7200, 10000 and 15000 rpm respectively. * ``logical_raid_level_``: The capabilities ``logical_raid_level_0``, ``logical_raid_level_1``, ``logical_raid_level_2``, ``logical_raid_level_5``, ``logical_raid_level_6``, ``logical_raid_level_10``, ``logical_raid_level_50`` and ``logical_raid_level_60`` are set to true if any of the raid levels among 0, 1, 2, 5, 6, 10, 50 and 60 are configured on the system. .. note:: * The capability ``nic_capacity`` can only be discovered if ipmitool version >= 1.8.15 is used on the conductor. The latest version can be downloaded from `here `__. * The iLO firmware version needs to be 2.10 or above for nic_capacity to be discovered. * To discover IPMI based attributes you need to enable iLO feature 'IPMI/DCMI over LAN Access' on `iLO4 `_ and `iLO5 `_ management engine. * The proliantutils returns only active NICs for Gen10 ProLiant HPE servers. 
The user would need to delete the ironic ports corresponding to inactive NICs for Gen8 and Gen9 servers as proliantutils returns all the discovered (active and otherwise) NICs for Gen8 and Gen9 servers and ironic ports are created for all of them. Inspection logs a warning if the node under inspection is Gen8 or Gen9. The operator can specify these capabilities in nova flavor for node to be selected for scheduling:: nova flavor-key my-baremetal-flavor set capabilities:server_model=" Gen8" nova flavor-key my-baremetal-flavor set capabilities:nic_capacity="10Gb" nova flavor-key my-baremetal-flavor set capabilities:ilo_firmware_version=" 2.10" nova flavor-key my-baremetal-flavor set capabilities:has_ssd="true" See :ref:`capabilities-discovery` for more details and examples. Swiftless deploy for intermediate images ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` with ``ilo-virtual-media`` as boot interface can deploy and boot the server with and without ``swift`` being used for hosting the intermediate temporary floppy image (holding metadata for deploy kernel and ramdisk) and the boot ISO. A local HTTP(S) web server on each conductor node needs to be configured. Please refer to `Web server configuration on conductor`_ for more information. The HTTPS web server needs to be enabled (instead of HTTP web server) in order to send management information and images in encrypted channel over HTTPS. .. note:: This feature assumes that the user inputs are on Glance which uses swift as backend. If swift dependency has to be eliminated, please refer to `HTTP(S) Based Deploy Support`_ also. Deploy Process ~~~~~~~~~~~~~~ Please refer to `Netboot in swiftless deploy for intermediate images`_ for partition image support and `Localboot in swiftless deploy for intermediate images`_ for whole disk image support. 
HTTP(S) Based Deploy Support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The user input for the images given in ``driver_info`` like ``ilo_deploy_iso``, ``deploy_kernel`` and ``deploy_ramdisk`` and in ``instance_info`` like ``image_source``, ``kernel``, ``ramdisk`` and ``ilo_boot_iso`` may also be given as HTTP(S) URLs. The HTTP(S) web server can be configured in many ways. For the Apache web server on Ubuntu, refer `here `_. The web server may reside on a different system than the conductor nodes, but its URL must be reachable by the conductor and the bare metal nodes. Deploy Process ~~~~~~~~~~~~~~ Please refer to `Netboot with HTTP(S) based deploy`_ for partition image boot and `Localboot with HTTP(S) based deploy`_ for whole disk image boot. Support for iLO driver with Standalone Ironic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ It is possible to use ironic as standalone services without other OpenStack services. The ``ilo`` hardware type can be used in standalone ironic. This feature is referred to as ``iLO driver with standalone ironic`` in this document. Configuration ~~~~~~~~~~~~~ The HTTP(S) web server needs to be configured as described in `HTTP(S) Based Deploy Support`_ and `Web server configuration on conductor`_ needs to be configured for hosting intermediate images on conductor as described in `Swiftless deploy for intermediate images`_. Deploy Process ============== Netboot with glance and swift ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Glance; Conductor; Baremetal; Swift; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Glance [label = "Download user image"]; Conductor -> Glance [label = "Get the metadata for deploy ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for deploy ISO"]; Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"]; Conductor -> Swift [label = "Uploads the FAT32 image"]; Conductor -> Conductor [label = "Generates swift tempURL for FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image swift tempURL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Swift [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Exposes the disk over iSCSI"]; Conductor -> Conductor [label = "Connects to bare metal's disk over iSCSI and writes image"]; Conductor -> Conductor [label = "Generates the boot ISO"]; Conductor -> Swift [label = "Uploads the boot ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for boot ISO"]; Conductor -> iLO [label = "Attaches boot ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets boot device to CDROM"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> iLO [label = "Power on the node"]; iLO -> Swift [label = "Downloads boot ISO"]; iLO -> Baremetal [label = "Boots the instance kernel/ramdisk from iLO virtual media CDROM"]; Baremetal -> Baremetal [label = "Instance kernel finds root partition 
and continues booting from disk"]; } Localboot with glance and swift for partition images ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. seqdiag:: :scale: 80 diagram { Glance; Conductor; Baremetal; Swift; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Glance [label = "Get the metadata for deploy ISO"]; Glance -> Conductor [label = "Returns the metadata for deploy ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for deploy ISO"]; Conductor -> Conductor [label = "Creates the FAT32 image containing ironic API URL and driver name"]; Conductor -> Swift [label = "Uploads the FAT32 image"]; Conductor -> Conductor [label = "Generates swift tempURL for FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image swift tempURL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Swift [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> Swift [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to root partition"]; IPA -> IPA [label = "Installs boot loader"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> Baremetal [label = "Sets boot device to disk"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> iLO [label = "Power on the node"]; Baremetal -> Baremetal [label = "Boot user image from disk"]; } Localboot with glance and swift ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Glance; Conductor; Baremetal; Swift; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Glance [label = "Get the metadata for deploy ISO"]; Glance -> Conductor [label = "Returns the metadata for deploy ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for deploy ISO"]; Conductor -> Conductor [label = "Creates the FAT32 image containing ironic API URL and driver name"]; Conductor -> Swift [label = "Uploads the FAT32 image"]; Conductor -> Conductor [label = "Generates swift tempURL for FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image swift tempURL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Swift [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> Swift [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to disk"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> Baremetal [label = "Sets boot device to disk"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> iLO [label = "Power on the node"]; Baremetal -> Baremetal [label = "Boot user image from disk"]; } Netboot in swiftless deploy for intermediate images ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Glance; Conductor; Baremetal; ConductorWebserver; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Glance [label = "Download user image"]; Conductor -> Glance [label = "Get the metadata for deploy ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for deploy ISO"]; Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"]; Conductor -> ConductorWebserver [label = "Uploads the FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image URL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Swift [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Exposes the disk over iSCSI"]; Conductor -> Conductor [label = "Connects to bare metal's disk over iSCSI and writes image"]; Conductor -> Conductor [label = "Generates the boot ISO"]; Conductor -> ConductorWebserver [label = "Uploads the boot ISO"]; Conductor -> iLO [label = "Attaches boot ISO URL as virtual media CDROM"]; Conductor -> iLO [label = "Sets boot device to CDROM"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> iLO [label = "Power on the node"]; iLO -> ConductorWebserver [label = "Downloads boot ISO"]; iLO -> Baremetal [label = "Boots the instance kernel/ramdisk from iLO virtual media CDROM"]; Baremetal -> Baremetal [label = "Instance kernel finds root partition and continues booting from disk"]; } Localboot in swiftless deploy for intermediate images 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. seqdiag:: :scale: 80 diagram { Glance; Conductor; Baremetal; ConductorWebserver; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Glance [label = "Get the metadata for deploy ISO"]; Glance -> Conductor [label = "Returns the metadata for deploy ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for deploy ISO"]; Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"]; Conductor -> ConductorWebserver [label = "Uploads the FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image URL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Swift [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> Swift [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to disk"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> Baremetal [label = "Sets boot device to disk"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> Baremetal [label = "Power on the node"]; Baremetal -> Baremetal [label = "Boot user image from disk"]; } Netboot with HTTP(S) based deploy ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Webserver; Conductor; Baremetal; Swift; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Webserver [label = "Download user image"]; Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"]; Conductor -> Swift [label = "Uploads the FAT32 image"]; Conductor -> Conductor [label = "Generates swift tempURL for FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image swift tempURL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO URL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Webserver [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Exposes the disk over iSCSI"]; Conductor -> Conductor [label = "Connects to bare metal's disk over iSCSI and writes image"]; Conductor -> Conductor [label = "Generates the boot ISO"]; Conductor -> Swift [label = "Uploads the boot ISO"]; Conductor -> Conductor [label = "Generates swift tempURL for boot ISO"]; Conductor -> iLO [label = "Attaches boot ISO swift tempURL as virtual media CDROM"]; Conductor -> iLO [label = "Sets boot device to CDROM"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> iLO [label = "Power on the node"]; iLO -> Swift [label = "Downloads boot ISO"]; iLO -> Baremetal [label = "Boots the instance kernel/ramdisk from iLO virtual media CDROM"]; Baremetal -> Baremetal [label = "Instance kernel finds root partition and continues booting from disk"]; } Localboot with HTTP(S) based deploy ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Webserver; Conductor; Baremetal; Swift; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Conductor [label = "Creates the FAT32 image containing ironic API URL and driver name"]; Conductor -> Swift [label = "Uploads the FAT32 image"]; Conductor -> Conductor [label = "Generates swift tempURL for FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image swift tempURL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO URL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Webserver [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> Webserver [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to disk"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> Baremetal [label = "Sets boot device to disk"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> Baremetal [label = "Power on the node"]; Baremetal -> Baremetal [label = "Boot user image from disk"]; } Netboot in standalone ironic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Webserver; Conductor; Baremetal; ConductorWebserver; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Webserver [label = "Download user image"]; Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"]; Conductor -> ConductorWebserver[label = "Uploads the FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image URL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO URL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Webserver [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Exposes the disk over iSCSI"]; Conductor -> Conductor [label = "Connects to bare metal's disk over iSCSI and writes image"]; Conductor -> Conductor [label = "Generates the boot ISO"]; Conductor -> ConductorWebserver [label = "Uploads the boot ISO"]; Conductor -> iLO [label = "Attaches boot ISO URL as virtual media CDROM"]; Conductor -> iLO [label = "Sets boot device to CDROM"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> iLO [label = "Power on the node"]; iLO -> ConductorWebserver [label = "Downloads boot ISO"]; iLO -> Baremetal [label = "Boots the instance kernel/ramdisk from iLO virtual media CDROM"]; Baremetal -> Baremetal [label = "Instance kernel finds root partition and continues booting from disk"]; } Localboot in standalone ironic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. 
seqdiag:: :scale: 80 diagram { Webserver; Conductor; Baremetal; ConductorWebserver; IPA; iLO; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Conductor -> iLO [label = "Powers off the node"]; Conductor -> Conductor [label = "Creates the FAT32 image containing Ironic API URL and driver name"]; Conductor -> ConductorWebserver [label = "Uploads the FAT32 image"]; Conductor -> Conductor [label = "Generates URL for FAT32 image"]; Conductor -> iLO [label = "Attaches the FAT32 image URL as virtual media floppy"]; Conductor -> iLO [label = "Attaches the deploy ISO URL as virtual media CDROM"]; Conductor -> iLO [label = "Sets one time boot to CDROM"]; Conductor -> iLO [label = "Reboot the node"]; iLO -> Webserver [label = "Downloads deploy ISO"]; Baremetal -> iLO [label = "Boots deploy kernel/ramdisk from iLO virtual media CDROM"]; IPA -> Conductor [label = "Lookup node"]; Conductor -> IPA [label = "Provides node UUID"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> IPA [label = "Sends the user image HTTP(S) URL"]; IPA -> Webserver [label = "Retrieves the user image on bare metal"]; IPA -> IPA [label = "Writes user image to disk"]; IPA -> Conductor [label = "Heartbeat"]; Conductor -> Baremetal [label = "Sets boot device to disk"]; Conductor -> IPA [label = "Power off the node"]; Conductor -> Baremetal [label = "Power on the node"]; Baremetal -> Baremetal [label = "Boot user image from disk"]; } Activating iLO Advanced license as manual clean step ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ iLO driver can activate the iLO Advanced license key as a manual cleaning step. Any manual cleaning step can only be initiated when a node is in the ``manageable`` state. Once the manual cleaning is finished, the node will be put in the ``manageable`` state again. User can follow steps from :ref:`manual_cleaning` to initiate manual cleaning operation on a node. 
An example of a manual clean step with ``activate_license`` as the only clean step could be:: "clean_steps": [{ "interface": "management", "step": "activate_license", "args": { "ilo_license_key": "ABC12-XXXXX-XXXXX-XXXXX-YZ345" } }] The different attributes of ``activate_license`` clean step are as follows: .. csv-table:: :header: "Attribute", "Description" :widths: 30, 120 "``interface``", "Interface of clean step, here ``management``" "``step``", "Name of clean step, here ``activate_license``" "``args``", "Keyword-argument entry (: ) being passed to clean step" "``args.ilo_license_key``", "iLO Advanced license key to activate enterprise features. This is mandatory." Initiating firmware update as manual clean step ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ iLO driver can invoke secure firmware update as a manual cleaning step. Any manual cleaning step can only be initiated when a node is in the ``manageable`` state. Once the manual cleaning is finished, the node will be put in the ``manageable`` state again. A user can follow steps from :ref:`manual_cleaning` to initiate manual cleaning operation on a node. An example of a manual clean step with ``update_firmware`` as the only clean step could be:: "clean_steps": [{ "interface": "management", "step": "update_firmware", "args": { "firmware_update_mode": "ilo", "firmware_images":[ { "url": "file:///firmware_images/ilo/1.5/CP024444.scexe", "checksum": "a94e683ea16d9ae44768f0a65942234d", "component": "ilo" }, { "url": "swift://firmware_container/cpld2.3.rpm", "checksum": "", "component": "cpld" }, { "url": "http://my_address:port/firmwares/bios_vLatest.scexe", "checksum": "", "component": "bios" }, { "url": "https://my_secure_address_url/firmwares/chassis_vLatest.scexe", "checksum": "", "component": "chassis" }, { "url": "file:///home/ubuntu/firmware_images/power_pic/pmc_v3.0.bin", "checksum": "", "component": "power_pic" } ] } }] The different attributes of ``update_firmware`` clean step are as follows: .. 
csv-table:: :header: "Attribute", "Description" :widths: 30, 120 "``interface``", "Interface of clean step, here ``management``" "``step``", "Name of clean step, here ``update_firmware``" "``args``", "Keyword-argument entry (: ) being passed to clean step" "``args.firmware_update_mode``", "Mode (or mechanism) of out-of-band firmware update. Supported value is ``ilo``. This is mandatory." "``args.firmware_images``", "Ordered list of dictionaries of images to be flashed. This is mandatory." Each firmware image block is represented by a dictionary (JSON), in the form:: { "url": "", "checksum": "", "component": "" } All the fields in the firmware image block are mandatory. * The different types of firmware url schemes supported are: ``file``, ``http``, ``https`` and ``swift``. .. note:: This feature assumes that while using ``file`` url scheme the file path is on the conductor controlling the node. .. note:: The ``swift`` url scheme assumes the swift account of the ``service`` project. The ``service`` project (tenant) is a special project created in the Keystone system designed for the use of the core OpenStack services. When Ironic makes use of Swift for storage purpose, the account is generally ``service`` and the container is generally ``ironic`` and ``ilo`` driver uses a container named ``ironic_ilo_container`` for their own purpose. .. note:: While using firmware files with a ``.rpm`` extension, make sure the commands ``rpm2cpio`` and ``cpio`` are present on the conductor, as they are utilized to extract the firmware image from the package. * The firmware components that can be updated are: ``ilo``, ``cpld``, ``power_pic``, ``bios`` and ``chassis``. * The firmware images will be updated in the order given by the operator. If there is any error during processing of any of the given firmware images provided in the list, none of the firmware updates will occur. The processing error could happen during image download, image checksum verification or image extraction. 
The logic is to process each of the firmware files and update them on the devices only if all the files are processed successfully. If, during the update (uploading and flashing) process, an update fails, then the remaining updates, if any, in the list will be aborted. But it is recommended to triage and fix the failure and re-attempt the manual clean step ``update_firmware`` for the aborted ``firmware_images``. The devices for which the firmwares have been updated successfully would start functioning using their newly updated firmware. * As a troubleshooting guidance on the complete process, check Ironic conductor logs carefully to see if there are any firmware processing or update related errors which may help in root causing or gain an understanding of where things were left off or where things failed. You can then fix or work around and then try again. A common cause of update failure is HPE Secure Digital Signature check failure for the firmware image file. * To compute ``md5`` checksum for your image file, you can use the following command:: $ md5sum image.rpm 66cdb090c80b71daa21a67f06ecd3f33 image.rpm Smart Update Manager (SUM) based firmware update ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The firmware update based on `SUM`_ is an inband clean step supported by iLO driver. The firmware update is performed on all or list of user specified firmware components on the node. Refer to `SUM User Guide`_ to get more information on SUM based firmware update. ``update_firmware_sum`` clean step requires the agent ramdisk with ``Proliant Hardware Manager`` from the proliantutils version 2.5.0 or higher. See `DIB support for Proliant Hardware Manager`_ to create the agent ramdisk with ``Proliant Hardware Manager``. The attributes of ``update_firmware_sum`` clean step are as follows: .. 
csv-table:: :header: "Attribute", "Description" :widths: 30, 120 "``interface``", "Interface of the clean step, here ``management``" "``step``", "Name of the clean step, here ``update_firmware_sum``" "``args``", "Keyword-argument entry (: ) being passed to the clean step" The keyword arguments used for the clean step are as follows: * ``url``: URL of SPP (Service Pack for Proliant) ISO. It is mandatory. The URL schemes supported are ``http``, ``https`` and ``swift``. * ``checksum``: MD5 checksum of SPP ISO to verify the image. It is mandatory. * ``components``: List of filenames of the firmware components to be flashed. It is optional. If not provided, the firmware update is performed on all the firmware components. The clean step performs an update on all or a list of firmware components and returns the SUM log files. The log files include ``hpsum_log.txt`` and ``hpsum_detail_log.txt`` which holds the information about firmware components, firmware version for each component and their update status. The log object will be named with the following pattern:: [_]_update_firmware_sum_.tar.gz Refer to :ref:`retrieve_deploy_ramdisk_logs` for more information on enabling and viewing the logs returned from the ramdisk. An example of ``update_firmware_sum`` clean step: .. code-block:: json { "interface": "management", "step": "update_firmware_sum", "args": { "url": "http://my_address:port/SPP.iso", "checksum": "abcdefxyz", "components": ["CP024356.scexe", "CP008097.exe"] } } The clean step fails if there is any error in the processing of clean step arguments. The processing error could happen during validation of components' file extension, image download, image checksum verification or image extraction. In case of a failure, check Ironic conductor logs carefully to see if there are any validation or firmware processing related errors which may help in root cause analysis or gaining an understanding of where things were left off or where things failed. 
You can then fix or work around and then try again. .. warning:: This feature is officially supported only with RHEL and SUSE based IPA ramdisk. Refer to `SUM`_ for supported OS versions for specific SUM version. .. note:: Refer `Guidelines for SPP ISO`_ for steps to get SPP (Service Pack for ProLiant) ISO. RAID Support ^^^^^^^^^^^^ The inband RAID functionality is supported by iLO driver. See :ref:`raid` for more information. The Bare Metal service updates the node with the following information after successful configuration of RAID: * Node ``properties/local_gb`` is set to the size of root volume. * Node ``properties/root_device`` is filled with ``wwn`` details of root volume. It is used by iLO driver as root device hint during provisioning. * The value of raid level of root volume is added as ``raid_level`` capability to the node's ``capabilities`` parameter within ``properties`` field. The operator can specify the ``raid_level`` capability in nova flavor for node to be selected for scheduling:: nova flavor-key ironic-test set capabilities:raid_level="1+0" nova boot --flavor ironic-test --image test-image instance-1 .. _DIB_raid_support: DIB support for Proliant Hardware Manager ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Install ``ironic-python-agent-builder`` following the guide [1]_ To create an agent ramdisk with ``Proliant Hardware Manager``, use the ``proliant-tools`` element in DIB:: ironic-python-agent-builder -o proliant-agent-ramdisk -e proliant-tools fedora Disk Erase Support ^^^^^^^^^^^^^^^^^^ ``erase_devices`` is an inband clean step supported by iLO driver. It performs erase on all the disks including the disks visible to OS as well as the raw disks visible to the Smart Storage Administrator (SSA). This inband clean step requires ``ssacli`` utility starting from version ``2.60-19.0`` to perform the erase on physical disks. See the `ssacli documentation`_ for more information on ssacli utility and different erase methods supported by SSA. 
The disk erasure via ``shred`` is used to erase disks visible to the OS and its implementation is available in Ironic Python Agent. The raw disks connected to the Smart Storage Controller are erased using Sanitize erase which is a ssacli supported erase method. If Sanitize erase is not supported on the Smart Storage Controller the disks are erased using One-pass erase (overwrite with zeros). This clean step is supported when the agent ramdisk contains the ``Proliant Hardware Manager`` from the proliantutils version 2.3.0 or higher. This clean step is performed as part of automated cleaning and it is disabled by default. See :ref:`InbandvsOutOfBandCleaning` for more information on enabling/disabling a clean step. Install ``ironic-python-agent-builder`` following the guide [1]_ To create an agent ramdisk with ``Proliant Hardware Manager``, use the ``proliant-tools`` element in DIB:: ironic-python-agent-builder -o proliant-agent-ramdisk -e proliant-tools fedora See the `proliant-tools`_ for more information on creating agent ramdisk with ``proliant-tools`` element in DIB. Firmware based UEFI iSCSI boot from volume support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ With Gen9 (UEFI firmware version 1.40 or higher) and Gen10 HPE Proliant servers, the driver supports firmware based UEFI boot of an iSCSI cinder volume. This feature requires the node to be configured to boot in ``UEFI`` boot mode, as well as user image should be ``UEFI`` bootable image, and ``PortFast`` needs to be enabled in switch configuration for immediate spanning tree forwarding state so it wouldn't take much time setting the iSCSI target as persistent device. The driver does not support this functionality when in ``bios`` boot mode. In case the node is configured with ``ilo-pxe`` or ``ilo-ipxe`` as boot interface and the boot mode configured on the bare metal is ``bios``, the iscsi boot from volume is performed using iPXE. See :doc:`/admin/boot-from-volume` for more details. 
To use this feature, configure the boot mode of the bare metal to ``uefi`` and configure the corresponding ironic node using the steps given in :doc:`/admin/boot-from-volume`. In a cloud environment with nodes configured to boot from ``bios`` and ``uefi`` boot modes, the virtual media driver only supports uefi boot mode, and attempting to use iscsi boot at the same time with a bios volume will result in an error. BIOS configuration support ^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``ilo`` and ``ilo5`` hardware types support ``ilo`` BIOS interface. The support includes providing manual clean steps *apply_configuration* and *factory_reset* to manage supported BIOS settings on the node. See :ref:`bios` for more details and examples. .. note:: Prior to the Stein release, the user was required to reboot the node manually in order for the settings to take effect. Starting with the Stein release, iLO drivers reboot the node after running clean steps related to the BIOS configuration. The BIOS settings are cached and the clean step is marked as success only if all the requested settings are applied without any failure. If application of any of the settings fails, the clean step is marked as failed and the settings are not cached. Configuration ~~~~~~~~~~~~~ Following are the supported BIOS settings and the corresponding brief description for each of the settings. For a detailed description please refer to `HPE Integrated Lights-Out REST API Documentation `_. - ``AdvancedMemProtection``: Configure additional memory protection with ECC (Error Checking and Correcting). Allowed values are ``AdvancedEcc``, ``OnlineSpareAdvancedEcc``, ``MirroredAdvancedEcc``. - ``AutoPowerOn``: Configure the server to automatically power on when AC power is applied to the system. Allowed values are ``AlwaysPowerOn``, ``AlwaysPowerOff``, ``RestoreLastState``. - ``BootMode``: Select the boot mode of the system. 
Allowed values are ``Uefi``, ``LegacyBios`` - ``BootOrderPolicy``: Configure how the system attempts to boot devices per the Boot Order when no bootable device is found. Allowed values are ``RetryIndefinitely``, ``AttemptOnce``, ``ResetAfterFailed``. - ``CollabPowerControl``: Enables the Operating System to request processor frequency changes even if the Power Regulator option on the server configured for Dynamic Power Savings Mode. Allowed values are ``Enabled``, ``Disabled``. - ``DynamicPowerCapping``: Configure when the System ROM executes power calibration during the boot process. Allowed values are ``Enabled``, ``Disabled``, ``Auto``. - ``DynamicPowerResponse``: Enable the System BIOS to control processor performance and power states depending on the processor workload. Allowed values are ``Fast``, ``Slow``. - ``IntelligentProvisioning``: Enable or disable the Intelligent Provisioning functionality. Allowed values are ``Enabled``, ``Disabled``. - ``IntelPerfMonitoring``: Exposes certain chipset devices that can be used with the Intel Performance Monitoring Toolkit. Allowed values are ``Enabled``, ``Disabled``. - ``IntelProcVtd``: Hypervisor or operating system supporting this option can use hardware capabilities provided by Intel's Virtualization Technology for Directed I/O. Allowed values are ``Enabled``, ``Disabled``. - ``IntelQpiFreq``: Set the QPI Link frequency to a lower speed. Allowed values are ``Auto``, ``MinQpiSpeed``. - ``IntelTxt``: Option to modify Intel TXT support. Allowed values are ``Enabled``, ``Disabled``. - ``PowerProfile``: Set the power profile to be used. Allowed values are ``BalancedPowerPerf``, ``MinPower``, ``MaxPerf``, ``Custom``. - ``PowerRegulator``: Determines how to regulate the power consumption. Allowed values are ``DynamicPowerSavings``, ``StaticLowPower``, ``StaticHighPerf``, ``OsControl``. - ``ProcAes``: Enable or disable the Advanced Encryption Standard Instruction Set (AES-NI) in the processor. 
Allowed values are ``Enabled``, ``Disabled``. - ``ProcCoreDisable``: Disable processor cores using Intel's Core Multi-Processing (CMP) Technology. Allowed values are Integers ranging from ``0`` to ``24``. - ``ProcHyperthreading``: Enable or disable Intel Hyperthreading. Allowed values are ``Enabled``, ``Disabled``. - ``ProcNoExecute``: Protect your system against malicious code and viruses. Allowed values are ``Enabled``, ``Disabled``. - ``ProcTurbo``: Enables the processor to transition to a higher frequency than the processor's rated speed using Turbo Boost Technology if the processor has available power and is within temperature specifications. Allowed values are ``Enabled``, ``Disabled``. - ``ProcVirtualization``: Enables or Disables a hypervisor or operating system supporting this option to use hardware capabilities provided by Intel's Virtualization Technology. Allowed values are ``Enabled``, ``Disabled``. - ``SecureBootStatus``: The current state of Secure Boot configuration. Allowed values are ``Enabled``, ``Disabled``. .. note:: This setting is read-only and can't be modified with ``apply_configuration`` clean step. - ``Sriov``: If enabled, SR-IOV support enables a hypervisor to create virtual instances of a PCI-express device, potentially increasing performance. If enabled, the BIOS allocates additional resources to PCI-express devices. Allowed values are ``Enabled``, ``Disabled``. - ``ThermalConfig``: select the fan cooling solution for the system. Allowed values are ``OptimalCooling``, ``IncreasedCooling``, ``MaxCooling`` - ``ThermalShutdown``: Control the reaction of the system to caution level thermal events. Allowed values are ``Enabled``, ``Disabled``. - ``TpmState``: Current TPM device state. Allowed values are ``NotPresent``, ``PresentDisabled``, ``PresentEnabled``. .. note:: This setting is read-only and can't be modified with ``apply_configuration`` clean step. - ``TpmType``: Current TPM device type. 
Allowed values are ``NoTpm``, ``Tpm12``, ``Tpm20``, ``Tm10``. .. note:: This setting is read-only and can't be modified with ``apply_configuration`` clean step. - ``UefiOptimizedBoot``: Enables or Disables the System BIOS boot using native UEFI graphics drivers. Allowed values are ``Enabled``, ``Disabled``. - ``WorkloadProfile``: Change the Workload Profile to accommodate your desired workload. Allowed values are ``GeneralPowerEfficientCompute``, ``GeneralPeakFrequencyCompute``, ``GeneralThroughputCompute``, ``Virtualization-PowerEfficient``, ``Virtualization-MaxPerformance``, ``LowLatency``, ``MissionCritical``, ``TransactionalApplicationProcessing``, ``HighPerformanceCompute``, ``DecisionSupport``, ``GraphicProcessing``, ``I/OThroughput``, ``Custom`` .. note:: This setting is only applicable to ProLiant Gen10 servers with iLO 5 management systems. Certificate based validation in iLO ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The driver supports validation of certificates on the HPE Proliant servers. The path to certificate file needs to be appropriately set in ``ca_file`` in the node's ``driver_info``. To update SSL certificates into iLO, refer to `HPE Integrated Lights-Out Security Technology Brief `_. Use iLO hostname or IP address as a 'Common Name (CN)' while generating Certificate Signing Request (CSR). Use the same value as `ilo_address` while enrolling node to Bare Metal service to avoid SSL certificate validation errors related to hostname mismatch. Rescue mode support ^^^^^^^^^^^^^^^^^^^ The hardware type ``ilo`` supports rescue functionality. Rescue operation can be used to boot nodes into a rescue ramdisk so that the ``rescue`` user can access the node. Please refer to :doc:`/admin/rescue` for detailed explanation of rescue feature. Inject NMI support ^^^^^^^^^^^^^^^^^^ The management interface ``ilo`` supports injection of non-maskable interrupt (NMI) to a bare metal. Following command can be used to inject NMI on a server: .. 
code-block:: console openstack baremetal node inject nmi Following command can be used to inject NMI via Compute service: .. code-block:: console openstack server dump create .. note:: This feature is supported on HPE ProLiant Gen9 servers and beyond. Soft power operation support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The power interface ``ilo`` supports soft power off and soft reboot operations on a bare metal. Following commands can be used to perform soft power operations on a server: .. code-block:: console openstack baremetal node reboot --soft \ [--power-timeout ] openstack baremetal node power off --soft \ [--power-timeout ] .. note:: The configuration ``[conductor]soft_power_off_timeout`` is used as a default timeout value when no timeout is provided while invoking hard or soft power operations. .. note:: Server POST state is used to track the power status of HPE ProLiant Gen9 servers and beyond. Out of Band RAID Support ^^^^^^^^^^^^^^^^^^^^^^^^ With Gen10 HPE Proliant servers and later the ``ilo5`` hardware type supports firmware based RAID configuration as a clean step. This feature requires the node to be configured to ``ilo5`` hardware type and its raid interface to be ``ilo5``. See :ref:`raid` for more information. After a successful RAID configuration, the Bare Metal service will update the node with the following information: * Node ``properties/local_gb`` is set to the size of root volume. * Node ``properties/root_device`` is filled with ``wwn`` details of root volume. It is used by iLO driver as root device hint during provisioning. Later the value of raid level of root volume can be added in ``baremetal-with-RAID10`` (RAID10 for raid level 10) resource class. 
And consequently flavor needs to be updated to request the resource class to create the server using selected node:: openstack baremetal node set test_node --resource-class \ baremetal-with-RAID10 openstack flavor set --property \ resources:CUSTOM_BAREMETAL_WITH_RAID10=1 test-flavor openstack server create --flavor test-flavor --image test-image instance-1 .. note:: Supported raid levels for ``ilo5`` hardware type are: 0, 1, 5, 6, 10, 50, 60 IPv6 support ^^^^^^^^^^^^ With the IPv6 support in ``proliantutils>=2.8.0``, nodes can be enrolled into the baremetal service using the iLO IPv6 addresses. .. code-block:: console openstack baremetal node create --driver ilo --deploy-interface direct \ --driver-info ilo_address=2001:0db8:85a3:0000:0000:8a2e:0370:7334 \ --driver-info ilo_username=test-user \ --driver-info ilo_password=test-password \ --driver-info ilo_deploy_iso=test-iso \ --driver-info ilo_rescue_iso=test-iso .. note:: No configuration changes (in e.g. ironic.conf) are required in order to support IPv6. Out of Band Sanitize Disk Erase Support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ With Gen10 HPE Proliant servers and later the ``ilo5`` hardware type supports firmware based sanitize disk erase as a clean step. This feature requires the node to be configured to ``ilo5`` hardware type and its management interface to be ``ilo5``. The possible erase pattern its supports are: * For HDD - 'overwrite', 'zero', 'crypto' * For SSD - 'block', 'zero', 'crypto' The default erase pattern are, for HDD, 'overwrite' and for SSD, 'block'. .. note:: In average 300GB HDD with default pattern "overwrite" would take approx. 9 hours and 300GB SSD with default pattern "block" would take approx. 30 seconds to complete the erase. .. _`ssacli documentation`: https://support.hpe.com/hpsc/doc/public/display?docId=c03909334 .. _`proliant-tools`: https://docs.openstack.org/diskimage-builder/latest/elements/proliant-tools/README.html .. 
_`HPE iLO4 User Guide`: https://h20566.www2.hpe.com/hpsc/doc/public/display?docId=c03334051 .. _`iLO 4 management engine`: https://www.hpe.com/us/en/servers/integrated-lights-out-ilo.html .. _`iLO 5 management engine`: https://www.hpe.com/us/en/servers/integrated-lights-out-ilo.html#innovations .. _`Redfish`: https://www.dmtf.org/standards/redfish .. _`Gen10 wiki section`: https://wiki.openstack.org/wiki/Ironic/Drivers/iLODrivers/master#Enabling_ProLiant_Gen10_systems_in_Ironic .. _`Guidelines for SPP ISO`: https://h17007.www1.hpe.com/us/en/enterprise/servers/products/service_pack/spp .. _`SUM`: https://h17007.www1.hpe.com/us/en/enterprise/servers/products/service_pack/hpsum/index.aspx .. _`SUM User Guide`: https://h20565.www2.hpe.com/hpsc/doc/public/display?docId=c05210448 .. [1] `ironic-python-agent-builder`: https://docs.openstack.org/ironic-python-agent-builder/latest/install/index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/drivers/intel-ipmi.rst0000644000175000017500000001277300000000000024002 0ustar00coreycorey00000000000000================= Intel IPMI driver ================= Overview ======== The ``intel-ipmi`` hardware type is same as the :doc:`ipmitool` hardware type except for the support of Intel Speed Select Performance Profile (Intel SST-PP_) feature. Intel SST-PP allows a server to run different workloads by configuring the CPU to run at 3 distinct operating points or profiles. 
Intel SST-PP supports three configuration levels: * 0 - Intel SST-PP Base Config * 1 - Intel SST-PP Config 1 * 2 - Intel SST-PP Config 2 The following table shows the list of active cores and their base frequency at different SST-PP config levels: ============== ========= =================== Config Cores Base Freq (GHz) ============== ========= =================== Base 24 2.4 Config 1 20 2.5 Config 2 16 2.7 ============== ========= =================== This configuration is managed by the management interface ``intel-ipmitool`` for IntelIPMI hardware. IntelIPMI manages nodes by using IPMI_ (Intelligent Platform Management Interface) protocol versions 2.0 or 1.5. It uses the IPMItool_ utility which is an open-source command-line interface (CLI) for controlling IPMI-enabled devices. Glossary ======== * IPMI - Intelligent Platform Management Interface. * Intel SST-PP - Intel Speed Select Performance Profile. Enabling the IntelIPMI hardware type ==================================== Please see :doc:`/install/configure-ipmi` for the required dependencies. #. To enable ``intel-ipmi`` hardware, add the following configuration to your ``ironic.conf``: .. code-block:: ini [DEFAULT] enabled_hardware_types = intel-ipmi enabled_management_interfaces = intel-ipmitool #. Restart the Ironic conductor service:: sudo service ironic-conductor restart # Or, for RDO: sudo systemctl restart openstack-ironic-conductor Registering a node with the IntelIPMI driver ============================================ Nodes configured to use the IntelIPMI drivers should have the ``driver`` field set to ``intel-ipmi``. All the configuration value required for IntelIPMI is the same as the IPMI hardware type except the management interface which is ``intel-ipmitool``. Refer :doc:`ipmitool` for details. The ``openstack baremetal node create`` command can be used to enroll a node with an IntelIPMI driver. For example:: openstack baremetal node create --driver intel-ipmi \ --driver-info ipmi_address=
\ --driver-info ipmi_username= \ --driver-info ipmi_password= Features of the ``intel-ipmi`` hardware type ============================================ Intel SST-PP ^^^^^^^^^^^^^ A node with Intel SST-PP can be configured to use it via ``configure_intel_speedselect`` deploy step. This deploy accepts: * ``intel_speedselect_config``: Hexadecimal code of Intel SST-PP configuration. Accepted values are '0x00', '0x01', '0x02'. These values correspond to `Intel SST-PP Config Base`, `Intel SST-PP Config 1`, `Intel SST-PP Config 2` respectively. The input value must be a string. * ``socket_count``: Number of sockets in the node. The input value must be a positive integer (1 by default). The deploy step issues an IPMI command with the raw code for each socket in the node to set the requested configuration. A reboot is required to reflect the changes. Each configuration profile is mapped to traits that Ironic understands. Please note that these names are used for example purpose only. Any name can be used. Only the parameter value should match the deploy step ``configure_intel_speedselect``. * 0 - ``CUSTOM_INTEL_SPEED_SELECT_CONFIG_BASE`` * 1 - ``CUSTOM_INTEL_SPEED_SELECT_CONFIG_1`` * 2 - ``CUSTOM_INTEL_SPEED_SELECT_CONFIG_2`` Now to configure a node with Intel SST-PP while provisioning, create deploy templates for each profiles in Ironic. .. 
code-block:: console openstack baremetal deploy template create \ CUSTOM_INTEL_SPEED_SELECT_CONFIG_BASE \ --steps '[{"interface": "management", "step": "configure_intel_speedselect", "args": {"intel_speedselect_config": "0x00", "socket_count": 2}, "priority": 150}]' openstack baremetal deploy template create \ CUSTOM_INTEL_SPEED_SELECT_CONFIG_1 \ --steps '[{"interface": "management", "step": "configure_intel_speedselect", "args": {"intel_speedselect_config": "0x01", "socket_count": 2}, "priority": 150}]' openstack baremetal deploy template create \ CUSTOM_INTEL_SPEED_SELECT_CONFIG_2 \ --steps '[{"interface": "management", "step": "configure_intel_speedselect", "args": {"intel_speedselect_config": "0x02", "socket_count": 2}, "priority": 150}]' All Intel SST-PP capable nodes should have these traits associated. .. code-block:: console openstack baremetal node add trait node-0 \ CUSTOM_INTEL_SPEED_SELECT_CONFIG_BASE \ CUSTOM_INTEL_SPEED_SELECT_CONFIG_1 \ CUSTOM_INTEL_SPEED_SELECT_CONFIG_2 To trigger the Intel SST-PP configuration during node provisioning, one of the traits can be added to the flavor. .. code-block:: console openstack flavor set baremetal --property trait:CUSTOM_INTEL_SPEED_SELECT_CONFIG_1=required Finally create a server with ``baremetal`` flavor to provision a baremetal node with Intel SST-PP profile *Config 1*. .. _IPMI: https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface .. _IPMItool: https://sourceforge.net/projects/ipmitool/ .. 
_SST-PP: https://www.intel.com/content/www/us/en/architecture-and-technology/speed-select-technology-article.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/drivers/ipa.rst0000644000175000017500000001264400000000000022501 0ustar00coreycorey00000000000000=================== Ironic Python Agent =================== Overview ======== *Ironic Python Agent* (also often called *IPA* or just *agent*) is a Python-based agent which handles *ironic* bare metal nodes in a variety of actions such as inspect, configure, clean and deploy images. IPA is distributed over nodes and runs, inside of a ramdisk, the process of booting this ramdisk on the node. For more information see the :ironic-python-agent-doc:`ironic-python-agent documentation <>`. Drivers ======= Starting with the Kilo release all deploy interfaces (except for fake ones) are using IPA. There are two types of them: * For nodes using the :ref:`iscsi-deploy` interface, IPA exposes the root hard drive as an iSCSI share and calls back to the ironic conductor. The conductor mounts the share and copies an image there. It then signals back to IPA for post-installation actions like setting up a bootloader for local boot support. * For nodes using the :ref:`direct-deploy` interface, the conductor prepares a swift temporary URL for an image. IPA then handles the whole deployment process: downloading an image from swift, putting it on the machine and doing any post-deploy actions. Which one to choose depends on your environment. :ref:`iscsi-deploy` puts higher load on conductors, :ref:`direct-deploy` currently requires the whole image to fit in the node's memory, except when using raw images. It also requires :doc:`/install/configure-glance-swift`. .. todo: other differences? 
Requirements ------------ Using IPA requires it to be present and configured on the deploy ramdisk, see :ref:`deploy-ramdisk` Using proxies for image download ================================ Overview -------- When using the :ref:`direct-deploy`, IPA supports using proxies for downloading the user image. For example, this could be used to speed up download by using a caching proxy. Steps to enable proxies ----------------------- #. Configure the proxy server of your choice (for example `Squid `_, `Apache Traffic Server `_). This will probably require you to configure the proxy server to cache the content even if the requested URL contains a query, and to raise the maximum cached file size as images can be pretty big. If you have HTTPS enabled in swift (see :swift-doc:`swift deployment guide `), it is possible to configure the proxy server to talk to swift via HTTPS to download the image, store it in the cache unencrypted and return it to the node via HTTPS again. Because the image will be stored unencrypted in the cache, this approach is recommended for images that do not contain sensitive information. Refer to your proxy server's documentation to complete this step. #. Set ``[glance]swift_temp_url_cache_enabled`` in the ironic conductor config file to ``True``. The conductor will reuse the cached swift temporary URLs instead of generating new ones each time an image is requested, so that the proxy server does not create new cache entries for the same image, based on the query part of the URL (as it contains some query parameters that change each time it is regenerated). #. Set ``[glance]swift_temp_url_expected_download_start_delay`` option in the ironic conductor config file to the value appropriate for your hardware. This is the delay (in seconds) from the time of the deploy request (when the swift temporary URL is generated) to when the URL is used for the image download. 
You can think of it as roughly the time needed for the IPA ramdisk to start up and begin the download.
For example, the below command sets this configuration in a specific node:: openstack baremetal node set --driver-info deploy_forces_oob_reboot=True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/drivers/ipmitool.rst0000644000175000017500000001663200000000000023565 0ustar00coreycorey00000000000000=========== IPMI driver =========== Overview ======== The ``ipmi`` hardware type manage nodes by using IPMI_ (Intelligent Platform Management Interface) protocol versions 2.0 or 1.5. It uses the IPMItool_ utility which is an open-source command-line interface (CLI) for controlling IPMI-enabled devices. Glossary ======== * IPMI_ - Intelligent Platform Management Interface. * IPMB - Intelligent Platform Management Bus/Bridge. * BMC_ - Baseboard Management Controller. * RMCP - Remote Management Control Protocol. Enabling the IPMI hardware type =============================== Please see :doc:`/install/configure-ipmi` for the required dependencies. #. The ``ipmi`` hardware type is enabled by default starting with the Ocata release. To enable it explicitly, add the following to your ``ironic.conf``: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi enabled_management_interfaces = ipmitool,noop enabled_power_interfaces = ipmitool Optionally, enable the :doc:`vendor passthru interface ` and either or both :doc:`console interfaces `: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi enabled_console_interfaces = ipmitool-socat,ipmitool-shellinabox,no-console enabled_management_interfaces = ipmitool,noop enabled_power_interfaces = ipmitool enabled_vendor_interfaces = ipmitool,no-vendor #. Restart the Ironic conductor service. Please see :doc:`/install/enabling-drivers` for more details. Registering a node with the IPMI driver ======================================= Nodes configured to use the IPMItool drivers should have the ``driver`` field set to ``ipmi``. 
The following configuration value is required and has to be added to the node's ``driver_info`` field: - ``ipmi_address``: The IP address or hostname of the BMC. Other options may be needed to match the configuration of the BMC, the following options are optional, but in most cases, it's considered a good practice to have them set: - ``ipmi_username``: The username to access the BMC; defaults to *NULL* user. - ``ipmi_password``: The password to access the BMC; defaults to *NULL*. - ``ipmi_port``: The remote IPMI RMCP port. By default ipmitool will use the port *623*. .. note:: It is highly recommend that you setup a username and password for your BMC. The ``openstack baremetal node create`` command can be used to enroll a node with an IPMItool-based driver. For example:: openstack baremetal node create --driver ipmi \ --driver-info ipmi_address=
\ --driver-info ipmi_username= \ --driver-info ipmi_password= Advanced configuration ====================== When a simple configuration such as providing the ``address``, ``username`` and ``password`` is not enough, the IPMItool driver contains many other options that can be used to address special usages. Single/Double bridging functionality ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: A version of IPMItool higher or equal to 1.8.12 is required to use the bridging functionality. There are two different bridging functionalities supported by the IPMItool-based drivers: *single* bridge and *dual* bridge. The following configuration values need to be added to the node's ``driver_info`` field so bridging can be used: - ``ipmi_bridging``: The bridging type; default is *no*; other supported values are *single* for single bridge or *dual* for double bridge. - ``ipmi_local_address``: The local IPMB address for bridged requests. Required only if ``ipmi_bridging`` is set to *single* or *dual*. This configuration is optional, if not specified it will be auto discovered by IPMItool. - ``ipmi_target_address``: The destination address for bridged requests. Required only if ``ipmi_bridging`` is set to *single* or *dual*. - ``ipmi_target_channel``: The destination channel for bridged requests. Required only if ``ipmi_bridging`` is set to *single* or *dual*. Double bridge specific options: - ``ipmi_transit_address``: The transit address for bridged requests. Required only if ``ipmi_bridging`` is set to *dual*. - ``ipmi_transit_channel``: The transit channel for bridged requests. Required only if ``ipmi_bridging`` is set to *dual*. The parameter ``ipmi_bridging`` should specify the type of bridging required: *single* or *dual* to access the bare metal node. If the parameter is not specified, the default value will be set to *no*. The ``openstack baremetal node set`` command can be used to set the required bridging information to the Ironic node enrolled with the IPMItool driver. 
For example: * Single Bridging:: openstack baremetal node set \ --driver-info ipmi_local_address=
\ --driver-info ipmi_bridging=single \ --driver-info ipmi_target_channel= \ --driver-info ipmi_target_address= * Double Bridging:: openstack baremetal node set \ --driver-info ipmi_local_address=
\ --driver-info ipmi_bridging=dual \ --driver-info ipmi_transit_channel= \ --driver-info ipmi_transit_address= \ --driver-info ipmi_target_channel= \ --driver-info ipmi_target_address= Changing the version of the IPMI protocol ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The IPMItool-based drivers works with the versions *2.0* and *1.5* of the IPMI protocol. By default, the version *2.0* is used. In order to change the IPMI protocol version in the bare metal node, the following option needs to be set to the node's ``driver_info`` field: - ``ipmi_protocol_version``: The version of the IPMI protocol; default is *2.0*. Supported values are *1.5* or *2.0*. The ``openstack baremetal node set`` command can be used to set the desired protocol version:: openstack baremetal node set --driver-info ipmi_protocol_version= .. warning:: Version *1.5* of the IPMI protocol does not support encryption. Therefore, it is highly recommended that version 2.0 is used. Static boot order configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some hardware is known to misbehave when changing the boot device through the IPMI protocol. To work around it you can use the ``noop`` management interface implementation with the ``ipmi`` hardware type. In this case the Bare Metal service will not change the boot device for you, leaving the pre-configured boot order. For example, in case of the :ref:`pxe-boot`: #. Via any available means configure the boot order on the node as follows: #. Boot from PXE/iPXE on the provisioning NIC. .. warning:: If it is not possible to limit network boot to only provisioning NIC, make sure that no other DHCP/PXE servers are accessible by the node. #. Boot from hard drive. #. Make sure the ``noop`` management interface is enabled, see example in `Enabling the IPMI hardware type`_. #. Change the node to use the ``noop`` management interface:: openstack baremetal node set --management-interface noop .. TODO(lucasagomes): Write about privilege level .. 
TODO(lucasagomes): Write about force boot device .. _IPMItool: https://sourceforge.net/projects/ipmitool/ .. _IPMI: https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface .. _BMC: https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface#Baseboard_management_controller ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/drivers/irmc.rst0000644000175000017500000005245000000000000022661 0ustar00coreycorey00000000000000.. _irmc: =========== iRMC driver =========== Overview ======== The iRMC driver enables control FUJITSU PRIMERGY via ServerView Common Command Interface (SCCI). Support for FUJITSU PRIMERGY servers consists of the ``irmc`` hardware type and a few hardware interfaces specific for that hardware type. Prerequisites ============= * Install `python-scciclient `_ and `pysnmp `_ packages:: $ pip install "python-scciclient>=0.7.2" pysnmp Hardware Type ============= The ``irmc`` hardware type is available for FUJITSU PRIMERGY servers. For information on how to enable the ``irmc`` hardware type, see :ref:`enable-hardware-types`. Hardware interfaces ^^^^^^^^^^^^^^^^^^^ The ``irmc`` hardware type overrides the selection of the following hardware interfaces: * bios Supports ``irmc`` and ``no-bios``. The default is ``irmc``. * boot Supports ``irmc-virtual-media``, ``irmc-pxe``, and ``pxe``. The default is ``irmc-virtual-media``. The ``irmc-virtual-media`` boot interface enables the virtual media based deploy with IPA (Ironic Python Agent). .. warning:: We deprecated the ``pxe`` boot interface when used with ``irmc`` hardware type. Support for this interface will be removed in the future. Instead, use ``irmc-pxe``. ``irmc-pxe`` boot interface was introduced in Pike. * console Supports ``ipmitool-socat``, ``ipmitool-shellinabox``, and ``no-console``. The default is ``ipmitool-socat``. 
* inspect Supports ``irmc``, ``inspector``, and ``no-inspect``. The default is ``irmc``. .. note:: :ironic-inspector-doc:`Ironic Inspector <>` needs to be present and configured to use ``inspector`` as the inspect interface. * management Supports only ``irmc``. * power Supports ``irmc``, which enables power control via ServerView Common Command Interface (SCCI), by default. Also supports ``ipmitool``. * raid Supports ``irmc``, ``no-raid`` and ``agent``. The default is ``no-raid``. For other hardware interfaces, ``irmc`` hardware type supports the Bare Metal reference interfaces. For more details about the hardware interfaces and how to enable the desired ones, see :ref:`enable-hardware-interfaces`. Here is a complete configuration example with most of the supported hardware interfaces enabled for ``irmc`` hardware type. .. code-block:: ini [DEFAULT] enabled_hardware_types = irmc enabled_bios_interfaces = irmc enabled_boot_interfaces = irmc-virtual-media,irmc-pxe enabled_console_interfaces = ipmitool-socat,ipmitool-shellinabox,no-console enabled_deploy_interfaces = iscsi,direct enabled_inspect_interfaces = irmc,inspector,no-inspect enabled_management_interfaces = irmc enabled_network_interfaces = flat,neutron enabled_power_interfaces = irmc enabled_raid_interfaces = no-raid,irmc enabled_storage_interfaces = noop,cinder enabled_vendor_interfaces = no-vendor,ipmitool Here is a command example to enroll a node with ``irmc`` hardware type. .. code-block:: console openstack baremetal node create \ --bios-interface irmc \ --boot-interface irmc-pxe \ --deploy-interface direct \ --inspect-interface irmc \ --raid-interface irmc Node configuration ^^^^^^^^^^^^^^^^^^ * Each node is configured for ``irmc`` hardware type by setting the following ironic node object's properties: - ``driver_info/irmc_address`` property to be ``IP address`` or ``hostname`` of the iRMC. - ``driver_info/irmc_username`` property to be ``username`` for the iRMC with administrator privileges. 
- ``driver_info/irmc_password`` property to be ``password`` for irmc_username. - ``properties/capabilities`` property to be ``boot_mode:uefi`` if UEFI boot is required. - ``properties/capabilities`` property to be ``secure_boot:true`` if UEFI Secure Boot is required. Please refer to `UEFI Secure Boot Support`_ for more information. * The following properties are also required if ``irmc-virtual-media`` boot interface is used: - ``driver_info/irmc_deploy_iso`` property to be either deploy iso file name, Glance UUID, or Image Service URL. - ``instance info/irmc_boot_iso`` property to be either boot iso file name, Glance UUID, or Image Service URL. This is optional property when ``boot_option`` is set to ``netboot``. * All of the nodes are configured by setting the following configuration options in the ``[irmc]`` section of ``/etc/ironic/ironic.conf``: - ``port``: Port to be used for iRMC operations; either 80 or 443. The default value is 443. Optional. - ``auth_method``: Authentication method for iRMC operations; either ``basic`` or ``digest``. The default value is ``basic``. Optional. - ``client_timeout``: Timeout (in seconds) for iRMC operations. The default value is 60. Optional. - ``sensor_method``: Sensor data retrieval method; either ``ipmitool`` or ``scci``. The default value is ``ipmitool``. Optional. * The following options are required if ``irmc-virtual-media`` boot interface is enabled: - ``remote_image_share_root``: Ironic conductor node's ``NFS`` or ``CIFS`` root path. The default value is ``/remote_image_share_root``. - ``remote_image_server``: IP of remote image server. - ``remote_image_share_type``: Share type of virtual media, either ``NFS`` or ``CIFS``. The default is ``CIFS``. - ``remote_image_share_name``: share name of ``remote_image_server``. The default value is ``share``. - ``remote_image_user_name``: User name of ``remote_image_server``. - ``remote_image_user_password``: Password of ``remote_image_user_name``. 
- ``remote_image_user_domain``: Domain name of ``remote_image_user_name``. * The following options are required if ``irmc`` inspect interface is enabled: - ``snmp_version``: SNMP protocol version; either ``v1``, ``v2c`` or ``v3``. The default value is ``v2c``. Optional. - ``snmp_port``: SNMP port. The default value is ``161``. Optional. - ``snmp_community``: SNMP community required for versions ``v1`` and ``v2c``. The default value is ``public``. Optional. - ``snmp_security``: SNMP security name required for version ``v3``. Optional. * Each node can be further configured by setting the following ironic node object's properties which override the parameter values in ``[irmc]`` section of ``/etc/ironic/ironic.conf``: - ``driver_info/irmc_port`` property overrides ``port``. - ``driver_info/irmc_auth_method`` property overrides ``auth_method``. - ``driver_info/irmc_client_timeout`` property overrides ``client_timeout``. - ``driver_info/irmc_sensor_method`` property overrides ``sensor_method``. - ``driver_info/irmc_snmp_version`` property overrides ``snmp_version``. - ``driver_info/irmc_snmp_port`` property overrides ``snmp_port``. - ``driver_info/irmc_snmp_community`` property overrides ``snmp_community``. - ``driver_info/irmc_snmp_security`` property overrides ``snmp_security``. Optional functionalities for the ``irmc`` hardware type ======================================================= UEFI Secure Boot Support ^^^^^^^^^^^^^^^^^^^^^^^^ The hardware type ``irmc`` supports secure boot deploy. .. warning:: Secure boot feature is not supported with ``pxe`` boot interface. The UEFI secure boot can be configured by adding ``secure_boot`` parameter, which is a boolean value. Enabling the secure boot is different when Bare Metal service is used with Compute service or without Compute service. 
The following sections describe both methods: * Enabling secure boot with Compute service: To enable secure boot we need to set a capability on the bare metal node and the bare metal flavor, for example:: openstack baremetal node set --property capabilities='secure_boot:true' openstack flavor set FLAVOR-NAME --property capabilities:secure_boot="true" * Enabling secure boot without Compute service: Since adding capabilities to the node's properties is only used by the nova scheduler to perform more advanced scheduling of instances, we need to enable secure boot without nova, for example:: openstack baremetal node set --instance-info capabilities='{"secure_boot": "true"}' .. _irmc_node_cleaning: Node Cleaning Support ^^^^^^^^^^^^^^^^^^^^^ The ``irmc`` hardware type supports node cleaning. For more information on node cleaning, see :ref:`cleaning`. Supported **Automated** Cleaning Operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The automated cleaning operations supported are: * ``restore_irmc_bios_config``: Restores BIOS settings on a baremetal node from backup data. If this clean step is enabled, the BIOS settings of a baremetal node will be backed up automatically before the deployment. By default, this clean step is disabled with priority ``0``. Set its priority to a positive integer to enable it. The recommended value is ``10``. .. warning:: ``pxe`` boot interface, when used with ``irmc`` hardware type, does not support this clean step. If uses ``irmc`` hardware type, it is required to select ``irmc-pxe`` or ``irmc-virtual-media`` as the boot interface in order to make this clean step work. 
Configuration options for the automated cleaning steps are listed under ``[irmc]`` section in ironic.conf :: clean_priority_restore_irmc_bios_config = 0 For more information on node automated cleaning, see :ref:`automated_cleaning` Boot from Remote Volume ^^^^^^^^^^^^^^^^^^^^^^^ The ``irmc`` hardware type supports the generic iPXE-based remote volume booting when using the following boot interfaces: * ``irmc-pxe`` * ``pxe`` In addition, the ``irmc`` hardware type supports remote volume booting without iPXE. This is available when using the ``irmc-virtual-media`` boot interface. This feature configures a node to boot from a remote volume by using the API of iRMC. It supports iSCSI and FibreChannel. Configuration ~~~~~~~~~~~~~ In addition to the configuration for generic drivers to :ref:`remote volume boot `, the iRMC driver requires the following configuration: * It is necessary to set physical port IDs to network ports and volume connectors. All cards including those not used for volume boot should be registered. The format of a physical port ID is: ``-`` where: - ````: could be ``LAN``, ``FC`` or ``CNA`` - ````: 0 indicates onboard slot. Use 1 to 9 for add-on slots. - ````: A port number starting from 1. These IDs are specified in a node's ``driver_info[irmc_pci_physical_ids]``. This value is a dictionary. The key is the UUID of a resource (Port or Volume Connector) and its value is the physical port ID. For example:: { "1ecd14ee-c191-4007-8413-16bb5d5a73a2":"LAN0-1", "87f6c778-e60e-4df2-bdad-2605d53e6fc0":"CNA1-1" } It can be set with the following command:: openstack baremetal node set $NODE_UUID \ --driver-info irmc_pci_physical_ids={} \ --driver-info irmc_pci_physical_ids/$PORT_UUID=LAN0-1 \ --driver-info irmc_pci_physical_ids/$VOLUME_CONNECTOR_UUID=CNA1-1 * For iSCSI boot, volume connectors with both types ``iqn`` and ``ip`` are required. The configuration with DHCP is not supported yet. * For iSCSI, the size of the storage network is needed. 
This value should be specified in a node's ``driver_info[irmc_storage_network_size]``. It must be a positive integer < 32. For example, if the storage network is 10.2.0.0/22, use the following command:: openstack baremetal node set $NODE_UUID --driver-info irmc_storage_network_size=22 Supported hardware ~~~~~~~~~~~~~~~~~~ The driver supports the PCI controllers, Fibrechannel Cards, Converged Network Adapters supported by `Fujitsu ServerView Virtual-IO Manager `_. Hardware Inspection Support ^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``irmc`` hardware type provides the iRMC-specific hardware inspection with ``irmc`` inspect interface. .. note:: SNMP requires being enabled in ServerView® iRMC S4 Web Server(Network Settings\SNMP section). Configuration ~~~~~~~~~~~~~ The Hardware Inspection Support in the iRMC driver requires the following configuration: * It is necessary to set ironic configuration with ``gpu_ids`` and ``fpga_ids`` options in ``[irmc]`` section. ``gpu_ids`` and ``fpga_ids`` are lists of ``/`` where: - ````: 4 hexadecimal digits starts with '0x'. - ````: 4 hexadecimal digits starts with '0x'. Here are sample values for ``gpu_ids`` and ``fpga_ids``:: gpu_ids = 0x1000/0x0079,0x2100/0x0080 fpga_ids = 0x1000/0x005b,0x1100/0x0180 * The python-scciclient package requires pyghmi version >= 1.0.22 and pysnmp version >= 4.2.3. They are used by the conductor service on the conductor. The latest version of pyghmi can be downloaded from `here `__ and pysnmp can be downloaded from `here `__. 
Supported properties ~~~~~~~~~~~~~~~~~~~~ The inspection process will discover the following essential properties (properties required for scheduling deployment): * ``memory_mb``: memory size * ``cpus``: number of cpus * ``cpu_arch``: cpu architecture * ``local_gb``: disk size Inspection can also discover the following extra capabilities for iRMC driver: * ``irmc_firmware_version``: iRMC firmware version * ``rom_firmware_version``: ROM firmware version * ``trusted_boot``: The flag whether TPM(Trusted Platform Module) is supported by the server. The possible values are 'True' or 'False'. * ``server_model``: server model * ``pci_gpu_devices``: number of gpu devices connected to the bare metal. Inspection can also set/unset node's traits with the following cpu type for iRMC driver: * ``CUSTOM_CPU_FPGA``: The bare metal contains fpga cpu type. .. note:: * The disk size is returned only when eLCM License for FUJITSU PRIMERGY servers is activated. If the license is not activated, then Hardware Inspection will fail to get this value. * Before inspecting, if the server is powered off, it will be turned on automatically. The system will wait for a few seconds before starting inspection. After inspection, power status will be restored to the previous state. The operator can specify these capabilities in compute service flavor, for example:: openstack flavor set baremetal-flavor-name --property capabilities:irmc_firmware_version="iRMC S4-8.64F" openstack flavor set baremetal-flavor-name --property capabilities:server_model="TX2540M1F5" openstack flavor set baremetal-flavor-name --property capabilities:pci_gpu_devices="1" See :ref:`capabilities-discovery` for more details and examples. The operator can add a trait in compute service flavor, for example:: openstack baremetal node add trait $NODE_UUID CUSTOM_CPU_FPGA A valid trait must be no longer than 255 characters. Standard traits are defined in the os_traits library. 
A custom trait must start with the prefix ``CUSTOM_`` and use the following characters: A-Z, 0-9 and _. RAID configuration Support ^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``irmc`` hardware type provides the iRMC RAID configuration with ``irmc`` raid interface. .. note:: * RAID implementation for ``irmc`` hardware type is based on eLCM license and SDCard. Otherwise, SP(Service Platform) in lifecycle management must be available. * RAID implementation only supported for RAIDAdapter 0 in Fujitsu Servers. Configuration ~~~~~~~~~~~~~ The RAID configuration Support in the iRMC drivers requires the following configuration: * It is necessary to set ironic configuration into Node with JSON file option:: $ openstack baremetal node set \ --target-raid-config Here are some sample values for JSON file:: { "logical_disks": [ { "size_gb": 1000, "raid_level": "1" } ] } or:: { "logical_disks": [ { "size_gb": 1000, "raid_level": "1", "controller": "FTS RAID Ctrl SAS 6G 1GB (D3116C) (0)", "physical_disks": [ "0", "1" ] } ] } .. note:: RAID 1+0 and 5+0 in iRMC driver does not support property ``physical_disks`` in ``target_raid_config`` during create raid configuration yet. See following example:: { "logical_disks": [ { "size_gb": "MAX", "raid_level": "1+0" } ] } See :ref:`raid` for more details and examples. Supported properties ~~~~~~~~~~~~~~~~~~~~ The RAID configuration using iRMC driver supports following parameters in JSON file: * ``size_gb``: is a mandatory property in Ironic. * ``raid_level``: is a mandatory property in Ironic. Currently, iRMC Server supports following RAID levels: 0, 1, 5, 6, 1+0 and 5+0. * ``controller``: is name of the controller as read by the RAID interface. * ``physical_disks``: are specific values for each raid array in LogicalDrive which the operator wants to set along with ``raid_level``. The RAID configuration is supported as a manual cleaning step. .. 
note:: * iRMC server will power-on after create/delete raid configuration is applied, FGI (Foreground Initialize) will process raid configuration in iRMC server, thus the operation will be completed upon power-on and power-off when RAID is created on the iRMC server. See :ref:`raid` for more details and examples. BIOS configuration Support ^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``irmc`` hardware type provides the iRMC BIOS configuration with ``irmc`` bios interface. .. warning:: ``irmc`` bios interface does not support ``factory_reset``. Configuration ~~~~~~~~~~~~~ The BIOS configuration in the iRMC driver supports the following settings: - ``boot_option_filter``: Specifies from which drives can be booted. This supports following options: ``UefiAndLegacy``, ``LegacyOnly``, ``UefiOnly``. - ``check_controllers_health_status_enabled``: The UEFI FW checks the controller health status. This supports following options: ``true``, ``false``. - ``cpu_active_processor_cores``: The number of active processor cores 1...n. Option 0 indicates that all available processor cores are active. - ``cpu_adjacent_cache_line_prefetch_enabled``: The processor loads the requested cache line and the adjacent cache line. This supports following options: ``true``, ``false``. - ``cpu_vt_enabled``: Supports the virtualization of platform hardware and several software environments, based on Virtual Machine Extensions to support the use of several software environments using virtual computers. This supports following options: ``true``, ``false``. - ``flash_write_enabled``: The system BIOS can be written. Flash BIOS update is possible. This supports following options: ``true``, ``false``. - ``hyper_threading_enabled``: Hyper-threading technology allows a single physical processor core to appear as several logical processors. This supports following options: ``true``, ``false``. - ``keep_void_boot_options_enabled``: Boot Options will not be removed from "Boot Option Priority" list. 
This supports following options: ``true``, ``false``. - ``launch_csm_enabled``: Specifies whether the Compatibility Support Module (CSM) is executed. This supports following options: ``true``, ``false``. - ``os_energy_performance_override_enabled``: Prevents the OS from overruling any energy efficiency policy setting of the setup. This supports following options: ``true``, ``false``. - ``pci_aspm_support``: Active State Power Management (ASPM) is used to power-manage the PCI Express links, thus consuming less power. This supports following options: ``Disabled``, ``Auto``, ``L0Limited``, ``L1only``, ``L0Force``. - ``pci_above_4g_decoding_enabled``: Specifies if memory resources above the 4GB address boundary can be assigned to PCI devices. This supports following options: ``true``, ``false``. - ``power_on_source``: Specifies whether the switch on sources for the system are managed by the BIOS or the ACPI operating system. This supports following options: ``BiosControlled``, ``AcpiControlled``. - ``single_root_io_virtualization_support_enabled``: Single Root IO Virtualization Support is active. This supports following options: ``true``, ``false``. The BIOS configuration is supported as a manual cleaning step. See :ref:`bios` for more details and examples. Supported platforms =================== This driver supports FUJITSU PRIMERGY BX S4 or RX S8 servers and above. - PRIMERGY BX920 S4 - PRIMERGY BX924 S4 - PRIMERGY RX300 S8 When ``irmc`` power interface is used, Soft Reboot (Graceful Reset) and Soft Power Off (Graceful Power Off) are only available if `ServerView agents `_ are installed. See `iRMC S4 Manual `_ for more details. RAID configuration feature supports FUJITSU PRIMERGY servers with RAID-Ctrl-SAS-6G-1GB(D3116C) controller and above. For detail supported controller with OOB-RAID configuration, please see `the whitepaper for iRMC RAID configuration `_. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/drivers/redfish.rst0000644000175000017500000001700100000000000023344 0ustar00coreycorey00000000000000============== Redfish driver ============== Overview ======== The ``redfish`` driver enables managing servers compliant with the Redfish_ protocol. Prerequisites ============= * The Sushy_ library should be installed on the ironic conductor node(s). For example, it can be installed with ``pip``:: sudo pip install sushy Enabling the Redfish driver =========================== #. Add ``redfish`` to the list of ``enabled_hardware_types``, ``enabled_power_interfaces``, ``enabled_management_interfaces`` and ``enabled_inspect_interfaces`` as well as ``redfish-virtual-media`` to ``enabled_boot_interfaces`` in ``/etc/ironic/ironic.conf``. For example:: [DEFAULT] ... enabled_hardware_types = ipmi,redfish enabled_boot_interfaces = ipmitool,redfish-virtual-media enabled_power_interfaces = ipmitool,redfish enabled_management_interfaces = ipmitool,redfish enabled_inspect_interfaces = inspector,redfish #. Restart the ironic conductor service:: sudo service ironic-conductor restart # Or, for RDO: sudo systemctl restart openstack-ironic-conductor Registering a node with the Redfish driver =========================================== Nodes configured to use the driver should have the ``driver`` property set to ``redfish``. The following properties are specified in the node's ``driver_info`` field: - ``redfish_address``: The URL address to the Redfish controller. It must include the authority portion of the URL, and can optionally include the scheme. If the scheme is missing, https is assumed. For example: https://mgmt.vendor.com. This is required. - ``redfish_system_id``: The canonical path to the ComputerSystem resource that the driver will interact with. 
It should include the root service, version and the unique resource path to the ComputerSystem. This property is only required if target BMC manages more than one ComputerSystem. Otherwise ironic will pick the only available ComputerSystem automatically. For example: /redfish/v1/Systems/1. - ``redfish_username``: User account with admin/server-profile access privilege. Although not required, it is highly recommended. - ``redfish_password``: User account password. Although not required, it is highly recommended. - ``redfish_verify_ca``: If redfish_address has the **https** scheme, the driver will use a secure (TLS_) connection when talking to the Redfish controller. By default (if this is not set or set to True), the driver will try to verify the host certificates. This can be set to the path of a certificate file or directory with trusted certificates that the driver will use for verification. To disable verifying TLS_, set this to False. This is optional. - ``redfish_auth_type``: Redfish HTTP client authentication method. Can be "basic", "session" or "auto". The "auto" mode first tries "session" and falls back to "basic" if session authentication is not supported by the Redfish BMC. Default is set in ironic config as ``[redfish]auth_type``. The ``openstack baremetal node create`` command can be used to enroll a node with the ``redfish`` driver. For example: .. code-block:: bash openstack baremetal node create --driver redfish --driver-info \ redfish_address=https://example.com --driver-info \ redfish_system_id=/redfish/v1/Systems/CX34R87 --driver-info \ redfish_username=admin --driver-info redfish_password=password \ --name node-0 For more information about enrolling nodes see :ref:`enrollment` in the install guide. Features of the ``redfish`` hardware type ========================================= Boot mode support ^^^^^^^^^^^^^^^^^ The ``redfish`` hardware type can read current boot mode from the bare metal node as well as set it to either Legacy BIOS or UEFI. 
.. note:: Boot mode management is the optional part of the Redfish specification. Not all Redfish-compliant BMCs might implement it. In that case it remains the responsibility of the operator to configure proper boot mode to their bare metal nodes. Out-Of-Band inspection ^^^^^^^^^^^^^^^^^^^^^^ The ``redfish`` hardware type can inspect the bare metal node by querying Redfish compatible BMC. This process is quick and reliable compared to the way the ``inspector`` hardware type works i.e. booting bare metal node into the introspection ramdisk. .. note:: The ``redfish`` inspect interface relies on the optional parts of the Redfish specification. Not all Redfish-compliant BMCs might serve the required information, in which case bare metal node inspection will fail. .. note:: The ``local_gb`` property cannot always be discovered, for example, when a node does not have local storage or the Redfish implementation does not support the required schema. In this case the property will be set to 0. Virtual media boot ^^^^^^^^^^^^^^^^^^ The idea behind virtual media boot is that BMC gets hold of the boot image one way or the other (e.g. by HTTP GET, other methods are defined in the standard), then "inserts" it into node's virtual drive as if it was burnt on a physical CD/DVD. The node can then boot from that virtual drive into the operating system residing on the image. The major advantage of virtual media boot feature is that potentially unreliable TFTP image transfer phase of PXE protocol suite is fully eliminated. Hardware types based on the ``redfish`` fully support booting deploy/rescue and user images over virtual media. Ironic builds bootable ISO images, for either UEFI or BIOS (Legacy) boot modes, at the moment of node deployment out of kernel and ramdisk images associated with the ironic node. 
To boot a node managed by ``redfish`` hardware type over virtual media using BIOS boot mode, it suffice to set ironic boot interface to ``redfish-virtual-media``, as opposed to ``ipmitool``. .. code-block:: bash openstack baremetal node set --boot-interface redfish-virtual-media node-0 If UEFI boot mode is desired, the user should additionally supply EFI System Partition image (ESP_) via ``[driver-info]/bootloader`` ironic node property or ironic configuration file in form of Glance image UUID or a URL. .. code-block:: bash openstack baremetal node set --driver-info bootloader= node-0 If ``[driver_info]/config_via_floppy`` boolean property of the node is set to ``true``, ironic will create a file with runtime configuration parameters, place into on a FAT image, then insert the image into node's virtual floppy drive. When booting over PXE or virtual media, and user instance requires some specific kernel configuration, ``[instance_info]/kernel_append_params`` property can be used to pass user-specified kernel command line parameters. For ramdisk kernel, ``[instance_info]/kernel_append_params`` property serves the same purpose. .. _Redfish: http://redfish.dmtf.org/ .. _Sushy: https://opendev.org/openstack/sushy .. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security .. _ESP: https://wiki.ubuntu.com/EFIBootLoaders#Booting_from_EFI ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/drivers/snmp.rst0000644000175000017500000001637600000000000022713 0ustar00coreycorey00000000000000=========== SNMP driver =========== The SNMP hardware type enables control of power distribution units of the type frequently found in data centre racks. PDUs frequently have a management ethernet interface and SNMP support enabling control of the power outlets. The SNMP power interface works with the :ref:`pxe-boot` interface for network deployment and network-configured boot. .. 
note:: Unlike most of the other power interfaces, the SNMP power interface does not have a corresponding management interface. The SNMP hardware type uses the ``noop`` management interface instead. List of supported devices ========================= This is a non-exhaustive list of supported devices. Any device not listed in this table could possibly work using a similar driver. Please report any device status. ============== ========== ========== ===================== Manufacturer Model Supported? Driver name ============== ========== ========== ===================== APC AP7920 Yes apc_masterswitch APC AP9606 Yes apc_masterswitch APC AP9225 Yes apc_masterswitchplus APC AP7155 Yes apc_rackpdu APC AP7900 Yes apc_rackpdu APC AP7901 Yes apc_rackpdu APC AP7902 Yes apc_rackpdu APC AP7911a Yes apc_rackpdu APC AP7921 Yes apc_rackpdu APC AP7922 Yes apc_rackpdu APC AP7930 Yes apc_rackpdu APC AP7931 Yes apc_rackpdu APC AP7932 Yes apc_rackpdu APC AP7940 Yes apc_rackpdu APC AP7941 Yes apc_rackpdu APC AP7951 Yes apc_rackpdu APC AP7960 Yes apc_rackpdu APC AP7990 Yes apc_rackpdu APC AP7998 Yes apc_rackpdu APC AP8941 Yes apc_rackpdu APC AP8953 Yes apc_rackpdu APC AP8959 Yes apc_rackpdu APC AP8961 Yes apc_rackpdu APC AP8965 Yes apc_rackpdu Aten all? Yes aten CyberPower all? Untested cyberpower EatonPower all? Untested eatonpower Teltronix all? Yes teltronix BayTech MRP27 Yes baytech_mrp27 ============== ========== ========== ===================== Software Requirements ===================== - The PySNMP package must be installed, variously referred to as ``pysnmp`` or ``python-pysnmp`` Enabling the SNMP Hardware Type =============================== #. Add ``snmp`` to the list of ``enabled_hardware_types`` in ``ironic.conf``. Also update ``enabled_management_interfaces`` and ``enabled_power_interfaces`` in ``ironic.conf`` as shown below: .. code-block:: ini [DEFAULT] enabled_hardware_types = snmp enabled_management_interfaces = noop enabled_power_interfaces = snmp #. 
To set the default boot option, update ``default_boot_option`` in ``ironic.conf``: .. code-block:: ini [DEFAULT] default_boot_option = netboot .. note:: Currently the default value of ``default_boot_option`` is ``netboot`` but it will be changed to ``local`` in the future. It is recommended to set an explicit value for this option. .. note:: It is important to set ``boot_option`` to ``netboot`` as SNMP hardware type does not support setting of boot devices. One can also configure a node to boot using ``netboot`` by setting its ``capabilities`` and updating Nova flavor as described below: .. code-block:: console openstack baremetal node set --property capabilities="boot_option:netboot" openstack flavor set --property "capabilities:boot_option"="netboot" ironic-flavor #. Restart the Ironic conductor service. .. code-block:: bash service ironic-conductor restart Ironic Node Configuration ========================= Nodes configured to use the SNMP hardware type should have the ``driver`` field set to the hardware type ``snmp``. The following property values have to be added to the node's ``driver_info`` field: - ``snmp_driver``: PDU manufacturer driver name or ``auto`` to automatically choose ironic snmp driver based on ``SNMPv2-MIB::sysObjectID`` value as reported by PDU. - ``snmp_address``: the IPv4 address of the PDU controlling this node. - ``snmp_port``: (optional) A non-standard UDP port to use for SNMP operations. If not specified, the default port (161) is used. - ``snmp_outlet``: The power outlet on the PDU (1-based indexing). - ``snmp_version``: (optional) SNMP protocol version (permitted values ``1``, ``2c`` or ``3``). If not specified, SNMPv1 is chosen. - ``snmp_community``: (Required for SNMPv1/SNMPv2c unless ``snmp_community_read`` and/or ``snmp_community_write`` properties are present in which case the latter take over) SNMP community name parameter for reads and writes to the PDU. 
- ``snmp_community_read``: SNMP community name parameter for reads to the PDU. Takes precedence over the ``snmp_community`` property. - ``snmp_community_write``: SNMP community name parameter for writes to the PDU. Takes precedence over the ``snmp_community`` property. - ``snmp_user``: (Required for SNMPv3) SNMPv3 User-based Security Model (USM) user name. Synonym for now obsolete ``snmp_security`` parameter. - ``snmp_auth_protocol``: SNMPv3 message authentication protocol ID. Valid values include: ``none``, ``md5``, ``sha`` for all pysnmp versions and additionally ``sha224``, ``sha256``, ``sha384``, ``sha512`` for pysnmp versions 4.4.1 and later. Default is ``none`` unless ``snmp_auth_key`` is provided. In the latter case ``md5`` is the default. - ``snmp_auth_key``: SNMPv3 message authentication key. Must be 8+ characters long. Required when message authentication is used. - ``snmp_priv_protocol``: SNMPv3 message privacy (encryption) protocol ID. Valid values include: ``none``, ``des``, ``3des``, ``aes``, ``aes192``, ``aes256`` for all pysnmp version and additionally ``aes192blmt``, ``aes256blmt`` for pysnmp versions 4.4.3+. Note that message privacy requires using message authentication. Default is ``none`` unless ``snmp_priv_key`` is provided. In the latter case ``des`` is the default. - ``snmp_priv_key``: SNMPv3 message privacy (encryption) key. Must be 8+ characters long. Required when message encryption is used. - ``snmp_context_engine_id``: SNMPv3 context engine ID. Default is the value of authoritative engine ID. - ``snmp_context_name``: SNMPv3 context name. Default is an empty string. The following command can be used to enroll a node with the ``snmp`` hardware type: .. 
code-block:: bash openstack baremetal node create --os-baremetal-api-version=1.31 \ --driver snmp --driver-info snmp_driver= \ --driver-info snmp_address= \ --driver-info snmp_outlet= \ --driver-info snmp_community= \ --properties capabilities=boot_option:netboot ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/drivers/xclarity.rst0000644000175000017500000000451600000000000023566 0ustar00coreycorey00000000000000=============== XClarity driver =============== Overview ======== The ``xclarity`` driver is targeted for IMM 2.0 and IMM 3.0 managed Lenovo servers. The xclarity hardware type enables the user to take advantage of `XClarity Manager`_ by using the `XClarity Python Client`_. Prerequisites ============= * The XClarity Client library should be installed on the ironic conductor node(s). For example, it can be installed with ``pip``:: sudo pip install python-xclarityclient Enabling the XClarity driver ============================ #. Add ``xclarity`` to the list of ``enabled_hardware_types``, ``enabled_power_interfaces`` and ``enabled_management_interfaces`` in ``/etc/ironic/ironic.conf``. For example:: [DEFAULT] ... enabled_hardware_types = ipmi,xclarity enabled_power_interfaces = ipmitool,xclarity enabled_management_interfaces = ipmitool,xclarity #. Restart the ironic conductor service:: sudo service ironic-conductor restart # Or, for RDO: sudo systemctl restart openstack-ironic-conductor Registering a node with the XClarity driver =========================================== Nodes configured to use the driver should have the ``driver`` property set to ``xclarity``. The following properties are specified in the node's ``driver_info`` field and are required: - ``xclarity_manager_ip``: The IP address of the XClarity Controller. - ``xclarity_username``: User account with admin/server-profile access privilege to the XClarity Controller. 
- ``xclarity_password``: User account password corresponding to the xclarity_username to the XClarity Controller. - ``xclarity_hardware_id``: The hardware ID of the XClarity managed server. The ``openstack baremetal node create`` command can be used to enroll a node with the ``xclarity`` driver. For example: .. code-block:: bash openstack baremetal node create --driver xclarity \ --driver-info xclarity_manager_ip=https://10.240.217.101 \ --driver-info xclarity_username=admin \ --driver-info xclarity_password=password \ --driver-info xclarity_hardware_id=hardware_id For more information about enrolling nodes see :ref:`enrollment` in the install guide. .. _`XClarity Manager`: http://www3.lenovo.com/us/en/data-center/software/systems-management/xclarity/ .. _`XClarity Python Client`: http://pypi.org/project/python-xclarityclient/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/drivers.rst0000644000175000017500000000776600000000000021741 0ustar00coreycorey00000000000000=============================================== Drivers, Hardware Types and Hardware Interfaces =============================================== Generic Interfaces ------------------ .. toctree:: :maxdepth: 2 interfaces/boot interfaces/deploy Hardware Types -------------- .. toctree:: :maxdepth: 1 drivers/ibmc drivers/idrac drivers/ilo drivers/intel-ipmi drivers/ipmitool drivers/irmc drivers/redfish drivers/snmp drivers/xclarity Changing Hardware Types and Interfaces -------------------------------------- Hardware types and interfaces are enabled in the configuration as described in :doc:`/install/enabling-drivers`. 
Usually, a hardware type is configured on enrolling as described in :doc:`/install/enrollment`:: openstack baremetal node create --driver Any hardware interfaces can be specified on enrollment as well:: openstack baremetal node create --driver \ --deploy-interface direct ---interface For the remaining interfaces the default value is assigned as described in :ref:`hardware_interfaces_defaults`. Both the hardware type and the hardware interfaces can be changed later via the node update API. Changing Hardware Interfaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Hardware interfaces can be changed by the following command:: openstack baremetal node set \ --deploy-interface direct \ ---interface The modified interfaces must be enabled and compatible with the current node's hardware type. Changing Hardware Type ~~~~~~~~~~~~~~~~~~~~~~ Changing the node's hardware type can pose a problem. When the ``driver`` field is updated, the final result must be consistent, that is, the resulting hardware interfaces must be compatible with the new hardware type. This will not work:: openstack baremetal node create --name test --driver fake-hardware openstack baremetal node set test --driver ipmi This is because the ``fake-hardware`` hardware type defaults to ``fake`` implementations for some or all interfaces, but the ``ipmi`` hardware type is not compatible with them. There are three ways to deal with this situation: #. Provide new values for all incompatible interfaces, for example:: openstack baremetal node set test --driver ipmi \ --boot-interface pxe \ --deploy-interface iscsi \ --management-interface ipmitool \ --power-interface ipmitool #. Request resetting some of the interfaces to their new defaults by using the ``--reset--interface`` family of arguments, for example:: openstack baremetal node set test --driver ipmi \ --reset-boot-interface \ --reset-deploy-interface \ --reset-management-interface \ --reset-power-interface .. 
note:: This feature is available starting with ironic 11.1.0 (Rocky series, API version 1.45). #. Request resetting all interfaces to their new defaults:: openstack baremetal node set test --driver ipmi --reset-interfaces You can still specify explicit values for some interfaces:: openstack baremetal node set test --driver ipmi --reset-interfaces \ --deploy-interface direct .. note:: This feature is available starting with ironic 11.1.0 (Rocky series, API version 1.45). Unsupported drivers ------------------- The following drivers were declared as unsupported in ironic Newton release and as of Ocata release they are removed from ironic: - AMT driver - available as part of ironic-staging-drivers_ - iBoot driver - available as part of ironic-staging-drivers_ - Wake-On-Lan driver - available as part of ironic-staging-drivers_ - Virtualbox drivers - SeaMicro drivers - MSFT OCS drivers The SSH drivers were removed in the Pike release. Similar functionality can be achieved either with VirtualBMC_ or using libvirt drivers from ironic-staging-drivers_. .. _ironic-staging-drivers: http://ironic-staging-drivers.readthedocs.io .. _VirtualBMC: https://opendev.org/openstack/virtualbmc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/gmr.rst0000644000175000017500000000427700000000000021042 0ustar00coreycorey00000000000000Bare Metal Service state report (via Guru Meditation Reports) ============================================================= The Bare Metal service contains a mechanism whereby developers and system administrators can generate a report about the state of running Bare Metal executables (ironic-api and ironic-conductor). This report is called a Guru Meditation Report (GMR for short). GMR provides useful debugging information that can be used to obtain an accurate view on the current live state of the system. 
For example, what threads are running, what configuration parameters are in effect, and more. The eventlet backdoor facility provides an interactive shell interface for any eventlet based process, allowing an administrator to telnet to a pre-defined port and execute a variety of commands. Configuration ------------- The GMR feature is optional and requires the oslo.reports_ package to be installed. For example, using pip:: pip install 'oslo.reports>=1.18.0' .. _oslo.reports: https://opendev.org/openstack/oslo.reports Generating a GMR ---------------- A *GMR* can be generated by sending the *USR2* signal to any Bare Metal process that supports it. The *GMR* will then be output to stderr for that particular process. For example: Suppose that ``ironic-api`` has process ID ``6385``, and was run with ``2>/var/log/ironic/ironic-api-err.log``. Then, sending the *USR* signal:: kill -USR2 6385 will trigger the Guru Meditation report to be printed to ``/var/log/ironic/ironic-api-err.log``. Structure of a GMR ------------------ The *GMR* consists of the following sections: Package Shows information about the package to which this process belongs, including version information. Threads Shows stack traces and thread IDs for each of the threads within this process. Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread IDs). Configuration Lists all the configuration options currently accessible via the CONF object for the current process. .. only:: html Sample GMR Report ----------------- Below is a sample GMR report generated for ``ironic-api`` service: .. 
include:: report.txt :literal: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/index.rst0000644000175000017500000000275500000000000021363 0ustar00coreycorey00000000000000Administrator's Guide ===================== If you are a system administrator running Ironic, this section contains information that may help you understand how to operate and upgrade the services. .. toctree:: :maxdepth: 1 Drivers, Hardware Types and Hardware Interfaces Ironic Python Agent Node Hardware Inspection Node Deployment Node Cleaning Node Adoption Node Retirement RAID Configuration BIOS Settings Node Rescuing Configuring to boot from volume Multi-tenant Networking Port Groups Configuring Web or Serial Console Enabling Notifications Ceph Object Gateway Emitting Software Metrics Auditing API Traffic Service State Reporting Conductor Groups Upgrade Guide Security Windows Images Troubleshooting FAQ Power Sync with the Compute Service Agent Token Node Multi-Tenancy .. toctree:: :hidden: deploy-steps Dashboard Integration --------------------- A plugin for the OpenStack Dashboard (horizon) service is under development. Documentation for that can be found within the ironic-ui project. * :ironic-ui-doc:`Dashboard (horizon) plugin <>` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/inspection.rst0000644000175000017500000000670000000000000022421 0ustar00coreycorey00000000000000.. _inspection: =================== Hardware Inspection =================== Overview -------- Inspection allows Bare Metal service to discover required node properties once required ``driver_info`` fields (for example, IPMI credentials) are set by an operator. Inspection will also create the Bare Metal service ports for the discovered ethernet MACs. 
Operators will have to manually delete the Bare Metal service ports for which physical media is not connected. This is required due to the `bug 1405131 `_. There are two kinds of inspection supported by Bare Metal service: #. Out-of-band inspection is currently implemented by several hardware types, including ``ilo``, ``idrac`` and ``irmc``. #. `In-band inspection`_ by utilizing the ironic-inspector_ project. The node should be in the ``manageable`` state before inspection is initiated. If it is in the ``enroll`` or ``available`` state, move it to ``manageable`` first:: openstack baremetal node manage Then inspection can be initiated using the following command:: openstack baremetal node inspect .. _capabilities-discovery: Capabilities discovery ---------------------- This is an incomplete list of capabilities we want to discover during inspection. The exact support is hardware and hardware type specific though, the most complete list is provided by the iLO :ref:`ilo-inspection`. ``secure_boot`` (``true`` or ``false``) whether secure boot is supported for the node ``boot_mode`` (``bios`` or ``uefi``) the boot mode the node is using ``cpu_vt`` (``true`` or ``false``) whether the CPU virtualization is enabled ``cpu_aes`` (``true`` or ``false``) whether the AES CPU extensions are enabled ``max_raid_level`` (integer, 0-10) maximum RAID level supported by the node ``pci_gpu_devices`` (non-negative integer) number of GPU devices on the node The operator can specify these capabilities in nova flavor for node to be selected for scheduling:: nova flavor-key my-baremetal-flavor set capabilities:pci_gpu_devices="> 0" nova flavor-key my-baremetal-flavor set capabilities:secure_boot="true" Please see a specific :doc:`hardware type page ` for the exact list of capabilities this hardware type can discover. In-band inspection ------------------ In-band inspection involves booting a ramdisk on the target node and fetching information directly from it. 
This process is more fragile and time-consuming than the out-of-band inspection, but it is not vendor-specific and works across a wide range of hardware. In-band inspection is using the ironic-inspector_ project. It is supported by all hardware types, and used by default, if enabled, by the ``ipmi`` hardware type. The ``inspector`` *inspect* interface has to be enabled to use it: .. code-block:: ini [DEFAULT] enabled_inspect_interfaces = inspector,no-inspect If the ironic-inspector service is not registered in the service catalog, set the following option: .. code-block:: ini [inspector] endpoint-override = http://inspector.example.com:5050 In order to ensure that ports in Bare Metal service are synchronized with NIC ports on the node, the following settings in the ironic-inspector configuration file must be set:: [processing] add_ports = all keep_ports = present .. _ironic-inspector: https://pypi.org/project/ironic-inspector .. _python-ironicclient: https://pypi.org/project/python-ironicclient ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1823993 ironic-14.0.1.dev163/doc/source/admin/interfaces/0000755000175000017500000000000000000000000021634 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/interfaces/boot.rst0000644000175000017500000000516000000000000023333 0ustar00coreycorey00000000000000=============== Boot interfaces =============== The boot interface manages booting of both the deploy ramdisk and the user instances on the bare metal node. The `PXE boot`_ interface is generic and works with all hardware that supports booting from network. Alternatively, several vendors provide *virtual media* implementations of the boot interface. They work by pushing an ISO image to the node's `management controller`_, and do not require either PXE or iPXE. 
Check your driver documentation at :doc:`../drivers` for details. .. _pxe-boot: PXE boot -------- The ``pxe`` boot interface uses PXE_ or iPXE_ to deliver the target kernel/ramdisk pair. PXE uses relatively slow and unreliable TFTP protocol for transfer, while iPXE uses HTTP. The downside of iPXE is that it's less common, and usually requires bootstrapping using PXE first. The ``pxe`` boot interface works by preparing a PXE/iPXE environment for a node on the file system, then instructing the DHCP provider (for example, the Networking service) to boot the node from it. See :ref:`iscsi-deploy-example` and :ref:`direct-deploy-example` for a better understanding of the whole deployment process. .. note:: Both PXE and iPXE are configured differently, when UEFI boot is used instead of conventional BIOS boot. This is particularly important for CPU architectures that do not have BIOS support at all. The ``pxe`` boot interface is used by default for many hardware types, including ``ipmi``. Some hardware types, notably ``ilo`` and ``irmc`` have their specific implementations of the PXE boot interface. Additional configuration is required for this boot interface - see :doc:`/install/configure-pxe` for details. Enable persistent boot device for deploy/clean operation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Ironic uses non-persistent boot for cleaning/deploying phases as default, in PXE interface. For some drivers, a persistent change is far more costly than a non-persistent one, so this can bring performance improvements. Set the flag ``force_persistent_boot_device`` to ``True`` in the node's ``driver_info``:: $ openstack baremetal node set --driver-info force_persistent_boot_device=True .. note:: It's recommended to check if the node's state has not changed as there is no way of locking the node between these commands. Once the flag is present, the next cleaning and deploy steps will be done with persistent boot for that node. .. 
_PXE: https://en.wikipedia.org/wiki/Preboot_Execution_Environment .. _iPXE: https://en.wikipedia.org/wiki/IPXE .. _management controller: https://en.wikipedia.org/wiki/Out-of-band_management ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/interfaces/deploy.rst0000644000175000017500000001363500000000000023672 0ustar00coreycorey00000000000000================= Deploy Interfaces ================= A *deploy* interface plays a critical role in the provisioning process. It orchestrates the whole deployment and defines how the image gets transferred to the target disk. .. _iscsi-deploy: iSCSI deploy ============ With ``iscsi`` deploy interface, the deploy ramdisk publishes the node's hard drive as an iSCSI_ share. The ironic-conductor then copies the image to this share. See :ref:`iSCSI deploy diagram ` for a detailed explanation of how this deploy interface works. This interface is used by default, if enabled (see :ref:`enable-hardware-interfaces`). You can specify it explicitly when creating or updating a node:: openstack baremetal node create --driver ipmi --deploy-interface iscsi openstack baremetal node set --deploy-interface iscsi .. _iSCSI: https://en.wikipedia.org/wiki/ISCSI .. _direct-deploy: Direct deploy ============= With ``direct`` deploy interface, the deploy ramdisk fetches the image from an HTTP location. It can be an object storage (swift or RadosGW) temporary URL or a user-provided HTTP URL. The deploy ramdisk then copies the image to the target disk. See :ref:`direct deploy diagram ` for a detailed explanation of how this deploy interface works. You can specify this deploy interface when creating or updating a node:: openstack baremetal node create --driver ipmi --deploy-interface direct openstack baremetal node set --deploy-interface direct .. note:: For historical reasons the ``direct`` deploy interface is sometimes called ``agent``. 
This is because before the Kilo release **ironic-python-agent** used to only support this deploy interface. Deploy with custom HTTP servers ------------------------------- The ``direct`` deploy interface can also be configured to use with custom HTTP servers set up at ironic conductor nodes, images will be cached locally and made accessible by the HTTP server. To use this deploy interface with a custom HTTP server, set ``image_download_source`` to ``http`` in the ``[agent]`` section. .. code-block:: ini [agent] ... image_download_source = http ... You need to set up a workable HTTP server at each conductor node which with ``direct`` deploy interface enabled, and check http related options in the ironic configuration file to match the HTTP server configurations. .. code-block:: ini [deploy] http_url = http://example.com http_root = /httpboot Each HTTP servers should be configured to follow symlinks for images accessible from HTTP service. Please refer to configuration option ``FollowSymLinks`` if you are using Apache HTTP server, or ``disable_symlinks`` if Nginx HTTP server is in use. .. _ansible-deploy: Ansible deploy ============== This interface is similar to ``direct`` in the sense that the image is downloaded by the ramdisk directly from the image store (not from ironic-conductor host), but the logic of provisioning the node is held in a set of Ansible playbooks that are applied by the ``ironic-conductor`` service handling the node. While somewhat more complex to set up, this deploy interface provides greater flexibility in terms of advanced node preparation during provisioning. This interface is supported by most but not all hardware types declared in ironic. However this deploy interface is not enabled by default. To enable it, add ``ansible`` to the list of enabled deploy interfaces in ``enabled_deploy_interfaces`` option in the ``[DEFAULT]`` section of ironic's configuration file: .. code-block:: ini [DEFAULT] ... 
enabled_deploy_interfaces = iscsi,direct,ansible ... Once enabled, you can specify this deploy interface when creating or updating a node: .. code-block:: shell openstack baremetal node create --driver ipmi --deploy-interface ansible openstack baremetal node set --deploy-interface ansible For more information about this deploy interface, its features and how to use it, see :doc:`Ansible deploy interface <../drivers/ansible>`. .. toctree:: :hidden: ../drivers/ansible .. _ramdisk-deploy: Ramdisk deploy ============== The ramdisk interface is intended to provide a mechanism to "deploy" an instance where the item to be deployed is in reality a ramdisk. Most commonly this is performed when an instance is booted via PXE, iPXE or Virtual Media, with the only local storage contents being those in memory. It is suported by ``pxe`` and ``ilo-virtual-media`` boot interfaces. As with most non-default interfaces, it must be enabled and set for a node to be utilized: .. code-block:: ini [DEFAULT] ... enabled_deploy_interfaces = iscsi,direct,ramdisk ... Once enabled and the conductor(s) have been restarted, the interface can be set upon creation of a new node or update a pre-existing node: .. code-block:: shell openstack baremetal node create --driver ipmi \ --deploy-interface ramdisk \ --boot-interface pxe openstack baremetal node set --deploy-interface ramdisk The intended use case is for advanced scientific and ephemeral workloads where the step of writing an image to the local storage is not required or desired. As such, this interface does come with several caveats: * Configuration drives are not supported. * Disk image contents are not written to the bare metal node. * Users and Operators who intend to leverage this interface should expect to leverage a metadata service, custom ramdisk images, or the ``instance_info/ramdisk_kernel_arguments`` parameter to add options to the kernel boot command line. 
* Bare metal nodes must continue to have network access to PXE and iPXE network resources. This is contrary to most tenant networking enabled configurations where this access is restricted to the provisioning and cleaning networks * As with all deployment interfaces, automatic cleaning of the node will still occur with the contents of any local storage being wiped between deployments. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/metrics.rst0000644000175000017500000001016500000000000021714 0ustar00coreycorey00000000000000.. _metrics: ========================= Emitting Software Metrics ========================= Beginning with the Newton (6.1.0) release, the ironic services support emitting internal performance data to `statsd `_. This allows operators to graph and understand performance bottlenecks in their system. This guide assumes you have a statsd server setup. For information on using and configuring statsd, please see the `statsd `_ README and documentation. These performance measurements, herein referred to as "metrics", can be emitted from the Bare Metal service, including ironic-api, ironic-conductor, and ironic-python-agent. By default, none of the services will emit metrics. Configuring the Bare Metal Service to Enable Metrics ==================================================== Enabling metrics in ironic-api and ironic-conductor --------------------------------------------------- The ironic-api and ironic-conductor services can be configured to emit metrics to statsd by adding the following to the ironic configuration file, usually located at ``/etc/ironic/ironic.conf``:: [metrics] backend = statsd If a statsd daemon is installed and configured on every host running an ironic service, listening on the default UDP port (8125), no further configuration is needed. 
If you are using a remote statsd server, you must also supply connection information in the ironic configuration file:: [metrics_statsd] # Point this at your environments' statsd host statsd_host = 192.0.2.1 statsd_port = 8125 Enabling metrics in ironic-python-agent --------------------------------------- The ironic-python-agent process receives its configuration in the response from the initial lookup request to the ironic-api service. This means to configure ironic-python-agent to emit metrics, you must enable the agent metrics backend in your ironic configuration file on all ironic-conductor hosts:: [metrics] agent_backend = statsd In order to reliably emit metrics from the ironic-python-agent, you must provide a statsd server that is reachable from both the configured provisioning and cleaning networks. The agent statsd connection information is configured in the ironic configuration file as well:: [metrics_statsd] # Point this at a statsd host reachable from the provisioning and cleaning nets agent_statsd_host = 198.51.100.2 agent_statsd_port = 8125 Types of Metrics Emitted ======================== The Bare Metal service emits timing metrics for every API method, as well as for most driver methods. These metrics measure how long a given method takes to execute. A deployer with metrics enabled should expect between 100 and 500 distinctly named data points to be emitted from the Bare Metal service. This will increase if the metrics.preserve_host option is set to true or if multiple drivers are used in the Bare Metal deployment. This estimate may be used to determine if a deployer needs to scale their metrics backend to handle the additional load before enabling metrics. To see which metrics have changed names or have been removed between releases, refer to the `ironic release notes `_. .. note:: With the default statsd configuration, each timing metric may create additional metrics due to how statsd handles timing metrics. 
For more information, see statds documentation on `metric types `_. The ironic-python-agent ramdisk emits timing metrics for every API method. Deployers who use custom HardwareManagers can emit custom metrics for their hardware. For more information on custom HardwareManagers, and emitting metrics from them, please see the :ironic-python-agent-doc:`ironic-python-agent documentation <>`. Adding New Metrics ================== If you're a developer, and would like to add additional metrics to ironic, please see the :ironic-lib-doc:`ironic-lib developer documentation <>` for details on how to use the metrics library. A release note should also be created each time a metric is changed or removed to alert deployers of the change. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/multitenancy.rst0000644000175000017500000002427200000000000022766 0ustar00coreycorey00000000000000.. _multitenancy: ======================================= Multi-tenancy in the Bare Metal service ======================================= Overview ======== It is possible to use dedicated tenant networks for provisioned nodes, which extends the current Bare Metal service capabilities of providing flat networks. This works in conjunction with the Networking service to allow provisioning of nodes in a separate provisioning network. The result of this is that multiple tenants can use nodes in an isolated fashion. However, this configuration does not support trunk ports belonging to multiple networks. Concepts ======== .. _network-interfaces: Network interfaces ------------------ Network interface is one of the driver interfaces that manages network switching for nodes. 
There are 3 network interfaces available in the Bare Metal service: - ``noop`` interface is used for standalone deployments, and does not perform any network switching; - ``flat`` interface places all nodes into a single provider network that is pre-configured on the Networking service and physical equipment. Nodes remain physically connected to this network during their entire life cycle. - ``neutron`` interface provides tenant-defined networking through the Networking service, separating tenant networks from each other and from the provisioning and cleaning provider networks. Nodes will move between these networks during their life cycle. This interface requires Networking service support for the switches attached to the baremetal servers so they can be programmed. Local link connection --------------------- The Bare Metal service allows ``local_link_connection`` information to be associated with Bare Metal ports. This information is provided to the Networking service's ML2 driver when a Virtual Interface (VIF) is attached. The ML2 driver uses the information to plug the specified port to the tenant network. .. list-table:: ``local_link_connection`` fields :header-rows: 1 * - Field - Description * - ``switch_id`` - Required. Identifies a switch and can be a MAC address or an OpenFlow-based ``datapath_id``. * - ``port_id`` - Required. Port ID on the switch/Smart NIC, for example, Gig0/1, rep0-0. * - ``switch_info`` - Optional. Used to distinguish different switch models or other vendor-specific identifier. Some ML2 plugins may require this field. * - ``hostname`` - Required in case of a Smart NIC port. Hostname of Smart NIC device. .. note:: This isn't applicable to Infiniband ports because the network topology is discoverable by the Infiniband Subnet Manager. If specified, local_link_connection information will be ignored. If port is Smart NIC port then: 1. ``port_id`` is the representor port name on the Smart NIC. 2. ``switch_id`` is not mandatory. .. 
_multitenancy-physnets: Physical networks ----------------- A Bare Metal port may be associated with a physical network using its ``physical_network`` field. The Bare Metal service uses this information when mapping between virtual ports in the Networking service and physical ports and port groups in the Bare Metal service. A port's physical network field is optional, and if not set then any virtual port may be mapped to that port, provided that no free Bare Metal port with a suitable physical network assignment exists. The physical network of a port group is defined by the physical network of its constituent ports. The Bare Metal service ensures that all ports in a port group have the same value in their physical network field. When attaching a virtual interface (VIF) to a node, the following ordered criteria are used to select a suitable unattached port or port group: * Require ports or port groups to not have a physical network or to have a physical network that matches one of the VIF's allowed physical networks. * Prefer ports and port groups that have a physical network to ports and port groups that do not have a physical network. * Prefer port groups to ports. Prefer ports with PXE enabled. Configuring the Bare Metal service ================================== See the :ref:`configure-tenant-networks` section in the installation guide for the Bare Metal service. Configuring nodes ================= #. Ensure that your python-ironicclient version and requested API version are sufficient for your requirements. * Multi-tenancy support was added in API version 1.20, and is supported by python-ironicclient version 1.5.0 or higher. * Physical network support for ironic ports was added in API version 1.34, and is supported by python-ironicclient version 1.15.0 or higher. * Smart NIC support for ironic ports was added in API version 1.53, and is supported by python-ironicclient version 2.7.0 or higher. 
The following examples assume you are using python-ironicclient version 2.7.0 or higher. Export the following variable:: export OS_BAREMETAL_API_VERSION= #. The node's ``network_interface`` field should be set to a valid network interface. Valid interfaces are listed in the ``[DEFAULT]/enabled_network_interfaces`` configuration option in the ironic-conductor's configuration file. Set it to ``neutron`` to use the Networking service's ML2 driver:: openstack baremetal node create --network-interface neutron --driver ipmi .. note:: If the ``[DEFAULT]/default_network_interface`` configuration option is set, the ``--network-interface`` option does not need to be specified when creating the node. #. To update an existing node's network interface to ``neutron``, use the following commands:: openstack baremetal node set $NODE_UUID_OR_NAME \ --network-interface neutron #. Create a port as follows:: openstack baremetal port create $HW_MAC_ADDRESS --node $NODE_UUID \ --local-link-connection switch_id=$SWITCH_MAC_ADDRESS \ --local-link-connection switch_info=$SWITCH_HOSTNAME \ --local-link-connection port_id=$SWITCH_PORT \ --pxe-enabled true \ --physical-network physnet1 An Infiniband port requires client ID, while local link connection information will be populated by Infiniband Subnet Manager. The client ID consists of <12-byte vendor prefix>:<8 byte port GUID>. There is no standard process for deriving the port's MAC address ($HW_MAC_ADDRESS); it is vendor specific. For example, Mellanox ConnectX Family Devices prefix is ff:00:00:00:00:00:02:00:00:02:c9:00. If port GUID was f4:52:14:03:00:38:39:81 the client ID would be ff:00:00:00:00:00:02:00:00:02:c9:00:f4:52:14:03:00:38:39:81. Mellanox ConnectX Family Device's HW_MAC_ADDRESS consists of 6 bytes; the port GUID's lower 3 and higher 3 bytes. In this example it would be f4:52:14:38:39:81. 
Putting it all together, create an Infiniband port as follows:: openstack baremetal port create $HW_MAC_ADDRESS --node $NODE_UUID \ --pxe-enabled true \ --extra client-id=$CLIENT_ID \ --physical-network physnet1 #. Create a Smart NIC port as follows:: openstack baremetal port create $HW_MAC_ADDRESS --node $NODE_UUID \ --local-link-connection hostname=$HOSTNAME \ --local-link-connection port_id=$REP_NAME \ --pxe-enabled true \ --physical-network physnet1 \ --is-smartnic A Smart NIC port requires ``hostname`` which is the hostname of the Smart NIC, and ``port_id`` which is the representor port name within the Smart NIC. #. Check the port configuration:: openstack baremetal port show $PORT_UUID After these steps, the provisioning of the created node will happen in the provisioning network, and then the node will be moved to the tenant network that was requested. Configuring the Networking service ================================== In addition to configuring the Bare Metal service some additional configuration of the Networking service is required to ensure ports for bare metal servers are correctly programmed. This configuration will be determined by the Bare Metal service network interfaces you have enabled and which top of rack switches you have in your environment. ``flat`` network interface -------------------------- In order for Networking service ports to correctly operate with the Bare Metal service ``flat`` network interface the ``baremetal`` ML2 mechanism driver from `networking-baremetal `_ needs to be loaded into the Networking service configuration. This driver understands that the switch should be already configured by the admin, and will mark the networking service ports as successfully bound as nothing else needs to be done. #. Install the ``networking-baremetal`` library .. code-block:: console $ pip install networking-baremetal #. Enable the ``baremetal`` driver in the Networking service ML2 configuration file .. 
code-block:: ini [ml2] mechanism_drivers = ovs,baremetal ``neutron`` network interface ----------------------------- The ``neutron`` network interface allows the Networking service to program the physical top of rack switches for the bare metal servers. To do this an ML2 mechanism driver which supports the ``baremetal`` VNIC type for the make and model of top of rack switch in the environment must be installed and enabled. This is a list of known top of rack ML2 mechanism drivers which work with the ``neutron`` network interface: Cisco Nexus 9000 series To install and configure this ML2 mechanism driver see `Nexus Mechanism Driver Installation Guide `_. FUJITSU CFX2000 ``networking-fujitsu`` ML2 driver supports this switch. The documentation is available `here `_. Networking Generic Switch This is an ML2 mechanism driver built for testing against virtual bare metal environments and some switches that are not covered by hardware specific ML2 mechanism drivers. More information is available in the project's `README `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/node-deployment.rst0000644000175000017500000002476700000000000023366 0ustar00coreycorey00000000000000=============== Node Deployment =============== .. contents:: :depth: 2 .. _node-deployment-deploy-steps: Overview ======== Node deployment is performed by the Bare Metal service to prepare a node for use by a workload. The exact work flow used depends on a number of factors, including the hardware type and interfaces assigned to a node. Deploy Steps ============ The Bare Metal service implements deployment by collecting a list of deploy steps to perform on a node from the Power, Deploy, Management, BIOS, and RAID interfaces of the driver assigned to the node. These steps are then ordered by priority and executed on the node when the node is moved to the ``deploying`` state. 
Nodes move to the ``deploying`` state when attempting to move to the ``active`` state (when the hardware is prepared for use by a workload). For a full understanding of all state transitions into deployment, please see :doc:`../contributor/states`. The Bare Metal service added support for deploy steps in the Rocky release. Order of execution ------------------ Deploy steps are ordered from higher to lower priority, where a larger integer is a higher priority. If the same priority is used by deploy steps on different interfaces, the following resolution order is used: Power, Management, Deploy, BIOS, and RAID interfaces. .. _node-deployment-core-steps: Core steps ---------- Certain default deploy steps are designated as 'core' deploy steps. The following deploy steps are core: ``deploy.deploy`` In this step the node is booted using a provisioning image, and the user image is written to the node's disk. It has a priority of 100. Writing a Deploy Step --------------------- Please refer to :doc:`/contributor/deploy-steps`. FAQ --- What deploy step is running? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To check what deploy step the node is performing or attempted to perform and failed, run the following command; it will return the value in the node's ``driver_internal_info`` field:: openstack baremetal node show $node_ident -f value -c driver_internal_info The ``deploy_steps`` field will contain a list of all remaining steps with their priorities, and the first one listed is the step currently in progress or that the node failed before going into ``deploy failed`` state. Troubleshooting --------------- If deployment fails on a node, the node will be put into the ``deploy failed`` state until the node is deprovisioned. A deprovisioned node is moved to the ``available`` state after the cleaning process has been performed successfully. 
Strategies for determining why a deploy step failed include checking the ironic conductor logs, checking logs from the ironic-python-agent that have been stored on the ironic conductor, or performing general hardware troubleshooting on the node. Deploy Templates ================ Starting with the Stein release, with Bare Metal API version 1.55, deploy templates offer a way to define a set of one or more deploy steps to be executed with particular sets of arguments and priorities. Each deploy template has a name, which must be a valid trait. Traits can be either standard or custom. Standard traits are listed in the :os-traits-doc:`os_traits library <>`. Custom traits must meet the following requirements: * prefixed with ``CUSTOM_`` * contain only upper case characters A to Z, digits 0 to 9, or underscores * no longer than 255 characters in length Deploy step format ------------------ An invocation of a deploy step is defined in a deploy template as follows:: { "interface": "", "step": "", "args": { "": "", "": "" }, "priority": } A deploy template contains a list of one or more such steps. Each combination of `interface` and `step` may only be specified once in a deploy template. Matching deploy templates ------------------------- During deployment, if any of the traits in a node's ``instance_info.traits`` field match the name of a deploy template, then the steps from that deploy template will be added to the list of steps to be executed by the node. When using the Compute service, any traits in the instance's flavor properties or image properties are stored in ``instance_info.traits`` during deployment. See :ref:`scheduling-traits` for further information on how traits are used for scheduling when the Bare Metal service is used with the Compute service. Note that there is no ongoing relationship between a node and any templates that are matched during deployment. The set of matching deploy templates is checked at deployment time. 
Any subsequent updates to or deletion of those templates will not be reflected in the node's configuration unless it is redeployed or rebuilt. Similarly, if a node is rebuilt and the set of matching deploy templates has changed since the initial deployment, then the resulting configuration of the node may be different from the initial deployment. Overriding default deploy steps ------------------------------- A deploy step is enabled by default if it has a non-zero default priority. A default deploy step may be overridden in a deploy template. If the step's priority is a positive integer it will be executed with the specified priority and arguments. If the step's priority is zero, the step will not be executed. If a `core deploy step `_ is included in a deploy template, it can only be assigned a priority of zero to disable it. Creating a deploy template via API ---------------------------------- A deploy template can be created using the Bare Metal API:: POST /v1/deploy_templates Here is an example of the body of a request to create a deploy template with a single step: .. code-block:: json { "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "priority": 150 } ] } Further information on this API is available `here `__. Creating a deploy template via "openstack baremetal" client ----------------------------------------------------------- A deploy template can be created via the ``openstack baremetal deploy template create`` command, starting with ``python-ironicclient`` 2.7.0. The argument ``--steps`` must be specified. Its value is one of: - a JSON string - path to a JSON file whose contents are passed to the API - '-', to read from stdin. This allows piping in the deploy steps. Example of creating a deploy template with a single step using a JSON string: .. 
code-block:: console openstack baremetal deploy template create \ CUSTOM_HYPERTHREADING_ON \ --steps '[{"interface": "bios", "step": "apply_configuration", "args": {"settings": [{"name": "LogicalProc", "value": "Enabled"}]}, "priority": 150}]' Or with a file: .. code-block:: console openstack baremetal deploy template create \ CUSTOM_HYPERTHREADING_ON \ --steps my-deploy-steps.txt Or with stdin: .. code-block:: console cat my-deploy-steps.txt | openstack baremetal deploy template create \ CUSTOM_HYPERTHREADING_ON \ --steps - Example of use with the Compute service --------------------------------------- .. note:: The deploy steps used in this example are for example purposes only. In the following example, we first add the trait ``CUSTOM_HYPERTHREADING_ON`` to the node represented by ``$node_ident``: .. code-block:: console openstack baremetal node add trait $node_ident CUSTOM_HYPERTHREADING_ON We also update the flavor ``bm-hyperthreading-on`` in the Compute service with the following property: .. code-block:: console openstack flavor set --property trait:CUSTOM_HYPERTHREADING_ON=required bm-hyperthreading-on Creating a Compute instance with this flavor will ensure that the instance is scheduled only to Bare Metal nodes with the ``CUSTOM_HYPERTHREADING_ON`` trait. We could then create a Bare Metal deploy template with the name ``CUSTOM_HYPERTHREADING_ON`` and a deploy step that enables Hyperthreading: .. code-block:: json { "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "priority": 150 } ] } When an instance is created using the ``bm-hyperthreading-on`` flavor, then the deploy steps of deploy template ``CUSTOM_HYPERTHREADING_ON`` will be executed during the deployment of the scheduled node, causing Hyperthreading to be enabled in the node's BIOS configuration. 
To make this example more dynamic, let's add a second trait ``CUSTOM_HYPERTHREADING_OFF`` to the node: .. code-block:: console openstack baremetal node add trait $node_ident CUSTOM_HYPERTHREADING_OFF We could also update a second flavor, ``bm-hyperthreading-off``, with the following property: .. code-block:: console openstack flavor set --property trait:CUSTOM_HYPERTHREADING_OFF=required bm-hyperthreading-off Finally, we create a deploy template with the name ``CUSTOM_HYPERTHREADING_OFF`` and a deploy step that disables Hyperthreading: .. code-block:: json { "name": "CUSTOM_HYPERTHREADING_OFF", "steps": [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [ { "name": "LogicalProc", "value": "Disabled" } ] }, "priority": 150 } ] } Creating a Compute instance with the ``bm-hyperthreading-off`` flavor will cause the scheduled node to have Hyperthreading disabled in the BIOS during deployment. We now have a way to create Compute instances with different configurations, by choosing between different Compute flavors, supported by a single Bare Metal node that is dynamically configured during deployment. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/node-multitenancy.rst0000644000175000017500000001302400000000000023702 0ustar00coreycorey00000000000000================== Node Multi-Tenancy ================== This guide explains the steps needed to enable node multi-tenancy. This feature enables non-admins to perform API actions on nodes, limited by policy configuration. 
The Bare Metal service supports two kinds of non-admin users: * Owner: owns specific nodes and performs administrative actions on them * Lessee: receives temporary and limited access to a node Setting the Owner and Lessee ============================ Non-administrative access to a node is controlled through a node's ``owner`` or ``lessee`` attribute:: openstack baremetal node set --owner 080925ee2f464a2c9dce91ee6ea354e2 node-7 openstack baremetal node set --lessee 2a210e5ff114c8f2b6e994218f51a904 node-10 Configuring the Bare Metal Service Policy ========================================= By default, the Bare Metal service policy is configured so that a node owner or lessee has no access to any node APIs. However, the policy :doc:`policy file ` contains rules that can be used to enable node API access:: # Owner of node #"is_node_owner": "project_id:%(node.owner)s" # Lessee of node #"is_node_lessee": "project_id:%(node.lessee)s" An administrator can then modify the policy file to expose individual node APIs as follows:: # Change Node provision status # PUT /nodes/{node_ident}/states/provision #"baremetal:node:set_provision_state": "rule:is_admin" "baremetal:node:set_provision_state": "rule:is_admin or rule:is_node_owner or rule:is_node_lessee" # Update Node records # PATCH /nodes/{node_ident} #"baremetal:node:update": "rule:is_admin or rule:is_node_owner" In addition, it is safe to expose the ``baremetal:node:list`` rule, as the node list function now filters non-admins by owner and lessee:: # Retrieve multiple Node records, filtered by owner # GET /nodes # GET /nodes/detail #"baremetal:node:list": "rule:baremetal:node:get" "baremetal:node:list": "" Note that ``baremetal:node:list_all`` permits users to see all nodes regardless of owner/lessee, so it should remain restricted to admins. 
Ports ----- Port APIs can be similarly exposed to node owners and lessees:: # Retrieve Port records # GET /ports/{port_id} # GET /nodes/{node_ident}/ports # GET /nodes/{node_ident}/ports/detail # GET /portgroups/{portgroup_ident}/ports # GET /portgroups/{portgroup_ident}/ports/detail #"baremetal:port:get": "rule:is_admin or rule:is_observer" "baremetal:port:get": "rule:is_admin or rule:is_observer or rule:is_node_owner or rule:is_node_lessee" # Retrieve multiple Port records, filtered by owner # GET /ports # GET /ports/detail #"baremetal:port:list": "rule:baremetal:port:get" "baremetal:port:list": "" Allocations ----------- Allocations respect node tenancy as well. A restricted allocation creates an allocation tied to a project, and that can only match nodes where that project is the owner or lessee. Here is a sample set of allocation policy rules that allow non-admins to use allocations effectively:: # Retrieve Allocation records # GET /allocations/{allocation_id} # GET /nodes/{node_ident}/allocation #"baremetal:allocation:get": "rule:is_admin or rule:is_observer" "baremetal:allocation:get": "rule:is_admin or rule:is_observer or rule:is_allocation_owner" # Retrieve multiple Allocation records, filtered by owner # GET /allocations #"baremetal:allocation:list": "rule:baremetal:allocation:get" "baremetal:allocation:list": "" # Retrieve multiple Allocation records # GET /allocations #"baremetal:allocation:list_all": "rule:baremetal:allocation:get" # Create Allocation records # POST /allocations #"baremetal:allocation:create": "rule:is_admin" # Create Allocation records that are restricted to an owner # POST /allocations #"baremetal:allocation:create_restricted": "rule:baremetal:allocation:create" "baremetal:allocation:create_restricted": "" # Delete Allocation records # DELETE /allocations/{allocation_id} # DELETE /nodes/{node_ident}/allocation #"baremetal:allocation:delete": "rule:is_admin" "baremetal:allocation:delete": "rule:is_admin or rule:is_allocation_owner" # 
Change name and extra fields of an allocation # PATCH /allocations/{allocation_id} #"baremetal:allocation:update": "rule:is_admin" "baremetal:allocation:update": "rule:is_admin or rule:is_allocation_owner" Deployment and Metalsmith ------------------------- Provisioning a node requires a specific set of APIs to be made available. The following policy specifications are enough to allow a node owner to use :metalsmith-doc:`Metalsmith ` to deploy upon a node:: "baremetal:node:get": "rule:is_admin or rule:is_observer or rule:is_node_owner" "baremetal:node:list": "" "baremetal:node:update_extra": "rule:is_admin or rule:is_node_owner" "baremetal:node:update_instance_info": "rule:is_admin or rule:is_node_owner" "baremetal:node:validate": "rule:is_admin or rule:is_node_owner" "baremetal:node:set_provision_state": "rule:is_admin or rule:is_node_owner" "baremetal:node:vif:list": "rule:is_admin or rule:is_node_owner" "baremetal:node:vif:attach": "rule:is_admin or rule:is_node_owner" "baremetal:node:vif:detach": "rule:is_admin or rule:is_node_owner" "baremetal:allocation:get": "rule:is_admin or rule:is_observer or rule:is_allocation_owner" "baremetal:allocation:list": "" "baremetal:allocation:create_restricted": "" "baremetal:allocation:delete": "rule:is_admin or rule:is_allocation_owner" "baremetal:allocation:update": "rule:is_admin or rule:is_allocation_owner" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/notifications.rst0000644000175000017500000007005400000000000023122 0ustar00coreycorey00000000000000.. _deploy-notifications: ============= Notifications ============= Ironic, when configured to do so, will emit notifications over a message bus that indicate different events that occur within the service. These can be consumed by any external service. Examples may include a billing or usage system, a monitoring data store, or other OpenStack services. 
This page describes how to enable notifications and the different kinds of notifications that ironic may emit. The external consumer will see notifications emitted by ironic as JSON objects structured in the following manner:: { "priority": , "event_type": , "timestamp": , "publisher_id": , "message_id": , "payload": } Configuration ============= To enable notifications with ironic, there are two configuration options in ironic.conf that must be adjusted. The first option is the ``notification_level`` option in the ``[DEFAULT]`` section of the configuration file. This can be set to "debug", "info", "warning", "error", or "critical", and determines the minimum priority level for which notifications are emitted. For example, if the option is set to "warning", all notifications with priority level "warning", "error", or "critical" are emitted, but not notifications with priority level "debug" or "info". For information about the semantics of each log level, see the OpenStack logging standards [1]_. If this option is unset, no notifications will be emitted. The priority level of each available notification is documented below. The second option is the ``transport_url`` option in the ``[oslo_messaging_notifications]`` section of the configuration. This determines the message bus used when sending notifications. If this is unset, the default transport used for RPC is used. All notifications are emitted on the "ironic_versioned_notifications" topic in the message bus. Generally, each type of message that traverses the message bus is associated with a topic describing what the message is about. For more information, see the documentation of your chosen message bus, such as the RabbitMQ documentation [2]_. Note that notifications may be lossy, and there's no guarantee that a notification will make it across the message bus to a consumer. Versioning ========== Each notification has an associated version in the "ironic_object.version" field of the payload. 
Consumers are guaranteed that microversion bumps will add new fields, while macroversion bumps are backwards-incompatible and may have fields removed. Versioned notifications are emitted by default to the `ironic_versioned_notifications` topic. This can be changed and it is configurable in the ironic.conf with the `versioned_notifications_topics` config option. Available notifications ======================= .. TODO(mariojv) Add some form of tabular formatting below The notifications that ironic emits are described here. They are listed (alphabetically) by service first, then by event_type. All examples below show payloads before serialization to JSON. ------------------------ ironic-api notifications ------------------------ Resources CRUD notifications ---------------------------- These notifications are emitted from API service when ironic resources are modified as part of create, update, or delete (CRUD) [3]_ procedures. All CRUD notifications are emitted at INFO level, except for "error" status that is emitted at ERROR level. 
List of CRUD notifications for chassis: * ``baremetal.chassis.create.start`` * ``baremetal.chassis.create.end`` * ``baremetal.chassis.create.error`` * ``baremetal.chassis.update.start`` * ``baremetal.chassis.update.end`` * ``baremetal.chassis.update.error`` * ``baremetal.chassis.delete.start`` * ``baremetal.chassis.delete.end`` * ``baremetal.chassis.delete.error`` Example of chassis CRUD notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"ChassisCRUDPayload", "ironic_object.version":"1.0", "ironic_object.data":{ "created_at": "2016-04-10T10:13:03+00:00", "description": "bare 28", "extra": {}, "updated_at": "2016-04-27T21:11:03+00:00", "uuid": "1910f669-ce8b-43c2-b1d8-cf3d65be815e" } }, "event_type":"baremetal.chassis.update.end", "publisher_id":"ironic-api.hostname02" } List of CRUD notifications for deploy template: * ``baremetal.deploy_template.create.start`` * ``baremetal.deploy_template.create.end`` * ``baremetal.deploy_template.create.error`` * ``baremetal.deploy_template.update.start`` * ``baremetal.deploy_template.update.end`` * ``baremetal.deploy_template.update.error`` * ``baremetal.deploy_template.delete.start`` * ``baremetal.deploy_template.delete.end`` * ``baremetal.deploy_template.delete.error`` Example of deploy template CRUD notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"DeployTemplateCRUDPayload", "ironic_object.version":"1.0", "ironic_object.data":{ "created_at": "2019-02-10T10:13:03+00:00", "extra": {}, "name": "CUSTOM_HYPERTHREADING_ON", "steps": [ { "interface": "bios", "step": "apply_configuration", "args": { "settings": [ { "name": "LogicalProc", "value": "Enabled" } ] }, "priority": 150 } ], "updated_at": "2019-02-27T21:11:03+00:00", "uuid": "1910f669-ce8b-43c2-b1d8-cf3d65be815e" } }, "event_type":"baremetal.deploy_template.update.end", "publisher_id":"ironic-api.hostname02" } List of CRUD notifications for node: * 
``baremetal.node.create.start`` * ``baremetal.node.create.end`` * ``baremetal.node.create.error`` * ``baremetal.node.update.start`` * ``baremetal.node.update.end`` * ``baremetal.node.update.error`` * ``baremetal.node.delete.start`` * ``baremetal.node.delete.end`` * ``baremetal.node.delete.error`` Example of node CRUD notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"NodeCRUDPayload", "ironic_object.version":"1.8", "ironic_object.data":{ "chassis_uuid": "db0eef9d-45b2-4dc0-94a8-fc283c01171f", "clean_step": None, "conductor_group": "", "console_enabled": False, "created_at": "2016-01-26T20:41:03+00:00", "deploy_step": None, "driver": "ipmi", "driver_info": { "ipmi_address": "192.168.0.111", "ipmi_username": "root"}, "extra": {}, "inspection_finished_at": None, "inspection_started_at": None, "instance_info": {}, "instance_uuid": None, "last_error": None, "maintenance": False, "maintenance_reason": None, "fault": None, "boot_interface": "pxe", "console_interface": "no-console", "deploy_interface": "iscsi", "inspect_interface": "no-inspect", "management_interface": "ipmitool", "network_interface": "flat", "power_interface": "ipmitool", "raid_interface": "no-raid", "rescue_interface": "no-rescue", "storage_interface": "noop", "vendor_interface": "no-vendor", "name": None, "power_state": "power off", "properties": { "memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10, "cpus": 8}, "provision_state": "deploying", "provision_updated_at": "2016-01-27T20:41:03+00:00", "resource_class": None, "target_power_state": None, "target_provision_state": "active", "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX"], "updated_at": "2016-01-27T20:41:03+00:00", "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123" } }, "event_type":"baremetal.node.update.end", "publisher_id":"ironic-api.hostname02" } List of CRUD notifications for port: * ``baremetal.port.create.start`` * ``baremetal.port.create.end`` * ``baremetal.port.create.error`` * 
``baremetal.port.update.start`` * ``baremetal.port.update.end`` * ``baremetal.port.update.error`` * ``baremetal.port.delete.start`` * ``baremetal.port.delete.end`` * ``baremetal.port.delete.error`` Example of port CRUD notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"PortCRUDPayload", "ironic_object.version":"1.3", "ironic_object.data":{ "address": "77:66:23:34:11:b7", "created_at": "2016-02-11T15:23:03+00:00", "node_uuid": "5b236cab-ad4e-4220-b57c-e827e858745a", "extra": {}, "is_smartnic": True, "local_link_connection": {}, "physical_network": "physnet1", "portgroup_uuid": "bd2f385e-c51c-4752-82d1-7a9ec2c25f24", "pxe_enabled": True, "updated_at": "2016-03-27T20:41:03+00:00", "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123" } }, "event_type":"baremetal.port.update.end", "publisher_id":"ironic-api.hostname02" } List of CRUD notifications for port group: * ``baremetal.portgroup.create.start`` * ``baremetal.portgroup.create.end`` * ``baremetal.portgroup.create.error`` * ``baremetal.portgroup.update.start`` * ``baremetal.portgroup.update.end`` * ``baremetal.portgroup.update.error`` * ``baremetal.portgroup.delete.start`` * ``baremetal.portgroup.delete.end`` * ``baremetal.portgroup.delete.error`` Example of portgroup CRUD notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"PortgroupCRUDPayload", "ironic_object.version":"1.0", "ironic_object.data":{ "address": "11:44:32:87:61:e5", "created_at": "2017-01-11T11:33:03+00:00", "node_uuid": "5b236cab-ad4e-4220-b57c-e827e858745a", "extra": {}, "mode": "7", "name": "portgroup-node-18", "properties": {}, "standalone_ports_supported": True, "updated_at": "2017-01-31T11:41:07+00:00", "uuid": "db033a40-bfed-4c84-815a-3db26bb268bb", } }, "event_type":"baremetal.portgroup.update.end", "publisher_id":"ironic-api.hostname02" } List of CRUD notifications for volume connector: * ``baremetal.volumeconnector.create.start`` * 
``baremetal.volumeconnector.create.end`` * ``baremetal.volumeconnector.create.error`` * ``baremetal.volumeconnector.update.start`` * ``baremetal.volumeconnector.update.end`` * ``baremetal.volumeconnector.update.error`` * ``baremetal.volumeconnector.delete.start`` * ``baremetal.volumeconnector.delete.end`` * ``baremetal.volumeconnector.delete.error`` Example of volume connector CRUD notification:: { "priority": "info", "payload": { "ironic_object.namespace": "ironic", "ironic_object.name": "VolumeConnectorCRUDPayload", "ironic_object.version": "1.0", "ironic_object.data": { "connector_id": "iqn.2017-05.org.openstack:01:d9a51732c3f", "created_at": "2017-05-11T05:57:36+00:00", "extra": {}, "node_uuid": "4dbb4e69-99a8-4e13-b6e8-dd2ad4a20caf", "type": "iqn", "updated_at": "2017-05-11T08:28:58+00:00", "uuid": "19b9f3ab-4754-4725-a7a4-c43ea7e57360" } }, "event_type": "baremetal.volumeconnector.update.end", "publisher_id":"ironic-api.hostname02" } List of CRUD notifications for volume target: * ``baremetal.volumetarget.create.start`` * ``baremetal.volumetarget.create.end`` * ``baremetal.volumetarget.create.error`` * ``baremetal.volumetarget.update.start`` * ``baremetal.volumetarget.update.end`` * ``baremetal.volumetarget.update.error`` * ``baremetal.volumetarget.delete.start`` * ``baremetal.volumetarget.delete.end`` * ``baremetal.volumetarget.delete.error`` Example of volume target CRUD notification:: { "priority": "info", "payload": { "ironic_object.namespace": "ironic", "ironic_object.version": "1.0", "ironic_object.name": "VolumeTargetCRUDPayload", "ironic_object.data": { "boot_index": 0, "created_at": "2017-05-11T09:38:59+00:00", "extra": {}, "node_uuid": "4dbb4e69-99a8-4e13-b6e8-dd2ad4a20caf", "properties": { "access_mode": "rw", "auth_method": "CHAP", "auth_password": "***", "auth_username": "urxhQCzAKr4sjyE8DivY", "encrypted": false, "qos_specs": null, "target_discovered": false, "target_iqn": "iqn.2010-10.org.openstack:volume-f0d9b0e6-b242-9105-91d4-a20331693ad8", 
"target_lun": 1, "target_portal": "192.168.12.34:3260", "volume_id": "f0d9b0e6-b042-4105-91d4-a20331693ad8", }, "updated_at": "2017-05-11T09:52:04+00:00", "uuid": "82a45833-9c58-4ec1-943c-2091ab10e47b", "volume_id": "f0d9b0e6-b242-9105-91d4-a20331693ad8", "volume_type": "iscsi" } }, "event_type": "baremetal.volumetarget.update.end", "publisher_id":"ironic-api.hostname02" } Node maintenance notifications ------------------------------ These notifications are emitted from API service when maintenance mode is changed via API service. List of maintenance notifications for a node: * ``baremetal.node.maintenance_set.start`` * ``baremetal.node.maintenance_set.end`` * ``baremetal.node.maintenance_set.error`` "start" and "end" notifications have INFO level, "error" has ERROR. Example of node maintenance notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"NodePayload", "ironic_object.version":"1.10", "ironic_object.data":{ "clean_step": None, "conductor_group": "", "console_enabled": False, "created_at": "2016-01-26T20:41:03+00:00", "driver": "ipmi", "extra": {}, "inspection_finished_at": None, "inspection_started_at": None, "instance_info": {}, "instance_uuid": None, "last_error": None, "maintenance": True, "maintenance_reason": "hw upgrade", "fault": None, "bios_interface": "no-bios", "boot_interface": "pxe", "console_interface": "no-console", "deploy_interface": "iscsi", "inspect_interface": "no-inspect", "management_interface": "ipmitool", "network_interface": "flat", "power_interface": "ipmitool", "raid_interface": "no-raid", "rescue_interface": "no-rescue", "storage_interface": "noop", "vendor_interface": "no-vendor", "name": None, "power_state": "power off", "properties": { "memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10, "cpus": 8}, "provision_state": "available", "provision_updated_at": "2016-01-27T20:41:03+00:00", "resource_class": None, "target_power_state": None, "target_provision_state": None, 
"traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX"], "updated_at": "2016-01-27T20:41:03+00:00", "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123" } }, "event_type":"baremetal.node.maintenance_set.start", "publisher_id":"ironic-api.hostname02" } ------------------------------ ironic-conductor notifications ------------------------------ Node console notifications ------------------------------ These notifications are emitted by the ironic-conductor service when conductor service starts or stops console for the node. The notification event types for a node console are: * ``baremetal.node.console_set.start`` * ``baremetal.node.console_set.end`` * ``baremetal.node.console_set.error`` * ``baremetal.node.console_restore.start`` * ``baremetal.node.console_restore.end`` * ``baremetal.node.console_restore.error`` ``console_set`` action is used when start or stop console is initiated. The ``console_restore`` action is used when the console was already enabled, but a driver must restart the console because an ironic-conductor was restarted. This may also be sent when an ironic-conductor takes over a node that was being managed by another ironic-conductor. "start" and "end" notifications have INFO level, "error" has ERROR. 
Example of node console notification:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"NodePayload", "ironic_object.version":"1.10", "ironic_object.data":{ "clean_step": None, "conductor_group": "", "console_enabled": True, "created_at": "2016-01-26T20:41:03+00:00", "driver": "ipmi", "extra": {}, "inspection_finished_at": None, "inspection_started_at": None, "instance_info": {}, "instance_uuid": None, "last_error": None, "maintenance": False, "maintenance_reason": None, "fault": None, "bios_interface": "no-bios", "boot_interface": "pxe", "console_interface": "no-console", "deploy_interface": "iscsi", "inspect_interface": "no-inspect", "management_interface": "ipmitool", "network_interface": "flat", "power_interface": "ipmitool", "raid_interface": "no-raid", "rescue_interface": "no-rescue", "storage_interface": "noop", "vendor_interface": "no-vendor", "name": None, "power_state": "power off", "properties": { "memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10, "cpus": 8}, "provision_state": "available", "provision_updated_at": "2016-01-27T20:41:03+00:00", "resource_class": None, "target_power_state": None, "target_provision_state": None, "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX"], "updated_at": "2016-01-27T20:41:03+00:00", "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123" } }, "event_type":"baremetal.node.console_set.end", "publisher_id":"ironic-conductor.hostname01" } baremetal.node.power_set ------------------------ * ``baremetal.node.power_set.start`` is emitted by the ironic-conductor service when it begins a power state change. It has notification level "info". * ``baremetal.node.power_set.end`` is emitted when ironic-conductor successfully completes a power state change task. It has notification level "info". * ``baremetal.node.power_set.error`` is emitted by ironic-conductor when it fails to set a node's power state. It has notification level "error". 
This can occur when ironic fails to retrieve the old power state prior to setting the new one on the node, or when it fails to set the power state if a change is requested. Here is an example payload for a notification with this event type. The "to_power" payload field indicates the power state to which the ironic-conductor is attempting to change the node:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"NodeSetPowerStatePayload", "ironic_object.version":"1.10", "ironic_object.data":{ "clean_step": None, "conductor_group": "", "console_enabled": False, "created_at": "2016-01-26T20:41:03+00:00", "deploy_step": None, "driver": "ipmi", "extra": {}, "inspection_finished_at": None, "inspection_started_at": None, "instance_uuid": "d6ea00c1-1f94-4e95-90b3-3462d7031678", "last_error": None, "maintenance": False, "maintenance_reason": None, "fault": None, "boot_interface": "pxe", "console_interface": "no-console", "deploy_interface": "iscsi", "inspect_interface": "no-inspect", "management_interface": "ipmitool", "network_interface": "flat", "power_interface": "ipmitool", "raid_interface": "no-raid", "rescue_interface": "no-rescue", "storage_interface": "noop", "vendor_interface": "no-vendor", "name": None, "power_state": "power off", "properties": { "memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10, "cpus": 8}, "provision_state": "available", "provision_updated_at": "2016-01-27T20:41:03+00:00", "resource_class": None, "target_power_state": None, "target_provision_state": None, "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX"], "updated_at": "2016-01-27T20:41:03+00:00", "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123", "to_power": "power on" } }, "event_type":"baremetal.node.power_set.start", "publisher_id":"ironic-conductor.hostname01" } baremetal.node.power_state_corrected ------------------------------------ * ``baremetal.node.power_state_corrected.success`` is emitted by ironic-conductor when the power state on the baremetal 
hardware is different from the previous known power state of the node and the database is corrected to reflect this new power state. It has notification level "info". Here is an example payload for a notification with this event_type. The "from_power" payload field indicates the previous power state on the node, prior to the correction:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"NodeCorrectedPowerStatePayload", "ironic_object.version":"1.10", "ironic_object.data":{ "clean_step": None, "conductor_group": "", "console_enabled": False, "created_at": "2016-01-26T20:41:03+00:00", "deploy_step": None, "driver": "ipmi", "extra": {}, "inspection_finished_at": None, "inspection_started_at": None, "instance_uuid": "d6ea00c1-1f94-4e95-90b3-3462d7031678", "last_error": None, "maintenance": False, "maintenance_reason": None, "fault": None, "boot_interface": "pxe", "console_interface": "no-console", "deploy_interface": "iscsi", "inspect_interface": "no-inspect", "management_interface": "ipmitool", "network_interface": "flat", "power_interface": "ipmitool", "raid_interface": "no-raid", "rescue_interface": "no-rescue", "storage_interface": "noop", "vendor_interface": "no-vendor", "name": None, "power_state": "power off", "properties": { "memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10, "cpus": 8}, "provision_state": "available", "provision_updated_at": "2016-01-27T20:41:03+00:00", "resource_class": None, "target_power_state": None, "target_provision_state": None, "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX"], "updated_at": "2016-01-27T20:41:03+00:00", "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123", "from_power": "power on" } }, "event_type":"baremetal.node.power_state_corrected.success", "publisher_id":"ironic-conductor.cond-hostname02" } baremetal.node.provision_set ---------------------------- * ``baremetal.node.provision_set.start`` is emitted by the ironic-conductor service when it begins a provision state transition. 
It has notification level INFO. * ``baremetal.node.provision_set.end`` is emitted when ironic-conductor successfully completes a provision state transition. It has notification level INFO. * ``baremetal.node.provision_set.success`` is emitted when ironic-conductor successfully changes provision state instantly, without any intermediate work required (example is AVAILABLE to MANAGEABLE). It has notification level INFO. * ``baremetal.node.provision_set.error`` is emitted by ironic-conductor when it changes provision state as result of error event processing. It has notification level ERROR. Here is an example payload for a notification with this event type. The "previous_provision_state" and "previous_target_provision_state" payload fields indicate a node's provision states before state change, "event" is the FSM (finite state machine) event that triggered the state change:: { "priority": "info", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"NodeSetProvisionStatePayload", "ironic_object.version":"1.10", "ironic_object.data":{ "clean_step": None, "conductor_group": "", "console_enabled": False, "created_at": "2016-01-26T20:41:03+00:00", "deploy_step": None, "driver": "ipmi", "extra": {}, "inspection_finished_at": None, "inspection_started_at": None, "instance_info": {}, "instance_uuid": None, "last_error": None, "maintenance": False, "maintenance_reason": None, "fault": None, "boot_interface": "pxe", "console_interface": "no-console", "deploy_interface": "iscsi", "inspect_interface": "no-inspect", "management_interface": "ipmitool", "network_interface": "flat", "power_interface": "ipmitool", "raid_interface": "no-raid", "rescue_interface": "no-rescue", "storage_interface": "noop", "vendor_interface": "no-vendor", "name": None, "power_state": "power off", "properties": { "memory_mb": 4096, "cpu_arch": "x86_64", "local_gb": 10, "cpus": 8}, "provision_state": "deploying", "provision_updated_at": "2016-01-27T20:41:03+00:00", "resource_class": None, 
"target_power_state": None, "target_provision_state": "active", "traits": [ "CUSTOM_TRAIT1", "HW_CPU_X86_VMX"], "updated_at": "2016-01-27T20:41:03+00:00", "uuid": "1be26c0b-03f2-4d2e-ae87-c02d7f33c123", "previous_provision_state": "available", "previous_target_provision_state": None, "event": "deploy" } }, "event_type":"baremetal.node.provision_set.start", "publisher_id":"ironic-conductor.hostname01" } .. [1] https://wiki.openstack.org/wiki/LoggingStandards#Log_level_definitions .. [2] https://www.rabbitmq.com/documentation.html .. [3] https://en.wikipedia.org/wiki/Create,_read,_update_and_delete ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/portgroups.rst0000644000175000017500000001406500000000000022475 0ustar00coreycorey00000000000000=================== Port groups support =================== The Bare Metal service supports static configuration of port groups (bonds) in the instances via configdrive. See `kernel documentation on bonding`_ to see why it may be useful and how it is setup in linux. The sections below describe how to make use of them in the Bare Metal service. Switch-side configuration ------------------------- If port groups are desired in the ironic deployment, they need to be configured on the switches. It needs to be done manually, and the mode and properties configured on the switch have to correspond to the mode and properties that will be configured on the ironic side, as bonding mode and properties may be named differently on your switch, or have possible values different from the ones described in `kernel documentation on bonding`_. Please refer to your switch configuration documentation for more details. Provisioning and cleaning cannot make use of port groups if they need to boot the deployment ramdisk via (i)PXE. 
If your switches or desired port group configuration do not support port group fallback, which will allow port group members to be used by themselves, you need to set port group's ``standalone_ports_supported`` value to be ``False`` in ironic, as it is ``True`` by default. Physical networks ----------------- If any port in a port group has a physical network, then all ports in that port group must have the same physical network. In order to change the physical network of the ports in a port group, all ports must first be removed from the port group, before changing their physical networks (to the same value), then adding them back to the port group. See :ref:`physical networks ` for further information on using physical networks in the Bare Metal service. Port groups configuration in the Bare Metal service --------------------------------------------------- Port group configuration is supported in ironic API microversions 1.26, the CLI commands below specify it for completeness. #. When creating a port group, the node to which it belongs must be specified, along with, optionally, its name, address, mode, properties, and if it supports fallback to standalone ports:: openstack --os-baremetal-api-version 1.26 baremetal port group create \ --node $NODE_UUID --name test --address fa:ab:25:48:fd:ba --mode 802.3ad \ --property miimon=100 --property xmit_hash_policy="layer2+3" \ --support-standalone-ports A port group can also be updated with ``openstack baremetal port group set`` command, see its help for more details. If an address is not specified, the port group address on the deployed instance will be the same as the address of the neutron port that is attached to the port group. If the neutron port is not attached, the port group will not be configured. .. note:: In standalone mode, port groups have to be configured manually. It can be done either statically inside the image, or by generating the configdrive and adding it to the node's ``instance_info``. 
For more information on how to configure bonding via configdrive, refer to `cloud-init documentation `_ and `code `_. cloud-init version 0.7.7 or later is required for bonding configuration to work. If the port group's address is not explicitly set in standalone mode, it will be set automatically by the process described in `kernel documentation on bonding`_. During interface attachment, port groups have higher priority than ports, so they will be used first. (It is not yet possible to specify which one is desired, a port group or a port, in an interface attachment request). Port groups that don't have any ports will be ignored. The mode and properties values are described in the `kernel documentation on bonding`_. The default port group mode is ``active-backup``, and this default can be changed by setting the ``[DEFAULT]default_portgroup_mode`` configuration option in the ironic API service configuration file. #. Associate ports with the created port group. It can be done on port creation:: openstack --os-baremetal-api-version 1.26 baremetal port create \ --node $NODE_UUID --address fa:ab:25:48:fd:ba --port-group test Or by updating an existing port:: openstack --os-baremetal-api-version 1.26 baremetal port set \ $PORT_UUID --port-group $PORT_GROUP_UUID When updating a port, the node associated with the port has to be in ``enroll``, ``manageable``, or ``inspecting`` states. A port group can have the same or different address as individual ports. #. Boot an instance (or node directly, in case of using standalone ironic) providing an image that has cloud-init version 0.7.7 or later and supports bonding. When the deployment is done, you can check that the port group is set up properly by running the following command in the instance:: cat /proc/net/bonding/bondX where ``X`` is a number autogenerated by cloud-init for each configured port group, in no particular order. It starts with 0 and increments by 1 for every configured port group. .. 
_`kernel documentation on bonding`: https://www.kernel.org/doc/Documentation/networking/bonding.txt Link aggregation/teaming on windows ----------------------------------- Portgroups are supported for Windows Server images, which can created by :ref:`building_image_windows` instruction. You can customise an instance after it is launched along with `script file `_ in ``Configuration`` of ``Instance`` and selected ``Configuration Drive`` option. Then ironic virt driver will generate network metadata and add all the additional information, such as bond mode, transmit hash policy, MII link monitoring interval, and of which links the bond consists. The information in InstanceMetadata will be used afterwards to generate the config drive. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/power-sync.rst0000644000175000017500000001014600000000000022353 0ustar00coreycorey00000000000000=================================== Power Sync with the Compute Service =================================== Baremetal Power Sync ==================== Each Baremetal conductor process runs a periodic task which synchronizes the power state of the nodes between its database and the actual hardware. If the value of the :oslo.config:option:`conductor.force_power_state_during_sync` option is set to ``true`` the power state in the database will be forced on the hardware and if it is set to ``false`` the hardware state will be forced on the database. If this periodic task is enabled, it runs at an interval defined by the :oslo.config:option:`conductor.sync_power_state_interval` config option for those nodes which are not in maintenance. Compute-Baremetal Power Sync ============================ Each ``nova-compute`` process in the Compute service runs a periodic task which synchronizes the power state of servers between its database and the compute driver. 
If enabled, it runs at an interval defined by the `sync_power_state_interval` config option on the ``nova-compute`` process. In case of the compute driver being baremetal driver, this sync will happen between the databases of the compute and baremetal services. Since the sync happens on the ``nova-compute`` process, the state in the compute database will be forced on the baremetal database in case of inconsistencies. Hence a node which was put down using the compute service API cannot be brought up through the baremetal service API since the power sync task will regard the compute service's knowledge of the power state as the source of truth. In order to get around this disadvantage of the compute-baremetal power sync, baremetal service does power state change callbacks to the compute service using external events. Power State Change Callbacks to the Compute Service --------------------------------------------------- Whenever the Baremetal service changes the power state of a node, it can issue a notification to the Compute service. The Compute service will consume this notification and update the power state of the instance in its database. By conveying all the power state changes to the compute service, the baremetal service becomes the source of truth thus preventing the compute service from forcing wrong power states on the physical instance during the compute-baremetal power sync. It also adds the possibility of bringing up/down a physical instance through the baremetal service API even if it was put down/up through the compute service API. This change requires the :oslo.config:group:`nova` section and the necessary authentication options like the :oslo.config:option:`nova.auth_url` to be defined in the configuration file of the baremetal service. 
If it is not configured the baremetal service will not be able to send notifications to the compute service and it will fall back to the behaviour of the compute service forcing power states on the baremetal service during the power sync. See :oslo.config:group:`nova` group for more details on the available config options. In case of baremetal stand alone deployments where there is no compute service running, the :oslo.config:option:`nova.send_power_notifications` config option should be set to ``False`` to disable power state change callbacks to the compute service. .. note:: The baremetal service sends notifications to the compute service only if the target power state is ``power on`` or ``power off``. Other error and ``None`` states will be ignored. In situations where the power state change is originally coming from the compute service, the notification will still be sent by the baremetal service and it will be a no-op on the compute service side with a debug log stating the node is already powering on/off. .. note:: Although an exclusive lock is used when sending notifications to the compute service, there can still be a race condition if the compute-baremetal power sync happens to happen a nano-second before the power state change event is received from the baremetal service in which case the power state from compute service's database will be forced on the node. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/radosgw.rst0000644000175000017500000000447600000000000021724 0ustar00coreycorey00000000000000.. _radosgw support: =========================== Ceph Object Gateway support =========================== Overview ======== Ceph project is a powerful distributed storage system. It contains object store and provides a RADOS Gateway Swift API which is compatible with OpenStack Swift API. Ironic added support for RADOS Gateway temporary URL in the Mitaka release. 
Configure Ironic and Glance with RADOS Gateway ============================================== #. Install Ceph storage with RADOS Gateway. See `Ceph documentation `_. #. Configure RADOS Gateway to use keystone for authentication. See `Integrating with OpenStack Keystone `_ #. Register RADOS Gateway endpoint in the keystone catalog, with the same format swift uses, as the ``object-store`` service. URL example: ``http://rados.example.com:8080/swift/v1/AUTH_$(project_id)s``. In the ceph configuration, make sure radosgw is configured with the following value:: rgw swift account in url = True #. Configure Glance API service for RADOS Swift API as backend. Edit the configuration file for the Glance API service (is typically located at ``/etc/glance/glance-api.conf``):: [glance_store] stores = file, http, swift default_store = swift default_swift_reference=ref1 swift_store_config_file=/etc/glance/glance-swift-creds.conf swift_store_container = glance swift_store_create_container_on_put = True In the file referenced in ``swift_store_config_file`` option, add the following:: [ref1] user = : key = user_domain_id = default project_domain_id = default auth_version = 3 auth_address = http://keystone.example.com/identity Values for user and key options correspond to keystone credentials for RADOS Gateway service user. Note: RADOS Gateway uses FastCGI protocol for interacting with HTTP server. Read your HTTP server documentation if you want to enable HTTPS support. #. Restart Glance API service and upload all needed images. #. If you're using custom container name in RADOS, change Ironic configuration file on the conductor host(s) as follows:: [glance] swift_container = glance #. Restart Ironic conductor service(s). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/raid.rst0000644000175000017500000003716500000000000021176 0ustar00coreycorey00000000000000.. 
_raid: ================== RAID Configuration ================== Overview ======== Ironic supports RAID configuration for bare metal nodes. It allows operators to specify the desired RAID configuration via the OpenStackClient CLI or REST API. The desired RAID configuration is applied on the bare metal during manual cleaning. The examples described here use the OpenStackClient CLI; please see the `REST API reference `_ for their corresponding REST API requests. Prerequisites ============= The bare metal node needs to use a hardware type that supports RAID configuration. RAID interfaces may implement RAID configuration either in-band or out-of-band. Software RAID is supported on all hardware, although with some caveats - see `Software RAID`_ for details. In-band RAID configuration (including software RAID) is done using the Ironic Python Agent ramdisk. For in-band hardware RAID configuration, a hardware manager which supports RAID should be bundled with the ramdisk. Whether a node supports RAID configuration could be found using the CLI command ``openstack baremetal node validate ``. In-band RAID is usually implemented by the ``agent`` RAID interface. Build agent ramdisk which supports RAID configuration ===================================================== For doing in-band hardware RAID configuration, Ironic needs an agent ramdisk bundled with a hardware manager which supports RAID configuration for your hardware. For example, the :ref:`DIB_raid_support` should be used for HPE Proliant Servers. .. note:: For in-band software RAID, the agent ramdisk does not need to be bundled with a hardware manager as the generic hardware manager in the Ironic Python Agent already provides (basic) support for software RAID. RAID configuration JSON format ============================== The desired RAID configuration and current RAID configuration are represented in JSON format. 
Target RAID configuration ------------------------- This is the desired RAID configuration on the bare metal node. Using the OpenStackClient CLI (or REST API), the operator sets ``target_raid_config`` field of the node. The target RAID configuration will be applied during manual cleaning. Target RAID configuration is a dictionary having ``logical_disks`` as the key. The value for the ``logical_disks`` is a list of JSON dictionaries. It looks like:: { "logical_disks": [ {}, {}, ... ] } If the ``target_raid_config`` is an empty dictionary, it unsets the value of ``target_raid_config`` if the value was set with previous RAID configuration done on the node. Each dictionary of logical disk contains the desired properties of logical disk supported by the hardware type. These properties are discoverable by:: openstack baremetal driver raid property list Mandatory properties ^^^^^^^^^^^^^^^^^^^^ These properties must be specified for each logical disk and have no default values: - ``size_gb`` - Size (Integer) of the logical disk to be created in GiB. ``MAX`` may be specified if the logical disk should use all of the remaining space available. This can be used only when backing physical disks are specified (see below). - ``raid_level`` - RAID level for the logical disk. Ironic supports the following RAID levels: 0, 1, 2, 5, 6, 1+0, 5+0, 6+0. Optional properties ^^^^^^^^^^^^^^^^^^^ These properties have default values and they may be overridden in the specification of any logical disk. None of these options are supported for software RAID. - ``volume_name`` - Name of the volume. Should be unique within the Node. If not specified, volume name will be auto-generated. - ``is_root_volume`` - Set to ``true`` if this is the root volume. At most one logical disk can have this set to ``true``; the other logical disks must have this set to ``false``. The ``root device hint`` will be saved, if the RAID interface is capable of retrieving it. This is ``false`` by default. 
Backing physical disk hints ^^^^^^^^^^^^^^^^^^^^^^^^^^^ These hints are specified for each logical disk to let Ironic find the desired disks for RAID configuration. This is machine-independent information. This serves the use-case where the operator doesn't want to provide individual details for each bare metal node. None of these options are supported for software RAID. - ``share_physical_disks`` - Set to ``true`` if this logical disk can share physical disks with other logical disks. The default value is ``false``, except for software RAID which always shares disks. - ``disk_type`` - ``hdd`` or ``ssd``. If this is not specified, disk type will not be a criterion to find backing physical disks. - ``interface_type`` - ``sata`` or ``scsi`` or ``sas``. If this is not specified, interface type will not be a criterion to find backing physical disks. - ``number_of_physical_disks`` - Integer, number of disks to use for the logical disk. Defaults to minimum number of disks required for the particular RAID level, except for software RAID which always spans all disks. Backing physical disks ^^^^^^^^^^^^^^^^^^^^^^ These are the actual machine-dependent information. This is suitable for environments where the operator wants to automate the selection of physical disks with a 3rd-party tool based on a wider range of attributes (eg. S.M.A.R.T. status, physical location). The values for these properties are hardware dependent. - ``controller`` - The name of the controller as read by the RAID interface. In order to trigger the setup of a Software RAID via the Ironic Python Agent, the value of this property needs to be set to ``software``. - ``physical_disks`` - A list of physical disks to use as read by the RAID interface. For software RAID ``physical_disks`` is a list of device hints in the same format as used for :ref:`root-device-hints`. The number of provided hints must match the expected number of backing devices (repeat the same hint if necessary). .. 
note:: If properties from both "Backing physical disk hints" or "Backing physical disks" are specified, they should be consistent with each other. If they are not consistent, then the RAID configuration will fail (because the appropriate backing physical disks could not be found). .. _raid-config-examples: Examples for ``target_raid_config`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ *Example 1*. Single RAID disk of RAID level 5 with all of the space available. Make this the root volume to which Ironic deploys the image: .. code-block:: json { "logical_disks": [ { "size_gb": "MAX", "raid_level": "5", "is_root_volume": true } ] } *Example 2*. Two RAID disks. One with RAID level 5 of 100 GiB and make it root volume and use SSD. Another with RAID level 1 of 500 GiB and use HDD: .. code-block:: json { "logical_disks": [ { "size_gb": 100, "raid_level": "5", "is_root_volume": true, "disk_type": "ssd" }, { "size_gb": 500, "raid_level": "1", "disk_type": "hdd" } ] } *Example 3*. Single RAID disk. I know which disks and controller to use: .. code-block:: json { "logical_disks": [ { "size_gb": 100, "raid_level": "5", "controller": "Smart Array P822 in Slot 3", "physical_disks": ["6I:1:5", "6I:1:6", "6I:1:7"], "is_root_volume": true } ] } *Example 4*. Using backing physical disks: .. code-block:: json { "logical_disks": [ { "size_gb": 50, "raid_level": "1+0", "controller": "RAID.Integrated.1-1", "volume_name": "root_volume", "is_root_volume": true, "physical_disks": [ "Disk.Bay.0:Encl.Int.0-1:RAID.Integrated.1-1", "Disk.Bay.1:Encl.Int.0-1:RAID.Integrated.1-1" ] }, { "size_gb": 100, "raid_level": "5", "controller": "RAID.Integrated.1-1", "volume_name": "data_volume", "physical_disks": [ "Disk.Bay.2:Encl.Int.0-1:RAID.Integrated.1-1", "Disk.Bay.3:Encl.Int.0-1:RAID.Integrated.1-1", "Disk.Bay.4:Encl.Int.0-1:RAID.Integrated.1-1" ] } ] } *Example 5*. Software RAID with two RAID devices: .. 
code-block:: json { "logical_disks": [ { "size_gb": 100, "raid_level": "1", "controller": "software" }, { "size_gb": "MAX", "raid_level": "0", "controller": "software" } ] } *Example 6*. Software RAID, limiting backing block devices to exactly two devices with the size exceeding 100 GiB: .. code-block:: json { "logical_disks": [ { "size_gb": "MAX", "raid_level": "0", "controller": "software", "physical_disks": [ {"size": "> 100"}, {"size": "> 100"} ] } ] } Current RAID configuration -------------------------- After target RAID configuration is applied on the bare metal node, Ironic populates the current RAID configuration. This is populated in the ``raid_config`` field in the Ironic node. This contains the details about every logical disk after they were created on the bare metal node. It contains details like RAID controller used, the backing physical disks used, WWN of each logical disk, etc. It also contains information about each physical disk found on the bare metal node. To get the current RAID configuration:: openstack baremetal node show Workflow ======== * Operator configures the bare metal node with a hardware type that has a ``RAIDInterface`` other than ``no-raid``. For instance, for Software RAID, this would be ``agent``. * For in-band RAID configuration, operator builds an agent ramdisk which supports RAID configuration by bundling the hardware manager with the ramdisk. See `Build agent ramdisk which supports RAID configuration`_ for more information. * Operator prepares the desired target RAID configuration as mentioned in `Target RAID configuration`_. The target RAID configuration is set on the Ironic node:: openstack baremetal node set \ --target-raid-config The CLI command can accept the input from standard input also:: openstack baremetal node set \ --target-raid-config - * Create a JSON file with the RAID clean steps for manual cleaning. 
Add other clean steps as desired:: [{ "interface": "raid", "step": "delete_configuration" }, { "interface": "raid", "step": "create_configuration" }] .. note:: 'create_configuration' doesn't remove existing disks. It is recommended to add 'delete_configuration' before 'create_configuration' to make sure that only the desired logical disks exist in the system after manual cleaning. * Bring the node to ``manageable`` state and do a ``clean`` action to start cleaning on the node:: openstack baremetal node clean \ --clean-steps * After manual cleaning is complete, the current RAID configuration is reported in the ``raid_config`` field when running:: openstack baremetal node show Software RAID ============= Building Linux software RAID in-band (via the Ironic Python Agent ramdisk) is supported starting with the Train release. It is requested by using the ``agent`` RAID interface and RAID configuration with all controllers set to ``software``. You can find a software RAID configuration example in :ref:`raid-config-examples`. There are certain limitations to be aware of: * Only the mandatory properties (plus the required ``controller`` property) from `Target RAID configuration`_ are currently supported. * There is no way to select the disks which are used to set up the software RAID, so the Ironic Python Agent will use all available disks. This seems appropriate for servers with 2 or 4 disks, but needs to be considered when disk arrays are attached. * The number of created Software RAID devices must be 1 or 2. If there is only one Software RAID device, it has to be a RAID-1. If there are two, the first one has to be a RAID-1, while the RAID level for the second one can 0, 1, or 1+0. As the first RAID device will be the deployment device, enforcing a RAID-1 reduces the risk of ending up with a non-booting node in case of a disk failure. * Building RAID will fail if the target disks are already partitioned. Wipe the disks using e.g. 
the ``erase_devices_metadata`` clean step before building RAID:: [{ "interface": "raid", "step": "delete_configuration" }, { "interface": "deploy", "step": "erase_devices_metadata" { "interface": "raid", "step": "create_configuration" }] * If local boot is going to be used, the final instance image must have the ``mdadm`` utility installed and needs to be able to detect software RAID devices at boot time (which is usually done by having the RAID drivers embedded in the image's initrd). * Regular cleaning will not remove RAID configuration (similarly to hardware RAID). To destroy RAID run the ``delete_configuration`` manual clean step. * There is no support for partition images, only whole-disk images are supported with Software RAID. See :doc:`/install/configure-glance-images`. Image requirements ------------------ Since Ironic needs to perform additional steps when deploying nodes with software RAID, there are some requirements the deployed images need to fulfill. Up to and including the Train release, the image needs to have its root file system on the first partition. Starting with Ussuri, the image can also have additional metadata to point Ironic to the partition with the root file system: for this, the image needs to set the ``rootfs_uuid`` property with the file system UUID of the root file system. The pre-Ussuri approach, i.e. to have the root file system on the first partition, is kept as a fallback and hence allows software RAID deployments where Ironic does not have access to any image metadata (e.g. Ironic stand-alone). Using RAID in nova flavor for scheduling ======================================== The operator can specify the `raid_level` capability in nova flavor for node to be selected for scheduling:: openstack flavor set my-baremetal-flavor --property capabilities:raid_level="1+0" Developer documentation ======================= In-band RAID configuration is done using IPA ramdisk. 
IPA ramdisk has support for pluggable hardware managers which can be used to extend the functionality offered by IPA ramdisk using stevedore plugins. For more information, see Ironic Python Agent :ironic-python-agent-doc:`Hardware Manager ` documentation. The hardware manager that supports RAID configuration should do the following: #. Implement a method named ``create_configuration``. This method creates the RAID configuration as given in ``target_raid_config``. After successful RAID configuration, it returns the current RAID configuration information which ironic uses to set ``node.raid_config``. #. Implement a method named ``delete_configuration``. This method deletes all the RAID disks on the bare metal. #. Return these two clean steps in ``get_clean_steps`` method with priority as 0. Example:: return [{'step': 'create_configuration', 'interface': 'raid', 'priority': 0}, {'step': 'delete_configuration', 'interface': 'raid', 'priority': 0}] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/report.txt0000644000175000017500000004457500000000000021604 0ustar00coreycorey00000000000000/usr/local/lib/python2.7/dist-packages/pecan/__init__.py:122: RuntimeWarning: `static_root` is only used when `debug` is True, ignoring RuntimeWarning ======================================================================== ==== Guru Meditation ==== ======================================================================== |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| ======================================================================== ==== Package ==== ======================================================================== product = None vendor = None version = None ======================================================================== ==== Threads ==== ======================================================================== ------ Thread #140512155997952 
------ /usr/local/lib/python2.7/dist-packages/eventlet/hubs/hub.py:346 in run `self.wait(sleep_time)` /usr/local/lib/python2.7/dist-packages/eventlet/hubs/poll.py:82 in wait `sleep(seconds)` ======================================================================== ==== Green Threads ==== ======================================================================== ------ Green Thread ------ /usr/local/bin/ironic-api:10 in `sys.exit(main())` /opt/stack/ironic/ironic/cmd/api.py:48 in main `launcher.wait()` /usr/local/lib/python2.7/dist-packages/oslo_service/service.py:586 in wait `self._respawn_children()` /usr/local/lib/python2.7/dist-packages/oslo_service/service.py:570 in _respawn_children `eventlet.greenthread.sleep(self.wait_interval)` /usr/local/lib/python2.7/dist-packages/eventlet/greenthread.py:34 in sleep `hub.switch()` /usr/local/lib/python2.7/dist-packages/eventlet/hubs/hub.py:294 in switch `return self.greenlet.switch()` ------ Green Thread ------ No Traceback! ======================================================================== ==== Processes ==== ======================================================================== Process 124840 (under 48114) [ run by: ubuntu (1000), state: running ] Process 124849 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124850 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124851 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124852 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124853 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124854 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124855 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124856 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124857 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124858 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124859 (under 124840) [ run by: ubuntu 
(1000), state: sleeping ] Process 124860 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124861 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124862 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124863 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124864 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124865 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124866 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124867 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124868 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124869 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124870 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124871 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124872 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124873 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124874 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124875 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124876 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124877 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124878 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124879 (under 124840) [ run by: ubuntu (1000), state: sleeping ] Process 124880 (under 124840) [ run by: ubuntu (1000), state: sleeping ] ======================================================================== ==== Configuration ==== ======================================================================== agent: agent_api_version = v1 deploy_logs_collect = always deploy_logs_local_path = /home/ubuntu/ironic-bm-logs/deploy_logs deploy_logs_storage_backend = local deploy_logs_swift_container = ironic_deploy_logs_container deploy_logs_swift_days_to_expire = 30 
manage_agent_boot = True memory_consumed_by_agent = 0 post_deploy_get_power_state_retries = 6 post_deploy_get_power_state_retry_interval = 5 stream_raw_images = True api: api_workers = None enable_ssl_api = False host_ip = 0.0.0.0 max_limit = 1000 port = 6385 public_endpoint = None ramdisk_heartbeat_timeout = 30 restrict_lookup = True audit: audit_map_file = /etc/ironic/api_audit_map.conf enabled = False ignore_req_list = namespace = openstack audit_middleware_notifications: driver = None topics = None transport_url = *** conductor: api_url = http://10.223.197.220:6385 automated_clean = True check_provision_state_interval = 60 clean_callback_timeout = 1800 configdrive_swift_container = ironic_configdrive_container configdrive_use_swift = False deploy_callback_timeout = 1800 force_power_state_during_sync = True heartbeat_interval = 10 heartbeat_timeout = 60 inspect_timeout = 1800 node_locked_retry_attempts = 3 node_locked_retry_interval = 1 periodic_max_workers = 8 power_state_sync_max_retries = 3 send_sensor_data = False send_sensor_data_interval = 600 send_sensor_data_types = ALL sync_local_state_interval = 180 sync_power_state_interval = 60 workers_pool_size = 100 console: subprocess_checking_interval = 1 subprocess_timeout = 10 terminal = shellinaboxd terminal_cert_dir = None terminal_pid_dir = None cors: allow_credentials = True allow_headers = allow_methods = DELETE GET HEAD OPTIONS PATCH POST PUT TRACE allowed_origin = None expose_headers = max_age = 3600 cors.subdomain: allow_credentials = True allow_headers = allow_methods = DELETE GET HEAD OPTIONS PATCH POST PUT TRACE allowed_origin = None expose_headers = max_age = 3600 database: backend = sqlalchemy connection = *** connection_debug = 0 connection_trace = False db_inc_retry_interval = True db_max_retries = 20 db_max_retry_interval = 10 db_retry_interval = 1 idle_timeout = 3600 max_overflow = 50 max_pool_size = 5 max_retries = 10 min_pool_size = 1 mysql_engine = InnoDB mysql_sql_mode = TRADITIONAL 
pool_timeout = None retry_interval = 10 slave_connection = *** sqlite_synchronous = True use_db_reconnect = False default: api_paste_config = api-paste.ini auth_strategy = keystone bindir = /opt/stack/ironic/ironic/bin client_socket_timeout = 900 config-dir = config-file = /etc/ironic/ironic.conf control_exchange = ironic debug = True debug_tracebacks_in_api = False default_boot_interface = None default_console_interface = None default_deploy_interface = None default_inspect_interface = None default_log_levels = amqp=WARNING amqplib=WARNING eventlet.wsgi.server=INFO glanceclient=WARNING iso8601=WARNING keystoneauth.session=INFO keystonemiddleware.auth_token=INFO neutronclient=WARNING oslo_messaging=INFO paramiko=WARNING qpid.messaging=INFO requests=WARNING sqlalchemy=WARNING stevedore=INFO urllib3.connectionpool=WARNING default_management_interface = None default_network_interface = None default_portgroup_mode = active-backup default_power_interface = None default_raid_interface = None default_vendor_interface = None enabled_boot_interfaces = pxe enabled_console_interfaces = no-console enabled_deploy_interfaces = direct iscsi enabled_hardware_types = ipmi redfish enabled_inspect_interfaces = no-inspect enabled_management_interfaces = ipmitool redfish enabled_network_interfaces = flat noop enabled_power_interfaces = ipmitool redfish enabled_raid_interfaces = agent no-raid enabled_vendor_interfaces = no-vendor fatal_exception_format_errors = False force_raw_images = True graceful_shutdown_timeout = 60 grub_config_template = /opt/stack/ironic/ironic/common/grub_conf.template hash_partition_exponent = 5 hash_ring_reset_interval = 180 host = ubuntu instance_format = [instance: %(uuid)s] instance_uuid_format = [instance: %(uuid)s] isolinux_bin = /usr/lib/syslinux/isolinux.bin isolinux_config_template = /opt/stack/ironic/ironic/common/isolinux_config.template log-config-append = None log-date-format = %Y-%m-%d %H:%M:%S log-dir = None log-file = None log_options = True 
logging_context_format_string = %(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s logging_debug_format_suffix = from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d logging_default_format_string = %(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s logging_exception_prefix = %(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s max_header_line = 16384 my_ip = 10.223.197.220 notification_level = None parallel_image_downloads = False pecan_debug = False publish_errors = False pybasedir = /opt/stack/ironic/ironic rate_limit_burst = 0 rate_limit_except_level = CRITICAL rate_limit_interval = 0 rootwrap_config = /etc/ironic/rootwrap.conf rpc_backend = rabbit rpc_response_timeout = 60 state_path = /var/lib/ironic syslog-log-facility = LOG_USER tcp_keepidle = 600 tempdir = /tmp transport_url = *** use-journal = False use-syslog = False use_stderr = False watch-log-file = False wsgi_default_pool_size = 100 wsgi_keep_alive = True wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f deploy: continue_if_disk_secure_erase_fails = False default_boot_option = netboot erase_devices_metadata_priority = None erase_devices_priority = 0 http_root = /opt/stack/data/ironic/httpboot http_url = http://10.223.197.220:3928 power_off_after_deploy_failure = True shred_final_overwrite_with_zeros = True shred_random_overwrite_iterations = 1 dhcp: dhcp_provider = neutron disk_partitioner: check_device_interval = 1 check_device_max_retries = 20 disk_utils: bios_boot_partition_size = 1 dd_block_size = 1M efi_system_partition_size = 200 iscsi_verify_attempts = 3 drac: query_raid_config_job_status_interval = 120 glance: allowed_direct_url_schemes = auth_section = None 
auth_strategy = keystone auth_type = password cafile = /opt/stack/data/ca-bundle.pem certfile = None glance_api_insecure = False glance_api_servers = None glance_cafile = None glance_num_retries = 0 insecure = False keyfile = None swift_account = AUTH_cb13c4492d124b01b4659a97d627955c swift_api_version = v1 swift_container = glance swift_endpoint_url = http://10.223.197.220:8080 swift_store_multiple_containers_seed = 0 swift_temp_url_cache_enabled = False swift_temp_url_duration = 3600 swift_temp_url_expected_download_start_delay = 0 swift_temp_url_key = *** timeout = None ilo: ca_file = None clean_priority_clear_secure_boot_keys = 0 clean_priority_erase_devices = None clean_priority_reset_bios_to_default = 10 clean_priority_reset_ilo = 0 clean_priority_reset_ilo_credential = 30 clean_priority_reset_secure_boot_keys_to_default = 20 client_port = 443 client_timeout = 60 default_boot_mode = auto power_retry = 6 power_wait = 2 swift_ilo_container = ironic_ilo_container swift_object_expiry_timeout = 900 use_web_server_for_images = False inspector: auth_section = None auth_type = password cafile = /opt/stack/data/ca-bundle.pem certfile = None enabled = False insecure = False keyfile = None service_url = None status_check_period = 60 timeout = None ipmi: min_command_interval = 5 retry_timeout = 60 irmc: auth_method = basic client_timeout = 60 port = 443 remote_image_server = None remote_image_share_name = share remote_image_share_root = /remote_image_share_root remote_image_share_type = CIFS remote_image_user_domain = remote_image_user_name = None remote_image_user_password = *** sensor_method = ipmitool snmp_community = public snmp_port = 161 snmp_security = None snmp_version = v2c ironic_lib: fatal_exception_format_errors = False root_helper = sudo ironic-rootwrap /etc/ironic/rootwrap.conf iscsi: portal_port = 3260 keystone: region_name = RegionOne keystone_authtoken: admin_password = *** admin_tenant_name = admin admin_token = *** admin_user = None auth-url = 
http://10.223.197.220/identity_admin auth_admin_prefix = auth_host = 127.0.0.1 auth_port = 5000 auth_protocol = https auth_section = None auth_type = password www_authenticate_uri = http://10.223.197.220/identity auth_version = None cache = None cafile = /opt/stack/data/ca-bundle.pem certfile = None check_revocations_for_cached = False default-domain-id = None default-domain-name = None delay_auth_decision = False domain-id = None domain-name = None enforce_token_bind = permissive hash_algorithms = md5 http_connect_timeout = None http_request_max_retries = 3 identity_uri = None include_service_catalog = True insecure = False keyfile = None memcache_pool_conn_get_timeout = 10 memcache_pool_dead_retry = 300 memcache_pool_maxsize = 10 memcache_pool_socket_timeout = 3 memcache_pool_unused_timeout = 60 memcache_secret_key = *** memcache_security_strategy = None memcache_use_advanced_pool = False memcached_servers = 10.223.197.220:11211 password = *** project-domain-id = None project-domain-name = Default project-id = None project-name = service region_name = None revocation_cache_time = 10 service_token_roles = service service_token_roles_required = False signing_dir = /var/cache/ironic/api token_cache_time = 300 trust-id = None user-domain-id = None user-domain-name = Default user-id = None username = ironic metrics: agent_backend = noop agent_global_prefix = None agent_prepend_host = False agent_prepend_host_reverse = True agent_prepend_uuid = False backend = noop global_prefix = None prepend_host = False prepend_host_reverse = True metrics_statsd: agent_statsd_host = localhost agent_statsd_port = 8125 statsd_host = localhost statsd_port = 8125 neutron: auth_section = None auth_strategy = keystone auth_type = password cafile = /opt/stack/data/ca-bundle.pem certfile = None cleaning_network = private cleaning_network_security_groups = insecure = False keyfile = None port_setup_delay = 15 provisioning_network = None provisioning_network_security_groups = retries = 3 
timeout = None url = None url_timeout = 30 oslo_concurrency: disable_process_locking = False lock_path = None oslo_messaging_notifications: driver = topics = notifications transport_url = *** oslo_messaging_rabbit: amqp_auto_delete = False amqp_durable_queues = False conn_pool_min_size = 2 conn_pool_ttl = 1200 fake_rabbit = False heartbeat_rate = 2 heartbeat_timeout_threshold = 60 kombu_compression = None kombu_failover_strategy = round-robin kombu_missing_consumer_retry_timeout = 60 kombu_reconnect_delay = 1.0 rabbit_ha_queues = False rabbit_host = localhost rabbit_hosts = localhost:5672 rabbit_interval_max = 30 rabbit_login_method = AMQPLAIN rabbit_password = *** rabbit_port = 5672 rabbit_qos_prefetch_count = 0 rabbit_retry_backoff = 2 rabbit_retry_interval = 1 rabbit_transient_queues_ttl = 1800 rabbit_userid = guest rabbit_virtual_host = / rpc_conn_pool_size = 30 ssl = False ssl_ca_file = ssl_cert_file = ssl_key_file = ssl_version = oslo_versionedobjects: fatal_exception_format_errors = False pxe: default_ephemeral_format = ext4 image_cache_size = 20480 image_cache_ttl = 10080 images_path = /var/lib/ironic/images/ instance_master_path = /var/lib/ironic/master_images ip_version = 4 ipxe_boot_script = /opt/stack/ironic/ironic/drivers/modules/boot.ipxe ipxe_enabled = True ipxe_timeout = 0 ipxe_use_swift = False pxe_append_params = nofb nomodeset vga=normal console=ttyS0 systemd.journald.forward_to_console=yes pxe_bootfile_name = undionly.kpxe pxe_bootfile_name_by_arch: pxe_config_template = /opt/stack/ironic/ironic/drivers/modules/ipxe_config.template pxe_config_template_by_arch: tftp_master_path = /opt/stack/data/ironic/tftpboot/master_images tftp_root = /opt/stack/data/ironic/tftpboot tftp_server = 10.223.197.220 uefi_pxe_bootfile_name = ipxe.efi uefi_pxe_config_template = /opt/stack/ironic/ironic/drivers/modules/ipxe_config.template seamicro: action_timeout = 10 max_retry = 3 service_catalog: auth_section = None auth_type = password cafile = 
/opt/stack/data/ca-bundle.pem certfile = None insecure = False keyfile = None timeout = None snmp: power_timeout = 10 reboot_delay = 0 swift: auth_section = None auth_type = password cafile = /opt/stack/data/ca-bundle.pem certfile = None insecure = False keyfile = None swift_max_retries = 2 timeout = None virtualbox: port = 18083 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/rescue.rst0000644000175000017500000000675400000000000021545 0ustar00coreycorey00000000000000.. _rescue: =========== Rescue Mode =========== Overview ======== The Bare Metal Service supports putting nodes in rescue mode using hardware types that support rescue interfaces. The hardware types utilizing ironic-python-agent with ``PXE``/``Virtual Media`` based boot interface can support rescue operation when configured appropriately. .. note:: The rescue operation is currently supported only when tenant networks use DHCP to obtain IP addresses. Rescue operation can be used to boot nodes into a rescue ramdisk so that the ``rescue`` user can access the node, in order to provide the ability to access the node in case access to OS is not possible. For example, if there is a need to perform manual password reset or data recovery in the event of some failure, rescue operation can be used. Configuring The Bare Metal Service ================================== Configure the Bare Metal Service appropriately so that the service has the information needed to boot the ramdisk before a user tries to initiate rescue operation. This will differ somewhat between different deploy environments, but an example of how to do this is outlined below: #. Create and configure ramdisk that supports rescue operation. Please see :doc:`/install/deploy-ramdisk` for detailed instructions to build a ramdisk. #. Configure a network to use for booting nodes into the rescue ramdisk in neutron, and note the UUID or name of this network. 
This is required if you're using the neutron DHCP provider and have Bare Metal Service managing ramdisk booting (the default). This can be the same network as your cleaning or tenant network (for flat network). For an example of how to configure new networks with Bare Metal Service, see the :doc:`/install/configure-networking` documentation. #. Add the unique name or UUID of your rescue network to ``ironic.conf``: .. code-block:: ini [neutron] rescuing_network= .. note:: This can be set per node via driver_info['rescuing_network'] #. Restart the ironic conductor service. #. Specify a rescue kernel and ramdisk or rescue ISO compatible with the node's driver for pxe based boot interface or virtual-media based boot interface respectively. Example for pxe based boot interface: .. code-block:: console openstack baremetal node set $NODE_UUID \ --driver-info rescue_ramdisk=$RESCUE_INITRD_UUID \ --driver-info rescue_kernel=$RESCUE_VMLINUZ_UUID See :doc:`/install/configure-glance-images` for details. If you are not using Image service, it is possible to provide images to Bare Metal service via hrefs. After this, The Bare Metal Service should be ready for ``rescue`` operation. Test it out by attempting to rescue an active node and connect to the instance using ssh, as given below: .. code-block:: console openstack baremetal node rescue $NODE_UUID \ --rescue-password --wait ssh rescue@$INSTANCE_IP_ADDRESS To move a node back to active state after using rescue mode you can use ``unrescue``. Please unmount any filesystems that were manually mounted before proceeding with unrescue. The node unrescue can be done as given below: .. code-block:: console openstack baremetal node unrescue $NODE_UUID ``rescue`` and ``unrescue`` operations can also be triggered via the Compute Service using the following commands: .. 
code-block:: console openstack server rescue --password openstack server unrescue ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/retirement.rst0000644000175000017500000000452000000000000022422 0ustar00coreycorey00000000000000.. _retirement: =============== Node retirement =============== Overview ======== Retiring nodes is a natural part of a server’s life cycle, for instance when the end of the warranty is reached and the physical space is needed for new deliveries to install replacement capacity. However, depending on the type of the deployment, removing nodes from service can be a full workflow by itself as it may include steps like moving applications to other hosts, cleaning sensitive data from disks or the BMC, or tracking the dismantling of servers from their racks. Ironic provides some means to support such workflows by allowing operators to tag nodes as ``retired`` which will prevent any further scheduling of instances, but will still allow for other operations, such as cleaning, to happen (this marks an important difference to nodes which have the ``maintenance`` flag set). How to use ========== When it is known that a node shall be retired, set the ``retired`` flag on the node with:: openstack baremetal node set --retired node-001 This can be done irrespective of the state the node is in, so in particular while the node is ``active``. .. NOTE:: An exception is nodes which are in ``available``. For backwards compatibility reasons, these nodes need to be moved to ``manageable`` first. Trying to set the ``retired`` flag for ``available`` nodes will result in an error. Optionally, a reason can be specified when a node is retired, e.g.:: openstack baremetal node set --retired node-001 \ --retired-reason "End of warranty for delivery abc123" Upon instance deletion, an ``active`` node with the ``retired`` flag set will not move to ``available``, but to ``manageable``.
The node will hence not be eligible for scheduling of new instances. Equally, nodes with ``retired`` set to True cannot move from ``manageable`` to ``available``: the ``provide`` verb is blocked. This is to prevent accidental re-use of nodes tagged for removal from the fleet. In order to move these nodes to ``available`` nonetheless, the ``retired`` field needs to be removed first. This can be done via:: openstack baremetal node unset --retired node-001 In order to facilitate the identification of nodes marked for retirement, e.g. by other teams, ironic also allows listing all nodes which have the ``retired`` flag set:: openstack baremetal node list --retired ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/security.rst0000644000175000017500000002024300000000000022113 0ustar00coreycorey00000000000000.. _security: ================= Security Overview ================= While the Bare Metal service is intended to be a secure application, it is important to understand what it does and does not cover today. Deployers must properly evaluate their use case and take the appropriate actions to secure their environment(s). This document is intended to provide an overview of what risks an operator of the Bare Metal service should be aware of. It is not intended as a How-To guide for securing a data center or an OpenStack deployment. .. TODO: add "Security Considerations for Network Boot" section .. TODO: add "Credential Storage and Management" section .. TODO: add "Multi-tenancy Considerations" section REST API: user roles and policy settings ======================================== Beginning with the Newton (6.1.0) release, the Bare Metal service allows operators significant control over API access: * Access may be restricted to each method (GET, PUT, etc) for each REST resource. Defaults are provided with the release and defined in code.
* Access may be divided between an "administrative" role with full access and "observer" role with read-only access. By default, these roles are assigned the names ``baremetal_admin`` and ``baremetal_observer``, respectively. * By default, passwords and instance secrets are hidden in ``driver_info`` and ``instance_info``, respectively. In case of debugging or diagnosing, the behavior can be overridden by changing the policy file. To allow password in ``driver_info`` unmasked for users with administrative privileges, apply following changes to policy configuration file:: "show_password": "role:is_admin" And restart the Bare Metal API service to take effect. Please check :doc:`/configuration/policy` for more details. Prior to the Newton (6.1.0) release, the Bare Metal service only supported two policy options: * API access may be secured by a simple policy rule: users with administrative privileges may access all API resources, whereas users without administrative privileges may only access public API resources. * Passwords contained in the ``driver_info`` field may be hidden from all API responses with the ``show_password`` policy setting. This defaults to always hide passwords, regardless of the user's role. You can override it with policy configuration as described above. Multi-tenancy ============= There are two aspects of multitenancy to consider when evaluating a deployment of the Bare Metal Service: interactions between tenants on the network, and actions one tenant can take on a machine that will affect the next tenant. Network Interactions -------------------- Interactions between tenants' workloads running simultaneously on separate servers include, but are not limited to: IP spoofing, packet sniffing, and network man-in-the-middle attacks. By default, the Bare Metal service provisions all nodes on a "flat" network, and does not take any precautions to avoid or prevent interaction between tenants. 
This can be addressed by integration with the OpenStack Identity, Compute, and Networking services, so as to provide tenant-network isolation. Additional documentation on `network multi-tenancy `_ is available. Lingering Effects ----------------- Interactions between tenants placed sequentially on the same server include, but are not limited to: changes in BIOS settings, modifications to firmware, or files left on disk or peripheral storage devices (if these devices are not erased between uses). By default, the Bare Metal service will erase (clean) the local disk drives during the "cleaning" phase, after deleting an instance. It *does not* reset BIOS or reflash firmware or peripheral devices. This can be addressed through customizing the utility ramdisk used during the "cleaning" phase. See details in the `Firmware security`_ section. Firmware security ================= When the Bare Metal service deploys an operating system image to a server, that image is run natively on the server without virtualization. Any user with administrative access to the deployed instance has administrative access to the underlying hardware. Most servers' default settings do not prevent a privileged local user from gaining direct access to hardware devices. Such a user could modify device or firmware settings, and potentially flash new firmware to the device, before deleting their instance and allowing the server to be allocated to another user. If the ``[conductor]/automated_clean`` configuration option is enabled (and the ``[deploy]/erase_devices_priority`` configuration option is not zero), the Bare Metal service will securely erase all local disk devices within a machine during instance deletion. However, the service does not ship with any code that will validate the integrity of, or make any modifications to, system or device firmware or firmware settings. Operators are encouraged to write their own hardware manager plugins for the ``ironic-python-agent`` ramdisk. 
This should include custom ``clean steps`` that would be run during the :ref:`cleaning` process, as part of Node de-provisioning. The ``clean steps`` would perform the specific actions necessary within that environment to ensure the integrity of each server's firmware. Ideally, an operator would work with their hardware vendor to ensure that proper firmware security measures are put in place ahead of time. This could include: - installing signed firmware for BIOS and peripheral devices - using a TPM (Trusted Platform Module) to validate signatures at boot time - booting machines in :ref:`iLO UEFI Secure Boot Support`, rather than BIOS mode, to validate kernel signatures - disabling local (in-band) access from the host OS to the management controller (BMC) - disabling modifications to boot settings from the host OS Additional references: - :ref:`cleaning` - :ref:`trusted-boot` Other considerations ==================== Internal networks ----------------- Access to networks which the Bare Metal service uses internally should be prohibited from outside. These networks are the ones used for management (with the nodes' BMC controllers), provisioning, cleaning (if used) and rescuing (if used). This can be done with physical or logical network isolation, traffic filtering, etc. Management interface technologies --------------------------------- Some nodes support more than one management interface technology (vendor and IPMI for example). If you use only one modern technology for out-of-band node access, it is recommended that you disable IPMI since the IPMI protocol is not secure. If IPMI is enabled, in most cases a local OS administrator is able to work in-band with IPMI settings without specifying any credentials, as this is a DCMI specification requirement. 
Tenant network isolation ------------------------ If you use tenant network isolation, services (TFTP or HTTP) that handle the nodes' boot files should serve requests only from the internal networks that are used for the nodes being deployed and cleaned. TFTP protocol does not support per-user access control at all. For HTTP, there is no generic and safe way to transfer credentials to the node. Also, tenant network isolation is not intended to work with network-booting a node by default, once the node has been provisioned. API endpoints for RAM disk use ------------------------------ There are `two (unauthorized) endpoints `_ in the Bare Metal API that are intended for use by the ironic-python-agent RAM disk. They are not intended for public use. These endpoints can potentially cause security issues. Access to these endpoints from external or untrusted networks should be prohibited. An easy way to do this is to: * set up two groups of API services: one for external requests, the second for deploy RAM disks' requests. * to disable unauthorized access to these endpoints in the (first) API services group that serves external requests, the following lines should be added to the :ironic-doc:`policy.yaml file `:: # Send heartbeats from IPA ramdisk "baremetal:node:ipa_heartbeat": "rule:is_admin" # Access IPA ramdisk functions "baremetal:driver:ipa_lookup": "rule:is_admin" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/troubleshooting.rst0000644000175000017500000003666600000000000023513 0ustar00coreycorey00000000000000.. _troubleshooting: ====================== Troubleshooting Ironic ====================== Nova returns "No valid host was found" Error ============================================ Sometimes Nova Conductor log file "nova-conductor.log" or a message returned from Nova API contains the following error:: NoValidHost: No valid host was found. 
There are not enough hosts available. "No valid host was found" means that the Nova Scheduler could not find a bare metal node suitable for booting the new instance. This in turn usually means some mismatch between resources that Nova expects to find and resources that Ironic advertised to Nova. A few things should be checked in this case: #. Make sure that enough nodes are in ``available`` state, not in maintenance mode and not already used by an existing instance. Check with the following command:: openstack baremetal node list --provision-state available --no-maintenance --unassociated If this command does not show enough nodes, use generic ``openstack baremetal node list`` to check other nodes. For example, nodes in ``manageable`` state should be made available:: openstack baremetal node provide The Bare metal service automatically puts a node in maintenance mode if there are issues with accessing its management interface. Check the power credentials (e.g. ``ipmi_address``, ``ipmi_username`` and ``ipmi_password``) and then move the node out of maintenance mode:: openstack baremetal node maintenance unset The ``node validate`` command can be used to verify that all required fields are present. The following command should not return anything:: openstack baremetal node validate | grep -E '(power|management)\W*False' Maintenance mode will be also set on a node if automated cleaning has failed for it previously. #. 
Make sure that you have Compute services running and enabled:: $ openstack compute service list --service nova-compute +----+--------------+-------------+------+---------+-------+----------------------------+ | ID | Binary | Host | Zone | Status | State | Updated At | +----+--------------+-------------+------+---------+-------+----------------------------+ | 7 | nova-compute | example.com | nova | enabled | up | 2017-09-04T13:14:03.000000 | +----+--------------+-------------+------+---------+-------+----------------------------+ By default, a Compute service is disabled after 10 consecutive build failures on it. This is to ensure that new build requests are not routed to a broken Compute service. If it is the case, make sure to fix the source of the failures, then re-enable it:: openstack compute service set --enable nova-compute #. Starting with the Pike release, check that all your nodes have the ``resource_class`` field set using the following command:: openstack --os-baremetal-api-version 1.21 baremetal node list --fields uuid name resource_class Then check that the flavor(s) are configured to request these resource classes via their properties:: openstack flavor show -f value -c properties For example, if your node has resource class ``baremetal-large``, it will be matched by a flavor with property ``resources:CUSTOM_BAREMETAL_LARGE`` set to ``1``. See :doc:`/install/configure-nova-flavors` for more details on the correct configuration. #. If you do not use scheduling based on resource classes, then the node's properties must have been set either manually or via inspection. For each node with ``available`` state check that the ``properties`` JSON field has valid values for the keys ``cpus``, ``cpu_arch``, ``memory_mb`` and ``local_gb``. 
Example of valid properties:: $ openstack baremetal node show --fields properties +------------+------------------------------------------------------------------------------------+ | Property | Value | +------------+------------------------------------------------------------------------------------+ | properties | {u'memory_mb': u'8192', u'cpu_arch': u'x86_64', u'local_gb': u'41', u'cpus': u'4'} | +------------+------------------------------------------------------------------------------------+ .. warning:: If you're using exact match filters in the Nova Scheduler, make sure the flavor and the node properties match exactly. #. The Nova flavor that you are using does not match any properties of the available Ironic nodes. Use :: openstack flavor show to compare. The extra specs in your flavor starting with ``capability:`` should match ones in ``node.properties['capabilities']``. .. note:: The format of capabilities is different in Nova and Ironic. E.g. in Nova flavor:: $ openstack flavor show -c properties +------------+----------------------------------+ | Field | Value | +------------+----------------------------------+ | properties | capabilities:boot_option='local' | +------------+----------------------------------+ But in Ironic node:: $ openstack baremetal node show --fields properties +------------+-----------------------------------------+ | Property | Value | +------------+-----------------------------------------+ | properties | {u'capabilities': u'boot_option:local'} | +------------+-----------------------------------------+ #. After making changes to nodes in Ironic, it takes time for those changes to propagate from Ironic to Nova. Check that :: openstack hypervisor stats show correctly shows total amount of resources in your system. You can also check ``openstack hypervisor show `` to see the status of individual Ironic nodes as reported to Nova. .. TODO(dtantsur): explain inspecting the placement API #. 
Figure out which Nova Scheduler filter ruled out your nodes. Check the ``nova-scheduler`` logs for lines containing something like:: Filter ComputeCapabilitiesFilter returned 0 hosts The name of the filter that removed the last hosts may give some hints on what exactly was not matched. See :nova-doc:`Nova filters documentation ` for more details. #. If none of the above helped, check Ironic conductor log carefully to see if there are any conductor-related errors which are the root cause for "No valid host was found". If there are any "Error in deploy of node : [Errno 28] ..." error messages in Ironic conductor log, it means the conductor ran into a special error during deployment. So you can check the log carefully to fix or work around and then try again. Patching the Deploy Ramdisk =========================== When debugging a problem with deployment and/or inspection you may want to quickly apply a change to the ramdisk to see if it helps. Of course you can inject your code and/or SSH keys during the ramdisk build (depends on how exactly you've built your ramdisk). But it's also possible to quickly modify an already built ramdisk. Create an empty directory and unpack the ramdisk content there:: mkdir unpack cd unpack gzip -dc /path/to/the/ramdisk | cpio -id The last command will result in the whole Linux file system tree unpacked in the current directory. Now you can modify any files you want. The actual location of the files will depend on the way you've built the ramdisk. .. note:: On a systemd-based system you can use the ``systemd-nspawn`` tool (from the ``systemd-container`` package) to create a lightweight container from the unpacked filesystem tree:: sudo systemd-nspawn --directory /path/to/unpacked/ramdisk/ /bin/bash This will allow you to run commands within the filesystem, e.g. use package manager.
If the ramdisk is also systemd-based, and you have login credentials set up, you can even boot a real ramdisk environment with :: sudo systemd-nspawn --directory /path/to/unpacked/ramdisk/ --boot After you've done the modifications, pack the whole content of the current directory back:: find . | cpio -H newc -o | gzip -c > /path/to/the/new/ramdisk .. note:: You don't need to modify the kernel (e.g. ``tinyipa-master.vmlinuz``), only the ramdisk part. API Errors ========== The `debug_tracebacks_in_api` config option may be set to return tracebacks in the API response for all 4xx and 5xx errors. .. _retrieve_deploy_ramdisk_logs: Retrieving logs from the deploy ramdisk ======================================= When troubleshooting deployments (especially in case of a deploy failure) it's important to have access to the deploy ramdisk logs to be able to identify the source of the problem. By default, Ironic will retrieve the logs from the deploy ramdisk when the deployment fails and save them on the local filesystem at ``/var/log/ironic/deploy``. To change this behavior, operators can make the following changes to ``/etc/ironic/ironic.conf`` under the ``[agent]`` group: * ``deploy_logs_collect``: Whether Ironic should collect the deployment logs on deployment. Valid values for this option are: * ``on_failure`` (**default**): Retrieve the deployment logs upon a deployment failure. * ``always``: Always retrieve the deployment logs, even if the deployment succeeds. * ``never``: Disable retrieving the deployment logs. * ``deploy_logs_storage_backend``: The name of the storage backend where the logs will be stored. Valid values for this option are: * ``local`` (**default**): Store the logs in the local filesystem. * ``swift``: Store the logs in Swift. * ``deploy_logs_local_path``: The path to the directory where the logs should be stored, used when the ``deploy_logs_storage_backend`` is configured to ``local``. By default logs will be stored at **/var/log/ironic/deploy**. 
* ``deploy_logs_swift_container``: The name of the Swift container to store the logs, used when the deploy_logs_storage_backend is configured to "swift". By default **ironic_deploy_logs_container**. * ``deploy_logs_swift_days_to_expire``: Number of days before a log object is marked as expired in Swift. If None, the logs will be kept forever or until manually deleted. Used when the deploy_logs_storage_backend is configured to "swift". By default **30** days. When the logs are collected, Ironic will store a *tar.gz* file containing all the logs according to the ``deploy_logs_storage_backend`` configuration option. All log objects will be named with the following pattern:: [_]_.tar.gz .. note:: The *instance_uuid* field is not required for deploying a node when Ironic is configured to be used in standalone mode. If present it will be appended to the name. Accessing the log data ---------------------- When storing in the local filesystem ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When storing the logs in the local filesystem, the log files can be found at the path configured in the ``deploy_logs_local_path`` configuration option. For example, to find the logs from the node ``5e9258c4-cfda-40b6-86e2-e192f523d668``: .. code-block:: bash $ ls /var/log/ironic/deploy | grep 5e9258c4-cfda-40b6-86e2-e192f523d668 5e9258c4-cfda-40b6-86e2-e192f523d668_88595d8a-6725-4471-8cd5-c0f3106b6898_2016-08-08-13:52:12.tar.gz 5e9258c4-cfda-40b6-86e2-e192f523d668_db87f2c5-7a9a-48c2-9a76-604287257c1b_2016-08-08-14:07:25.tar.gz .. note:: When saving the logs to the filesystem, operators may want to enable some form of rotation for the logs to avoid disk space problems. When storing in Swift ~~~~~~~~~~~~~~~~~~~~~ When using Swift, operators can associate the objects in the container with the nodes in Ironic and search for the logs for the node ``5e9258c4-cfda-40b6-86e2-e192f523d668`` using the **prefix** parameter. For example: .. 
code-block:: bash $ swift list ironic_deploy_logs_container -p 5e9258c4-cfda-40b6-86e2-e192f523d668 5e9258c4-cfda-40b6-86e2-e192f523d668_88595d8a-6725-4471-8cd5-c0f3106b6898_2016-08-08-13:52:12.tar.gz 5e9258c4-cfda-40b6-86e2-e192f523d668_db87f2c5-7a9a-48c2-9a76-604287257c1b_2016-08-08-14:07:25.tar.gz To download a specific log from Swift, do: .. code-block:: bash $ swift download ironic_deploy_logs_container "5e9258c4-cfda-40b6-86e2-e192f523d668_db87f2c5-7a9a-48c2-9a76-604287257c1b_2016-08-08-14:07:25.tar.gz" 5e9258c4-cfda-40b6-86e2-e192f523d668_db87f2c5-7a9a-48c2-9a76-604287257c1b_2016-08-08-14:07:25.tar.gz [auth 0.341s, headers 0.391s, total 0.391s, 0.531 MB/s] The contents of the log file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The log is just a ``.tar.gz`` file that can be extracted as: .. code-block:: bash $ tar xvf The contents of the file may differ slightly depending on the distribution that the deploy ramdisk is using: * For distributions using ``systemd`` there will be a file called **journal** which contains all the system logs collected via the ``journalctl`` command. * For other distributions, the ramdisk will collect all the contents of the ``/var/log`` directory. For all distributions, the log file will also contain the output of the following commands (if present): ``ps``, ``df``, ``ip addr`` and ``iptables``. Here's one example when extracting the content of a log file for a distribution that uses ``systemd``: .. code-block:: bash $ tar xvf 5e9258c4-cfda-40b6-86e2-e192f523d668_88595d8a-6725-4471-8cd5-c0f3106b6898_2016-08-08-13:52:12.tar.gz df ps journal ip_addr iptables .. _troubleshooting-stp: DHCP during PXE or iPXE is inconsistent or unreliable ===================================================== This can be caused by the spanning tree protocol delay on some switches. The delay prevents the switch port moving to forwarding mode during the nodes attempts to PXE, so the packets never make it to the DHCP server. 
To resolve this issue you should set the switch port that connects to your baremetal nodes as an edge or PortFast type port. Configured in this way the switch port will move to forwarding mode as soon as the link is established. An example on how to do that for a Cisco Nexus switch is: .. code-block:: bash $ config terminal $ (config) interface eth1/11 $ (config-if) spanning-tree port type edge IPMI errors =========== When working with IPMI, several settings need to be enabled depending on vendors. Enable IPMI over LAN -------------------- Machines may not have IPMI access over LAN enabled by default. This could cause the IPMI port to be unreachable through ipmitool, as shown: .. code-block:: bash $ipmitool -I lan -H ipmi_host -U ipmi_user -P ipmi_pass chassis power status Error: Unable to establish LAN session To fix this, enable `IPMI over lan` setting using your BMC tool or web app. Troubleshooting lanplus interface --------------------------------- When working with lanplus interfaces, you may encounter the following error: .. code-block:: bash $ipmitool -I lanplus -H ipmi_host -U ipmi_user -P ipmi_pass power status Error in open session response message : insufficient resources for session Error: Unable to establish IPMI v2 / RMCP+ session To fix that issue, please enable `RMCP+ Cipher Suite3 Configuration` setting using your BMC tool or web app. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/upgrade-guide.rst0000644000175000017500000004650200000000000022774 0ustar00coreycorey00000000000000.. _upgrade-guide: ================================ Bare Metal Service Upgrade Guide ================================ This document outlines various steps and notes for operators to consider when upgrading their ironic-driven clouds from previous versions of OpenStack. 
The Bare Metal (ironic) service is tightly coupled with the ironic driver that is shipped with the Compute (nova) service. Some special considerations must be taken into account when upgrading your cloud. Both offline and rolling upgrades are supported. Plan your upgrade ================= * Rolling upgrades are available starting with the Pike release; that is, when upgrading from Ocata. This means that it is possible to do an upgrade with minimal to no downtime of the Bare Metal API. * Upgrades are only supported between two consecutive named releases. This means that you cannot upgrade Ocata directly into Queens; you need to upgrade into Pike first. * The `release notes `_ should always be read carefully when upgrading the Bare Metal service. Specific upgrade steps and considerations are documented there. * The Bare Metal service should always be upgraded before the Compute service. .. note:: The ironic virt driver in nova always uses a specific version of the ironic REST API. This API version may be one that was introduced in the same development cycle, so upgrading nova first may result in nova being unable to use the Bare Metal API. * Make a backup of your database. Ironic does not support downgrading of the database. Hence, in case of upgrade failure, restoring the database from a backup is the only choice. * Before starting your upgrade, it is best to ensure that all nodes have reached, or are in, a stable ``provision_state``. Nodes in states with long running processes such as deploying or cleaning, may fail, and may require manual intervention to return them to the available hardware pool. This is most likely in cases where a timeout has occurred or a service was terminated abruptly. For a visual diagram detailing states and possible state transitions, please see :ref:`states`. Offline upgrades ================ In an offline (or cold) upgrade, the Bare Metal service is not available during the upgrade, because all the services have to be taken down. 
When upgrading the Bare Metal service, the following steps should always be taken in this order: #. upgrade the ironic-python-agent image #. update ironic code, without restarting services #. run database schema migrations via ``ironic-dbsync upgrade`` #. restart ironic-conductor and ironic-api services Once the above is done, do the following: * update any applicable configuration options to stop using any deprecated features or options, and perform any required work to transition to alternatives. All the deprecated features and options will be supported for one release cycle, so should be removed before your next upgrade is performed. * upgrade python-ironicclient along with any other services connecting to the Bare Metal service as a client, such as nova-compute * run the ``ironic-dbsync online_data_migrations`` command to make sure that data migrations are applied. The command lets you limit the impact of the data migrations with the ``--max-count`` option, which limits the number of migrations executed in one run. You should complete all of the migrations as soon as possible after the upgrade. .. warning:: You will not be able to start an upgrade to the release after this one, until this has been completed for the current release. For example, as part of upgrading from Ocata to Pike, you need to complete Pike's data migrations. If this is not done, you will not be able to upgrade to Queens -- it will not be possible to execute Queens' database schema updates. Rolling upgrades ================ To reduce downtime, the services can be upgraded in a rolling fashion, meaning to upgrade one or a few services at a time to minimize impact. Rolling upgrades are available starting with the Pike release. This feature makes it possible to upgrade between releases, such as Ocata to Pike, with minimal to no downtime of the Bare Metal API. 
Requirements ------------ To facilitate an upgrade in a rolling fashion, you need to have a highly-available deployment consisting of at least two ironic-api and two ironic-conductor services. Use of a load balancer to balance requests across the ironic-api services is recommended, as it allows for a minimal impact to end users. Concepts -------- There are four aspects of the rolling upgrade process to keep in mind: * API and RPC version pinning, and versioned object backports * online data migrations * graceful service shutdown * API load balancer draining API & RPC version pinning and versioned object backports ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Through careful RPC versioning, newer services are able to talk to older services (and vice-versa). The ``[DEFAULT]/pin_release_version`` configuration option is used for this. It should be set (pinned) to the release version that the older services are using. The newer services will backport RPC calls and objects to their appropriate versions from the pinned release. If the ``IncompatibleObjectVersion`` exception occurs, it is most likely due to an incorrect or unspecified ``[DEFAULT]/pin_release_version`` configuration value. For example, when ``[DEFAULT]/pin_release_version`` is not set to the older release version, no conversion will happen during the upgrade. For the ironic-api service, the API version is pinned via the same ``[DEFAULT]/pin_release_version`` configuration option as above. When pinned, the new ironic-api services will not service any API requests with Bare Metal API versions that are higher than what the old ironic-api services support. HTTP status code 406 is returned for such requests. This prevents new features (available in new API versions) from being used until after the upgrade has been completed. Online data migrations ~~~~~~~~~~~~~~~~~~~~~~ To make database schema migrations less painful to execute, we have implemented process changes to facilitate upgrades. 
* All data migrations are banned from schema migration scripts. * Schema migration scripts only update the database schema. * Data migrations must be done at the end of the rolling upgrade process, after the schema migration and after the services have been upgraded to the latest release. All data migrations are performed using the ``ironic-dbsync online_data_migrations`` command. It can be run as a background process so that it does not interrupt running services; however it must be run to completion for a cold upgrade if the intent is to make use of new features immediately. (You would also execute the same command with services turned off if you are doing a cold upgrade). This data migration must be completed. If not, you will not be able to upgrade to future releases. For example, if you had upgraded from Ocata to Pike but did not do the data migrations, you will not be able to upgrade from Pike to Queens. (More precisely, you will not be able to apply Queens' schema migrations.) Graceful conductor service shutdown ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ironic-conductor service is a Python process listening for messages on a message queue. When the operator sends the SIGTERM signal to the process, the service stops consuming messages from the queue, so that no additional work is picked up. It completes any outstanding work and then terminates. During this process, messages can be left on the queue and will be processed after the Python process starts back up. This gives us a way to shutdown a service using older code, and start up a service using newer code with minimal impact. .. note:: This was tested with RabbitMQ messaging backend and may vary with other backends. Nodes that are being acted upon by an ironic-conductor process, which are not in a stable state, may encounter failures. 
Node failures that occur during an upgrade are likely due to timeouts, resulting from delays involving messages being processed and acted upon by a conductor during long running, multi-step processes such as deployment or cleaning. API load balancer draining ~~~~~~~~~~~~~~~~~~~~~~~~~~ If you are using a load balancer for the ironic-api services, we recommend that you redirect requests to the new API services and drain off of the ironic-api services that have not yet been upgraded. Rolling upgrade process ----------------------- Before maintenance window ~~~~~~~~~~~~~~~~~~~~~~~~~ * Upgrade the ironic-python-agent image * Using the new release (ironic code), execute the required database schema updates by running the database upgrade command: ``ironic-dbsync upgrade``. These schema change operations should have minimal or no effect on performance, and should not cause any operations to fail (but please check the release notes). You can: * install the new release on an existing system * install the new release in a new virtualenv or a container At this point, new columns and tables may exist in the database. These database schema changes are done in a way that both the old and new (N and N+1) releases can perform operations against the same schema. .. note:: Ironic bases its API, RPC and object storage format versions on the ``[DEFAULT]/pin_release_version`` configuration option. It is advisable to automate the deployment of changes in configuration files to make the process less error prone and repeatable. During maintenance window ~~~~~~~~~~~~~~~~~~~~~~~~~ #. All ironic-conductor services should be upgraded first. Ensure that at least one ironic-conductor service is running at all times. For every ironic-conductor, either one by one or a few at a time: * shut down the service. 
Messages from the ironic-api services to the conductors are load-balanced by the message queue and a hash-ring, so the only thing you need to worry about is to shut the service down gracefully (using ``SIGTERM`` signal) to make sure it will finish all the requests being processed before shutting down. * upgrade the installed version of ironic and dependencies * set the ``[DEFAULT]/pin_release_version`` configuration option value to the version you are upgrading from (that is, the old version). Based on this setting, the new ironic-conductor services will downgrade any RPC communication and data objects to conform to the old service. For example, if you are upgrading from Ocata to Pike, set this value to ``ocata``. * start the service #. The next service to upgrade is ironic-api. Ensure that at least one ironic-api service is running at all times. You may want to start another temporary instance of the older ironic-api to handle the load while you are upgrading the original ironic-api services. For every ironic-api service, either one by one or a few at a time: * in HA deployment you are typically running them behind a load balancer (for example HAProxy), so you need to take the service instance out of the balancer * shut it down * upgrade the installed version of ironic and dependencies * set the ``[DEFAULT]/pin_release_version`` configuration option value to the version you are upgrading from (that is, the old version). Based on this setting, the new ironic-api services will downgrade any RPC communication and data objects to conform to the old service. In addition, the new services will return HTTP status code 406 for any requests with newer API versions that the old services did not support. This prevents new features (available in new API versions) from being used until after the upgrade has been completed. For example, if you are upgrading from Ocata to Pike, set this value to ``ocata``. 
* restart the service * add it back into the load balancer After upgrading all the ironic-api services, the Bare Metal service is running in the new version but with downgraded RPC communication and database object storage formats. New features (in new API versions) are not supported, because they could fail when objects are in the downgraded object formats and some internal RPC API functions may still not be available. #. For all the ironic-conductor services, one at a time: * remove the ``[DEFAULT]/pin_release_version`` configuration option setting * restart the ironic-conductor service #. For all the ironic-api services, one at a time: * remove the ``[DEFAULT]/pin_release_version`` configuration option setting * restart the ironic-api service After maintenance window ~~~~~~~~~~~~~~~~~~~~~~~~ Now that all the services are upgraded, the system is able to use the latest version of the RPC protocol and able to access all the features of the new release. * Update any applicable configuration options to stop using any deprecated features or options, and perform any required work to transition to alternatives. All the deprecated features and options will be supported for one release cycle, so should be removed before your next upgrade is performed. * Upgrade ``python-ironicclient`` along with other services connecting to the Bare Metal service as a client, such as ``nova-compute``. .. warning:: A ``nova-compute`` instance tries to attach VIFs to all active instances on start up. Make sure that for all active nodes there is at least one running ``ironic-conductor`` process to manage them. Otherwise the instances will be moved to the ``ERROR`` state on the ``nova-compute`` start up. * Run the ``ironic-dbsync online_data_migrations`` command to make sure that data migrations are applied. The command lets you limit the impact of the data migrations with the ``--max-count`` option, which limits the number of migrations executed in one run. 
You should complete all of the migrations as soon as possible after the upgrade. .. warning:: Note that you will not be able to start an upgrade to the next release after this one, until this has been completed for the current release. For example, as part of upgrading from Ocata to Pike, you need to complete Pike's data migrations. If this is not done, you will not be able to upgrade to Queens -- it will not be possible to execute Queens' database schema updates. Upgrading from Ocata to Pike ============================ #. Use the ``ironic-dbsync online_data_migrations`` command from the 9.1.1 (or newer) release. The one from older (9.0.0 - 9.1.0) releases could cause a port's physical_network information to be deleted from the database. #. It is required to set the ``resource_class`` field for nodes registered with the Bare Metal service *before* using the Pike version of the Compute service. See :ref:`enrollment` for details. #. It is recommended to move from old-style classic drivers to the new hardware types after the upgrade to Pike. We expect the classic drivers to be deprecated in the Queens release and removed in the Rocky release. See :doc:`upgrade-to-hardware-types` for the details on the migration. Other upgrade instructions are in the `Pike release notes `_. .. toctree:: :maxdepth: 1 upgrade-to-hardware-types.rst Upgrading from Newton to Ocata ============================== There are no specific upgrade instructions other than the `Ocata release notes `_. Upgrading from Mitaka to Newton =============================== There are no specific upgrade instructions other than the `Newton release notes `_. Upgrading from Liberty to Mitaka ================================ There are no specific upgrade instructions other than the `Mitaka release notes `_. 
Upgrading from Kilo to Liberty ============================== In-band Inspection ------------------ If you used in-band inspection with **ironic-discoverd**, it is highly recommended that you switch to using **ironic-inspector**, which is a newer (and compatible on API level) version of the same service. You have to install **python-ironic-inspector-client** during the upgrade. This package contains a client module for the in-band inspection service, which was previously part of the **ironic-discoverd** package. Ironic Liberty supports the **ironic-discoverd** service, but does not support its in-tree client module. Please refer to :ironic-inspector-doc:`ironic-inspector version support matrix ` for details on which ironic versions are compatible with which **ironic-inspector**/**ironic-discoverd** versions. The discoverd to inspector upgrade procedure is as follows: * Install **ironic-inspector** on the machine where you have **ironic-discoverd** (usually the same as conductor). * Update the **ironic-inspector** configuration file to stop using deprecated configuration options, as marked by the comments in the :ironic-inspector-doc:`example.conf `. It is recommended you move the configuration file to ``/etc/ironic-inspector/inspector.conf``. * Shutdown **ironic-discoverd**, and start **ironic-inspector**. * During upgrade of each conductor instance: #. Shutdown the conductor. #. Uninstall **ironic-discoverd**, install **python-ironic-inspector-client**. #. Update the conductor. #. Update ``ironic.conf`` to use ``[inspector]`` section instead of ``[discoverd]`` (option names are the same). #. Start the conductor. Upgrading from Juno to Kilo =========================== When upgrading a cloud from Juno to Kilo, users must ensure the nova service is upgraded prior to upgrading the ironic service. 
Additionally, users need to set a special config flag in nova prior to upgrading to ensure the newer version of nova is not attempting to take advantage of new ironic features until the ironic service has been upgraded. The steps for upgrading your nova and ironic services are as follows: - Edit nova.conf and ensure force_config_drive=False is set in the [DEFAULT] group. Restart nova-compute if necessary. - Install new nova code, run database migrations. - Install new python-ironicclient code. - Restart nova services. - Install new ironic code, run database migrations, restart ironic services. - Edit nova.conf and set force_config_drive to your liking, restarting nova-compute if necessary. Note that during the period between nova's upgrade and ironic's upgrades, instances can still be provisioned to nodes. However, any attempt by users to specify a config drive for an instance will cause an error until ironic's upgrade has completed. Cleaning -------- A new feature starting from Kilo cycle is support for the automated cleaning of nodes between workloads to ensure the node is ready for another workload. This can include erasing the hard drives, updating firmware, and other steps. For more information, see :ref:`automated_cleaning`. If ironic is configured with automated cleaning enabled (defaults to True) and neutron is set as the DHCP provider (also the default), you will need to set the `cleaning_network_uuid` option in the ironic configuration file before starting the ironic service. See :ref:`configure-cleaning` for information on how to set up the cleaning network for ironic. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/admin/upgrade-to-hardware-types.rst0000644000175000017500000002444700000000000025262 0ustar00coreycorey00000000000000Upgrading to Hardware Types =========================== Starting with the Rocky release, the Bare Metal service does not support *classic drivers* any more. If you still use *classic drivers*, please upgrade to *hardware types* immediately. Please see :doc:`/install/enabling-drivers` for details on *hardware types* and *hardware interfaces*. Planning the upgrade -------------------- It is necessary to figure out which hardware types and hardware interfaces correspond to which classic drivers used in your deployment. The following table lists the classic drivers with their corresponding hardware types and the boot, deploy, inspect, management, and power hardware interfaces: ===================== ==================== ==================== ============== ========== ========== ========= Classic Driver Hardware Type Boot Deploy Inspect Management Power ===================== ==================== ==================== ============== ========== ========== ========= agent_ilo ilo ilo-virtual-media direct ilo ilo ilo agent_ipmitool ipmi pxe direct inspector ipmitool ipmitool agent_ipmitool_socat ipmi pxe direct inspector ipmitool ipmitool agent_irmc irmc irmc-virtual-media direct irmc irmc irmc iscsi_ilo ilo ilo-virtual-media iscsi ilo ilo ilo iscsi_irmc irmc irmc-virtual-media iscsi irmc irmc irmc pxe_drac idrac pxe iscsi idrac idrac idrac pxe_drac_inspector idrac pxe iscsi inspector idrac idrac pxe_ilo ilo ilo-pxe iscsi ilo ilo ilo pxe_ipmitool ipmi pxe iscsi inspector ipmitool ipmitool pxe_ipmitool_socat ipmi pxe iscsi inspector ipmitool ipmitool pxe_irmc irmc irmc-pxe iscsi irmc irmc irmc pxe_snmp snmp pxe iscsi no-inspect fake snmp ===================== ==================== ==================== ============== ========== 
========== ========= .. note:: The ``inspector`` *inspect* interface was only used if explicitly enabled in the configuration. Otherwise, ``no-inspect`` was used. .. note:: ``pxe_ipmitool_socat`` and ``agent_ipmitool_socat`` use ``ipmitool-socat`` *console* interface (the default for the ``ipmi`` hardware type), while ``pxe_ipmitool`` and ``agent_ipmitool`` use ``ipmitool-shellinabox``. See Console_ for details. For out-of-tree drivers you may need to reach out to their maintainers or figure out the appropriate interfaces by researching the source code. Configuration ------------- You will need to enable hardware types and interfaces that correspond to your currently enabled classic drivers. For example, if you have the following configuration in your ``ironic.conf``: .. code-block:: ini [DEFAULT] enabled_drivers = pxe_ipmitool,agent_ipmitool You will have to add this configuration as well: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi enabled_boot_interfaces = pxe enabled_deploy_interfaces = iscsi,direct enabled_management_interfaces = ipmitool enabled_power_interfaces = ipmitool .. note:: For every interface type there is an option ``default__interface``, where ```` is the interface type name. For example, one can make all nodes use the ``direct`` deploy method by default by setting: .. code-block:: ini [DEFAULT] default_deploy_interface = direct Migrating nodes --------------- After the required items are enabled in the configuration, each node's ``driver`` field has to be updated to a new value. You may need to also set new values for some or all interfaces: .. 
code-block:: console export OS_BAREMETAL_API_VERSION=1.31 for uuid in $(openstack baremetal node list --driver pxe_ipmitool -f value -c UUID); do openstack baremetal node set $uuid --driver ipmi --deploy-interface iscsi done for uuid in $(openstack baremetal node list --driver agent_ipmitool -f value -c UUID); do openstack baremetal node set $uuid --driver ipmi --deploy-interface direct done See :doc:`/install/enrollment` for more details on setting hardware types and interfaces. .. warning:: It is not recommended to change the interfaces for ``active`` nodes. If absolutely needed, the nodes have to be put in the maintenance mode first: .. code-block:: console openstack baremetal node maintenance set $UUID \ --reason "Changing driver and/or hardware interfaces" # do the update, validate its correctness openstack baremetal node maintenance unset $UUID Other interfaces ---------------- Care has to be taken to migrate from classic drivers using non-default interfaces. This chapter covers a few of the most commonly used. Ironic Inspector ~~~~~~~~~~~~~~~~ Some classic drivers, notably ``pxe_ipmitool``, ``agent_ipmitool`` and ``pxe_drac_inspector``, use ironic-inspector_ for their *inspect* interface. The same functionality is available for all hardware types, but the appropriate ``inspect`` interface has to be enabled in the Bare Metal service configuration file, for example: .. code-block:: ini [DEFAULT] enabled_inspect_interfaces = inspector,no-inspect See :doc:`/install/enabling-drivers` for more details. .. note:: The configuration option ``[inspector]enabled`` does not affect hardware types. Then you can tell your nodes to use this interface, for example: .. code-block:: console export OS_BAREMETAL_API_VERSION=1.31 for uuid in $(openstack baremetal node list --driver ipmi -f value -c UUID); do openstack baremetal node set $uuid --inspect-interface inspector done .. 
note:: A node configured with the IPMI hardware type, will use the inspector inspection implementation automatically if it is enabled. This is not the case for the most of the vendor drivers. .. _ironic-inspector: https://docs.openstack.org/ironic-inspector/ Console ~~~~~~~ Several classic drivers, notably ``pxe_ipmitool_socat`` and ``agent_ipmitool_socat``, use socat-based serial console implementation. For the ``ipmi`` hardware type it is used by default, if enabled in the configuration file: .. code-block:: ini [DEFAULT] enabled_console_interfaces = ipmitool-socat,no-console If you want to use the ``shellinabox`` implementation instead, it has to be enabled as well: .. code-block:: ini [DEFAULT] enabled_console_interfaces = ipmitool-shellinabox,no-console Then you need to update some or all nodes to use it explicitly. For example, to update all nodes use: .. code-block:: console export OS_BAREMETAL_API_VERSION=1.31 for uuid in $(openstack baremetal node list --driver ipmi -f value -c UUID); do openstack baremetal node set $uuid --console-interface ipmitool-shellinabox done RAID ~~~~ Many classic drivers, including ``pxe_ipmitool`` and ``agent_ipmitool`` use the IPA-based in-band RAID implementation by default. For the hardware types it is not used by default. To use it, you need to enable it in the configuration first: .. code-block:: ini [DEFAULT] enabled_raid_interfaces = agent,no-raid Then you can update those nodes that support in-band RAID to use the ``agent`` RAID interface. For example, to update all nodes use: .. code-block:: console export OS_BAREMETAL_API_VERSION=1.31 for uuid in $(openstack baremetal node list --driver ipmi -f value -c UUID); do openstack baremetal node set $uuid --raid-interface agent done .. note:: The ability of a node to use the ``agent`` RAID interface depends on the ramdisk (more specifically, a :ironic-python-agent-doc:`hardware manager ` used in it), not on the driver. 
Network and storage ~~~~~~~~~~~~~~~~~~~ The network and storage interfaces have always been dynamic, and thus do not require any special treatment during upgrade. Vendor ~~~~~~ Classic drivers are allowed to use the ``VendorMixin`` functionality to combine and expose several node or driver vendor passthru methods from different vendor interface implementations in one driver. **This is no longer possible with hardware types.** With hardware types, a vendor interface can only have a single active implementation from the list of vendor interfaces supported by a given hardware type. Ironic no longer has in-tree drivers (both classic and hardware types) that rely on this ``VendorMixin`` functionality support. However if you are using an out-of-tree classic driver that depends on it, you'll need to do the following in order to use vendor passthru methods from different vendor passthru implementations: #. While creating a new hardware type to replace your classic driver, specify all vendor interface implementations your classic driver was using to build its ``VendorMixin`` as supported vendor interfaces (property ``supported_vendor_interfaces`` of the Python class that defines your hardware type). #. Ensure all required vendor interfaces are enabled in the ironic configuration file under the ``[DEFAULT]enabled_vendor_interfaces`` option. You should also consider setting the ``[DEFAULT]default_vendor_interface`` option to specify the vendor interface for nodes that do not have one set explicitly. #. Before invoking a specific vendor passthru method, make sure that the node's vendor interface is set to the interface with the desired vendor passthru method. For example, if you want to invoke the vendor passthru method ``vendor_method_foo()`` from ``vendor_foo`` vendor interface: .. 
code-block:: shell # set the vendor interface to 'vendor_foo` openstack --os-baremetal-api-version 1.31 baremetal node set --vendor-interface vendor_foo # invoke the vendor passthru method openstack baremetal node passthru call vendor_method_foo ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1823993 ironic-14.0.1.dev163/doc/source/cli/0000755000175000017500000000000000000000000017170 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/cli/index.rst0000644000175000017500000000024000000000000021025 0ustar00coreycorey00000000000000Command References ================== Here are references for commands not elsewhere documented. .. toctree:: :maxdepth: 1 ironic-dbsync ironic-status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/cli/ironic-dbsync.rst0000644000175000017500000001430100000000000022464 0ustar00coreycorey00000000000000============= ironic-dbsync ============= The :command:`ironic-dbsync` utility is used to create the database schema tables that the ironic services will use for storage. It can also be used to upgrade existing database tables when migrating between different versions of ironic. The `Alembic library `_ is used to perform the database migrations. Options ======= This is a partial list of the most useful options. To see the full list, run the following:: ironic-dbsync --help .. program:: ironic-dbsync .. option:: -h, --help Show help message and exit. .. option:: --config-dir Path to a config directory with configuration files. .. option:: --config-file Path to a configuration file to use. .. option:: -d, --debug Print debugging output. .. option:: --version Show the program's version number and exit. .. 
option:: upgrade, stamp, revision, version, create_schema, online_data_migrations The :ref:`command ` to run. Usage ===== Options for the various :ref:`commands ` for :command:`ironic-dbsync` are listed when the :option:`-h` or :option:`--help` option is used after the command. For example:: ironic-dbsync create_schema --help Information about the database is read from the ironic configuration file used by the API server and conductor services. This file must be specified with the :option:`--config-file` option:: ironic-dbsync --config-file /path/to/ironic.conf create_schema The configuration file defines the database backend to use with the *connection* database option:: [database] connection=mysql+pymysql://root@localhost/ironic If no configuration file is specified with the :option:`--config-file` option, :command:`ironic-dbsync` assumes an SQLite database. .. _dbsync_cmds: Command Options =============== :command:`ironic-dbsync` is given a command that tells the utility what actions to perform. These commands can take arguments. Several commands are available: .. _create_schema: create_schema ------------- .. program:: create_schema .. option:: -h, --help Show help for create_schema and exit. This command will create database tables based on the most current version. It assumes that there are no existing tables. An example of creating database tables with the most recent version:: ironic-dbsync --config-file=/etc/ironic/ironic.conf create_schema online_data_migrations ---------------------- .. program:: online_data_migrations .. option:: -h, --help Show help for online_data_migrations and exit. .. option:: --max-count The maximum number of objects (a positive value) to migrate. Optional. If not specified, all the objects will be migrated (in batches of 50 to avoid locking the database for long periods of time). .. option:: --option If a migration accepts additional parameters, they can be passed via this argument. It can be specified several times. 
This command will migrate objects in the database to their most recent versions. This command must be successfully run (return code 0) before upgrading to a future release. It returns: * 1 (not completed) if there are still pending objects to be migrated. Before upgrading to a newer release, this command must be run until 0 is returned. * 0 (success) after migrations are finished or there are no data to migrate * 127 (error) if max-count is not a positive value or an option is invalid * 2 (error) if the database is not compatible with this release. This command needs to be run using the previous release of ironic, before upgrading and running it with this release. revision -------- .. program:: revision .. option:: -h, --help Show help for revision and exit. .. option:: -m , --message The message to use with the revision file. .. option:: --autogenerate Compares table metadata in the application with the status of the database and generates migrations based on this comparison. This command will create a new revision file. You can use the :option:`--message` option to comment the revision. This is really only useful for ironic developers making changes that require database changes. This revision file is used during database migration and will specify the changes that need to be made to the database tables. Further discussion is beyond the scope of this document. stamp ----- .. program:: stamp .. option:: -h, --help Show help for stamp and exit. .. option:: --revision The revision number. This command will 'stamp' the revision table with the version specified with the :option:`--revision` option. It will not run any migrations. upgrade ------- .. program:: upgrade .. option:: -h, --help Show help for upgrade and exit. .. option:: --revision The revision number to upgrade to. This command will upgrade existing database tables to the most recent version, or to the version specified with the :option:`--revision` option. 
Before this ``upgrade`` is invoked, the command :command:`ironic-dbsync online_data_migrations` must have been successfully run using the previous version of ironic (if you are doing an upgrade as opposed to a new installation of ironic). If it wasn't run, the database will not be compatible with this recent version of ironic, and this command will return 2 (error). If there are no existing tables, then new tables are created, beginning with the oldest known version, and successively upgraded using all of the database migration files, until they are at the specified version. Note that this behavior is different from the :ref:`create_schema` command that creates the tables based on the most recent version. An example of upgrading to the most recent table versions:: ironic-dbsync --config-file=/etc/ironic/ironic.conf upgrade .. note:: This command is the default if no command is given to :command:`ironic-dbsync`. .. warning:: The upgrade command is not compatible with SQLite databases since it uses ALTER TABLE commands to upgrade the database tables. SQLite supports only a limited subset of ALTER TABLE. version ------- .. program:: version .. option:: -h, --help Show help for version and exit. This command will output the current database version. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/cli/ironic-status.rst0000644000175000017500000000337000000000000022531 0ustar00coreycorey00000000000000============= ironic-status ============= Synopsis ======== :: ironic-status [] Description =========== :program:`ironic-status` is a tool that provides routines for checking the status of a Ironic deployment. Options ======= The standard pattern for executing a :program:`ironic-status` command is:: ironic-status [] Run without arguments to see a list of available command categories:: ironic-status Categories are: * ``upgrade`` Detailed descriptions are below. 
You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: ironic-status upgrade These sections describe the available categories and arguments for :program:`ironic-status`. Upgrade ~~~~~~~ .. _ironic-status-checks: ``ironic-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. This command expects to have complete configuration and access to databases and services. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **12.0.0 (Stein)** * Adds a check for compatibility of the object versions with the release of ironic. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/conf.py0000644000175000017500000001236300000000000017725 0ustar00coreycorey00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import sys import eventlet # NOTE(dims): monkey patch subprocess to prevent failures in latest eventlet # See https://github.com/eventlet/eventlet/issues/398 try: eventlet.monkey_patch(subprocess=True) except TypeError: pass # -- General configuration ---------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.join(os.path.abspath('.'), '_exts')) # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.viewcode', 'sphinx.ext.graphviz', 'sphinxcontrib.httpdomain', 'sphinxcontrib.pecanwsme.rest', 'sphinxcontrib.seqdiag', 'sphinxcontrib.apidoc', 'sphinxcontrib.rsvgconverter', 'oslo_config.sphinxext', 'oslo_config.sphinxconfiggen', 'oslo_policy.sphinxext', 'oslo_policy.sphinxpolicygen', 'automated_steps', 'openstackdocstheme' ] # sphinxcontrib.apidoc options apidoc_module_dir = '../../ironic' apidoc_output_dir = 'contributor/api' apidoc_excluded_paths = [ 'db/sqlalchemy/alembic/env', 'db/sqlalchemy/alembic/versions/*', 'drivers/modules/ansible/playbooks*', 'hacking', 'tests', ] apidoc_separate_modules = True repository_name = 'openstack/ironic' use_storyboard = True openstack_projects = [ 'bifrost', 'cinder', 'glance', 'ironic', 'ironic-inspector', 'ironic-lib', 'ironic-neutron-agent', 'ironic-python-agent', 'ironic-ui', 'keystone', 'keystonemiddleware', 'metalsmith', 'networking-baremetal', 'neutron', 'nova', 'oslo.messaging', 'oslo.reports', 'oslo.versionedobjects', 'oslotest', 'osprofiler', 'os-traits', 'python-ironicclient', 'python-ironic-inspector-client', 'python-openstackclient', 'swift', ] wsme_protocols = ['restjson'] # autodoc generation is a bit aggressive and a nuisance when doing 
heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. copyright = u'OpenStack Foundation' config_generator_config_file = '../../tools/config/ironic-config-generator.conf' sample_config_basename = '_static/ironic' policy_generator_config_file = '../../tools/policy/ironic-policy-generator.conf' sample_policy_basename = '_static/ironic' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['ironic.'] # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of glob-style patterns that should be excluded when looking for # source files. They are matched against the source file names relative to the # source directory, using slashes as directory separators on all platforms. exclude_patterns = ['api/ironic.drivers.modules.ansible.playbooks.*', 'api/ironic.tests.*'] # Ignore the following warning: WARNING: while setting up extension # wsmeext.sphinxext: directive 'autoattribute' is already registered, # it will be overridden. suppress_warnings = ['app.add_directive'] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'openstackdocs' # Output file base name for HTML help builder. htmlhelp_basename = 'Ironicdoc' latex_use_xindy = False # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ( 'index', 'doc-ironic.tex', u'Ironic Documentation', u'OpenStack Foundation', 'manual' ), ] # Allow deeper levels of nesting for \begin...\end stanzas latex_elements = {'maxlistdepth': 10} # -- Options for seqdiag ------------------------------------------------------ seqdiag_html_image_format = "SVG" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1823993 ironic-14.0.1.dev163/doc/source/configuration/0000755000175000017500000000000000000000000021270 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/configuration/config.rst0000644000175000017500000000044000000000000023265 0ustar00coreycorey00000000000000===================== Configuration Options ===================== The following is an overview of all available configuration options in Ironic. For a sample configuration file, refer to :doc:`sample-config`. .. show-options:: :config-file: tools/config/ironic-config-generator.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/configuration/index.rst0000644000175000017500000000102600000000000023130 0ustar00coreycorey00000000000000======================= Configuration Reference ======================= Many aspects of the Bare Metal service are specific to the environment it is deployed in. The following pages describe configuration options that can be used to adjust the service to your particular situation. .. toctree:: :maxdepth: 1 Configuration Options Policies .. only:: html Sample files ------------ .. 
toctree:: :maxdepth: 1 Sample Config File Sample Policy File ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/configuration/policy.rst0000644000175000017500000000035400000000000023323 0ustar00coreycorey00000000000000======== Policies ======== The following is an overview of all available policies in Ironic. For a sample configuration file, refer to :doc:`sample-policy`. .. show-policy:: :config-file: tools/policy/ironic-policy-generator.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/configuration/sample-config.rst0000644000175000017500000000111600000000000024545 0ustar00coreycorey00000000000000========================= Sample Configuration File ========================= The following is a sample Ironic configuration for adaptation and use. For a detailed overview of all available configuration options, refer to :doc:`config`. The sample configuration can also be viewed in :download:`file form `. .. important:: The sample configuration file is auto-generated from Ironic when this documentation is built. You must ensure your version of Ironic matches the version of this documentation. .. literalinclude:: /_static/ironic.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/configuration/sample-policy.rst0000644000175000017500000000062600000000000024604 0ustar00coreycorey00000000000000============= Ironic Policy ============= The following is a sample Ironic policy file, autogenerated from Ironic when this documentation is built. To prevent conflicts, ensure your version of Ironic aligns with the version of this documentation. The sample policy can also be downloaded as a :download:`file `. .. 
literalinclude:: /_static/ironic.policy.yaml.sample ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1863992 ironic-14.0.1.dev163/doc/source/contributor/0000755000175000017500000000000000000000000020773 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/adding-new-job.rst0000644000175000017500000000452700000000000024322 0ustar00coreycorey00000000000000.. _adding-new-job: ================ Adding a new Job ================ Are you familiar with Zuul? =========================== Before start trying to figure out how Zuul works, take some time and read about `Zuul Config `_ and the `Zuul Best Practices `_. .. _zuul_config: https://zuul-ci.org/docs/zuul/user/config.html .. _zuul_best_practices: https://docs.openstack.org/infra/manual/creators.html#zuul-best-practices Where can I find the existing jobs? =================================== The jobs for the Ironic project are defined under the zuul.d_ folder in the root directory, that contains three files, whose function is described below. * ironic-jobs.yaml_: Contains the configuration of each Ironic Job converted to Zuul v3. * legacy-ironic-jobs.yaml_: Contains the configuration of each Ironic Job that haven't been converted to Zuul v3 yet. * project.yaml_: Contains the jobs that will run during check and gate phase. .. _zuul.d: https://opendev.org/openstack/ironic/src/branch/master/zuul.d .. _ironic-jobs.yaml: https://opendev.org/openstack/ironic/src/branch/master/zuul.d/ironic-jobs.yaml .. _legacy-ironic-jobs.yaml: https://opendev.org/openstack/ironic/src/branch/master/zuul.d/legacy-ironic-jobs.yaml .. 
_project.yaml: https://opendev.org/openstack/ironic/src/branch/master/zuul.d/project.yaml Create a new Job ================ Identify among the existing jobs the one that most closely resembles the scenario you want to test, the existing job will be used as `parent` in your job definition. Now you will only need to either overwrite or add variables to your job definition under the `vars` section to represent the desired scenario. The code block below shows the minimal structure of a new job definition that you need to add to ironic-jobs.yaml_. .. code-block:: yaml - job: name: description: parent: vars: : After having the definition of your new job you just need to add the job name to the project.yaml_ under `check` and `gate`. Only jobs that are voting should be in the `gate` section. .. code-block:: yaml - project: check: jobs: - gate: queue: ironic jobs: - ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/architecture.rst0000644000175000017500000000731300000000000024213 0ustar00coreycorey00000000000000.. _architecture: =================== System Architecture =================== High Level description ====================== An Ironic deployment will be composed of the following components: - An admin-only RESTful `API service`_, by which privileged users, such as cloud operators and other services within the cloud control plane, may interact with the managed bare metal servers. - A `Conductor service`_, which does the bulk of the work. Functionality is exposed via the `API service`_. The Conductor and API services communicate via RPC. - A Database and `DB API`_ for storing the state of the Conductor and Drivers. - A Deployment Ramdisk or Deployment Agent, which provide control over the hardware which is not available remotely to the Conductor. A ramdisk should be built which contains one of these agents, eg. with `diskimage-builder`_. 
This ramdisk can be booted on-demand. .. note:: The agent is never run inside a tenant instance. .. _`architecture_drivers`: Drivers ======= The internal driver API provides a consistent interface between the Conductor service and the driver implementations. A driver is defined by a *hardware type* deriving from the AbstractHardwareType_ class, defining supported *hardware interfaces*. See :doc:`/install/enabling-drivers` for a more detailed explanation. See :doc:`drivers` for an explanation on how to write new hardware types and interfaces. Driver-Specific Periodic Tasks ------------------------------ Drivers may run their own periodic tasks, i.e. actions run repeatedly after a certain amount of time. Such a task is created by using the periodic_ decorator on an interface method. For example :: from futurist import periodics class FakePower(base.PowerInterface): @periodics.periodic(spacing=42) def task(self, manager, context): pass # do something Here the ``spacing`` argument is a period in seconds for a given periodic task. For example 'spacing=5' means every 5 seconds. Driver-Specific Steps --------------------- Drivers may have specific steps that may need to be executed or offered to a user to execute in order to perform specific configuration tasks. These steps should ideally be located on the management interface to enable consistent user experience of the hardware type. What should be avoided is duplication of existing interfaces such as the deploy interface to enable vendor specific cleaning or deployment steps. Message Routing =============== Each Conductor registers itself in the database upon start-up, and periodically updates the timestamp of its record. Contained within this registration is a list of the drivers which this Conductor instance supports. This allows all services to maintain a consistent view of which Conductors and which drivers are available at all times. 
Based on their respective driver, all nodes are mapped across the set of available Conductors using a `consistent hashing algorithm`_. Node-specific tasks are dispatched from the API tier to the appropriate conductor using conductor-specific RPC channels. As Conductor instances join or leave the cluster, nodes may be remapped to different Conductors, thus triggering various driver actions such as take-over or clean-up. .. _API service: webapi.html .. _AbstractHardwareType: api/ironic.drivers.hardware_type.html#ironic.drivers.hardware_type.AbstractHardwareType .. _Conductor service: api/ironic.conductor.manager.html .. _DB API: api/ironic.db.api.html .. _diskimage-builder: https://docs.openstack.org/diskimage-builder/latest/ .. _consistent hashing algorithm: https://docs.openstack.org/tooz/latest/user/tutorial/hashring.html .. _periodic: https://docs.openstack.org/futurist/latest/reference/index.html#futurist.periodics.periodic ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/bios_develop.rst0000644000175000017500000001013700000000000024201 0ustar00coreycorey00000000000000.. _bios_develop: Developing BIOS Interface ========================= To support a driver specific BIOS interface it is necessary to create a class inheriting from the ``BIOSInterface`` class: .. code-block:: python from ironic.drivers import base class ExampleBIOS(base.BIOSInterface): def get_properties(self): return {} def validate(self, task): pass See :doc:`/contributor/drivers` for a detailed explanation of hardware type and interface. The ``get_properties`` and ``validate`` are methods that all driver interfaces have. The hardware interface that supports BIOS settings should also implement the following three methods: * Implement a method named ``cache_bios_settings``. 
This method stores BIOS settings to the ``bios_settings`` table during cleaning operations and updates the ``bios_settings`` table when ``apply_configuration`` or ``factory_reset`` are successfully called. .. code-block:: python from ironic.drivers import base driver_client = importutils.try_import('driver.client') class ExampleBIOS(base.BIOSInterface): def __init__(self): if driver_client is None: raise exception.DriverLoadError( driver=self.__class__.__name__, reason=_("Unable to import driver library")) def cache_bios_settings(self, task): node_id = task.node.id node_info = driver_common.parse_driver_info(task.node) settings = driver_client.get_bios_settings(node_info) create_list, update_list, delete_list, nochange_list = ( objects.BIOSSettingList.sync_node_setting(settings)) if len(create_list) > 0: objects.BIOSSettingList.create( task.context, node_id, create_list) if len(update_list) > 0: objects.BIOSSettingList.save( task.context, node_id, update_list) if len(delete_list) > 0: delete_names = [] for setting in delete_list: delete_names.append(setting.name) objects.BIOSSettingList.delete( task.context, node_id, delete_names) .. note:: ``driver.client`` is vendor specific library to control and manage the bare metal hardware, for example: python-dracclient, sushy. * Implement a method named ``factory_reset``. This method needs to use the ``clean_step`` decorator. It resets BIOS settings to factory default on the given node. It calls ``cache_bios_settings`` automatically to update existing ``bios_settings`` table once successfully executed. .. code-block:: python class ExampleBIOS(base.BIOSInterface): @base.clean_step(priority=0) def factory_reset(self, task): node_info = driver_common.parse_driver_info(task.node) driver_client.reset_bios_settings(node_info) * Implement a method named ``apply_configuration``. This method needs to use the clean_step decorator. It takes the given BIOS settings and applies them on the node. 
It also calls ``cache_bios_settings`` automatically to update existing ``bios_settings`` table after successfully applying given settings on the node. .. code-block:: python class ExampleBIOS(base.BIOSInterface): @base.clean_step(priority=0, argsinfo={ 'settings': { 'description': ( 'A list of BIOS settings to be applied' ), 'required': True } }) def apply_configuration(self, task, settings): node_info = driver_common.parse_driver_info(task.node) driver_client.apply_bios_settings(node_info, settings) The ``settings`` parameter is a list of BIOS settings to be configured. for example:: [ { "setting name": { "name": "String", "value": "String" } }, { "setting name": { "name": "String", "value": "String" } }, ... ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/contributing.rst0000644000175000017500000004142000000000000024235 0ustar00coreycorey00000000000000.. _code-contribution-guide: ============================ So You Want to Contribute... ============================ This document provides some necessary points for developers to consider when writing and reviewing Ironic code. The checklist will help developers get things right. Getting Started =============== If you're completely new to OpenStack and want to contribute to the ironic project, please start by familiarizing yourself with the `Infra Team's Developer Guide `_. This will help you get your accounts set up in Launchpad and Gerrit, familiarize you with the workflow for the OpenStack continuous integration and testing systems, and help you with your first commit. LaunchPad --------- Most of the tools used for OpenStack require a launchpad.net ID for authentication. Ironic previously used to track work on Launchpad, but we have not done so since migrating to Storyboard. .. 
seealso:: * https://launchpad.net Storyboard ---------- The ironic project moved from Launchpad to `StoryBoard `_ for work and task tracking. This provides an aggregate view called a "Project Group" and individual "Projects". A good starting place is the `project group `_ representing the whole of the ironic community, as opposed to the `ironic project `_ storyboard which represents ironic as a repository. Internet Relay Chat 'IRC' ------------------------- Daily contributor discussions take place on IRC in the '#openstack-ironic' channel on Freenode IRC. Please feel free to join us at irc://irc.freenode.net and join our channel! Everything Ironic ~~~~~~~~~~~~~~~~~ Ironic is a community of projects centered around the primary project repository 'ironic', which help facilitate the deployment and management of bare metal resources. This means there are a number of different repositories that fall into the responsibility of the project team and the community. Some of the repositories may not seem strictly hardware related, but they may be tools or things to just make an aspect easier. Related Projects ---------------- There are several projects that are tightly integrated with ironic and which are developed by the same community. .. 
seealso:: * :bifrost-doc:`Bifrost Documentation <>` * :ironic-inspector-doc:`Ironic Inspector Documentation <>` * :ironic-lib-doc:`Ironic Lib Documentation <>` * :ironic-python-agent-doc:`Ironic Python Agent (IPA) Documentation <>` * :python-ironicclient-doc:`Ironic Client Documentation <>` * :python-ironic-inspector-client-doc:`Ironic Inspector Client Documentation <>` Useful Links ------------ Bug/Task tracker https://storyboard.openstack.org/#!/project/943 Mailing list (prefix Subject line with ``[ironic]``) http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss Code Hosting https://opendev.org/openstack/ironic Code Review https://review.opendev.org/#/q/status:open+project:openstack/ironic,n,z Whiteboard https://etherpad.openstack.org/p/IronicWhiteBoard Weekly Meeting Agenda https://wiki.openstack.org/wiki/Meetings/Ironic#Agenda_for_next_meeting Adding New Features =================== Ironic tracks new features using RFEs (Requests for Feature Enhancements) instead of blueprints. These are stories with 'rfe' tag, and they should be submitted before a spec or code is proposed. When a member of the `ironic-core team `_ decides that the proposal is worth implementing, a spec (if needed) and code should be submitted, referencing the RFE task or story ID number. Contributors are welcome to submit a spec and/or code before the RFE is approved, however those patches will not land until the RFE is approved. Feature Submission Process -------------------------- #. Submit a bug report on the `ironic StoryBoard `_. There are two fields that must be filled: 'Title' and 'Description'. 'Tasks' can be added and are associated with a project. If you can't describe it in a sentence or two, it may mean that you are either trying to capture more than one RFE at once, or that you are having a hard time defining what you are trying to solve at all. This may also be a sign that your feature may require a specification document. #. 
Describe the proposed change in the 'Description' field. The description should provide enough details for a knowledgeable developer to understand what is the existing problem in the current platform that needs to be addressed, or what is the enhancement that would make the platform more capable, both from a functional and a non-functional standpoint. #. Submit the story, add an 'rfe' tag to it and assign yourself or whoever is going to work on this feature. #. As soon as a member of the team acknowledges the story, we will move the story to the 'Review' state. As time goes on, discussion about the RFE and whether to approve it will occur. #. Contributors will evaluate the RFE and may advise the submitter to file a spec in the ironic-specs repository to elaborate on the feature request. Typically this is when an RFE requires extra scrutiny, more design discussion, etc. For the spec submission process, please see the `Ironic Specs Process`_. A specific task should be created to track the creation of a specification. #. If a spec is not required, once the discussion has happened and there is positive consensus among the ironic-core team on the RFE, the RFE is 'approved', and its tag will move from 'rfe' to 'rfe-approved'. This means that the feature is approved and the related code may be merged. #. If a spec is required, the spec must be submitted (with a new task as part of the story referenced as 'Task' in the commit message), reviewed, and merged before the RFE will be 'approved' (and the tag changed to 'rfe-approved'). #. The task then goes through the usual process -- first to 'Review' when the spec/code is being worked on, then 'Merged' when it is implemented. #. If the RFE is rejected, the ironic-core team will move the story to "Invalid" status. Change Tracking --------------- We track our stories and tasks in Storyboard. 
https://storyboard.openstack.org/#!/project/ironic When working on an RFE, please be sure to tag your commits properly: "Story: #xxxx" or "Task: #xxxx". It is also helpful to set a consistent review topic, such as "story/xxxx" for all patches related to the RFE. If the RFE spans across several projects (e.g. ironic and python-ironicclient), but the main work is going to happen within ironic, please use the same story for all the code you're submitting, there is no need to create a separate RFE in every project. .. note:: **RFEs may only be approved by members of the ironic-core team**. .. note:: While not strictly required for minor changes and fixes, it is highly preferred by the Ironic community that any change which needs to be backported, have a recorded Story and Task in Storyboard. Managing Change Sets -------------------- If you would like some help, or if you (or some members of your team) are unable to continue working on the feature, updating and maintaining the changes, please let the rest of the ironic community know. You could leave a comment in one or more of the changes/patches, bring it up in IRC, the weekly meeting, or on the OpenStack development email list. Communicating this will make other contributors aware of the situation and allow for others to step forward and volunteer to continue with the work. In the event that a contributor leaves the community, do not expect the contributor's changes to be continued unless someone volunteers to do so. Getting Your Patch Merged ------------------------- Within the Ironic project, we generally require two core reviewers to sign-off (+2) change sets. We also will generally recognize non-core (+1) reviewers, and sometimes even reverse our decision to merge code based upon their reviews. We recognize that some repositories have less visibility, as such it is okay to ask for a review in our IRC channel. Please be prepared to stay in IRC for a little while in case we have questions. 
Sometimes we may also approve patches with a single core reviewer. This is generally discouraged, but sometimes necessary. When we do so, we try to explain why we do so. As a patch submitter, it equally helps us to understand why the change is important. Generally, more detail and context helps us understand the change faster. Timeline Expectations --------------------- As with any large project, it does take time for features and changes to be merged in any of the project repositories. This is largely due to limited review bandwidth coupled with varying reviewer priorities and focuses. When establishing an understanding of complexity, the following things should be kept in mind. * Generally, small and minor changes can gain consensus and merge fairly quickly. These sorts of changes would be: bug fixes, minor documentation updates, follow-up changes. * Medium changes generally consist of driver feature parity changes, where one driver is working to match functionality of another driver. * These changes generally only require an RFE for the purposes of tracking and correlating the change. * Documentation updates are expected to be submitted with or immediately following the initial change set. * Larger or controversial changes generally take much longer to merge. This is often due to the necessity of reviewers to gain additional context and for change sets to be iterated upon to reach a state where there is consensus. These sorts of changes include: database, object, internal interface additions, RPC, rest API changes. * These changes will very often require specifications to reach consensus, unless there are pre-existing patterns or code already present. * These changes may require many reviews and iterations, and can also expect to be impacted by merge conflicts as other code or features are merged. * These changes must typically be split into a series of changes. Reviewers typically shy away from larger single change sets due to increased difficulty in reviewing. 
* Do not expect any API or user-visible data model changes to merge after the API client freeze. Some substrate changes may merge if not user visible. * You should expect complex features, such as cross-project features or integration, to take longer than a single development cycle to land. * Building consensus is vital. * Often these changes are controversial or have multiple considerations that need to be worked through in the specification process, which may cause the design to change. As such, it may take months to reach consensus over design. * These features are best broken into larger chunks and tackled in an incremental fashion. Live Upgrade Related Concerns ----------------------------- See :doc:`/contributor/rolling-upgrades`. Driver Internal Info ~~~~~~~~~~~~~~~~~~~~ The ``driver_internal_info`` node field was introduced in the Kilo release. It allows driver developers to store internal information that can not be modified by end users. Here is the list of existing common and agent driver attributes: * Common attributes: * ``is_whole_disk_image``: A Boolean value to indicate whether the user image contains ramdisk/kernel. * ``clean_steps``: An ordered list of clean steps that will be performed on the node. * ``deploy_steps``: An ordered list of deploy steps that will be performed on the node. Support for deploy steps was added in the ``11.1.0`` release. * ``instance``: A list of dictionaries containing the disk layout values. * ``root_uuid_or_disk_id``: A String value of the bare metal node's root partition uuid or disk id. * ``persistent_boot_device``: A String value of device from ``ironic.common.boot_devices``. * ``is_next_boot_persistent``: A Boolean value to indicate whether the next boot device is ``persistent_boot_device``. * Agent driver attributes: * ``agent_url``: A String value of IPA API URL so that Ironic can talk to IPA ramdisk. * ``hardware_manager_version``: A String value of the version of the hardware manager in IPA ramdisk. 
* ``target_raid_config``: A Dictionary containing the target RAID configuration. This is a copy of the same name attribute in Node object. But this one is never actually saved into DB and is only read by IPA ramdisk. .. note:: These are only some fields in use. Other vendor drivers might expose more ``driver_internal_info`` properties, please check their development documentation and/or module docstring for details. It is important for developers to make sure these properties follow the precedent of prefixing their variable names with a specific interface name (e.g., ilo_bar, drac_xyz), so as to minimize or avoid any conflicts between interfaces. Ironic Specs Process -------------------- Specifications must follow the template which can be found at `specs/template.rst `_, which is quite self-documenting. Specifications are proposed by adding them to the `specs/approved` directory, adding a soft link to it from the `specs/not-implemented` directory, and posting it for review to Gerrit. For more information, please see the `README `_. The same `Gerrit process `_ as with source code, using the repository `ironic-specs `_, is used to add new specifications. All approved specifications are available at: https://specs.openstack.org/openstack/ironic-specs. If a specification has been approved but not completed within one or more releases since the approval, it may be re-reviewed to make sure it still makes sense as written. Ironic specifications are part of the `RFE (Requests for Feature Enhancements) process <#adding-new-features>`_. You are welcome to submit patches associated with an RFE, but they will have a -2 ("do not merge") until the specification has been approved. This is to ensure that the patches don't get accidentally merged beforehand. You will still be able to get reviewer feedback and push new patch sets, even with a -2. The `list of core reviewers `_ for the specifications is small but mighty. 
(This is not necessarily the same list of core reviewers for code patches.) Changes to existing specs ------------------------- For approved but not-completed specs: - cosmetic cleanup, fixing errors, and changing the definition of a feature can be done to the spec. For approved and completed specs: - changing a previously approved and completed spec should only be done for cosmetic cleanup or fixing errors. - changing the definition of the feature should be done in a new spec. Please see the `Ironic specs process wiki page `_ for further reference. Bug Reporting ============= Bugs can be reported via our Task and Bug tracking tool Storyboard. When filing bugs, please include as much detail as possible, and don't be shy. Essential pieces of information are generally: * Contents of the 'node' - `openstack baremetal node show ` * Steps to reproduce the issue. * Exceptions and surrounding lines from the logs. * Versions of ironic, ironic-python-agent, and any other coupled components. Please also set your expectations of what *should* be happening. Statements of user expectations are how we understand what is occurring and how we learn new use cases! Project Team Leader Duties ========================== The ``Project Team Leader`` or ``PTL`` is elected each development cycle by the contributors to the ironic community. Think of this person as your primary contact if you need to try and rally the project, or have a major issue that requires attention. They serve a role that is mainly oriented towards trying to drive the technical discussion forward and managing the idiosyncrasies of the project. With this responsibility, they are considered a "public face" of the project and are generally obliged to try and provide "project updates" and outreach communication. All common PTL duties are enumerated here in the `PTL guide `_. Tasks like release management or preparation for a release are generally delegated within the team. 
Even outreach can be delegated, and specifically there is no rule stating that any member of the community can't propose a release, clean-up release notes or documentation, or even get on the occasional stage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/debug-ci-failures.rst0000644000175000017500000000206200000000000025014 0ustar00coreycorey00000000000000.. _debug-ci-failures: ===================== Debugging CI failures ===================== If you see `FAILURE` in one or more jobs for your patch please don't panic. This guide may help you to find the initial reason for the failure. When clicking in the failed job you will be redirect to the Zuul web page that contains all the information about the job build. Zuul Web Page ============= The page has three tabs: `Summary`, `Logs` and `Console`. * Summary: Contains overall information about the build of the job, if the job build failed it will contain a general output of the failure. * Logs: Contains all configurations and log files about all services that were used in the job. This will give you an overall idea of the failures and you can identify services that may be involved. The `job-output` file can give an overall idea of the failures and what services may be involved. * Console: Contains all the playbooks that were executed, by clicking in the arrow before each playbook name you can find the roles and commands that were executed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/deploy-steps.rst0000644000175000017500000000306200000000000024156 0ustar00coreycorey00000000000000Developing a new Deploy Step ============================ To support customized deployment step, implement a new method in an interface class and use the decorator ``deploy_step`` defined in ``ironic/drivers/base.py``. 
For example, we will implement a ``do_nothing`` deploy step in the ``AgentDeploy`` class. .. code-block:: python class AgentDeploy(AgentDeployMixin, base.DeployInterface): ... @base.deploy_step(priority=200, argsinfo={ 'test_arg': { 'description': ( "This is a test argument." ), 'required': True } }) def do_nothing(self, task, **kwargs): return None After deployment of the baremetal node, check the updated deploy steps:: openstack baremetal node show $node_ident -f json -c driver_internal_info The above command outputs the ``driver_internal_info`` as following:: { "driver_internal_info": { ... "deploy_steps": [ { "priority": 200, "interface": "deploy", "step": "do_nothing", "argsinfo": { "test_arg": { "required": True, "description": "This is a test argument." } } }, { "priority": 100, "interface": "deploy", "step": "deploy", "argsinfo": null } ], "deploy_step_index": 1 } } .. note:: Similarly, clean steps can be implemented using the ``clean_step`` decorator. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/dev-quickstart.rst0000644000175000017500000010100400000000000024467 0ustar00coreycorey00000000000000.. _dev-quickstart: ===================== Developer Quick-Start ===================== This is a quick walkthrough to get you started developing code for Ironic. This assumes you are already familiar with submitting code reviews to an OpenStack project. The gate currently runs the unit tests under Python 3.6 and Python 3.7. It is strongly encouraged to run the unit tests locally prior to submitting a patch. .. note:: Do not run unit tests on the same environment as devstack due to conflicting configuration with system dependencies. .. note:: This document is compatible with Python (3.7), Ubuntu (18.04) and Fedora (31). When referring to different versions of Python and OS distributions, this is explicitly stated. .. 
seealso:: https://docs.openstack.org/infra/manual/developers.html#development-workflow Prepare Development System ========================== System Prerequisites -------------------- The following packages cover the prerequisites for a local development environment on most current distributions. Instructions for getting set up with non-default versions of Python and on older distributions are included below as well. - Ubuntu/Debian:: sudo apt-get install build-essential python-dev libssl-dev python-pip libmysqlclient-dev libxml2-dev libxslt-dev libpq-dev git git-review libffi-dev gettext ipmitool psmisc graphviz libjpeg-dev - RHEL7/CentOS7:: sudo yum install python-devel openssl-devel python-pip mysql-devel libxml2-devel libxslt-devel postgresql-devel git git-review libffi-devel gettext ipmitool psmisc graphviz gcc libjpeg-turbo-devel If using RHEL and yum reports "No package python-pip available" and "No package git-review available", use the EPEL software repository. Instructions can be found at ``_. - Fedora:: sudo dnf install python-devel openssl-devel python-pip mysql-devel libxml2-devel libxslt-devel postgresql-devel git git-review libffi-devel gettext ipmitool psmisc graphviz gcc libjpeg-turbo-devel Additionally, if using Fedora 23, ``redhat-rpm-config`` package should be installed so that development virtualenv can be built successfully. - openSUSE/SLE 12:: sudo zypper install git git-review libffi-devel libmysqlclient-devel libopenssl-devel libxml2-devel libxslt-devel postgresql-devel python-devel python-nose python-pip gettext-runtime psmisc Graphviz is only needed for generating the state machine diagram. To install it on openSUSE or SLE 12, see ``_. To run the tests locally, it is a requirement that your terminal emulator supports unicode with the ``en_US.UTF8`` locale. If you use locale-gen to manage your locales, make sure you have enabled ``en_US.UTF8`` in ``/etc/locale.gen`` and rerun ``locale-gen``. 
Python Prerequisites -------------------- If your distro has at least tox 1.8, use similar command to install ``python-tox`` package. Otherwise install this on all distros:: sudo pip install -U tox You may need to explicitly upgrade virtualenv if you've installed the one from your OS distribution and it is too old (tox will complain). You can upgrade it individually, if you need to:: sudo pip install -U virtualenv Running Unit Tests Locally ========================== If you haven't already, Ironic source code should be pulled directly from git:: # from your home or source directory cd ~ git clone https://opendev.org/openstack/ironic cd ironic Running Unit and Style Tests ---------------------------- All unit tests should be run using tox. To run Ironic's entire test suite:: # to run the py3 unit tests, and the style tests tox To run a specific test or tests, use the "-e" option followed by the tox target name. For example:: # run the unit tests under py36 and also run the pep8 tests tox -epy36 -epep8 You may pass options to the test programs using positional arguments. To run a specific unit test, this passes the desired test (regex string) to `stestr `_:: # run a specific test for Python 3.6 tox -epy36 -- test_conductor Debugging unit tests -------------------- In order to break into the debugger from a unit test we need to insert a breaking point to the code: .. code-block:: python import pdb; pdb.set_trace() Then run ``tox`` with the debug environment as one of the following:: tox -e debug tox -e debug test_file_name tox -e debug test_file_name.TestClass tox -e debug test_file_name.TestClass.test_name For more information see the :oslotest-doc:`oslotest documentation `. Database Setup -------------- The unit tests need a local database setup, you can use ``tools/test-setup.sh`` to set up the database the same way as setup in the OpenStack test systems. 
Additional Tox Targets ---------------------- There are several additional tox targets not included in the default list, such as the target which builds the documentation site. See the ``tox.ini`` file for a complete listing of tox targets. These can be run directly by specifying the target name:: # generate the documentation pages locally tox -edocs # generate the sample configuration file tox -egenconfig Exercising the Services Locally =============================== In addition to running automated tests, sometimes it can be helpful to actually run the services locally, without needing a server in a remote datacenter. If you would like to exercise the Ironic services in isolation within your local environment, you can do this without starting any other OpenStack services. For example, this is useful for rapidly prototyping and debugging interactions over the RPC channel, testing database migrations, and so forth. Here we describe two ways to install and configure the dependencies, either run directly on your local machine or encapsulated in a virtual machine or container. Step 1: Create a Python virtualenv ---------------------------------- #. If you haven't already downloaded the source code, do that first:: cd ~ git clone https://opendev.org/openstack/ironic cd ironic #. Create the Python virtualenv:: tox -evenv --notest --develop -r #. Activate the virtual environment:: . .tox/venv/bin/activate #. Install the `openstack` client command utility:: pip install python-openstackclient #. Install the `openstack baremetal` client:: pip install python-ironicclient .. note:: You can install python-ironicclient from source by cloning the git repository and running `pip install .` while in the root of the cloned repository. #. Export some ENV vars so the client will connect to the local services that you'll start in the next section:: export OS_AUTH_TYPE=none export OS_ENDPOINT=http://localhost:6385/ Next, install and configure system dependencies. 
Step 2: Install System Dependencies Locally -------------------------------------------- This step will install MySQL on your local system. This may not be desirable in some situations (eg, you're developing from a laptop and do not want to run a MySQL server on it all the time). If you want to use SQLite, skip it and do not set the ``connection`` option. #. Install mysql-server: Ubuntu/Debian:: sudo apt-get install mysql-server RHEL7/CentOS7:: sudo yum install mariadb mariadb-server sudo systemctl start mariadb.service Fedora:: sudo dnf install mariadb mariadb-server sudo systemctl start mariadb.service openSUSE/SLE 12:: sudo zypper install mariadb sudo systemctl start mysql.service If using MySQL, you need to create the initial database:: mysql -u root -pMYSQL_ROOT_PWD -e "create schema ironic" .. note:: if you choose not to install mysql-server, ironic will default to using a local sqlite database. The database will then be stored in ``ironic/ironic.sqlite``. #. Create a configuration file within the ironic source directory:: # generate a sample config tox -egenconfig # copy sample config and modify it as necessary cp etc/ironic/ironic.conf.sample etc/ironic/ironic.conf.local # disable auth since we are not running keystone here sed -i "s/#auth_strategy = keystone/auth_strategy = noauth/" etc/ironic/ironic.conf.local # use the 'fake-hardware' test hardware type sed -i "s/#enabled_hardware_types = .*/enabled_hardware_types = fake-hardware/" etc/ironic/ironic.conf.local # use the 'fake' deploy and boot interfaces sed -i "s/#enabled_deploy_interfaces = .*/enabled_deploy_interfaces = fake/" etc/ironic/ironic.conf.local sed -i "s/#enabled_boot_interfaces = .*/enabled_boot_interfaces = fake/" etc/ironic/ironic.conf.local # enable both fake and ipmitool management and power interfaces sed -i "s/#enabled_management_interfaces = .*/enabled_management_interfaces = fake,ipmitool/" etc/ironic/ironic.conf.local sed -i "s/#enabled_power_interfaces = 
.*/enabled_power_interfaces = fake,ipmitool/" etc/ironic/ironic.conf.local # change the periodic sync_power_state_interval to a week, to avoid getting NodeLocked exceptions sed -i "s/#sync_power_state_interval = 60/sync_power_state_interval = 604800/" etc/ironic/ironic.conf.local # if you opted to install mysql-server, switch the DB connection from sqlite to mysql sed -i "s/#connection = .*/connection = mysql\+pymysql:\/\/root:MYSQL_ROOT_PWD@localhost\/ironic/" etc/ironic/ironic.conf.local # use JSON RPC to avoid installing rabbitmq locally sed -i "s/#rpc_transport = oslo/rpc_transport = json-rpc/" etc/ironic/ironic.conf.local Step 3: Start the Services -------------------------- From within the python virtualenv, run the following command to prepare the database before you start the ironic services:: # initialize the database for ironic ironic-dbsync --config-file etc/ironic/ironic.conf.local create_schema Next, open two new terminals for this section, and run each of the examples here in a separate terminal. In this way, the services will *not* be run as daemons; you can observe their output and stop them with Ctrl-C at any time. #. Start the API service in debug mode and watch its output:: cd ~/ironic . .tox/venv/bin/activate ironic-api -d --config-file etc/ironic/ironic.conf.local #. Start the Conductor service in debug mode and watch its output:: cd ~/ironic . .tox/venv/bin/activate ironic-conductor -d --config-file etc/ironic/ironic.conf.local Step 4: Interact with the running services ------------------------------------------ You should now be able to interact with ironic via the python client, which is present in the python virtualenv, and observe both services' debug outputs in the other two windows. This is a good way to test new features or play with the functionality without necessarily starting DevStack. 
To get started, export the following variables to point the client at the local instance of ironic and disable the authentication:: export OS_AUTH_TYPE=token_endpoint export OS_TOKEN=fake export OS_ENDPOINT=http://127.0.0.1:6385 Then list the available commands and resources:: # get a list of available commands openstack help baremetal # get the list of drivers currently supported by the available conductor(s) openstack baremetal driver list # get a list of nodes (should be empty at this point) openstack baremetal node list Here is an example walkthrough of creating a node:: MAC="aa:bb:cc:dd:ee:ff" # replace with the MAC of a data port on your node IPMI_ADDR="1.2.3.4" # replace with a real IP of the node BMC IPMI_USER="admin" # replace with the BMC's user name IPMI_PASS="pass" # replace with the BMC's password # enroll the node with the fake hardware type and IPMI-based power and # management interfaces. Note that driver info may be added at node # creation time with "--driver-info" NODE=$(openstack baremetal node create \ --driver fake-hardware \ --management-interface ipmitool \ --power-interface ipmitool \ --driver-info ipmi_address=$IPMI_ADDR \ --driver-info ipmi_username=$IPMI_USER \ -f value -c uuid) # driver info may also be added or updated later on openstack baremetal node set $NODE --driver-info ipmi_password=$IPMI_PASS # add a network port openstack baremetal port create $MAC --node $NODE # view the information for the node openstack baremetal node show $NODE # request that the node's driver validate the supplied information openstack baremetal node validate $NODE # you have now enrolled a node sufficiently to be able to control # its power state from ironic! openstack baremetal node power on $NODE If you make some code changes and want to test their effects, simply stop the services with Ctrl-C and restart them. 
Step 5: Fixing your test environment ------------------------------------ If you are testing changes that add or remove python entrypoints, or making significant changes to ironic's python modules, or simply keep the virtualenv around for a long time, your development environment may reach an inconsistent state. It may help to delete cached ".pyc" files, update dependencies, reinstall ironic, or even recreate the virtualenv. The following commands may help with that, but are not an exhaustive troubleshooting guide:: # clear cached pyc files cd ~/ironic/ironic find ./ -name '*.pyc' | xargs rm # reinstall ironic modules cd ~/ironic . .tox/venv/bin/activate pip uninstall ironic pip install -e . # install and upgrade ironic and all python dependencies cd ~/ironic . .tox/venv/bin/activate pip install -U -e . .. _`deploy_devstack`: Deploying Ironic with DevStack ============================== DevStack may be configured to deploy Ironic, setup Nova to use the Ironic driver and provide hardware resources (network, baremetal compute nodes) using a combination of OpenVSwitch and libvirt. It is highly recommended to deploy on an expendable virtual machine and not on your personal work station. Deploying Ironic with DevStack requires a machine running Ubuntu 16.04 (or later) or Fedora 24 (or later). Make sure your machine is fully up to date and has the latest packages installed before beginning this process. The ironic-tempest-plugin is necessary if you want to run integration tests, the section `Ironic with ironic-tempest-plugin`_ tells the extra steps you need to enable it in DevStack. .. seealso:: https://docs.openstack.org/devstack/latest/ .. note:: The devstack "demo" tenant is now granted the "baremetal_observer" role and thereby has read-only access to ironic's API. This is sufficient for all the examples below. Should you want to create or modify bare metal resources directly (ie. 
through ironic rather than through nova) you will need to use the devstack "admin" tenant. Devstack will no longer create the user 'stack' with the desired permissions, but does provide a script to perform the task:: git clone https://opendev.org/openstack/devstack.git devstack sudo ./devstack/tools/create-stack-user.sh Switch to the stack user and clone DevStack:: sudo su - stack git clone https://opendev.org/openstack/devstack.git devstack Ironic ------ Create devstack/local.conf with minimal settings required to enable Ironic. An example local.conf that enables both ``direct`` and ``iscsi`` :doc:`deploy interfaces ` and uses the ``ipmi`` hardware type by default:: cd devstack cat >local.conf <` and uses the ``ipmi`` hardware type by default:: cd devstack cat >local.conf <`_ to control the power state of the virtual baremetal nodes. .. note:: When running QEMU as non-root user (e.g. ``qemu`` on Fedora or ``libvirt-qemu`` on Ubuntu), make sure ``IRONIC_VM_LOG_DIR`` points to a directory where QEMU will be able to write. You can verify this with, for example:: # on Fedora sudo -u qemu touch $HOME/ironic-bm-logs/test.log # on Ubuntu sudo -u libvirt-qemu touch $HOME/ironic-bm-logs/test.log .. note:: To check out an in-progress patch for testing, you can add a Git ref to the ``enable_plugin`` line. For instance:: enable_plugin ironic https://opendev.org/openstack/ironic refs/changes/46/295946/15 For a patch in review, you can find the ref to use by clicking the "Download" button in Gerrit. You can also specify a different git repo, or a branch or tag:: enable_plugin ironic https://github.com/openstack/ironic stable/kilo For more details, see the `devstack plugin interface documentation `_. Run stack.sh:: ./stack.sh Source credentials, create a key, and spawn an instance as the ``demo`` user:: . 
~/devstack/openrc # query the image id of the default cirros image image=$(openstack image show $DEFAULT_IMAGE_NAME -f value -c id) # create keypair ssh-keygen openstack keypair create --public-key ~/.ssh/id_rsa.pub default # spawn instance openstack server create --flavor baremetal --image $image --key-name default testing .. note:: Because devstack create multiple networks, we need to pass an additional parameter ``--nic net-id`` to the nova boot command when using the admin account, for example:: net_id=$(openstack network list | egrep "$PRIVATE_NETWORK_NAME"'[^-]' | awk '{ print $2 }') openstack server create --flavor baremetal --nic net-id=$net_id --image $image --key-name default testing You should now see a Nova instance building:: openstack server list --long +----------+---------+--------+------------+-------------+----------+------------+----------+-------------------+------+------------+ | ID | Name | Status | Task State | Power State | Networks | Image Name | Image ID | Availability Zone | Host | Properties | +----------+---------+--------+------------+-------------+----------+------------+----------+-------------------+------+------------+ | a2c7f812 | testing | BUILD | spawning | NOSTATE | | cirros-0.3 | 44d4092a | nova | | | | -e386-4a | | | | | | .5-x86_64- | -51ac-47 | | | | | 22-b393- | | | | | | disk | 51-9c50- | | | | | fe1802ab | | | | | | | fd6e2050 | | | | | d56e | | | | | | | faa1 | | | | +----------+---------+--------+------------+-------------+----------+------------+----------+-------------------+------+------------+ Nova will be interfacing with Ironic conductor to spawn the node. On the Ironic side, you should see an Ironic node associated with this Nova instance. 
It should be powered on and in a 'wait call-back' provisioning state:: openstack baremetal node list +--------------------------------------+--------+--------------------------------------+-------------+--------------------+-------------+ | UUID | Name | Instance UUID | Power State | Provisioning State | Maintenance | +--------------------------------------+--------+--------------------------------------+-------------+--------------------+-------------+ | 9e592cbe-e492-4e4f-bf8f-4c9e0ad1868f | node-0 | None | power off | None | False | | ec0c6384-cc3a-4edf-b7db-abde1998be96 | node-1 | None | power off | None | False | | 4099e31c-576c-48f8-b460-75e1b14e497f | node-2 | a2c7f812-e386-4a22-b393-fe1802abd56e | power on | wait call-back | False | +--------------------------------------+--------+--------------------------------------+-------------+--------------------+-------------+ At this point, Ironic conductor has called to libvirt (via virtualbmc) to power on a virtual machine, which will PXE + TFTP boot from the conductor node and progress through the Ironic provisioning workflow. 
One libvirt domain should be active now:: sudo virsh list --all Id Name State ---------------------------------------------------- 2 node-2 running - node-0 shut off - node-1 shut off This provisioning process may take some time depending on the performance of the host system, but Ironic should eventually show the node as having an 'active' provisioning state:: openstack baremetal node list +--------------------------------------+--------+--------------------------------------+-------------+--------------------+-------------+ | UUID | Name | Instance UUID | Power State | Provisioning State | Maintenance | +--------------------------------------+--------+--------------------------------------+-------------+--------------------+-------------+ | 9e592cbe-e492-4e4f-bf8f-4c9e0ad1868f | node-0 | None | power off | None | False | | ec0c6384-cc3a-4edf-b7db-abde1998be96 | node-1 | None | power off | None | False | | 4099e31c-576c-48f8-b460-75e1b14e497f | node-2 | a2c7f812-e386-4a22-b393-fe1802abd56e | power on | active | False | +--------------------------------------+--------+--------------------------------------+-------------+--------------------+-------------+ This should also be reflected in the Nova instance state, which at this point should be ACTIVE, Running and an associated private IP:: openstack server list --long +----------+---------+--------+------------+-------------+---------------+------------+----------+-------------------+------+------------+ | ID | Name | Status | Task State | Power State | Networks | Image Name | Image ID | Availability Zone | Host | Properties | +----------+---------+--------+------------+-------------+---------------+------------+----------+-------------------+------+------------+ | a2c7f812 | testing | ACTIVE | none | Running | private=10.1. 
| cirros-0.3 | 44d4092a | nova | | | | -e386-4a | | | | | 0.4, fd7d:1f3 | .5-x86_64- | -51ac-47 | | | | | 22-b393- | | | | | c:4bf1:0:f816 | disk | 51-9c50- | | | | | fe1802ab | | | | | :3eff:f39d:6d | | fd6e2050 | | | | | d56e | | | | | 94 | | faa1 | | | | +----------+---------+--------+------------+-------------+---------------+------------+----------+-------------------+------+------------+ The server should now be accessible via SSH:: ssh cirros@10.1.0.4 $ Running Tempest tests ===================== After :ref:`Deploying Ironic with DevStack ` with the ironic-tempest-plugin enabled, one might want to run integration tests against the running cloud. The Tempest project is the project that offers an integration test suite for OpenStack. First, navigate to Tempest directory:: cd /opt/stack/tempest To run all tests from the `Ironic plugin `_, execute the following command:: tox -e all -- ironic To limit the amount of tests that you would like to run, you can use a regex. For instance, to limit the run to a single test file, the following command can be used:: tox -e all -- ironic_tempest_plugin.tests.scenario.test_baremetal_basic_ops Debugging Tempest tests ----------------------- It is sometimes useful to step through the test code, line by line, especially when the error output is vague. This can be done by running the tests in debug mode and using a debugger such as `pdb `_. For example, after editing the *test_baremetal_basic_ops* file and setting up the pdb traces you can invoke the ``run_tempest.sh`` script in the Tempest directory with the following parameters:: ./run_tempest.sh -N -d ironic_tempest_plugin.tests.scenario.test_baremetal_basic_ops * The *-N* parameter tells the script to run the tests in the local environment (without a virtualenv) so it can find the Ironic tempest plugin. * The *-d* parameter enables the debug mode, allowing it to be used with pdb. For more information about the supported parameters see:: ./run_tempest.sh --help .. 
note:: Always be careful when running debuggers in time sensitive code, they may cause timeout errors that weren't there before. OSProfiler Tracing in Ironic ============================ OSProfiler is an OpenStack cross-project profiling library. It is being used among OpenStack projects to look at performance issues and detect bottlenecks. For details on how OSProfiler works and how to use it in ironic, please refer to `OSProfiler Support Documentation `_. Building developer documentation ================================ If you would like to build the documentation locally, eg. to test your documentation changes before uploading them for review, run these commands to build the documentation set: - On your local machine:: # activate your development virtualenv . .tox/venv/bin/activate # build the docs tox -edocs #Now use your browser to open the top-level index.html located at: ironic/doc/build/html/index.html - On a remote machine:: # Go to the directory that contains the docs cd ~/ironic/doc/source/ # Build the docs tox -edocs # Change directory to the newly built HTML files cd ~/ironic/doc/build/html/ # Create a server using python on port 8000 python -m SimpleHTTPServer 8000 #Now use your browser to open the top-level index.html located at: http://your_ip:8000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/drivers.rst0000644000175000017500000002016200000000000023204 0ustar00coreycorey00000000000000.. _pluggable_drivers: ================= Pluggable Drivers ================= Ironic supports a pluggable driver model. This allows contributors to easily add new drivers, and operators to use third-party drivers or write their own. A driver is built at runtime from a *hardware type* and *hardware interfaces*. See :doc:`/install/enabling-drivers` for a detailed explanation of these concepts. 
Hardware types and interfaces are loaded by the ``ironic-conductor`` service during initialization from the setuptools entrypoints ``ironic.hardware.types`` and ``ironic.hardware.interfaces.`` where ```` is an interface type (for example, ``deploy``). Only hardware types listed in the configuration option ``enabled_hardware_types`` and interfaces listed in configuration options ``enabled__interfaces`` are loaded. A complete list of hardware types available on the system may be found by enumerating this entrypoint by running the following python script:: #!/usr/bin/env python import pkg_resources as pkg print [p.name for p in pkg.iter_entry_points("ironic.hardware.types") if not p.name.startswith("fake")] A list of drivers enabled in a running Ironic service may be found by issuing the following command against that API end point:: openstack baremetal driver list Writing a hardware type ----------------------- A hardware type is a Python class, inheriting :py:class:`ironic.drivers.hardware_type.AbstractHardwareType` and listed in the setuptools entry point ``ironic.hardware.types``. Most of the real world hardware types inherit :py:class:`ironic.drivers.generic.GenericHardware` instead. This helper class provides useful implementations for interfaces that are usually the same for all hardware types, such as ``deploy``. The minimum required interfaces are: * :doc:`boot ` that specifies how to boot ramdisks and instances on the hardware. A generic ``pxe`` implementation is provided by the ``GenericHardware`` base class. * :doc:`deploy ` that orchestrates the deployment. A few common implementations are provided by the ``GenericHardware`` base class. As of the Rocky release, a deploy interface should decorate its deploy method to indicate that it is a deploy step. Conventionally, the deploy method uses a priority of 100. .. code-block:: python @ironic.drivers.base.deploy_step(priority=100) def deploy(self, task): .. 
note:: Most of the hardware types should not override this interface. * `power` implements power actions for the hardware. These common implementations may be used, if supported by the hardware: * :py:class:`ironic.drivers.modules.ipmitool.IPMIPower` * :py:class:`ironic.drivers.modules.redfish.power.RedfishPower` Otherwise, you need to write your own implementation by subclassing :py:class:`ironic.drivers.base.PowerInterface` and providing missing methods. .. note:: Power actions in Ironic are blocking - methods of a power interface should not return until the power action is finished or errors out. * `management` implements additional out-of-band management actions, such as setting a boot device. A few common implementations exist and may be used, if supported by the hardware: * :py:class:`ironic.drivers.modules.ipmitool.IPMIManagement` * :py:class:`ironic.drivers.modules.redfish.management.RedfishManagement` Some hardware types, such as ``snmp`` do not support out-of-band management. They use the fake implementation in :py:class:`ironic.drivers.modules.fake.FakeManagement` instead. Otherwise, you need to write your own implementation by subclassing :py:class:`ironic.drivers.base.ManagementInterface` and providing missing methods. Combine the interfaces in a hardware type by populating the lists of supported interfaces. These lists are prioritized, with the most preferred implementation first. For example: .. code-block:: python class MyHardware(generic.GenericHardware): @property def supported_management_interfaces(self): """List of supported management interfaces.""" return [MyManagement, ipmitool.IPMIManagement] @property def supported_power_interfaces(self): """List of supported power interfaces.""" return [MyPower, ipmitool.IPMIPower] .. note:: In this example, all interfaces, except for ``management`` and ``power`` are taken from the ``GenericHardware`` base class. 
Finally, give the new hardware type and new interfaces human-friendly names and create entry points for them in the ``setup.cfg`` file:: ironic.hardware.types = my-hardware = ironic.drivers.my_hardware:MyHardware ironic.hardware.interfaces.power = my-power = ironic.drivers.modules.my_hardware:MyPower ironic.hardware.interfaces.management = my-management = ironic.drivers.modules.my_hardware:MyManagement Supported Drivers ----------------- For a list of supported drivers (those that are continuously tested on every upstream commit) please consult the :doc:`drivers page `. Node Vendor Passthru -------------------- Drivers may implement a passthrough API, which is accessible via the ``/v1/nodes//vendor_passthru?method={METHOD}`` endpoint. Beyond basic checking, Ironic does not introspect the message body and simply "passes it through" to the relevant driver. A method: * can support one or more HTTP methods (for example, GET, POST) * is asynchronous or synchronous + For asynchronous methods, a 202 (Accepted) HTTP status code is returned to indicate that the request was received, accepted and is being acted upon. No body is returned in the response. + For synchronous methods, a 200 (OK) HTTP status code is returned to indicate that the request was fulfilled. The response may include a body. * can require an exclusive lock on the node. This only occurs if the method doesn't specify require_exclusive_lock=False in the decorator. If an exclusive lock is held on the node, other requests for the node will be delayed and may fail with an HTTP 409 (Conflict) error code. This endpoint exposes a node's driver directly, and as such, it is expressly not part of Ironic's standard REST API. There is only a single HTTP endpoint exposed, and the semantics of the message body are determined solely by the driver. Ironic makes no guarantees about backwards compatibility; this is solely up to the discretion of each driver's author. 
To get information about all the methods available via the vendor_passthru endpoint for a particular node, you can issue an HTTP GET request:: GET /v1/nodes//vendor_passthru/methods The response's JSON body will contain information for each method, such as the method's name, a description, the HTTP methods supported, and whether it's asynchronous or synchronous. Driver Vendor Passthru ---------------------- Drivers may implement an API for requests not related to any node, at ``/v1/drivers//vendor_passthru?method={METHOD}``. A method: * can support one or more HTTP methods (for example, GET, POST) * is asynchronous or synchronous + For asynchronous methods, a 202 (Accepted) HTTP status code is returned to indicate that the request was received, accepted and is being acted upon. No body is returned in the response. + For synchronous methods, a 200 (OK) HTTP status code is returned to indicate that the request was fulfilled. The response may include a body. .. note:: Unlike methods in `Node Vendor Passthru`_, a request does not lock any resource, so it will not delay other requests and will not fail with an HTTP 409 (Conflict) error code. Ironic makes no guarantees about the semantics of the message BODY sent to this endpoint. That is left up to each driver's author. To get information about all the methods available via the driver vendor_passthru endpoint, you can issue an HTTP GET request:: GET /v1/drivers//vendor_passthru/methods The response's JSON body will contain information for each method, such as the method's name, a description, the HTTP methods supported, and whether it's asynchronous or synchronous. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/faq.rst0000644000175000017500000001263200000000000022300 0ustar00coreycorey00000000000000.. 
_faq: ========================================== Developer FAQ (frequently asked questions) ========================================== Here are some answers to frequently-asked questions from IRC and elsewhere. .. contents:: :local: :depth: 2 How do I... =========== ...create a migration script template? -------------------------------------- Using the ``ironic-dbsync revision`` command, e.g:: $ cd ironic $ tox -evenv -- ironic-dbsync revision -m \"create foo table\" It will create an empty alembic migration. For more information see the `alembic documentation`_. .. _`alembic documentation`: http://alembic.zzzcomputing.com/en/latest/tutorial.html#create-a-migration-script .. _faq_release_note: ...know if a release note is needed for my change? -------------------------------------------------- `Reno documentation`_ contains a description of what can be added to each section of a release note. If, after reading this, you're still unsure about whether to add a release note for your change or not, keep in mind that it is intended to contain information for deployers, so changes to unit tests or documentation are unlikely to require one. ...create a new release note? ----------------------------- By running ``reno`` command via tox, e.g:: $ tox -e venv -- reno new version-foo venv create: /home/foo/ironic/.tox/venv venv installdeps: -r/home/foo/ironic/test-requirements.txt venv develop-inst: /home/foo/ironic venv runtests: PYTHONHASHSEED='0' venv runtests: commands[0] | reno new version-foo Created new notes file in releasenotes/notes/version-foo-ecb3875dc1cbf6d9.yaml venv: commands succeeded congratulations :) $ git status On branch test Untracked files: (use "git add ..." to include in what will be committed) releasenotes/notes/version-foo-ecb3875dc1cbf6d9.yaml Then edit the result file. Note that: - we prefer to use present tense in release notes. For example, a release note should say "Adds support for feature foo", not "Added support for feature foo". 
(We use 'adds' instead of 'add' because grammatically, it is "ironic adds support", not "ironic add support".) - any variant of English spelling (American, British, Canadian, Australian...) is acceptable. The release note itself should be consistent and not have different spelling variants of the same word. For more information see the `reno documentation`_. .. _`reno documentation`: https://docs.openstack.org/reno/latest/user/usage.html ...update a release note? ------------------------- If this is a release note that pertains to something that was fixed on master or an intermediary release (during a development cycle, that hasn't been branched yet), you can go ahead and update it by submitting a patch. If it is the release note of an ironic release that has branched, `it can be updated `_ but we will only allow it in extenuating circumstances. (It can be updated by *only* updating the file in that branch. DO NOT update the file in master and cherry-pick it. If you do, `see how the mess was cleaned up `_.) ...get a decision on something? ------------------------------- You have an issue and would like a decision to be made. First, make sure that the issue hasn't already been addressed, by looking at documentation, stories, specifications, or asking. Information and links can be found on the `Ironic wiki`_ page. There are several ways to solicit comments and opinions: * bringing it up at the `weekly Ironic meeting`_ * bringing it up on IRC_ * bringing it up on the `mailing list`_ (add "[Ironic]" to the Subject of the email) If there are enough core folks at the weekly meeting, after discussing an issue, voting could happen and a decision could be made. The problem with IRC or the weekly meeting is that feedback will only come from the people that are actually present. To inform (and solicit feedback from) more people about an issue, the preferred process is: #. bring it up on the mailing list #. 
after some period of time has elapsed (and depending on the thread activity), someone should propose a solution via gerrit. (E.g. the person that started the thread if no one else steps up.) The proposal should be made in the git repository that is associated with the issue. (For instance, this decision process was proposed as a documentation patch to the ironic repository.) #. In the email thread, don't forget to provide a link to the proposed patch! #. The discussion then moves to the proposed patch. If this is a big decision, we could declare that some percentage of the cores should vote on it before landing it. (This process was suggested in an email thread about `process for making decisions`_.) .. _Ironic wiki: https://wiki.openstack.org/wiki/Ironic .. _weekly Ironic meeting: https://wiki.openstack.org/wiki/Meetings/Ironic .. _IRC: https://wiki.openstack.org/wiki/Ironic#IRC .. _mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss .. _process for making decisions: http://lists.openstack.org/pipermail/openstack-dev/2016-May/095460.html ...add support for GMRs to new executables and extending the GMR? ----------------------------------------------------------------- For more information, see the :oslo.reports-doc:`oslo.reports documentation ` page. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/governance.rst0000644000175000017500000000265400000000000023663 0ustar00coreycorey00000000000000=========================== Ironic Governance Structure =========================== The ironic project manages a number of repositories that contribute to our mission. The full list of repositories that ironic manages is available in the `governance site`_. .. _`governance site`: https://governance.openstack.org/reference/projects/ironic.html What belongs in ironic governance? 
================================== For a repository to be part of the Ironic project: * It must comply with the TC's `rules for a new project `_. * It must not be intended for use with only a single vendor's hardware. A library that implements a standard to manage hardware from multiple vendors (such as IPMI or redfish) is okay. * It must align with Ironic's `mission statement `_. Lack of contributor diversity is a chicken-egg problem, and as such a repository where only a single company is contributing is okay, with the hope that other companies will contribute after joining the ironic project. Repositories that are no longer maintained should be pruned from governance regularly. Proposing a new project to ironic governance ============================================ Bring the proposal to the ironic `weekly meeting `_ to discuss with the team. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/index.rst0000644000175000017500000001111300000000000022631 0ustar00coreycorey00000000000000Developer's Guide ================= Getting Started --------------- If you are new to ironic, this section contains information that should help you get started as a developer working on the project or contributing to the project. .. toctree:: :maxdepth: 1 Developer Contribution Guide Setting Up Your Development Environment Priorities Specifications Frequently Asked Questions Contributor Vision OpenStack Vision The following pages describe the architecture of the Bare Metal service and may be helpful to anyone working on or with the service, but are written primarily for developers. .. toctree:: :maxdepth: 1 Ironic System Architecture Provisioning State Machine Developing New Notifications OSProfiler Tracing Rolling Upgrades These pages contain information for PTLs, cross-project liaisons, and core reviewers. .. 
toctree:: :maxdepth: 1 Releasing Ironic Projects Ironic Governance Structure Writing Drivers --------------- Ironic's community includes many hardware vendors who contribute drivers that enable more advanced functionality when Ironic is used in conjunction with that hardware. To do this, the Ironic developer community is committed to standardizing on a `Python Driver API `_ that meets the common needs of all hardware vendors, and evolving this API without breaking backwards compatibility. However, it is sometimes necessary for driver authors to implement functionality - and expose it through the REST API - that can not be done through any existing API. To facilitate that, we also provide the means for API calls to be "passed through" ironic and directly to the driver. Some guidelines on how to implement this are provided below. Driver authors are strongly encouraged to talk with the developer community about any implementation using this functionality. .. toctree:: :maxdepth: 1 Driver Overview Writing "vendor_passthru" methods Creating new BIOS interfaces Third party continuous integration testing Writing Deploy or Clean Steps Testing Network Integration --------------------------- In order to test the integration between the Bare Metal and Networking services, support has been added to `devstack `_ to mimic an external physical switch. Here we include a recommended configuration for devstack to bring up this environment. .. toctree:: :maxdepth: 1 Configuring Devstack for multitenant network testing Testing Boot-from-Volume ------------------------ Starting with the Pike release, it is also possible to use DevStack for testing booting from Cinder volumes with VMs. .. toctree:: :maxdepth: 1 Configuring Devstack for boot-from-volume testing Full Ironic Server Python API Reference --------------------------------------- .. 
toctree:: :maxdepth: 1 api/modules Understanding the Ironic's CI ----------------------------- It's important to understand the role of each job in the CI, how to add new jobs and how to debug failures that may arise. To facilitate that, we have created the documentation below. .. toctree:: :maxdepth: 1 Job roles in the CI How to add a new job? How to debug failures in CI jobs Our policy for stable branches ------------------------------ Stable branches that are on `Extended Maintenance`_ and haven't received backports in a while, can be tagged as ``Unmaintained``, after discussions within the ironic community. If such a decision is taken, an email will be sent to the OpenStack mailing list. What does ``Unmaintained`` mean? The branch still exists, but the ironic upstream community will not actively backport patches from maintained branches. Fixes can still be merged, though, if pushed into review by operators or other downstream developers. It also means that branchless projects (e.g.: ironic-tempest-plugin), may not have configurations that are compatible with those branches. As of 09 March 2020, the list of ``Unmaintained`` branches includes: * Ocata (Last commit - Jun 28, 2019) * Pike (Last commit - Oct 2, 2019) .. _Extended Maintenance: https://docs.openstack.org/project-team-guide/stable-branches.html#maintenance-phases ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/ironic-boot-from-volume.rst0000644000175000017500000001060500000000000026221 0ustar00coreycorey00000000000000===================================== Ironic Boot-from-Volume with DevStack ===================================== This guide shows how to setup DevStack for enabling boot-from-volume feature, which has been supported from the Pike release. This scenario shows how to setup DevStack to enable nodes to boot from volumes managed by cinder with VMs as baremetal servers. 
DevStack Configuration ====================== The following is ``local.conf`` that will setup DevStack with 3 VMs that are registered in ironic. A volume connector with IQN is created for each node. These connectors can be used to connect volumes created by cinder. The detailed description for DevStack is at :ref:`deploy_devstack`. :: [[local|localrc]] enable_plugin ironic https://opendev.org/openstack/ironic IRONIC_STORAGE_INTERFACE=cinder # Credentials ADMIN_PASSWORD=password DATABASE_PASSWORD=password RABBIT_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password SWIFT_HASH=password SWIFT_TEMPURL_KEY=password # Enable Neutron which is required by Ironic and disable nova-network. disable_service n-net disable_service n-novnc enable_service q-svc enable_service q-agt enable_service q-dhcp enable_service q-l3 enable_service q-meta enable_service neutron # Enable Swift for the direct deploy interface. enable_service s-proxy enable_service s-object enable_service s-container enable_service s-account # Disable Horizon disable_service horizon # Disable Heat disable_service heat h-api h-api-cfn h-api-cw h-eng # Swift temp URL's are required for the direct deploy interface. SWIFT_ENABLE_TEMPURLS=True # Create 3 virtual machines to pose as Ironic's baremetal nodes. IRONIC_VM_COUNT=3 IRONIC_BAREMETAL_BASIC_OPS=True DEFAULT_INSTANCE_TYPE=baremetal # Enable additional hardware types, if needed. #IRONIC_ENABLED_HARDWARE_TYPES=ipmi,fake-hardware # Don't forget that many hardware types require enabling of additional # interfaces, most often power and management: #IRONIC_ENABLED_MANAGEMENT_INTERFACES=ipmitool,fake #IRONIC_ENABLED_POWER_INTERFACES=ipmitool,fake # The default deploy interface is 'iscsi', you can use 'direct' with #IRONIC_DEFAULT_DEPLOY_INTERFACE=direct # Change this to alter the default driver for nodes created by devstack. # This driver should be in the enabled list above. 
IRONIC_DEPLOY_DRIVER=ipmi # The parameters below represent the minimum possible values to create # functional nodes. IRONIC_VM_SPECS_RAM=1280 IRONIC_VM_SPECS_DISK=10 # Size of the ephemeral partition in GB. Use 0 for no ephemeral partition. IRONIC_VM_EPHEMERAL_DISK=0 # To build your own IPA ramdisk from source, set this to True IRONIC_BUILD_DEPLOY_RAMDISK=False VIRT_DRIVER=ironic # By default, DevStack creates a 10.0.0.0/24 network for instances. # If this overlaps with the hosts network, you may adjust with the # following. NETWORK_GATEWAY=10.1.0.1 FIXED_RANGE=10.1.0.0/24 FIXED_NETWORK_SIZE=256 # Log all output to files LOGFILE=$HOME/devstack.log LOGDIR=$HOME/logs IRONIC_VM_LOG_DIR=$HOME/ironic-bm-logs After the environment is built, you can create a volume with cinder and request an instance with the volume to nova:: . ~/devstack/openrc # query the image id of the default cirros image image=$(openstack image show $DEFAULT_IMAGE_NAME -f value -c id) # create keypair ssh-keygen openstack keypair create --public-key ~/.ssh/id_rsa.pub default # create volume volume=$(openstack volume create --image $image --size 1 my-volume -f value -c id) # spawn instance openstack server create --flavor baremetal --volume $volume --key-name default testing You can also run an integration test that an instance is booted from a remote volume with tempest in the environment:: cd /opt/stack/tempest tox -e all-plugin -- ironic_tempest_plugin.tests.scenario.test_baremetal_boot_from_volume Please note that the storage interface will only indicate errors based upon the state of the node and the configuration present. As such a node does not exclusively have to boot via a remote volume, and as such `validate` actions upon nodes may be slightly misleading. If an appropriate `volume target` is defined, no error should be returned for the boot interface. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/ironic-multitenant-networking.rst0000644000175000017500000000756000000000000027547 0ustar00coreycorey00000000000000========================================== Ironic multitenant networking and DevStack ========================================== This guide will walk you through using OpenStack Ironic/Neutron with the ML2 ``networking-generic-switch`` plugin. Using VMs as baremetal servers ============================== This scenario shows how to setup Devstack to use Ironic/Neutron integration with VMs as baremetal servers and ML2 ``networking-generic-switch`` that interacts with OVS. DevStack Configuration ---------------------- The following is ``local.conf`` that will setup Devstack with 3 VMs that are registered in ironic. ``networking-generic-switch`` driver will be installed and configured in Neutron. :: [[local|localrc]] # Configure ironic from ironic devstack plugin. 
enable_plugin ironic https://opendev.org/openstack/ironic # Install networking-generic-switch Neutron ML2 driver that interacts with OVS enable_plugin networking-generic-switch https://opendev.org/openstack/networking-generic-switch # Add link local info when registering Ironic node IRONIC_USE_LINK_LOCAL=True IRONIC_ENABLED_NETWORK_INTERFACES=flat,neutron IRONIC_NETWORK_INTERFACE=neutron #Networking configuration OVS_PHYSICAL_BRIDGE=brbm PHYSICAL_NETWORK=mynetwork IRONIC_PROVISION_NETWORK_NAME=ironic-provision IRONIC_PROVISION_SUBNET_PREFIX=10.0.5.0/24 IRONIC_PROVISION_SUBNET_GATEWAY=10.0.5.1 Q_PLUGIN=ml2 ENABLE_TENANT_VLANS=True Q_ML2_TENANT_NETWORK_TYPE=vlan TENANT_VLAN_RANGE=100:150 # Credentials ADMIN_PASSWORD=password RABBIT_PASSWORD=password DATABASE_PASSWORD=password SERVICE_PASSWORD=password SERVICE_TOKEN=password SWIFT_HASH=password SWIFT_TEMPURL_KEY=password # Enable Ironic API and Ironic Conductor enable_service ironic enable_service ir-api enable_service ir-cond # Disable nova novnc service, ironic does not support it anyway. disable_service n-novnc # Enable Swift for the direct deploy interface. enable_service s-proxy enable_service s-object enable_service s-container enable_service s-account # Disable Horizon disable_service horizon # Disable Cinder disable_service cinder c-sch c-api c-vol # Disable Tempest disable_service tempest # Swift temp URL's are required for the direct deploy interface. SWIFT_ENABLE_TEMPURLS=True # Create 3 virtual machines to pose as Ironic's baremetal nodes. IRONIC_VM_COUNT=3 IRONIC_BAREMETAL_BASIC_OPS=True # Enable additional hardware types, if needed. 
#IRONIC_ENABLED_HARDWARE_TYPES=ipmi,fake-hardware # Don't forget that many hardware types require enabling of additional # interfaces, most often power and management: #IRONIC_ENABLED_MANAGEMENT_INTERFACES=ipmitool,fake #IRONIC_ENABLED_POWER_INTERFACES=ipmitool,fake # The default deploy interface is 'iscsi', you can use 'direct' with #IRONIC_DEFAULT_DEPLOY_INTERFACE=direct # Change this to alter the default driver for nodes created by devstack. # This driver should be in the enabled list above. IRONIC_DEPLOY_DRIVER=ipmi # The parameters below represent the minimum possible values to create # functional nodes. IRONIC_VM_SPECS_RAM=1024 IRONIC_VM_SPECS_DISK=10 # Size of the ephemeral partition in GB. Use 0 for no ephemeral partition. IRONIC_VM_EPHEMERAL_DISK=0 # To build your own IPA ramdisk from source, set this to True IRONIC_BUILD_DEPLOY_RAMDISK=False VIRT_DRIVER=ironic # By default, DevStack creates a 10.0.0.0/24 network for instances. # If this overlaps with the hosts network, you may adjust with the # following. NETWORK_GATEWAY=10.1.0.1 FIXED_RANGE=10.1.0.0/24 FIXED_NETWORK_SIZE=256 # Log all output to files LOGFILE=$HOME/devstack.log LOGDIR=$HOME/logs IRONIC_VM_LOG_DIR=$HOME/ironic-bm-logs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/jobs-description.rst0000644000175000017500000001303000000000000025000 0ustar00coreycorey00000000000000.. _jobs-description: ================ Jobs description ================ The description of each jobs that runs in the CI when you submit a patch for `openstack/ironic` is visible in :ref:`table_jobs_description`. .. _table_jobs_description: .. list-table:: Table. 
OpenStack Ironic CI jobs description :widths: 53 47 :header-rows: 1 * - Job name - Description * - ironic-tox-unit-with-driver-libs-python3 - Runs Ironic unit tests with the driver dependencies installed under Python3 * - ironic-standalone - Deploys Ironic in standalone mode and runs tempest tests that match the regex `ironic_standalone`. * - ironic-tempest-functional-python3 - Deploys Ironic in standalone mode and runs tempest functional tests that matches the regex `ironic_tempest_plugin.tests.api` under Python3 * - ironic-grenade-dsvm - Deploys Ironic in a DevStack and runs upgrade for all enabled services. * - ironic-grenade-dsvm-multinode-multitenant - Deploys Ironic in a multinode DevStack and runs upgrade for all enabled services. * - ironic-tempest-ipa-partition-pxe_ipmitool-tinyipa-python3 - Deploys Ironic in DevStack under Python3, configured to use tinyipa ramdisk partition image with `pxe` boot and `ipmi` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario` and deploy 1 virtual baremetal. * - ironic-tempest-ipa-partition-redfish-tinyipa - Deploys Ironic in DevStack, configured to use tinyipa ramdisk partition image with `pxe` boot and `redfish` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario`, also deploys 1 virtual baremetal. * - ironic-tempest-ipa-partition-uefi-pxe_ipmitool-tinyipa - Deploys Ironic in DevStack, configured to use tinyipa ramdisk partition image with `uefi` boot and `ipmi` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario`, also deploys 1 virtual baremetal. * - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode - Deploys Ironic in a multinode DevStack, configured to use a pre-build tinyipa ramdisk wholedisk image that is downloaded from a Swift temporary url, `pxe` boot and `ipmi` driver. Runs tempest tests that match the regex `(ironic_tempest_plugin.tests.scenario|test_schedule_to_all_nodes)` and deploys 7 virtual baremetal. 
* - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa - Deploys Ironic in DevStack, configured to use a pre-build tinyipa ramdisk wholedisk image that is downloaded from a Swift temporary url, `pxe` boot and `ipmi` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario` and deploys 1 virtual baremetal. * - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa-indirect - Deploys Ironic in DevStack, configured to use a pre-build tinyipa ramdisk wholedisk image that is downloaded from http url, `pxe` boot and `ipmi` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario` and deploys 1 virtual baremetal. * - ironic-tempest-ipa-partition-bios-agent_ipmitool-tinyipa-indirect - Deploys Ironic in DevStack, configured to use a pre-build tinyipa ramdisk partition image that is downloaded from http url, `pxe` boot and `ipmi` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario` and deploys 1 virtual baremetal. * - ironic-tempest-bfv - Deploys Ironic in DevStack with cinder enabled, so it can deploy baremetal using boot from volume. Runs tempest tests that match the regex `baremetal_boot_from_volume` and deploys 3 virtual baremetal nodes using boot from volume. * - ironic-tempest-ipa-partition-uefi-pxe-grub2 - Deploys Ironic in DevStack, configured to use pxe with uefi and grub2 and `ipmi` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario` and deploys 1 virtual baremetal. * - ironic-tox-bandit - Runs bandit security tests in a tox environment to find known issues in the Ironic code. * - ironic-tempest-ipa-wholedisk-bios-pxe_snmp-tinyipa - Deploys Ironic in DevStack, configured to use a pre-build tinyipa ramdisk wholedisk image that is downloaded from a Swift temporary url, `pxe` boot and `snmp` driver. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario` and deploys 1 virtual baremetal. 
* - ironic-inspector-tempest - Deploys Ironic and Ironic Inspector in DevStack, configured to use a pre-build tinyipa ramdisk wholedisk image that is downloaded from a Swift temporary url, `pxe` boot and `ipmi` driver. Runs tempest tests that match the regex `InspectorBasicTest` and deploys 1 virtual baremetal. * - bifrost-integration-tinyipa-ubuntu-xenial - Tests the integration between Ironic and Bifrost. * - metalsmith-integration-glance-localboot-centos7 - Tests the integration between Ironic and Metalsmith using Glance as image source and CentOS7 with local boot. * - ironic-tempest-pxe_ipmitool-postgres - Deploys Ironic in DevStack, configured to use tinyipa ramdisk partition image with `pxe` boot and `ipmi` driver and postgres instead of mysql. Runs tempest tests that match the regex `ironic_tempest_plugin.tests.scenario`, also deploys 1 virtual baremetal. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/notifications.rst0000644000175000017500000001470600000000000024406 0ustar00coreycorey00000000000000.. _develop-notifications: ============================ Developing New Notifications ============================ Ironic notifications are events intended for consumption by external services. Notifications are sent to these services over a message bus by :oslo.messaging-doc:`oslo.messaging's Notifier class `. For more information about configuring notifications and available notifications, see :ref:`deploy-notifications`. Ironic also has a set of base classes that assist in clearly defining the notification itself, the payload, and the other fields not auto-generated by oslo (level, event_type and publisher_id). Below describes how to use these base classes to add a new notification to ironic. 
Adding a new notification to ironic =================================== To add a new notification to ironic, a new versioned notification class should be created by subclassing the NotificationBase class to define the notification itself and the NotificationPayloadBase class to define which fields the new notification will contain inside its payload. You may also define a schema to allow the payload to be automatically populated by the fields of an ironic object. Here's an example:: # The ironic object whose fields you want to use in your schema @base.IronicObjectRegistry.register class ExampleObject(base.IronicObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'id': fields.IntegerField(), 'uuid': fields.UUIDField(), 'a_useful_field': fields.StringField(), 'not_useful_field': fields.StringField() } # A class for your new notification @base.IronicObjectRegistry.register class ExampleNotification(notification.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': fields.ObjectField('ExampleNotifPayload') } # A class for your notification's payload @base.IronicObjectRegistry.register class ExampleNotifPayload(notification.NotificationPayloadBase): # Schemas are optional. They just allow you to reuse other objects' # fields by passing in that object and calling populate_schema with # a kwarg set to the other object. SCHEMA = { 'a_useful_field': ('example_obj', 'a_useful_field') } # Version 1.0: Initial version VERSION = '1.0' fields = { 'a_useful_field': fields.StringField(), 'an_extra_field': fields.StringField(nullable=True) } Note that both the payload and notification classes are :oslo.versionedobjects-doc:`oslo versioned objects <>`. Modifications to these require a version bump so that consumers of notifications know when the notifications have changed. SCHEMA defines how to populate the payload fields. It's an optional attribute that subclasses may use to easily populate notifications with data from other objects. 
It is a dictionary where every key value pair has the following format:: : (, ) The ```` is the name where the data will be stored in the payload object; this field has to be defined as a field of the payload. The ```` shall refer to name of the parameter passed as kwarg to the payload's ``populate_schema()`` call and this object will be used as the source of the data. The ```` shall be a valid field of the passed argument. The SCHEMA needs to be applied with the ``populate_schema()`` call before the notification can be emitted. The value of the ``payload.`` field will be set by the ``.`` field. The ```` will not be part of the payload object internal or external representation. Payload fields that are not set by the SCHEMA can be filled in the same way as in any versioned object. Then, to create a payload, you would do something like the following. Note that if you choose to define a schema in the SCHEMA class variable, you must populate the schema by calling ``populate_schema(example_obj=my_example_obj)`` before emitting the notification is allowed:: my_example_obj = ExampleObject(id=1, a_useful_field='important', not_useful_field='blah') # an_extra_field is optional since it's not a part of the SCHEMA and is a # nullable field in the class fields my_notify_payload = ExampleNotifyPayload(an_extra_field='hello') # populate the schema with the ExampleObject fields my_notify_payload.populate_schema(example_obj=my_example_obj) You then create the notification with the oslo required fields (event_type, publisher_id, and level, all sender fields needed by oslo that are defined in the ironic notification base classes) and emit it:: notify = ExampleNotification( event_type=notification.EventType(object='example_obj', action='do_something', status=fields.NotificationStatus.START), publisher=notification.NotificationPublisher( service='ironic-conductor', host='hostname01'), level=fields.NotificationLevel.DEBUG, payload=my_notify_payload) notify.emit(context) When 
specifying the event_type, ``object`` will specify the object being acted on, ``action`` will be a string describing what action is being performed on that object, and ``status`` will be one of "start", "end", "error", or "success". "start" and "end" are used to indicate when actions that are not immediate begin and succeed. "success" is used to indicate when actions that are immediate succeed. "error" is used to indicate when any type of action fails, regardless of whether it's immediate or not. As a result of specifying these parameters, event_type will be formatted as ``baremetal...`` on the message bus. This example will send the following notification over the message bus:: { "priority": "debug", "payload":{ "ironic_object.namespace":"ironic", "ironic_object.name":"ExampleNotifyPayload", "ironic_object.version":"1.0", "ironic_object.data":{ "a_useful_field":"important", "an_extra_field":"hello" } }, "event_type":"baremetal.example_obj.do_something.start", "publisher_id":"ironic-conductor.hostname01" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/osprofiler-support.rst0000644000175000017500000000741100000000000025426 0ustar00coreycorey00000000000000.. _OSProfiler-support: ================ About OSProfiler ================ OSProfiler is an OpenStack cross-project profiling library. Its API provides different ways to add a new trace point. Trace points contain two messages (start and stop). Messages like below are sent to a collector:: { "name": -(start|stop), "base_id": , "parent_id": , "trace_id": , "info": } The fields are defined as follows: ``base_id`` - that is same for all trace points that belong to one trace. This is used to simplify the process of retrieving all trace points (related to one trace) from the collector. ``parent_id`` - of parent trace point. ``trace_id`` - of current trace point. 
``info`` - the dictionary that contains user information passed when calling profiler start() & stop() methods. The profiler uses ceilometer as a centralized collector. Two other alternatives for ceilometer are pure MongoDB driver and Elasticsearch. A notifier is setup to send notifications to ceilometer using oslo.messaging and ceilometer API is used to retrieve all messages related to one trace. OSProfiler has entry point that allows the user to retrieve information about traces and present it in HTML/JSON using CLI. For more details see :osprofiler-doc:`OSProfiler – Cross-project profiling library `. How to Use OSProfiler with Ironic in Devstack ============================================= To use or test OSProfiler in ironic, the user needs to setup Devstack with OSProfiler and ceilometer. In addition to the setup described at :ref:`deploy_devstack`, the user needs to do the following: Add the following to ``localrc`` to enable OSProfiler and ceilometer:: enable_plugin panko https://opendev.org/openstack/panko enable_plugin ceilometer https://opendev.org/openstack/ceilometer enable_plugin osprofiler https://opendev.org/openstack/osprofiler # Enable the following services CEILOMETER_NOTIFICATION_TOPICS=notifications,profiler ENABLED_SERVICES+=,ceilometer-acompute,ceilometer-acentral ENABLED_SERVICES+=,ceilometer-anotification,ceilometer-collector ENABLED_SERVICES+=,ceilometer-alarm-evaluator,ceilometer-alarm-notifier ENABLED_SERVICES+=,ceilometer-api Run stack.sh. Once Devstack environment is setup, edit ``ironic.conf`` to set the following profiler options and restart ironic services:: [profiler] enabled = True hmac_keys = SECRET_KEY # default value used across several OpenStack projects trace_sqlalchemy = True In order to trace ironic using OSProfiler, use openstackclient to run baremetal commands with ``--os-profile SECRET_KEY``. 
For example, the following will cause a to be printed after node list:: $ openstack --os-profile SECRET_KEY baremetal node list Output of the above command will include the following:: Trace ID: Display trace with command: osprofiler trace show --html The trace results can be seen using this command:: $ osprofiler trace show --html The trace results can be saved in a file with ``--out file-name`` option:: $ osprofiler trace show --html --out trace.html The trace results show the time spent in ironic-api, ironic-conductor, and db calls. More detailed db tracing is enabled if ``trace_sqlalchemy`` is set to true. Sample Trace: .. figure:: ../images/sample_trace.svg :width: 660px :align: left :alt: Sample Trace Each trace has embedded trace point details as shown below: .. figure:: ../images/sample_trace_details.svg :width: 660px :align: left :alt: Sample Trace Details References ========== - :osprofiler-doc:`OSProfiler – Cross-project profiling library ` - :ref:`deploy_devstack` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/releasing.rst0000644000175000017500000001776300000000000023514 0ustar00coreycorey00000000000000========================= Releasing Ironic Projects ========================= Since the responsibility for releases will move between people, we document that process here. A full list of projects that ironic manages is available in the `governance site`_. .. _`governance site`: https://governance.openstack.org/reference/projects/ironic.html Who is responsible for releases? ================================ The current PTL is ultimately responsible for making sure code gets released. They may choose to delegate this responsibility to a liaison, which is documented in the `cross-project liaison wiki`_. Anyone may submit a release request per the process below, but the PTL or liaison must +1 the request for it to be processed. .. 
_`cross-project liaison wiki`: https://wiki.openstack.org/wiki/CrossProjectLiaisons#Release_management Release process =============== Releases are managed by the OpenStack release team. The release process is documented in the `Project Team Guide`_. .. _`Project Team Guide`: https://docs.openstack.org/project-team-guide/release-management.html#how-to-release What do we have to release? =========================== The ironic project has a number of deliverables under its governance. The ultimate source of truth for this is `projects.yaml `__ in the governance repository. These deliverables have varying release models, and these are defined in the `deliverables YAML files `__ in the releases repository. In general, ironic deliverables follow the `cycle-with-intermediary `__ release model. Non-client libraries -------------------- The following deliverables are non-client libraries: * ironic-lib * metalsmith * sushy Client libraries ---------------- The following deliverables are client libraries: * python-ironicclient * python-ironic-inspector-client * sushy-cli Normal release -------------- The following deliverables are Neutron plugins: * networking-baremetal * networking-generic-switch The following deliverables are Horizon plugins: * ironic-ui The following deliverables are Tempest plugins: * ironic-tempest-plugin The following deliverables are services, or treated as such: * bifrost * ironic * ironic-inspector * ironic-prometheus-exporter * ironic-python-agent Independent ----------- The following deliverables are released `independently `__: * ironic-python-agent-builder * molteniron * sushy-tools * tenks * virtualbmc Not released ------------ The following deliverables do not need to be released: * ironic-inspector-specs * ironic-specs Things to do before releasing ============================= * Review the unreleased release notes, if the project uses them. Make sure they follow our :ref:`standards `, are coherent, and have proper grammar. 
Combine release notes if necessary (for example, a release note for a feature and another release note to add to that feature may be combined). * For ironic releases only, not ironic-inspector releases: if any new API microversions have been added since the last release, update the REST API version history (``doc/source/contributor/webapi-version-history.rst``) to indicate that they were part of the new release. * To support rolling upgrades, add this new release version (and release name if it is a named release) into ``ironic/common/release_mappings.py``: * in ``RELEASE_MAPPING`` make a copy of the ``master`` entry, and rename the first ``master`` entry to the new semver release version. * If this is a named release, add a ``RELEASE_MAPPING`` entry for the named release. Its value should be the same as that of the latest semver one (that you just added above). It is important to do this before a stable/ branch is made (or if `the grenade switch is made `_ to use the latest release from stable as the 'old' release). Otherwise, once it is made, CI (the grenade job that tests new-release -> master) will fail. Things to do after releasing ============================ When a release is done that results in a stable branch ------------------------------------------------------ When a release is done that results in a stable branch for the project, several changes need to be made. The release automation will push a number of changes that need to be approved. This includes: * In the new stable branch: * a change to point ``.gitreview`` at the branch * a change to update the upper constraints file used by ``tox`` * In the master branch: * updating the release notes RST to include the new branch. The generated RST does not include the version range in the title, so we typically submit a follow-up patch to do that. An example of this patch is `here `__. * update the `templates` in `.zuul.yaml` or `zuul.d/project.yaml`. 
The update is necessary to use the job for the next release `openstack-python3--jobs`. An example of this patch is `here `__. We need to submit patches for changes in the stable branch to: * update the ironic devstack plugin to point at the branched tarball for IPA. An example of this patch is `here `_. * update links in the documentation (``ironic/doc/source/``) to point to the branched versions of any openstack projects' (that branch) documents. As of Pike release, the only outlier is `diskimage-builder `_. * set appropriate defaults for ``TEMPEST_BAREMETAL_MIN_MICROVERSION`` and ``TEMPEST_BAREMETAL_MAX_MICROVERSION`` in ``devstack/lib/ironic`` to make sure that unsupported API tempest tests are skipped on stable branches. E.g. `patch 495319 `_. We need to submit patches for changes on master to: * create an empty commit with a ``Sem-Ver`` tag to bump the generated minor version. See `example `_ and `pbr documentation `_ for details. * to support rolling upgrades, since the release was a named release, we need to make these changes. Note that we need to wait until *after* the switch in grenade is made to test the latest release (N) with master (e.g. `for stable/queens `_). Doing these changes sooner -- after the ironic release and before the switch when grenade is testing the prior release (N-1) with master, will cause the tests to fail. (You may want to ask/remind infra/qa team, as to when they will do this switch.) * In ``ironic/common/release_mappings.py``, delete any entries from ``RELEASE_MAPPING`` associated with the oldest named release. Since we support upgrades between adjacent named releases, the master branch will only support upgrades from the most recent named release to master. * remove any DB migration scripts from ``ironic.cmd.dbsync.ONLINE_MIGRATIONS`` and remove the corresponding code from ironic. (These migration scripts are used to migrate from an old release to this latest release; they shouldn't be needed after that.) 
* remove any model class names from ``ironic.cmd.dbsync.NEW_MODELS``. As **ironic-tempest-plugin** is branchless, we need to submit a patch adding stable jobs to its master branch. `Example for Queens `_. For all releases ---------------- For all releases, whether or not it results in a stable branch: * update the specs repo to mark any specs completed in the release as implemented. * remove any -2s on patches that were blocked until after the release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/rolling-upgrades.rst0000644000175000017500000006132100000000000025006 0ustar00coreycorey00000000000000.. _rolling-upgrades-dev: ================ Rolling Upgrades ================ The ironic (ironic-api and ironic-conductor) services support rolling upgrades, starting with a rolling upgrade from the Ocata to the Pike release. This describes the design of rolling upgrades, followed by notes for developing new features or modifying an IronicObject. Design ====== Rolling upgrades between releases --------------------------------- Ironic follows the `release-cycle-with-intermediary release model `_. The releases are `semantic-versioned `_, in the form ... We refer to a ``named release`` of ironic as the release associated with a development cycle like Pike. In addition, ironic follows the `standard deprecation policy `_, which says that the deprecation period must be at least three months and a cycle boundary. This means that there will never be anything that is both deprecated *and* removed between two named releases. Rolling upgrades will be supported between: * named release N to N+1 (starting with N == Ocata) * any named release to its latest revision, containing backported bug fixes. Because those bug fixes can contain improvements to the upgrade process, the operator should patch the system before upgrading between named releases. 
* most recent named release N (and semver releases newer than N) to master. As with the above bullet point, there may be a bug or a feature introduced on a master branch, that we want to remove before publishing a named release. Deprecation policy allows to do this in a 3 month time frame. If the feature was included and removed in intermediate releases, there should be a release note added, with instructions on how to do a rolling upgrade to master from an affected release or release span. This would typically instruct the operator to upgrade to a particular intermediate release, before upgrading to master. Rolling upgrade process ----------------------- Ironic supports rolling upgrades as described in the :doc:`upgrade guide <../admin/upgrade-guide>`. The upgrade process will cause the ironic services to be running the ``FromVer`` and ``ToVer`` releases in this order: 0. Upgrade ironic code and run database schema migrations via the ``ironic-dbsync upgrade`` command. 1. Upgrade code and restart ironic-conductor services, one at a time. 2. Upgrade code and restart ironic-api services, one at a time. 3. Unpin API, RPC and object versions so that the services can now use the latest versions in ``ToVer``. This is done via updating the configuration option described below in `API, RPC and object version pinning`_ and then restarting the services. ironic-conductor services should be restarted first, followed by the ironic-api services. This is to ensure that when new functionality is exposed on the unpinned API service (via API micro version), it is available on the backend. 
+------+---------------------------------+---------------------------------+ | step | ironic-api | ironic-conductor | +======+=================================+=================================+ | 0 | all FromVer | all FromVer | +------+---------------------------------+---------------------------------+ | 1.1 | all FromVer | some FromVer, some ToVer-pinned | +------+---------------------------------+---------------------------------+ | 1.2 | all FromVer | all ToVer-pinned | +------+---------------------------------+---------------------------------+ | 2.1 | some FromVer, some ToVer-pinned | all ToVer-pinned | +------+---------------------------------+---------------------------------+ | 2.2 | all ToVer-pinned | all ToVer-pinned | +------+---------------------------------+---------------------------------+ | 3.1 | all ToVer-pinned | some ToVer-pinned, some ToVer | +------+---------------------------------+---------------------------------+ | 3.2 | all ToVer-pinned | all ToVer | +------+---------------------------------+---------------------------------+ | 3.3 | some ToVer-pinned, some ToVer | all ToVer | +------+---------------------------------+---------------------------------+ | 3.4 | all ToVer | all ToVer | +------+---------------------------------+---------------------------------+ Policy for changes to the DB model ---------------------------------- The policy for changes to the DB model is as follows: * Adding new items to the DB model is supported. * The dropping of columns or tables and corresponding objects' fields is subject to ironic's `deprecation policy `_. But its alembic script has to wait one more deprecation period, otherwise an ``unknown column`` exception will be thrown when ``FromVer`` services access the DB. This is because :command:`ironic-dbsync upgrade` upgrades the DB schema but ``FromVer`` services still contain the dropped field in their SQLAlchemy DB model. * An ``alembic.op.alter_column()`` to rename or resize a column is not allowed. 
Instead, split it into multiple operations, with one operation per release cycle (to maintain compatibility with an old SQLAlchemy model). For example, to rename a column, add the new column in release N, then remove the old column in release N+1. * Some implementations of SQL's ``ALTER TABLE``, such as adding foreign keys in PostgreSQL, may impose table locks and cause downtime. If the change cannot be avoided and the impact is significant (e.g. the table can be frequently accessed and/or store a large dataset), these cases must be mentioned in the release notes. API, RPC and object version pinning ----------------------------------- For the ironic services to be running old and new releases at the same time during a rolling upgrade, the services need to be able to handle different API, RPC and object versions. This versioning is handled via the configuration option: ``[DEFAULT]/pin_release_version``. It is used to pin the API, RPC and IronicObject (e.g., Node, Conductor, Chassis, Port, and Portgroup) versions for all the ironic services. The default value of empty indicates that ironic-api and ironic-conductor will use the latest versions of API, RPC and IronicObjects. Its possible values are releases, named (e.g. ``ocata``) or sem-versioned (e.g. ``7.0``). Internally, in `common/release_mappings.py `_, ironic maintains a mapping that indicates the API, RPC and IronicObject versions associated with each release. This mapping is maintained manually. During a rolling upgrade, the services using the new release will set the configuration option value to be the name (or version) of the old release. This will indicate to the services running the new release, which API, RPC and object versions that they should be compatible with, in order to communicate with the services using the old release. 
Handling API versions --------------------- When the (newer) service is pinned, the maximum API version it supports will be the pinned version -- which the older service supports (as described above at `API, RPC and object version pinning`_). The ironic-api service returns HTTP status code 406 for any requests with API versions that are higher than this maximum version. Handling RPC versions --------------------- `ConductorAPI.__init__() `_ sets the ``version_cap`` variable to the desired (latest or pinned) RPC API version and passes it to the ``RPCClient`` as an initialization parameter. This variable is then used to determine the maximum requested message version that the ``RPCClient`` can send. Each RPC call can customize the request according to this ``version_cap``. The `Ironic RPC versions`_ section below has more details about this. Handling IronicObject versions ------------------------------ Internally, ironic services deal with IronicObjects in their latest versions. Only at these boundaries, when the IronicObject enters or leaves the service, do we deal with object versioning: * getting objects from the database: convert to latest version * saving objects to the database: if pinned, save in pinned version; else save in latest version * serializing objects (to send over RPC): if pinned, send pinned version; else send latest version * deserializing objects (receiving objects from RPC): convert to latest version The ironic-api service also has to handle API requests/responses based on whether or how a feature is supported by the API version and object versions. For example, when the ironic-api service is pinned, it can only allow actions that are available to the object's pinned version, and cannot allow actions that are only available for the latest version of that object. To support this: * All the database tables (SQLAlchemy models) of the IronicObjects have a column named ``version``. The value is the version of the object that is saved in the database. 
* The method ``IronicObject.get_target_version()`` returns the target version. If pinned, the pinned version is returned. Otherwise, the latest version is returned. * The method ``IronicObject.convert_to_version()`` converts the object into the target version. The target version may be a newer or older version than the existing version of the object. The bulk of the work is done in the helper method ``IronicObject._convert_to_version()``. Subclasses that have new versions redefine this to perform the actual conversions. In the following, * The old release is ``FromVer``; it uses version 1.14 of a Node object. * The new release is ``ToVer``. It uses version 1.15 of a Node object -- this has a deprecated ``extra`` field and a new ``meta`` field that replaces ``extra``. * db_obj['meta'] and db_obj['extra'] are the database representations of those node fields. Getting objects from the database (API/conductor <-- DB) :::::::::::::::::::::::::::::::::::::::::::::::::::::::: Both ironic-api and ironic-conductor services read values from the database. These values are converted to IronicObjects via the method ``IronicObject._from_db_object()``. This method always returns the IronicObject in its latest version, even if it was in an older version in the database. This is done regardless of the service being pinned or not. Note that if an object is converted to a later version, that IronicObject will retain any changes (in its ``_changed_fields`` field) resulting from that conversion. This is needed in case the object gets saved later, in the latest version. 
For example, if the node in the database is in version 1.14 and has db_obj['extra'] set: * a ``FromVer`` service will get a Node with node.extra = db_obj['extra'] (and no knowledge of node.meta since it doesn't exist) * a ``ToVer`` service (pinned or unpinned), will get a Node with: * node.meta = db_obj['extra'] * node.extra = None * node._changed_fields = ['meta', 'extra'] Saving objects to the database (API/conductor --> DB) ::::::::::::::::::::::::::::::::::::::::::::::::::::: The version used for saving IronicObjects to the database is determined as follows: * For an unpinned service, the object is saved in its latest version. Since objects are always in their latest version, no conversions are needed. * For a pinned service, the object is saved in its pinned version. Since objects are always in their latest version, the object needs to be converted to the pinned version before being saved. The method ``IronicObject.do_version_changes_for_db()`` handles this logic, returning a dictionary of changed fields and their new values (similar to the existing ``oslo.versionedobjects.VersionedObject.obj_get_changes()``). Since we do not keep track internally, of the database version of an object, the object's ``version`` field will always be part of these changes. The `Rolling upgrade process`_ (at step 3.1) ensures that by the time an object can be saved in its latest version, all services are running the newer release (although some may still be pinned) and can handle the latest object versions. An interesting situation can occur when the services are as described in step 3.1. It is possible for an IronicObject to be saved in a newer version and subsequently get saved in an older version. For example, a ``ToVer`` unpinned conductor might save a node in version 1.5. A subsequent request may cause a ``ToVer`` pinned conductor to replace and save the same node in version 1.4! 
Sending objects via RPC (API/conductor -> RPC) :::::::::::::::::::::::::::::::::::::::::::::: When a service makes an RPC request, any IronicObjects that are sent as part of that request are serialized into entities or primitives via ``IronicObjectSerializer.serialize_entity()``. The version used for objects being serialized is as follows: * For an unpinned service, the object is serialized to its latest version. Since objects are always in their latest version, no conversions are needed. * For a pinned service, the object is serialized to its pinned version. Since objects are always in their latest version, the object is converted to the pinned version before being serialized. The converted object includes changes that resulted from the conversion; this is needed so that the service at the other end of the RPC request has the necessary information if that object will be saved to the database. Receiving objects via RPC (API/conductor <- RPC) :::::::::::::::::::::::::::::::::::::::::::::::: When a service receives an RPC request, any entities that are part of the request need to be deserialized (via ``oslo.versionedobjects.VersionedObjectSerializer.deserialize_entity()``). For entities that represent IronicObjects, we want the deserialization process (via ``IronicObjectSerializer._process_object()``) to result in IronicObjects that are in their latest version, regardless of the version they were sent in and regardless of whether the receiving service is pinned or not. Again, any objects that are converted will retain the changes that resulted from the conversion, useful if that object is later saved to the database. For example, a ``FromVer`` ironic-api could issue an ``update_node()`` RPC request with a node in version 1.4, where node.extra was changed (so node._changed_fields = ['extra']). This node will be serialized in version 1.4. The receiving ``ToVer`` pinned ironic-conductor deserializes it and converts it to version 1.5. 
The resulting node will have node.meta set (to the changed value from node.extra in v1.14), node.extra = None
- ironic-api (client-side) sets a version cap (by passing the version cap to the constructor of oslo_messaging.RPCClient). This "pinning" is in place during a rolling upgrade when the ``[DEFAULT]/pin_release_version`` configuration option is set. - New RPC methods are not available when the service is pinned to the older release version. In this case, the corresponding REST API function should return a server error or implement alternative behaviours. - Methods which change arguments should run ``client.can_send_version()`` to see if the version of the request is compatible with the version cap of the RPC Client. Otherwise the request needs to be created to work with a previous version that is supported. - ironic-conductor (server-side) should tolerate older versions of requests in order to keep working during the rolling upgrade process. The behaviour of ironic-conductor will depend on the input parameters passed from the client-side. - Old methods can be removed only after they are no longer used by a previous named release. Object versions --------------- When subclasses of ``ironic.objects.base.IronicObject`` are modified, the following needs to be considered: - Any change of fields or change in signature of remotable methods needs a bump of the object version. The object versions are also maintained in ``ironic/common/release_mappings.py``. - New objects must be added to ``ironic/common/release_mappings.py``. Also for the first releases they should be excluded from the version check by adding their class names to the ``NEW_MODELS`` list in ``ironic/cmd/dbsync.py``. - The arguments of remotable methods (methods which are remoted to the conductor via RPC) can only be added as optional. They cannot be removed or changed in an incompatible way (to the previous release). - Field types cannot be changed. Instead, create a new field and deprecate the old one. 
- There is a `unit test `_ that generates the hash of an object using its fields and the signatures of its remotable methods. Objects that have a version bump need to be updated in the `expected_object_fingerprints `_ dictionary; otherwise this test will fail. A failed test can also indicate to the developer that their change(s) to an object require a version bump. - When new version objects communicate with old version objects and when reading or writing to the database, ``ironic.objects.base.IronicObject._convert_to_version()`` will be called to convert objects to the target version. Objects should implement their own ._convert_to_version() to remove or alter fields which were added or changed after the target version:: def _convert_to_version(self, target_version, remove_unavailable_fields=True): """Convert to the target version. Subclasses should redefine this method, to do the conversion of the object to the target version. Convert the object to the target version. The target version may be the same, older, or newer than the version of the object. This is used for DB interactions as well as for serialization/deserialization. The remove_unavailable_fields flag is used to distinguish these two cases: 1) For serialization/deserialization, we need to remove the unavailable fields, because the service receiving the object may not know about these fields. remove_unavailable_fields is set to True in this case. 2) For DB interactions, we need to set the unavailable fields to their appropriate values so that these fields are saved in the DB. (If they are not set, the VersionedObject magic will not know to save/update them to the DB.) remove_unavailable_fields is set to False in this case. :param target_version: the desired version of the object :param remove_unavailable_fields: True to remove fields that are unavailable in the target version; set this to True when (de)serializing. 
False to set the unavailable fields to appropriate values; set this to False for DB interactions. This method must handle: * converting from an older version to a newer version * converting from a newer version to an older version * making sure, when converting, that you take into consideration other object fields that may have been affected by a field (value) only available in a newer version. For example, if field 'new' is only available in Node version 1.5 and Node.affected = Node.new+3, when converting to 1.4 (an older version), you may need to change the value of Node.affected too. Online data migrations ---------------------- The ``ironic-dbsync online_data_migrations`` command will perform online data migrations. Keep in mind the `Policy for changes to the DB model`_. Future incompatible changes in SQLAlchemy models, like removing or renaming columns and tables can break rolling upgrades (when ironic services are run with different release versions simultaneously). It is forbidden to remove these database resources when they may still be used by the previous named release. When `creating new Alembic migrations `_ which modify existing models, make sure that any new columns default to NULL. Test the migration out on a non-empty database to make sure that any new constraints don't cause the database to be locked out for normal operations. You can find an overview on what DDL operations may cause downtime in https://dev.mysql.com/doc/refman/5.7/en/innodb-create-index-overview.html. (You should also check older, widely deployed InnoDB versions for issues.) In the case of PostgreSQL, adding a foreign key may lock a whole table for writes. Make sure to add a release note if there are any downtime-related concerns. Backfilling default values, and migrating data between columns or between tables must be implemented inside an online migration script. 
A script is a database API method (added to ``ironic/db/api.py`` and ``ironic/db/sqlalchemy/api.py``) which takes two arguments: - context: an admin context - max_count: this is used to limit the query. It is the maximum number of objects to migrate; >= 0. If zero, all the objects will be migrated. It returns a two-tuple: - the total number of objects that need to be migrated, at the start of the method, and - the number of migrated objects. In this method, the version column can be used to select and update old objects. The method name should be added to the list of ``ONLINE_MIGRATIONS`` in ``ironic/cmd/dbsync.py``. The method should be removed in the next named release after this one. After online data migrations are completed and the SQLAlchemy models no longer contain old fields, old columns can be removed from the database. This takes at least 3 releases, since we have to wait until the previous named release no longer contains references to the old schema. Before removing any resources from the database by modifying the schema, make sure that your implementation checks that all objects in the affected tables have been migrated. This check can be implemented using the version column. "ironic-dbsync upgrade" command ------------------------------- The ``ironic-dbsync upgrade`` command first checks that the versions of the objects are compatible with the (new) release of ironic, before it will make any DB schema changes. If one or more objects are not compatible, the upgrade will not be performed. This check is done by comparing the objects' ``version`` field in the database with the expected (or supported) versions of these objects. The supported versions are the versions specified in ``ironic.common.release_mappings.RELEASE_MAPPING``. The newly created tables cannot pass this check and thus have to be excluded by adding their object class names (e.g. ``Node``) to ``ironic.cmd.dbsync.NEW_MODELS``. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/states.rst0000644000175000017500000002373500000000000023042 0ustar00coreycorey00000000000000.. _states: ====================== Ironic's State Machine ====================== State Machine Diagram ===================== The diagram below shows the provisioning states that an Ironic node goes through during the lifetime of a node. The diagram also depicts the events that transition the node to different states. Stable states are highlighted with a thicker border. All transitions from stable states are initiated by API requests. There are a few other API-initiated-transitions that are possible from non-stable states. The events for these API-initiated transitions are indicated with '(via API)'. Internally, the conductor initiates the other transitions (depicted in gray). .. figure:: ../images/states.svg :width: 660px :align: left :alt: Ironic state transitions State Descriptions ================== enroll (stable state) This is the state that all nodes start off in when created using API version 1.11 or newer. When a node is in the ``enroll`` state, the only thing ironic knows about it is that it exists, and ironic cannot take any further action by itself. Once a node has its driver/interfaces and their required information set in ``node.driver_info``, the node can be transitioned to the ``verifying`` state by setting the node's provision state using the ``manage`` verb. verifying ironic will validate that it can manage the node using the information given in ``node.driver_info`` and with either the driver/hardware type and interfaces it has been assigned. This involves going out and confirming that the credentials work to access whatever node control mechanism they talk to. 
manageable (stable state) Once ironic has verified that it can manage the node using the driver/interfaces and credentials passed in at node create time, the node will be transitioned to the ``manageable`` state. From ``manageable``, nodes can transition to: * ``manageable`` (through ``cleaning``) by setting the node's provision state using the ``clean`` verb. * ``manageable`` (through ``inspecting``) by setting the node's provision state using the ``inspect`` verb. * ``available`` (through ``cleaning`` if automatic cleaning is enabled) by setting the node's provision state using the ``provide`` verb. * ``active`` (through ``adopting``) by setting the node's provision state using the ``adopt`` verb. ``manageable`` is the state that a node should be moved into when any updates need to be made to it such as changes to fields in driver_info and updates to networking information on ironic ports assigned to the node. ``manageable`` is also the only stable state that can be transitioned to, from these failure states: * ``adopt failed`` * ``clean failed`` * ``inspect failed`` inspecting ``inspecting`` will utilize node introspection to update hardware-derived node properties to reflect the current state of the hardware. Typically, the node will transition to ``manageable`` if inspection is synchronous, or ``inspect wait`` if asynchronous. The node will transition to ``inspect failed`` if error occurred. inspect wait This is the provision state used when an asynchronous inspection is in progress. A successfully inspected node shall transition to ``manageable`` state. inspect failed This is the state a node will move into when inspection of the node fails. From here the node can transitioned to: * ``inspecting`` by setting the node's provision state using the ``inspect`` verb. * ``manageable`` by setting the node's provision state using the ``manage`` verb cleaning Nodes in the ``cleaning`` state are being scrubbed and reprogrammed into a known configuration. 
When a node is in the ``cleaning`` state it means that the conductor is executing the clean step (for out-of-band clean steps) or preparing the environment (building PXE configuration files, configuring the DHCP, etc) to boot the ramdisk for running in-band clean steps. clean wait Just like the ``cleaning`` state, the nodes in the ``clean wait`` state are being scrubbed and reprogrammed. The difference is that in the ``clean wait`` state the conductor is waiting for the ramdisk to boot or the clean step which is running in-band to finish. The cleaning process of a node in the ``clean wait`` state can be interrupted by setting the node's provision state using the ``abort`` verb if the task that is running allows it. available (stable state) After nodes have been successfully preconfigured and cleaned, they are moved into the ``available`` state and are ready to be provisioned. From ``available``, nodes can transition to: * ``active`` (through ``deploying``) by setting the node's provision state using the ``active`` verb. * ``manageable`` by setting the node's provision state using the ``manage`` verb deploying Nodes in ``deploying`` are being prepared to run a workload on them. This consists of running a series of tasks, such as: * Setting appropriate BIOS configurations * Partitioning drives and laying down file systems. * Creating any additional resources (node-specific network config, a config drive partition, etc.) that may be required by additional subsystems. wait call-back Just like the ``deploying`` state, the nodes in ``wait call-back`` are being deployed. The difference is that in ``wait call-back`` the conductor is waiting for the ramdisk to boot or execute parts of the deployment which need to run in-band on the node (for example, installing the bootloader, or writing the image to the disk). The deployment of a node in ``wait call-back`` can be interrupted by setting the node's provision state using the ``deleted`` verb. 
deploy failed This is the state a node will move into when a deployment fails, for example a timeout waiting for the ramdisk to PXE boot. From here the node can be transitioned to: * ``active`` (through ``deploying``) by setting the node's provision state using either the ``active`` or ``rebuild`` verbs. * ``available`` (through ``deleting`` and ``cleaning``) by setting the node's provision state using the ``deleted`` verb. active (stable state) Nodes in ``active`` have a workload running on them. ironic may collect out-of-band sensor information (including power state) on a regular basis. Nodes in ``active`` can transition to: * ``available`` (through ``deleting`` and ``cleaning``) by setting the node's provision state using the ``deleted`` verb. * ``active`` (through ``deploying``) by setting the node's provision state using the ``rebuild`` verb. * ``rescue`` (through ``rescuing``) by setting the node's provision state using the ``rescue`` verb. deleting Nodes in ``deleting`` state are being torn down from running an active workload. In ``deleting``, ironic tears down and removes any configuration and resources it added in ``deploying`` or ``rescuing``. error (stable state) This is the state a node will move into when deleting an active deployment fails. From ``error``, nodes can transition to: * ``available`` (through ``deleting`` and ``cleaning``) by setting the node's provision state using the ``deleted`` verb. adopting This state allows ironic to take over management of a baremetal node with an existing workload on it. Ordinarily when a baremetal node is enrolled and managed by ironic, it must transition through ``cleaning`` and ``deploying`` to reach ``active`` state. However, those baremetal nodes that have an existing workload on them, do not need to be deployed or cleaned again, so this transition allows these nodes to move directly from ``manageable`` to ``active``. rescuing Nodes in ``rescuing`` are being prepared to perform rescue operations. 
This consists of running a series of tasks, such as: * Setting appropriate BIOS configurations. * Creating any additional resources (node-specific network config, etc.) that may be required by additional subsystems. rescue wait Just like the ``rescuing`` state, the nodes in ``rescue wait`` are being rescued. The difference is that in ``rescue wait`` the conductor is waiting for the ramdisk to boot or execute parts of the rescue which need to run in-band on the node (for example, setting the password for user named ``rescue``). The rescue operation of a node in ``rescue wait`` can be aborted by setting the node's provision state using the ``abort`` verb. rescue failed This is the state a node will move into when a rescue operation fails, for example a timeout waiting for the ramdisk to PXE boot. From here the node can be transitioned to: * ``rescue`` (through ``rescuing``) by setting the node's provision state using the ``rescue`` verb. * ``active`` (through ``unrescuing``) by setting the node's provision state using the ``unrescue`` verb. * ``available`` (through ``deleting``) by setting the node's provision state using the ``deleted`` verb. rescue (stable state) Nodes in ``rescue`` have a rescue ramdisk running on them. Ironic may collect out-of-band sensor information (including power state) on a regular basis. Nodes in ``rescue`` can transition to: * ``active`` (through ``unrescuing``) by setting the node's provision state using the ``unrescue`` verb. * ``available`` (through ``deleting``) by setting the node's provision state using the ``deleted`` verb. unrescuing Nodes in ``unrescuing`` are being prepared to transition to ``active`` state from ``rescue`` state. This consists of running a series of tasks, such as setting appropriate BIOS configurations such as changing boot device. unrescue failed This is the state a node will move into when an unrescue operation fails. 
From here the node can be transitioned to: * ``rescue`` (through ``rescuing``) by setting the node's provision state using the ``rescue`` verb. * ``active`` (through ``unrescuing``) by setting the node's provision state using the ``unrescue`` verb. * ``available`` (through ``deleting``) by setting the node's provision state using the ``deleted`` verb. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/third-party-ci.rst0000644000175000017500000000333700000000000024373 0ustar00coreycorey00000000000000.. _third-party-ci: ================================== Third Party Continuous Integration ================================== .. NOTE:: This document is a work-in-progress. Unfilled sections will be worked in follow-up patchsets. This version is to get a basic outline and index done so that we can then build on it. (krtaylor) This document provides tips and guidelines for third-party driver developers setting up their continuous integration test systems. CI Architecture Overview ======================== Requirements Cookbook ===================== Sizing ------ Infrastructure -------------- This section describes what changes you'll need to make to a your CI system to add an ironic job. jenkins changes ############### nodepool changes ################ neutron changes ############### pre-test hook ############# cleanup hook ############ Ironic ------ Hardware Pool Management ======================== Problem ------- If you are using actual hardware as target machines for your CI testing then the problem of two jobs trying to use the name target arises. If you have one target machine and a maximum number of one jobs running on your ironic pipeline at a time, then you won't run into this problem. However, one target may not handle the load of ironic's daily patch submissions. 
Solutions --------- Zuul v3 ####### Molten Iron ########### `molteniron `_ is a tool that allows you to reserve hardware from a pool at the last minute to use in your job. Once finished testing, you can unreserve the hardware making it available for the next test job. Tips and Tricks =============== Optimize Run Time ----------------- Image Server ############ Other References ---------------- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/vendor-passthru.rst0000644000175000017500000001442600000000000024700 0ustar00coreycorey00000000000000.. _vendor-passthru: ============== Vendor Methods ============== This document is a quick tutorial on writing vendor specific methods to a driver. The first thing to note is that the Ironic API supports two vendor endpoints: A driver vendor passthru and a node vendor passthru. * The ``VendorInterface`` allows hardware types to expose a custom top-level functionality which is not specific to a Node. For example, let's say the driver `ipmi` exposed a method called `authentication_types` that would return what are the authentication types supported. It could be accessed via the Ironic API like: :: GET http://
<address>:<port>/v1/drivers/ipmi/vendor_passthru/authentication_types
<address>:<port>/v1/nodes/<node UUID>/vendor_passthru/send_raw Writing Vendor Methods
A method decorated with the `@passthru` decorator should expect a Task object as first parameter and a method decorated with the `@driver_passthru` decorator should expect a Context object as first parameter. Both decorators accept these parameters: * http_methods: A list of what the HTTP methods supported by that vendor function. To know what HTTP method that function was invoked with, a `http_method` parameter will be present in the `kwargs`. Supported HTTP methods are *POST*, *PUT*, *GET* and *PATCH*. * method: By default the method name is the name of the python function, if you want to use a different name this parameter is where this name can be set. For example: .. code-block:: python @passthru(['PUT'], method="alternative_name") def name(self, task, **kwargs): ... * description: A string containing a nice description about what that method is supposed to do. Defaults to "" (empty string). .. _VendorInterface: ../api/ironic.drivers.base.html#ironic.drivers.base.VendorInterface * async_call: A boolean value to determine whether this method should run asynchronously or synchronously. Defaults to True (Asynchronously). .. note:: This parameter was previously called "async". The node vendor passthru decorator (`@passthru`) also accepts the following parameter: * require_exclusive_lock: A boolean value determining whether this method should require an exclusive lock on a node between validate() and the beginning of method execution. For synchronous methods, the lock on the node would also be kept for the duration of method execution. Defaults to True. .. WARNING:: Please avoid having a synchronous method for slow/long-running operations **or** if the method does talk to a BMC; BMCs are flaky and very easy to break. .. WARNING:: Each asynchronous request consumes a worker thread in the ``ironic-conductor`` process. This can lead to starvation of the thread pool, resulting in a denial of service. 
Give the new vendor interface implementation a human-friendly name and create an entry point for it in the ``setup.cfg``:: ironic.hardware.interfaces.vendor = example = ironic.drivers.modules.example:ExampleVendor Finally, add it to the list of supported vendor interfaces for relevant hardware types, for example: .. code-block:: python class ExampleHardware(generic.GenericHardware): ... @property def supported_vendor_interfaces(self): return [example.ExampleVendor] Backwards Compatibility ======================= There is no requirement that changes to a vendor method be backwards compatible. However, for your users' sakes, we highly recommend that you do so. If you are changing the exceptions being raised, you might want to ensure that the same HTTP code is being returned to the user. For non-backwards compatibility, please make sure you add a release note that indicates this. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/vision-reflection.rst0000644000175000017500000000434100000000000025166 0ustar00coreycorey00000000000000.. _vision_reflection: ================================================= Comparison to the 2018 OpenStack Technical Vision ================================================= In late-2018, the OpenStack Technical Committee composed a `technical vision `_ of what OpenStack clouds should look like. While every component differs, and "cloudy" interactions change dramatically the closer to physical hardware one gets, there are a few areas where Ironic could use some improvement. This list is largely for the purposes of help wanted. It is also important to note that Ironic as a project has a `vision document `_ for itself. 
The Pillars of Cloud - Self Service =================================== * Ironic's mechanisms and tooling are low level infrastructure mechanisms and as such there has never been a huge emphasis or need on making Ironic be capable of offering direct multi-tenant interaction. Most users interact with the bare metal managed by Ironic via Nova, which abstracts away many of these issues. Eventually, we should offer direct multi-tenancy which is not oriented towards admin-only. Design Goals - Built-in Reliability and Durability ================================================== * Ironic presently considers in-flight operations as failed upon the restart of a controller that was previously performing a task, because we do not know the current status of the task upon re-start. In some cases, this makes sense, but potentially requires administrative intervention in the worst of cases. In a perfect universe, Ironic "conductors" would validate their perception, in case tasks actually finished. Design Goals - Graphical User Interface ======================================= * While a graphical interface was developed for Horizon in the form of `ironic-ui `_, currently ironic-ui receives only minimal housekeeping. As Ironic has evolved, ironic-ui is stuck on version `1.34` and knows nothing of our evolution since. Ironic ultimately needs a contributor with sufficient time to pick up ``ironic-ui`` or to completely replace it as a functional and customizable user interface. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/vision.rst0000644000175000017500000000631700000000000023043 0ustar00coreycorey00000000000000.. 
_vision: ================== Contributor Vision ================== Background ========== During the Rocky Project Teams Gathering (February/March 2018), the contributors in the room at that time took a few minutes to write out each contributor's vision of where they see ironic in five years time. After everyone had a chance to spend a few minutes writing, we went around the room and gave every contributor the chance to read their vision and allow other contributors to ask questions to better understand what each individual contributor wrote. While we were doing that, we also took time to capture the common themes. This entire exercise did result in some laughs and a common set of words, and truly helped to ensure that the entire team proceeded to use the same "words" to describe various aspects as the sessions progressed during the week. We also agreed that we should write a shared vision, to have something to reference and remind us of where we want to go as a community. Rocky Vision: For 2022-2023 =========================== Common Themes ------------- Below is an entirely unscientific summary of common themes that arose during the discussion among fourteen contributors. * Contributors picked a time between 2020, and 2023. * 4 Contributors foresee ironic being the leading Open Source baremetal deployment technology * 2 Contributors foresee ironic reaching feature parity with Nova. * 2 Contributors foresee users moving all workloads "to the cloud" * 1 Contributor foresees Kubernetes and Container integration being the major focus of Bare Metal as a Service further down the road. * 2 Contributors foresee greater composable hardware being more common. * 1 Contributor foresees ironic growing into or supporting CMDBs. * 2 Contributors foresee that features are more micro-service oriented. * 2 Contributors foresee that ironic supported all of the possible baremetal management needs * 1 Contributor foresees standalone use being more common. 
* 2 Contributors foresee ironic's developer community growing * 2 Contributors foresee that auto-discovery will be more common. * 2 Contributors foresee ironic being used for devices beyond servers, such as lightbulbs, IOT, etc. Vision Statement ---------------- The year is 2022. We're meeting to plan the Z release of Ironic. We stopped to reflect upon the last few years of Ironic's growth, how we had come such a long way to become the defacto open source baremetal deployment technology. How we had grown our use cases, and support for consumers such as containers, and users who wished to manage specialized fleets of composed machines. New contributors and their different use cases have brought us closer to parity with virtual machines. Every day we're gaining word of more operators adopting the ironic community's CMDB integration to leverage hardware discovery. We've heard of operators deploying racks upon racks of new hardware by just connecting the power and network cables, and from there the operators have discovered time to write the world's greatest operator novel with the time saved in commissioning new racks of hardware. Time has brought us closer and taught us to be more collaborative across the community, and we look forward to our next release together. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/webapi-version-history.rst0000644000175000017500000004253100000000000026163 0ustar00coreycorey00000000000000======================== REST API Version History ======================== 1.65 (Ussuri, master) --------------------- Added ``lessee`` field to the node object. The field should match the ``project_id`` of the intended lessee. If an allocation has an owner, then the allocation process will only match the allocation with a node that has the same ``owner`` or ``lessee``. 
1.64 (Ussuri, master) --------------------- Added the ``network_type`` to the port objects ``local_link_connection`` field. The ``network_type`` can be set to either ``managed`` or ``unmanaged``. When the type is ``unmanaged`` other fields are not required. Use ``unmanaged`` when the neutron ``network_interface`` is required, but the network is in fact a flat network where no actual switch management is done. 1.63 (Ussuri, master) --------------------- Added the following new endpoints for indicator management: * ``GET /v1/nodes//management/indicators`` to list all available indicators names for each of the hardware component. Currently known components are: ``chassis``, ``system``, ``disk``, ``power`` and ``nic``. * ``GET /v1/nodes//management/indicators//`` to retrieve all indicators and their states for the hardware component. * ``PUT /v1/nodes//management/indicators//`` change state of the desired indicators of the component. 1.62 (Ussuri, master) --------------------- This version of the API is to signify capability of an ironic deployment to support the ``agent token`` functionality with the ``ironic-python-agent``. 1.61 (Ussuri, master) --------------------- Added ``retired`` field to the node object to mark nodes for retirement. If set, this flag will move nodes to ``manageable`` upon automatic cleaning. ``manageable`` nodes which have this flag set cannot be moved to available. Also added ``retired_reason`` to specify the retirement reason. 1.60 (Ussuri, master) --------------------- Added ``owner`` field to the allocation object. The field should match the ``project_id`` of the intended owner. If the ``owner`` field is set, the allocation process will only match the allocation with a node that has the same ``owner`` field set. 1.59 (Ussuri, master) --------------------- Added the ability to specify a ``vendor_data`` dictionary field in the ``configdrive`` parameter submitted with the deployment of a node. 
The value is a dictionary which is served as ``vendor_data2.json`` in the config drive. 1.58 (Train, 12.2.0) -------------------- Added the ability to backfill allocations for already deployed nodes by creating an allocation with ``node`` set. 1.57 (Train, 12.2.0) -------------------- Added the following new endpoint for allocation: * ``PATCH /v1/allocations/`` that allows updating ``name`` and ``extra`` fields for an existing allocation. 1.56 (Stein, 12.1.0) -------------------- Added the ability for the ``configdrive`` parameter submitted with the deployment of a node, to include a ``meta_data``, ``network_data`` and ``user_data`` dictionary fields. Ironic will now use the supplied data to create a configuration drive for the user. Prior uses of the ``configdrive`` field are unaffected. 1.55 (Stein, 12.1.0) -------------------- Added the following new endpoints for deploy templates: * ``GET /v1/deploy_templates`` to list all deploy templates. * ``GET /v1/deploy_templates/`` to retrieve details of a deploy template. * ``POST /v1/deploy_templates`` to create a deploy template. * ``PATCH /v1/deploy_templates/`` to update a deploy template. * ``DELETE /v1/deploy_templates/`` to delete a deploy template. 1.54 (Stein, 12.1.0) -------------------- Added new endpoints for external ``events``: * POST /v1/events for creating events. (This endpoint is only intended for internal consumption.) 1.53 (Stein, 12.1.0) -------------------- Added ``is_smartnic`` field to the port object to enable Smart NIC port creation in addition to local link connection attributes ``port_id`` and ``hostname``. 1.52 (Stein, 12.1.0) -------------------- Added allocation API, allowing reserving a node for deployment based on resource class and traits. The new endpoints are: * ``POST /v1/allocations`` to request an allocation. * ``GET /v1/allocations`` to list all allocations. * ``GET /v1/allocations/`` to retrieve the allocation details. 
* ``GET /v1/nodes//allocation`` to retrieve an allocation associated with the node. * ``DELETE /v1/allocations/`` to remove the allocation. * ``DELETE /v1/nodes//allocation`` to remove an allocation associated with the node. Also added a new field ``allocation_uuid`` to the node resource. 1.51 (Stein, 12.1.0) -------------------- Added ``description`` field to the node object to enable operators to store any information related to the node. The field is limited to 4096 characters. 1.50 (Stein, 12.1.0) -------------------- Added ``owner`` field to the node object to enable operators to store information in relation to the owner of a node. The field is up to 255 characters and MAY be used at a later point in time to allow designation and delegation of permissions. 1.49 (Stein, 12.0.0) -------------------- Added new endpoints for retrieving conductors information, and added a ``conductor`` field to node object. 1.48 (Stein, 12.0.0) -------------------- Added ``protected`` field to the node object to allow protecting deployed nodes from undeploying, rebuilding or deletion. Also added ``protected_reason`` to specify the reason of making the node protected. 1.47 (Stein, 12.0.0) -------------------- Added ``automated_clean`` field to the node object, enabling cleaning per node. 1.46 (Rocky, 11.1.0) -------------------- Added ``conductor_group`` field to the node and the node response, as well as support to the API to return results by matching the parameter. 1.45 (Rocky, 11.1.0) -------------------- Added ``reset_interfaces`` parameter to node's PATCH request, to specify whether to reset hardware interfaces to their defaults on driver's update. 1.44 (Rocky, 11.1.0) -------------------- Added ``deploy_step`` to the node object, to indicate the current deploy step (if any) being performed on the node. 
1.43 (Rocky, 11.0.0) -------------------- Added ``?detail=`` boolean query to the API list endpoints to provide a more RESTful alternative to the existing ``/nodes/detail`` and similar endpoints. 1.42 (Rocky, 11.0.0) -------------------- Added ``fault`` to the node object, to indicate currently detected fault on the node. 1.41 (Rocky, 11.0.0) -------------------- Added support to abort inspection of a node in the ``inspect wait`` state. 1.40 (Rocky, 11.0.0) -------------------- Added BIOS properties as sub resources of nodes: * GET /v1/nodes//bios * GET /v1/nodes//bios/ Added ``bios_interface`` field to the node object to allow getting and setting the interface. 1.39 (Rocky, 11.0.0) -------------------- Added ``inspect wait`` to available provision states. A node is shown as ``inspect wait`` instead of ``inspecting`` during asynchronous inspection. 1.38 (Queens, 10.1.0) --------------------- Added provision_state verbs ``rescue`` and ``unrescue`` along with the following states: ``rescue``, ``rescue failed``, ``rescue wait``, ``rescuing``, ``unrescue failed``, and ``unrescuing``. After rescuing a node, it will be left in the ``rescue`` state running a rescue ramdisk, configured with the ``rescue_password``, and listening with ssh on the specified network interfaces. Unrescuing a node will return it to ``active``. Added ``rescue_interface`` to the node object, to allow setting the rescue interface for a dynamic driver. 1.37 (Queens, 10.1.0) --------------------- Adds support for node traits, with the following new endpoints. * GET /v1/nodes//traits lists the traits for a node. * PUT /v1/nodes//traits sets all traits for a node. * PUT /v1/nodes//traits/ adds a trait to a node. * DELETE /v1/nodes//traits removes all traits from a node. * DELETE /v1/nodes//traits/ removes a trait from a node. 
A node's traits are also included the following node query and list responses: * GET /v1/nodes/ * GET /v1/nodes/detail * GET /v1/nodes?fields=traits Traits cannot be specified on node creation, nor can they be updated via a PATCH request on the node. 1.36 (Queens, 10.0.0) --------------------- Added ``agent_version`` parameter to deploy heartbeat request for version negotiation with Ironic Python Agent features. 1.35 (Queens, 9.2.0) -------------------- Added ability to provide ``configdrive`` when node is updated to ``rebuild`` provision state. 1.34 (Pike, 9.0.0) ------------------ Adds a ``physical_network`` field to the port object. All ports in a portgroup must have the same value in their ``physical_network`` field. 1.33 (Pike, 9.0.0) ------------------ Added ``storage_interface`` field to the node object to allow getting and setting the interface. Added ``default_storage_interface`` and ``enabled_storage_interfaces`` fields to the driver object to show the information. 1.32 (Pike, 9.0.0) ------------------ Added new endpoints for remote volume configuration: * GET /v1/volume as a root for volume resources * GET /v1/volume/connectors for listing volume connectors * POST /v1/volume/connectors for creating a volume connector * GET /v1/volume/connectors/ for showing a volume connector * PATCH /v1/volume/connectors/ for updating a volume connector * DELETE /v1/volume/connectors/ for deleting a volume connector * GET /v1/volume/targets for listing volume targets * POST /v1/volume/targets for creating a volume target * GET /v1/volume/targets/ for showing a volume target * PATCH /v1/volume/targets/ for updating a volume target * DELETE /v1/volume/targets/ for deleting a volume target Volume resources also can be listed as sub resources of nodes: * GET /v1/nodes//volume * GET /v1/nodes//volume/connectors * GET /v1/nodes//volume/targets 1.31 (Ocata, 7.0.0) ------------------- Added the following fields to the node object, to allow getting and setting interfaces for a 
dynamic driver: * boot_interface * console_interface * deploy_interface * inspect_interface * management_interface * power_interface * raid_interface * vendor_interface 1.30 (Ocata, 7.0.0) ------------------- Added dynamic driver APIs: * GET /v1/drivers now accepts a ``type`` parameter (optional, one of ``classic`` or ``dynamic``), to limit the result to only classic drivers or dynamic drivers (hardware types). Without this parameter, both classic and dynamic drivers are returned. * GET /v1/drivers now accepts a ``detail`` parameter (optional, one of ``True`` or ``False``), to show all fields for a driver. Defaults to ``False``. * GET /v1/drivers now returns an additional ``type`` field to show if the driver is classic or dynamic. * GET /v1/drivers/ now returns an additional ``type`` field to show if the driver is classic or dynamic. * GET /v1/drivers/ now returns additional fields that are null for classic drivers, and set as following for dynamic drivers: * The value of the default__interface is the entrypoint name of the calculated default interface for that type: * default_boot_interface * default_console_interface * default_deploy_interface * default_inspect_interface * default_management_interface * default_network_interface * default_power_interface * default_raid_interface * default_vendor_interface * The value of the enabled__interfaces is a list of entrypoint names of the enabled interfaces for that type: * enabled_boot_interfaces * enabled_console_interfaces * enabled_deploy_interfaces * enabled_inspect_interfaces * enabled_management_interfaces * enabled_network_interfaces * enabled_power_interfaces * enabled_raid_interfaces * enabled_vendor_interfaces 1.29 (Ocata, 7.0.0) ------------------- Add a new management API to support inject NMI, 'PUT /v1/nodes/(node_ident)/management/inject_nmi'. 1.28 (Ocata, 7.0.0) ------------------- Add '/v1/nodes//vifs' endpoint for attach, detach and list of VIFs. 
1.27 (Ocata, 7.0.0) ------------------- Add ``soft rebooting`` and ``soft power off`` as possible values for the ``target`` field of the power state change payload, and also add ``timeout`` field to it. 1.26 (Ocata, 7.0.0) ------------------- Add portgroup ``mode`` and ``properties`` fields. 1.25 (Ocata, 7.0.0) ------------------- Add possibility to unset chassis_uuid from a node. 1.24 (Ocata, 7.0.0) ------------------- Added new endpoints '/v1/nodes//portgroups' and '/v1/portgroups//ports'. Added new field ``port.portgroup_uuid``. 1.23 (Ocata, 7.0.0) ------------------- Added '/v1/portgroups/ endpoint. 1.22 (Newton, 6.1.0) -------------------- Added endpoints for deployment ramdisks. 1.21 (Newton, 6.1.0) -------------------- Add node ``resource_class`` field. 1.20 (Newton, 6.1.0) -------------------- Add node ``network_interface`` field. 1.19 (Newton, 6.1.0) -------------------- Add ``local_link_connection`` and ``pxe_enabled`` fields to the port object. 1.18 (Newton, 6.1.0) -------------------- Add ``internal_info`` readonly field to the port object, that will be used by ironic to store internal port-related information. 1.17 (Newton, 6.0.0) -------------------- Addition of provision_state verb ``adopt`` which allows an operator to move a node from ``manageable`` state to ``active`` state without performing a deployment operation on the node. This is intended for nodes that have already been deployed by external means. 1.16 (Mitaka, 5.0.0) -------------------- Add ability to filter nodes by driver. 1.15 (Mitaka, 5.0.0) -------------------- Add ability to do manual cleaning when a node is in the manageable provision state via PUT v1/nodes//states/provision, target:clean, clean_steps:[...]. 1.14 (Liberty, 4.2.0) --------------------- Make the following endpoints discoverable via Ironic API: * '/v1/nodes//states' * '/v1/drivers//properties' 1.13 (Liberty, 4.2.0) --------------------- Add a new verb ``abort`` to the API used to abort nodes in ``CLEANWAIT`` state. 
1.12 (Liberty, 4.2.0) --------------------- This API version adds the following abilities: * Get/set ``node.target_raid_config`` and to get ``node.raid_config``. * Retrieve the logical disk properties for the driver. 1.11 (Liberty, 4.0.0, breaking change) -------------------------------------- Newly registered nodes begin in the ``enroll`` provision state by default, instead of ``available``. To get them to the ``available`` state, the ``manage`` action must first be run to verify basic hardware control. On success the node moves to ``manageable`` provision state. Then the ``provide`` action must be run. Automated cleaning of the node is done and the node is made ``available``. 1.10 (Liberty, 4.0.0) --------------------- Logical node names support all RFC 3986 unreserved characters. Previously only valid fully qualified domain names could be used. 1.9 (Liberty, 4.0.0) -------------------- Add ability to filter nodes by provision state. 1.8 (Liberty, 4.0.0) -------------------- Add ability to return a subset of resource fields. 1.7 (Liberty, 4.0.0) -------------------- Add node ``clean_step`` field. 1.6 (Kilo) ---------- Add :ref:`inspection` process: introduce ``inspecting`` and ``inspectfail`` provision states, and ``inspect`` action that can be used when a node is in ``manageable`` provision state. 1.5 (Kilo) ---------- Add logical node names that can be used to address a node in addition to the node UUID. Name is expected to be a valid `fully qualified domain name`_ in this version of API. 1.4 (Kilo) ---------- Add ``manageable`` state and ``manage`` transition, which can be used to move a node to ``manageable`` state from ``available``. The node cannot be deployed in ``manageable`` state. This change is mostly a preparation for future inspection work and introduction of ``enroll`` provision state. 1.3 (Kilo) ---------- Add node ``driver_internal_info`` field. 
1.2 (Kilo, breaking change) --------------------------- Renamed NOSTATE (``None`` in Python, ``null`` in JSON) node state to ``available``. This is needed to reduce confusion around ``None`` state, especially when future additions to the state machine land. 1.1 (Kilo) ---------- This was the initial version when API versioning was introduced. Includes the following changes from Kilo release cycle: * Add node ``maintenance_reason`` field and an API endpoint to set/unset the node maintenance mode. * Add sync and async support for vendor passthru methods. * Vendor passthru endpoints support different HTTP methods, not only ``POST``. * Make vendor methods discoverable via the Ironic API. * Add logic to store the config drive passed by Nova. This has been the minimum supported version since versioning was introduced. 1.0 (Juno) ---------- This version denotes Juno API and was never explicitly supported, as API versioning was not implemented in Juno, and 1.1 became the minimum supported version in Kilo. .. _fully qualified domain name: https://en.wikipedia.org/wiki/Fully_qualified_domain_name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/contributor/webapi.rst0000644000175000017500000000543200000000000023000 0ustar00coreycorey00000000000000========================= REST API Conceptual Guide ========================= Versioning ========== The ironic REST API supports two types of versioning: - "major versions", which have dedicated urls. - "microversions", which can be requested through the use of the ``X-OpenStack-Ironic-API-Version`` header. There is only one major version supported currently, "v1". As such, most URLs in this documentation are written with the "/v1/" prefix. Starting with the Kilo release, ironic supports microversions. In this context, a version is defined as a string of 2 integers separated by a dot: **X.Y**. 
Here ``X`` is a major version, always equal to ``1``, and ``Y`` is a minor version. Server minor version is increased every time the API behavior is changed (note `Exceptions from Versioning`_). .. note:: :nova-doc:`Nova versioning documentation ` has a nice guide for developers on when to bump an API version. The server indicates its minimum and maximum supported API versions in the ``X-OpenStack-Ironic-API-Minimum-Version`` and ``X-OpenStack-Ironic-API-Maximum-Version`` headers respectively, returned with every response. Client may request a specific API version by providing ``X-OpenStack-Ironic-API-Version`` header with request. The requested microversion determines both the allowable requests and the response format for all requests. A resource may be represented differently based on the requested microversion. If no version is requested by the client, the minimum supported version will be assumed. In this way, a client is only exposed to those API features that are supported in the requested (explicitly or implicitly) API version (again note `Exceptions from Versioning`_, they are not covered by this rule). We recommend clients that require a stable API to always request a specific version of API that they have been tested against. .. note:: A special value ``latest`` can be requested instead of a numerical microversion, which always requests the newest supported API version from the server. REST API Versions History ------------------------- .. toctree:: :maxdepth: 1 API Version History Exceptions from Versioning -------------------------- The following API-visible things are not covered by the API versioning: * Current node state is always exposed as it is, even if not supported by the requested API version, with the exception of ``available`` state, which is returned in version 1.1 as ``None`` (in Python) or ``null`` (in JSON). 
* Data within free-form JSON attributes: ``properties``, ``driver_info``, ``instance_info``, ``driver_internal_info`` fields on a node object; ``extra`` fields on all objects. * Addition of new drivers. * All vendor passthru methods. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1863992 ironic-14.0.1.dev163/doc/source/images/0000755000175000017500000000000000000000000017666 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/images/conceptual_architecture.png0000644000175000017500000013765600000000000025315 0ustar00coreycorey00000000000000PNG  IHDR[AsRGBgAMA a pHYs+CIDATx^|U'd'72d*ZE(ԪEت:JZwu2D =Sا-v4=ɪ?# IH@H`\#11Wˤzmw18 "5`}_Gƞ+o[f}@b+U2K_z}yg12{/ʕSeϾ y)r#Oݷ_rrrƕ"Oɖbq$XXM,_V}Yb%}Ͽ8qqR>lM5Wn!CfO/smi٫u7I w ?LGSE4j`n]N @@6LZ&_̤Khl# x$cF~4ɯ9$2E!<"P?Vy?R}v={ɃZ{EIINUucHOY.-9  bd#ѯx [Y-'2KǗU5+❂4}YVrUe{|}~@0~{Yr}%$<|w3_Ni&guTI,6m+^Jߵ[TR@()a9nɡリe{i:1WǷn,%ya+9aS@/#ľhaHr:;FYYc,;>#G{iOfv]XGgV.\Wbiu_FY8^]5Hڵ+lɮl,rԏRޏ+Cb%Zڵi)/=iNs?}.C㞐^0^@+`dKKJ\Rzt 5GbwI~( *e֯JequetcA}J%D~Y[\O {M~\e̓,OڔkFKm؅hɜ?JFΩs$!>^f}@^ys<55;䞇8S6/Ͻl+,EΤi B@B(`dˏQ$&]j˿+~O0iX=ޙ"K 'JޏFՓZZGb]yt9Y]ztnҘֱz|i~]"]}8:gi ߲Vd|޾AW.&ָK5s"mj['(˹UX]^:G Ҭ<$55E{MdW[qҨA=] ^sB=8ҁN#OV{J\׮طtM;UY2VX|3EEk-}Fn_t41->e͂x.;jha)4[և\Ew do],cuI߳5OEͱa[v>i?[x+9W˹_̖ 5hļ[-̚|4 |Z᧜n1q 2gxb$y;7s̰A7Ng/z@ 8*_Jޠr/y):i ɊF tֲE/&8ۇRTNK+1|3GorrՓ&|``S?or# + u9R ..l۸WsZY)v1<QYzHA@ɓ~O6Nڛ  opv 3`< fJ> y<|t5WdӐpz68avPN}WG7nhql]Yf%=REGY| cyTYvG_0.u\=IN*ZlBi(=ect@@jFE]ŷm]f$;n1_z?lt´&:hU/ŪYͬt.+҇?\d;(zz78䭧ŲTW쀒-r>YwEy}KzY l E m6*9TaDn݅kK|\^ K>r8D{@ ٲv3nmmP=2OS   n'['.++ K}2:@@!e  sXFX,#, pV͙   @HB-L   $[Qv   0#  @T lEe4  Zd+ԏ  Q)@ag   jP S?  DVTA#  @HB-L   $[Qv,KK˰@$[C+i-l )ak@ H@|ɕR4a! ٲol vuILLLۥ T  @1-.@@bJe\".HZSRҪlT ɖcD@ 4l^&N@c٨U5@  x Erdrpo:0-M< 8Wd˹ @3]$PM(uA Cb}cE@ :H#ΌjK>{{XD@d$[\ d˟$\@"Aa@@@W   R%   @5  @d+T  lq   ! *U"  $[\   @HBJ      R%   @5  @d+T  lq   ! 
*U"  $[\   @HBJ      R%   @5  @d+T  lq   ! *U"  $[\   @HBJ      R%   @5  @d+T  x   @p '!(!xf;ürI @IV۷֭[&Gd+<δ ppq9HeذaRn]_*Y͞=[ 9%)/b@眀`5녵KÆ &Gd+<δ p_,l޼Yk_P h-G#X~Y7zh1W3f/]TF%Ç1cƘƎ+͚5Ν;o}+FСC%''|33gmڴ'x¼Gi+Voٱc\{%K.]k׮裏pN93r@   @8ƍ1b MZ,x=<ܹs=qqqW_}su_SNCyliժÞ3gFOEy㱒!ϼyh³fO?֭>|ᇞkƴm%t+93m-k4im޽syyc%r+y3='M[ `f+Ԕ@!.YYY'T鬔wJ䦛n4pРAjmϥRtϖ5j駟.L:U7o.V&K,+9KvdΜ9r}ٵw}!mf͚b%ib%vfVKgmV켊 W@ɖAo@h۶Ix߁O>-[JrrqUCjժ^Jf5MtB̚%3ɛ~Y3S2p@iݺ,_\ԩch^իW5fѣts*^TA"Q&@eg ʕ+-R=mJ~䣏>2 O4;=wb-?oYnٸq/,/[f+77לVf͒/\?KV=Y @Ǝ+͚5Ν;o}BkO<~Ϟ=Í̟?_?{EgôB2Bz6ȈK@yGo>{-@6g/q e +@g vlI AaK_$'4n9lm 3XF̸k@KvS]ٗY5=5, H?zD.^ݱ),ҷHZ%>F@ l9$}ܳ}T=>$S) ;HWFD@톧٭`Lڿsj\!Ύ|72@ 0Uqèaƌҿ-E\ÇƗ[t@*[{ ԨSD-@$[7̙#^ziԌ7q7o^4 F@-e|#YVӥ۽i*8@H&Xbt99: &w>wkǦ/]UY8@$- 6l ͚5xtu˖-0=EOsjK"4C@t-yLM8}}1eDe{ޙPҝ !D@$[&@; TX2(svKg*55p H, nisړ~: 3@J @p@Mޭ{K=b~mNj @H%I= Н n(+caZ:/   $ ZvVd.jm [غt@y$[΋=F(@\|TYOl[_l݁PgS*VNA@t-@Qv=els=]@-@lQC@brqл3@XVIJb>1|)((pɖBF@*"PAsؽM/܁ZxfddHLLԬY5l0ٲeK ՔxY+8{|3 'f͚IϞ=_6 :T͛#8pX1vXc=&[޽{˚5k|Ǿ{Ҿ}{iӦ[.39rtYtM v ٲC @XyZ;%:E).9s&ꪫ$33S^|Eիu]&Izwdܹ2|߫WO?]}MթS'%֭3eƌO˂ ̱{zHMf&M$~L>ݜI&{$|#,@ x$[&@ UwPo)SɓeժU&[L'u&7tԩSeҲeKIHHAɄ k1HitךE8=Og4 ͷ͛ˢEdɒ%ҪU+ĵkN̙#wɻk?0zQ)@awzSy_[N [! +Ə/?7Zj{ӦMRN5 Hǎ>'u]WkΫRp͛7Kbb䘯;Ot &e˗/7hտ3@pɖ3D/.?)Z\}.فoa:r "3K `٢K/CY/))P-zə 0@3o63i7n4t`/^VC@&?vA%nTm۶5ZJ}.On9]t1j92Ů]JdʕrK>}'};SeEg@1sh*..)9x]VZhjB[vĺD?D֢]}f+)K/zmFs6XkGjz#wlllCUr @ AU~} JrrLsU?EgիgR@;l! 郝~&[dD->>|ؗlo8i1[5e&_ٸqciԨܹ'u.Oygi-4 >{?K/仩<җi G@!2`(RGz)_D[t&A[N΂=fViIwa2IKgɴhm҆&V/_͌/2bv4  ZR{}YWʕ}-z]23W*U27dk믋-%'zUYOn?iC38loСC!z@@H lEH^,\Mלzt!_:i]v 2U4Q&2k]r[ݩJީNj,>bɒ?mx.5cG@|@_9cn3XtOO|Wz_lv!zGh߾}f_2 V;zJynSN1qyɒGK/ 9ta|Fz|9oӧ,\pW)vK|tYǓ{j1ޗ\ߡ|#v_P⌺I'ݔA֬Y#\uU2ݥ\}N.tajĈon6,}o|?`wg8z錫n?(Nu3lw:x_=‘m;%o=D7ޡo,D-%--MY6(!kyݼ|ryM%c:hoB׭!fk֮=z֣<د6vmηf|mL8ѼfmnPUk uĚTVc%H5R fJ̱f<kfc"d}2Z*."uaJu٠h3Z5:3e%=E7V,FZZ=No]Uo̲a .˳֭{³f͚ pBCa2yYt_$[anɖ. 
ai%vohkm۶5޽{hvl.~IcE-AaIx_BqI\r{ iSL1p89 d' ř< ֲZLmڴG [~ uT7u0ѢErE%@; ݛ V];k74,{8 pjGo[\g{0^w@( 96} 0c#{gviN2Q YT {xFvr'G߲qP7 l߾<0aÆˈGU\gQn`Iw20ܑM 8  Dc+uV^9g2BgǏ#  MHl   rv=  Td˦[   l-gǏ#  MHl   rv=  Td˦[   l-gǏ#  MHl   rv=  Td˦c$;;ێ]O t@@-. 5k&6lxtƵQF0=E@ @ ٥;w.ݡAXvh|)   k袋d̙g4Pڻwh:cF@B&Jjb##n`ޱcmV,Y"mڴ!.Y.]ȷ~+-[tɨ; |W!8#pp3[nbP^=_*#FCU @^^ƕD+ԍ (VdOv-Ǐg+ O-Md٢MRB/σЋ}zcZpJ'b3[yq%0}t8pqɓ'O?Ķ6:t{wӌ3Y:x饗h3c +i׮祗^X J, E]T8п P#<"V7" L`jR dݰbK=|תUߛ6m:u*ؠAر|g2qDΚU*v^*U7o,c;<%Y3VM_hSRR|~u@?ȧ~jo+֌$''qjE=|Ќ @$H"N @$l/ӂf=WIIIfh{.Fg5  Xd+T  )20ĝi0 W 끮q۱c1)Ž3rel} Wv)`]Ϸy;Z7ӭԪ!\: CLxsM޽YOiٰVԭ 5KRB'<3]+g*IiV+ұsTǹu6?IkT@<>lu݆ /4֩JpȜH +Gm:JChu`AnLgw6mjjgr.Ow>|ŽW1e{DcLA@ $[PM[ y?W}X _eTtFk yZ5"͚ͬX}>{رҬY3ܹ۾!zϞ=̓w!C &M.u=ޓۛkiRR7ph򤿔Xl1Cy8pYn~sn&={X}>hgOR8.]*FǢ1cJ%Xvifb1XS@>gZ+mڃ&kyi~}zƐ'777hmP}-v큰vσ_~өS'ϡC<[ljcxukYJSIn߾c%qJ۲rp챒(a5)((}9ƍX 9zQFbX966NS\\W_X+vc5}_"@E*s {zKv=jժrÝM!a=ZNI+١? 4H'u]WꅢEuYfɓ2S h5s   6m9BO>Znb?:bt/]wkׯ ҄?33Xeeen-3Ff͒K.$a 0@exr7-"7ntSIjEgު$1EљgyƌZx>kF4s   6m9F74}g:.+f塽{6\kNz!wuT\Yz%Ҷm[.3%/x]v5;pf}XRZֽTfN4iӥstS/3`:5No}k9@p 0h\0 o0Jq>TX2`E٤Wh:۸[EKIZ'Y6M-EY.5ꫯߥ7R1׊"'2r=e' p99z;V>ӭodCzڟ+2Hn}#{ $WXU%AMZU0*R?x󠬰vO?ԨQ<8<%R1Xi30h}_:3' CҤX Ͷn MDuoWڇFo}{qg)A?.R1@0 :$T,:/يθ{Ԛh}ٲD܈69qaF&!qu77nDո @$[ =;3Zdn+~"g  )@U&x-]:HqpڟA1@@$[6SapSX?7 L@@H6∣ ŝ[wFQ! @$H"ﰶu{w;>-VOჅU~NvyڮtvU%>.F=ٝQysTn~ @ $[=P)vS`^߰k,Wfwd>:({sVwTʩt+8\K$UB6# $[\;MٺWÙCO䐭q5JK8~$Y2WI6MM( Zd+2$yZ2#e[/I!OdvG(/>~ok^Wkoٝ#k\.2c~!jLy.#ֿ!7_Zyvn]?]-?w|1C2d!b#׃f alE%ѽm%Vݴ.vĞq" يth\1֯ ][,b uQĦRlھI#Gˏ+u?};wtڎksWϐqco0G왇GȜɞ[rcYBbއ]&,~ 8)p[6U?3+Pfo3 %ݢ   2mDn;/%Eކ~o^gjeɪoHuȕhfbm ׃O`ʛ4aV@t/=Wer۲v֍Sl?: N-w5*F,nOҭxE˦_0طK^z/19$@|U}d-JS 9~ok׹Ԩ]ߺ*ںWXK􏖴5S;[H̫G2+*8Q@ZuYAd=+YSJ+ jUMc%z@p X?I0\FY;ߛfg-|mzfjn`6x_ie'IRzblHY}R)77 !7k[4q{&UZ3^OYDf~'׎|ļze[x1{PwJSig"{߃/Eu1)Y]u3 ܣσ_v`ܹrٳo߾n}c|\gC{Aqli3yC|LO*&MYVԚ*]qU?|_7$͟dht4yLbKsM],M_vO]/ p灓"`4h >\x y.skx_Yt^,#θjԧutv^1~KL{9y_oc%EJ޿UF]>!Q[fOdY <.…裏_~cIjL/7 ~ @(XFJݣue. Yu-+Q4hZ,!zt }Vu%66ĪuRŕl&/Ou^iи$%Ո2-*+H :$V+W>l%7vә/M̾ eHBK]V$O^؆Y7DKzgJjСCM3B;qO߸@F2@8@:坱:Y/K3uX X-dž#h彇kĈ tR! 
@4Ѻ+=Xeh7M9E(Mdk@b}֋/=Ze ;ålp@ *K#N@D'Zj:.A%2@ jH6 p-]:زeː {BJ @ lE]0 \M:訽piFr  Z@p&ZVyw t:å-|r $[\ (:5(f%I= @ lE_1 ( ût0܉.3fpEVi@ fG`xw)> Wdk@ _ p+q @t lEw= `KHl/#Fc8R( ^/YQ1-r觗 gTA:W@kVHǿYmMt>uЗh/םsSC6HE/zy{p>o]:?Ќ۟] &o.Fc?a<@.$[v(Q`ҭkF q** +3Z+בMɌ5Ⱥ}O:}`zd,\P%R4 Rsi\OerSyJ˓z7(9\kguHʩR%]Bˊ G.@eEq8$ru$?Ͽߞ;,:)8RTF<c `WuIlL%)fVP DKgv^x&8YƋ2TC2םx\ pzYٜl[֟׊Z/;V;`y7ЄsƌŎښ)֮EwHNAT^]N@ C+m ِi1Fr sh*'H* $5>՜x$?GزL[ q W^-[&q:Gt]GHaKftk[7Jxߤ&LX{DzYu7;zw븇K[x^/$0~^nXȈzÚ՗LV0.)ԄkgVLY^>ٴUrIiҢJf9d!48_w_okG)~gVo_VJ&iދ> [PN;}< ,@j 8:^f2B]BhfK>Kg׋e^=f[2Z3Q=hR~[Lz:=byH|}6Xl+*5;n<%zڴi2qLruIqRJ(Z4Il @wWsuX$[,.\!6|K`y7ǨT)FԬ[[tk VƪJ Վm+-I\),Y{8rܿS7TM"IfyD},>ZJ8S5֗|y7(u-<0#٪"ILɖl>X|[rނc Tuނ㒭:/M [ɖn}sOY_E[PTNJ\ܤYN趒i=-3CV'gׯgH/#iIۥ pEOCw[zm&Ѫ#5&V5? <ke,-kVb̌.6͹'_6NCߙ[>Y\E']& ӬI=DHJ|u\ҢVvn۠YOլq[VlX_xV y3}]XfR@k)aabh]hw&Z-9NmҺUT6İ~$;gɊ{17vh<Ӻ'SI$~xqm8o-.[ ˶R~e9My~S9S߶])V!LO$$JJc[Ϟ\C>}9W,MIo/@ $[ԧm@RK K!VpM2E&O,V2 -b֭tM&SNK˖-%!!VB8=Og4 {h}͛7Eɒ%KUV&k׮̙3G>˓w}w q &3\$Z'FI7]yѣZj{ӦMRN5]4h ;v>L&N(]w]o^*Uo޼Y%''|wyK5)[|9GB".@@ xgt#R"*5گ_?PgQ^r%B.uh{赢_wyWTP c et̞\<]+pؙGKծm)?͒m +M \σ͊G]tjժU2?vCt _鿝YFÆ %99y:[EtիW,5W*"uV=̖scG@pc M HwiRH-~O:>כ6mPR pW . 
tP5-̙rAg@[ l*t@P>ػtW^!Cӝ>F7,( lq DKa0gtFK̙B9_`BL( 8E$}W0-' "Yl;V.\hcꫥE?OҤIML.]Pw|G}-=YYz>'k{oFkmG);waÆɊ+1" 5-!@ {g^0⿀nϮ;j23\s|C+IRwO?/R}]Yv2e#<"/ܹSk6m4i9f[xMFK5G" 3 P@YR}3Z駟.gy٪Q҄KyfYIa:cٶmItvzZO-SN5;곸!KvdΜ9fLzIɖB@(ϦE-f&?v[tU;fҩS'xC:vh?]5-z>/+''|wy2p@pRNp߿❧@ $[!@J@xgtd'Vc;^~?YwnjѤKg DV 0]gn/,/[Νkft"ɖ"B@*$}6͈+:ȌV:Y'wKaiLSO=%vԫWlѻwoiР$$$出P7С\$k}'|NN/C cp4mR ̞\<]+pؙGKծm)?͒m.m1>!^w{*WkXg40X~xx[ҝ>ƍtӌL9|lR4袋mPnqzVѢ3]RÊޗ\rql+@@{K nAK%66֗hit }Mgtkw}n&^ݻTRDKU4ъ"@lEK' p1x3Z:h aÆ9]r{-/N ܌3@$pi5)$Z6.܃l5@-8@p}+kCG@@(@Ĩg@z_-\ƌczOHfͤgϞ/׆*bb Sرc=nZz-k֬{IM6n-pȑҹsgY6 YdelQ/KfΜ)Iz*̔_|Qz%wuIy;w̟?իO_$Y:u]n:~[f̘!O?,Xw^y衇dڴikҤI͹diM¢>H  kH\ZȔ)Sdɲj*pr-[nrM7IZZL:U,-[4hL0AH錔&L^{[4tfK /|Kk޼,ZH,Y"Z2I\vdΜ9r}I^^ 8vaCZ =ݰbK=|CU7m$u]!UAұcGdĉru#۾}{TbyfILLuy.AԤlM !`  $[Q| ,5lѥ\R,-~IIIfh{Zv^&:U~}Z7hذ$'';OgtyhљzꙥX<\xGm#Z.S`?ˈy\}lQ+I,: f34) $ XDKphM6D@Kd+h@@@ L$[a@@.7E6zO@pɖ(A`޼yfHݝ+dh@Vȉi >|X>Ѥ YdelQ!0tbeŊf@f͚IϞ=CGK{g…W_}hBƎ+ӟI&a4{Ҿ}{|-mG>yȑҹsg6lEgv郐qҥ2j(>|3&* @@}$[)#2O?]}3j}VֲeˤSNfywdܹի}:4m޼-[Gr=,=y/"G޽{ӦM3_&M?POntɞ&K$hHƏ/5k֔)SLyE|]wEYD. Ed-dQ+p5Ȍ3̬&=^{yԩSe9X 2h 0a_NyffFfKN;$hZ>CkѢEdiժIڵk's̑^'md}֭tMW9@ɖ"BPAұcGdĉruי6m$uզOAAA۷=}hVjbi•(9993[n-˗/7jտd}UV Kd^7K@gtɞ>,qp̙f+;;[f͚e]ʧɎ~_-]~o0`Yr3^rlܸQ}a/,_Y/]zXZ)@@; l1* 4%}NzT۶m&ҽ{wuvtJ[t9.ڵtAV\){>}'};SK>C@(㱊;>2,~%{|T::hM3tƫ~%֩3Ru-W{:gsVѢ3]3K )eџ:r (7m:@e g2 /-J}% pEuVtƝQG&B%ZPDKϭR |he1P@@E$[. &CA@@l'@@\$@`2@@ɖ}bAOpf9t@V#@   J-WA!  @H"G@@W l2 @@"-@>  Rd˕aeP   iHG@@@$[ +B@@H lE:   reX  DZd+}@@pɖ+ʠ@@@ $[#  +H\V   يth@@\)@ʰ2(@@V#@   J-WA!  @H"G@@W l2 @@"-@>  Rd˕aeP   i G`ŊcǎRk}GAn@|+*7:Do""2"4@ l95_,k׮-V?nײe Ju `G>/ `Wd+޵jՒk^uAFaZ SO=d+T]ҪU+m&ժU akTvk??ңGIlڴIԩ&NlAұcGdĉrukuֲ|rs&\/ՃRRR|Ɗ㑲oIޗԧmCd+q~7M ~{[zP~RC]zx%u6K&%%~mUZx}f n`-eRKx_Zn_3_) |'zgնm[iӦ}Z `OhLg1|I۷;.rzKyYgjޗ~[ Z 4#eӁm^9)Gs"%?@bl&H8gz͞\<]H۰SGKծԥb},ئp! 
cmU vK XQ+(1Hl׸TY;ltfD4qFW$Z}/ZoY'g~8 ֶGr$ۚݣW ZrfWX'9IWI,= CjC>N(.dK;vr9ʬZKΨIJC#٪`pUIՑe_Hb} 6غ%s$&*I;dˡL)Lڰ 5EmP_ٙ8vid.o?"P˶$Zy[ۙ[>d1t(Py.r[7h9<%k,dgj$o<Hg4њ~l:xD+8-ذIM]( KtFp1f˯ Mzdr8e#_-|:uF!oB*|A `$[z.K@gvgsנM-t*|EhAt+0pvWT % D$0AН挭 n=Z!h& fܣU!B۞C D$])<ЍJwS`?:Hqlsvgd"lw dvUt{w;@v~srE7PW}B?jyOOVNF \;՜-iuMwo_W?y}7B /?OroȽ}.%k27/ fԅ8Jdp}9wοˁ7]V*+V|,WBB m\z 6E"5{(]իH59Տ?lsx |;<}͒!##\v˭r`y{eVq H8/7O{<ڣ2dԵ OIdEY/جCëcɢ$rxsZ8<{2﷎wp۵s[+ Ͽ-lQ&_gy㱗Eg*:U`_$%>Ś٪zZYK{xg=ߑ^g; S1ˤK}=?by\3_&y{p*v@Nv5a_ gs^%GҬCqRGuZ3 ΗG >ؽ迥7ƌ/gΔdž_#c|ز]{+Ryd^[صK^r+ˇ?g;zb! HkǦm QJZٻG0>?Oλl꯾-)//_[BX />M#Ix7cs4ƿdl)FnN{1b+Itϙ+҄lͪtU' -$]vp0Ue7ߖX'_̞+[앚i˟>) WdoR%[oֱ̼YsWяGWJٕq@46*B PkטjZҡgb2T8YF<:j5aך{RҬ_ 9Ǜ0-yy򃕨}9s/=]֭\),2W󷏔-w'O&s~KÆdMݽuLϋV}:TG"@UqۆߌժWKb*ֿƞzу [˸O^K|˛X\6Դ~kߕS-Ƃd`ѻ5y7Mٴy>߱{v얋+( P]?m;I e}Aֽ[$!hY;[|%IS͡wx3s+66~IuִڰYآ^k2Yd6-[+1@ b6k6hnCZu4KoE|8M{-s+v|֡Cr[o?>+ |U+͟V|!5՗/T5o.;~mXZO*l.Dil%v_oo-bx4Q)@U$ o<%ziyZkz{Y7vy4iLRK5c2vq}F^fRRo-_MAyGdrd_vl_rER2nf |fvh+ k֐Jgs6-%MbOLkI׿y+7Գt{IZ5#~̼S'@0NZ3H޲ZI/MxJ*O=UnY|=fKF-WN3|+lC g.I{(reTˬs:_ W|س[^/rGGS25˗:@b o cӬ0S:5[$!j[^I].UkV+=Ȟ%-s䮧,<5^:Y:Xuo>Rɺ_ ~b5k\*7 ɭzMȿ|dZkjʊfrg.8M93bLKެf3Pߣ9͈< ;9bdx)>>sCa.id%U:'+?'n4[7vx~I.\"7yvpײz?p>/lzi-;G Ru#ݏ\tu\|?[A]a}a{hm-{o=֗.KM~t7w*ԑ|nf,jյ?L}y /}RtxݽvfVJs>9/$<\xu2W|1m4>TGEKov݋"k˔.3} O~݀/"L:t祷k jכx8(X9%dΖFKڟyyqSxU/ٚ/###]/ԫ+yMg)c .@H#ӄgJ-1L}ɹr{ .9{$95YV.^._Yd=khh,qyٷdkSfZr3${`<[tiwJuOOi=leSZb+U+{cgIMeJurĺgK8J`mԕvc Nt .  ٪^Y;9RCPήoShe}*σ=;IϔI~^[WJ7!)Vpx"HU!l@C(N ;bluUrE]d+䱩NH6_ZZxJ[:@?L>!*:Ĥ8Ypi!J xpKd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOF  6 ٲI   Kd]d4   `-n   HOFQ&#'Fp@pɖ3D/@R$+3&}8WRV!,@g x9/`8?[SIzN rr;D@ɲ{ǡw x{3ZUHM @ lEq:8_^*s!+p`A?5@ $[n"c@R=IkրOUBfgJM $[F.h߭lZOvn;0H I3G Ndu!e@ m:q9e[RVv Tdt;[cR9"q. 
@-.@59CGd5drPrA|=#yf&ow3I,MRAZ>@ bh+_GKծC܊{=xH;q;ሯncٷ+zWds]¥ρRn41ߝePM8"9 #F֋?Iq`t3 s'_zQN!NZLUq2kùL@U1?ζ>xWyxvf}^v<: +Kzͦ"IR)6&8*G-o<8S"%7Rhdq t,# T@".3[mzr-Lxί'lrDe-/)V @XHL# \WHV-fje 7K44ߏ㟼Pŀz@J_ @W&![T6nƣKΦ.@)@̸k@Zwmfn0hP'@"&@1zF.)M06kd`@ @8@PFu @$H"O AR-I @")@I}Fn )̭7F Dd@z@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@@@$[ -C@@H lER@"'=SdZC9OVPڢL#@HI vH "X4Mٌ`.@( ي 3D@ɩ&yNjF3U_nɮ퇤AӪvƇ `3- O}RRٙi*kYXؽA*,@* @U' Q#@5f %qiKJFTs! -Hl: Pͪ;Y,,# Gd+8Ԃ `ظJҸEuӓTkš*۠Wt@ ZH5p@V5:vo2,@l9%R@/7}KZ~A Jd+Tԋ 1/>^i@@*pV chuthqq' i׋Cm8"6dȾ݇%9nx.}Gԭ? b݃uTj5HH6`93#"MӨ8:fx@T-_Ynٷ0qۿ'K6W|^ts@" $[FQ$?񯒾 ]A*{3MEe@@$$[\ @|6̍Ѻ˕KpɖG@[{89D@gt)(@$[ B*apcvPImSٶl``@8-. @ tA;v -! e w >΁1*@$[."C@9Ze 9ֹ ~-ǘ"  @H"N   ~-ǘ"  @H"N   ~-ǘ"  @H"N   ~-ǘ"  @H"N   ~-ǘ"  @H"N   ~-ǘ"  @H"N   ~-#q0@@ يx;rI)qȾi@@ ي;fɩ r`_cKG@@P- T,w}? _`s咡ss_I˦ é  z@FUdCW`ApZٴʼn v ٲ{Td| @ -eؓeד#IZJ@ يθ;n՗Mkmw:.{lZyO oXe[-"_ž/wrR1.XZX>ABV#@~ $4_lVKA>[A@`s,dӖuNJƝpզ'ֱ:L[mef8+zl P @XHL#Y7Uι8" &;6ÇrxW0 se~w^dr; W Y_eۗұw?Fmε]C@XB壥j!n%׭wl9 veYʑyI~z|PFid*^+L}fIi2o<6.YhUjr4@o2="園%f6g֫Q10 ĸg00d+ .jE,a( ?|ֱdppAL| mؓd+Lq! ;!rg\ 6ݲZ1X=m讄ǵ@ *H" p/ N9Sf\$=ew؍ZHSsn6tӌ;Ch2cD$h &SEf䪑>doÍcۿݘbs6 @lEI& &÷`׽aTpMZjx.o]t@AF04KPmTek 2l"Gv 266;a wƵQ1UG@@!@U4NA@@ *K#   *   e l%@@@rlS@@@H   @9Hʁ)   @Y$[e }@@@$[@@@@,>   Prq    PVYB|@@(V98@@(Kd,!  
Cdh8M !1i]~ [?8 Vi@jp7I{aHh@ P@8p@I5]GJub  ي:m"ahtJu s4M@(V98p@ZDӰӺMY7UN @$[6 B-j/BN=FuEGd%A\ )RQ FC6Ĥ` Hl S@="Naux0CSƨZdiҲtLՔD+4ԊU cHe'd,-UF^brA7k/PłwW[vN!_d1f #֝ȱ̌76|K֦?Dɖ=B@g{⪴Krʭ 3OLCVP9 @ ٻ%(Im9\*%(|bR{|;o $[F8^ 6 IVAWq2@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ @ E@@pɖbF@@@$[]D@@ l9/f@@ 㱊.f,-Uv|^goxqs g}$&-Ánc=#qE_B݉XCRI|kI@LLcw x63[N}F@!Tu?rd"ɳ-'&ZZ75F[ΥrW%s[=h يH3N@,?e ̭\0j.`RW4lEC# ?)i2>EԉkH\Z @=ZDҥOd?'B@t3 -1 s^+Kd/&B@@tA;vs` l*@vޝN#{90F@HBJ Dۻ -# VM@@-.@@@ $[!@J@@@dk@@VP@@ @@@B @TD@@H@@@l*@~~jD@VP#5k} 6Ll_q'k@@ $[7 ڵK#钕%ƍ h_C%   ي=-H ..Nt6jٲetR5j >\ƌcFOHfͤgϞ/׆*) 8PVXa>vXc=&[޽{˚5k|Ǿ{Ҿ}{iӦ[.39rtYt[   (p+bpM8ƬYd̙ꫯ窫LyW^r]w$wޑs^ZN?tyMׯ_oN:Gbd_~m1c<Ӳ`s޽{塇iӦI&ɇ~(ӧO]ިI&{$,&WDp3 Td+ H*R? -DL"'OUV[n1]֭tM&SNK˖-%!!A $&Lk$R:# ӵ^kOZL̖&a^x[k޼,ZH,Y"Z2I\vdΜ9r}I^^=?<)D@ =ݍ5 Z}T0ĪRb5h"a矗=zU7m$u]!UAұcGdĉruיcdIdپ}{TbyfILLuy.AԤlMnC?pA@t`dތ,/뽾_38pTt Do'DVbD CK4aG~Y*]zx%l.KJJ23TZIǖU=`z&gz.C2`9p|f&mƍflrwcu+77׎$Q'>~lV'Y׵ySdڝa{Wso.Cs_@޽;J.@t+Nr3~nC7 ܶkB{ڶmk6HMM}jz_!ZU{cstbrie]v:ʕ+/>}ȓO>)}5_wy#;AxlVi$)rl)M.]H5ENMIT{(DK{OMh <9,yKIݚMN%|1kcѢ3\3_AVNmY[|YV#?ج{u /D;hWgtFKw Ǹx",VNFZ;M?H<+qpJS\/NTI|Y8*5yкde:7Ӥ[& rמL}4ْ`ֲBMwn"7҂~5k{kYwR,_lo9ݚz|G~ˬzudnkZO:t)&vZיG1wJKAŢ<͖Veֲn56ԖO3XMklivML=3Dm5KO?-ovdK;n8sO>?R7nҳgOOKbH;Aev}p/gy9><(?c?|ɈDo2D(yrYXGnyC.LL}]OPz9d :5Rdŏere%ZȐ?#SNm^K^I^۸[_D$.ӢuotU7q=ϒS_*$MK:K^6IJ uavԭ6|~w+ ҁX7~,sfu|QvF&zj,/$OiND%zNԣG9묳D/R@^hP|w!\4}v&]|4iD.Ӗ~OoT%W_}m|T"eOZ_q`ře԰hэ+ο.A<=W쓄Xiި$nӮ v1Zt#2L7(O)3sʺwY~DA ي3 VNoj׮-'NlQ~믅;`TN*6ɗnۄ ̡ݺunD+ѥ2p@|\%iۢgDk;KNZF VЫWI.HVuOH]J3]DK_Y2dC+ov ѲK4G$H"OۮН>T4iYJG0F*}@.C<=_6m:uzs}$@-{ k"kFDh ي3 @7xC^y5rHywf͚ɑ&dh-[׶Vү_?9s3;;,CKS '0͛voZ(>~ ,@ @8Rt aÊ}W [je6hӦ_ڵ+ѺnݺRJ1bDz s֫h<#`FN@JQ\ ,6$ P@zzI]*ͪ\5\~.PC"` 2‚F Cgh$V@\@OקM|ϋD1L@@ $[ԥn@@Z =G@@P lR@@@ jH6 @@B)@J]F@@ ي3p@@V(u@@Vd+jC@@@ $[ԥn@@Z =G@@P 
lR@@@ jH6 @ ĥrj(?;Lda 8A 6I!P)F9Sd+:Ψ@@lJOMi l9,`t@'*ÏNU}1S8S0jC@@lr:t PsDNM"6"D-'F># &Kj)]G@cl?r X @@ ÒPTOa-Mb+EA$[#@@Y;$w ?Mw&a nhtоg ٲw|  8T{8   ٲw|  8Tdˡ   `o-{LJ!  CH8   ٲw|  8Tdˡ   `o-{LJ!  CH8   ٲw|  8Tdˡ   `o-{LJ!  CH8   ٲw|  8Tdˡ   `o-{LJ!  CH8   ٲw|  8Tdˡ   `o-{LJ!  CH8   ٲw|  8Tdˡ   `o-{LJ!  CH8   ٲw|  8Tdˡ   `o-{LJ!  CH8  h*~ IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/images/deployment_architecture_2.png0000644000175000017500000011255400000000000025547 0ustar00coreycorey00000000000000PNG  IHDR1 AiCCPICC ProfileH wTSϽ7" %z ;HQIP&vDF)VdTG"cE b PQDE݌k 5ޚYg}׺PtX4X\XffGD=HƳ.d,P&s"7C$ E6<~&S2)212 "įl+ɘ&Y4Pޚ%ᣌ\%g|eTI(L0_&l2E9r9hxgIbטifSb1+MxL 0oE%YmhYh~S=zU&ϞAYl/$ZUm@O ޜl^ ' lsk.+7oʿ9V;?#I3eE妧KD d9i,UQ h A1vjpԁzN6p\W p G@ K0ށiABZyCAP8C@&*CP=#t] 4}a ٰ;GDxJ>,_“@FXDBX$!k"EHqaYbVabJ0՘cVL6f3bձX'?v 6-V``[a;p~\2n5׌ &x*sb|! ߏƿ' Zk! $l$T4QOt"y\b)AI&NI$R$)TIj"]&=&!:dGrY@^O$ _%?P(&OJEBN9J@y@yCR nXZOD}J}/G3ɭk{%Oחw_.'_!JQ@SVF=IEbbbb5Q%O@%!BӥyҸM:e0G7ӓ e%e[(R0`3R46i^)*n*|"fLUo՝mO0j&jajj.ϧwϝ_4갺zj=U45nɚ4ǴhZ ZZ^0Tf%9->ݫ=cXgN].[7A\SwBOK/X/_Q>QG[ `Aaac#*Z;8cq>[&IIMST`ϴ kh&45ǢYYF֠9<|y+ =X_,,S-,Y)YXmĚk]c}džjcΦ浭-v};]N"&1=xtv(}'{'IߝY) Σ -rqr.d._xpUەZM׍vm=+KGǔ ^WWbj>:>>>v}/avO8 FV> 2 u/_$\BCv< 5 ]s.,4&yUx~xw-bEDCĻHGKwFGEGME{EEKX,YFZ ={$vrK .3\rϮ_Yq*©L_wד+]eD]cIIIOAu_䩔)3ѩiB%a+]3='/40CiU@ёL(sYfLH$%Y jgGeQn~5f5wugv5k֮\۹Nw]m mHFˍenQQ`hBBQ-[lllfjۗ"^bO%ܒY}WwvwXbY^Ю]WVa[q`id2JjGէ{׿m>PkAma꺿g_DHGGu;776ƱqoC{P38!9 ҝˁ^r۽Ug9];}}_~imp㭎}]/}.{^=}^?z8hc' O*?f`ϳgC/Oϩ+FFGGόzˌㅿ)ѫ~wgbk?Jި9mdwi獵ޫ?cǑOO?w| x&mf2:Y~ pHYs+iTXtXML:com.adobe.xmp 5 2 1 2@IDATxUWy !(`5 ̴ T-0~f ܷooք`$3D} ^|m X Z iBL"F!ښI Y39>wgzwmV۠IB @bK`Tl-p@ @@s#@ @ 1@̇ @{ @1'yb> @ @9D};!@  @@ cށ@ D= @bNQ|@   @ sw C @Q=C`Ν裏 ĒXZÇ塇X) KO}|)=F֬Y#c>T@r ؼy3Ã]@AQ~ CMU BQ F I}z@MQuM@7K!@>TT7BIQ~u@ %ة4nq1 >]Mk oB (D}x)}ZV^-ǏBn)@Uz_]VTرCԛO h B?'^}Knj`ܻW!>QWdo>AЧ^ SNb^tvvڇǠ|@26Ba|B~pthWe͚5xcO_kQy}ԇI B gk2'8r5~0 WaO @W7񚌾 g5B9IV;wZoibS4ֆx 96l Byg]sm8&a 
{n&δ5֭['=h ZGQ_f~xߨKeg)>ʷKػ)|iM[G@HOC A@0O}_s_juobBu.IBi942'NȔ)SFXGA/P7-*Bzg_?[J P+D}RZ)/q.05ery۵[ c?۶UٳYN B-OM}L`芳0S* Dŧj L_^1SRu,K[[X`x*8f̘a יHHh0a2z![ٵS^4_5B=˨Q짆"ZQ+ Bث:v+{VΉ{i Pes)UO 32 R^VN.9k8*vF Y]g#i@NQ_7ǿ!wK.YY3/ ~}*"// ~nWѮ^?G.7\ow=~9D 9]La_̃o 4OQ_Krygǯ˘K Fvqk^@r,}Y, @A*~On^E֛/?㭯S + @u@ԗҟzbsf1K9AoD[_ j&!@BQ_NC]$蝸[_`-m}v~̩Z}M@@8üg/[?7'/>Y0",td 9@s'@ D}z/,/x8:=e`C]n@qw{)?O @!@M@{yuƛ>78 y߳p<,Gy҂'Cz|Y/\PM8@ǡ =>)xuƛsz靸|֗kiy{I{FN+]C23{ ӈg񏰯7 @H(D}cy鯹V\|?oro}&vj_,zEtuu9bPU1Oҗi6E.xcy@hDz7K_֛x=0=1SӮQõLio7ǏTB2!c] @h)D?K52-:MXsy;lClg?k ֕o{j ĉ>[E^ 1:7KW@^kH_{<\hHE#Mm 8H楿\,}^z絷W]_BqL}4>O}t>7#)KxKW};p>T,V (H(xu^'Vgzw/^ryƼ2Xf@[t;O!NȻ,>xt-p><9)#m3t :zr<̵=}풱zP1{-fAU@|U 2T^+~W^?և|#V 3nHa/#&śuMXW^ӟ._ }>/g[";{a>l*ӯM0ӾP $@EmY#N_Yr'_N La_4F[̧ZE2U8_ xm˼ꖌObt}Ք5AUm8'!@@R+yL1]䋨r&No=#[tS~><å*5 r* ,2}X ͼ |sB~Q|zi< 󐱽/[hCFڛj5}Ky(iv| H߼N[olDE>߿_~̜~̔;_22llքIv`3TchIsUaǦg%k:#WP.t;kVgpi̓R3T`aCť bJ ^jKG.1W/ZO39t}"$652 mR73Mڻ`Y3HO5x֛݆f1oBm͸ D;*l!@D0//yMcYik>ukp*Ⳬ%NȚAی(7FCЭWڂe(=nFOJ@ '9/K5x zS{ETm71"]U R7_M|{"hjG5\⫗ցv{o6;Nꆟ53ᘩp/f3<;}eJ!8%G U~]1c*k#-mK#fjR[V/)<7{Roܽ k Ր^3WG̔-"e7WmHOڇT!7 N6cH !mF}M67ߔӧO?]K5xecCS= +WqUބ jWRcCL]\Nu~3W_rYWwǏwv z>^)7`_WwLװ3FBQ!PMЗ!@RJ UǣF /P.b}'yݿ lx֣u P}= @H7)MAE{vy;)]vL8׏7><v[,vhNgVqqEIˇ^kF+QTV u{:1fo-(v@@S^=:xUC`ԃ1AxFp9֫ 6"{6VtlhNY1~7G$i43h2zYF̈D!=*җ{ xoo-fd13UL4j %2s:A  zR;*J*^*,JZ9Q,F`ˉ܊. 
hklɗ>SfJL hJv6 N^nV4!W{uYQ̊yA5w̬"۳!ę4ԜP`@@2F {袋: zիnܧ?S~mLA#c|U]5y##Ċ#iEĮ"?kλ 7v5bݶZ B7j @F u^a8Ϲl $AW93eG>Ɗ嶫V/UE{VEq1{wL@vzDCuۋtn܃JssWoMl8@R)GB?੷5}flޟ] ^Qߕ5y:ԙ5^y[*:rPNaP@@ _TWzԽo4uk.]37# iTݹfi| ͇td{w_YW?ӆu77RwݺbityBv4l ;B T(Eշ3vFwֵa旜Vn TXkH )q46Vq/i3 %)&_jg>SwgeY*\Qm>i= @^vEeةѕ /|ɇJ?Nt,,GϚ3e!27[2TWϘql2˝*=C@(uŽ{Lv黨G~@@<vxtVE c4 d'qo}CIA"(q$$C `w.Mk 0رcӭi(B t$@^yM4IN:t & ꛀǥ[kSNǏ{`{PE u9AZ!ѣ "ISH@AI :}KCzuיi(=3Z-|e/`R J+Eɮ]vńރz/ @u!b!2>׾G}pf6{+m8Z)赅7pWNSp:P{Ϟ=x'9@EYRt̂QYQl+{1FՂ^y7k֬?ixiOLlڴ zM  vAmtdy=fU67lt豥;>O יu`o>cEOG\[w͚<}vZgG1]|yï77GMERLi9 ^jSVvbLV3/Gi~ϴ',v̲ Yy/۳L2.5eݟgl^ϿlмjL_7]=CQ6ڑt=]9#( zmzw!+WdzKа˗ƍԪss&yQoCoڻfeƣݗW}[tٓFw\ڻn69<상lףSuƏ~8o\on>OGǽ=ywb7w9l1oË&]ӦU\uvvÇaz^S9sB!@>Kݣ9֑1"^0n_/l[BbFτo^wȗ*YfYڵk%;ذa{,\зr)#iQ֢!.a4FkI[<&>4֫&'!3&\Y)qW,6{LNx$vb@kz5GٳeղsNw:[orܹP>Z7B6ӦM'NݻЇT@`$_ޅ[Or3k4\velhH| ځy{ խvtÉǠaכ7 c4!;f AWohh>)q㭧СCruفA.+QFɸq .ѣGѺN;N²U)Sd߾}~zb?6"Dx#d@%4\E/?q8nH=gϞ~| |ߕ/| Oʅ^XTR%:BL5ost}y٭~'A@ DS z 9s=̏>Phj֛c.t.Ǐ+sε!5:s֭[=)iN{ T@ԧil'*uD݆u;{¶!t`~tm&'O_ڵzg̘! E`tZOA|A`O#?Rta6}@2U@!.y)뚿nxPv" @wzߑR  ,BpF+Z>]r 7#]kRe^a=T@h D}KS) (k>#w__ZM wrʴ5B4D} ^#y׋+bGUNj:^\ݻEgAǎ$.eC3ߒ[l_|7;vرa(@ D}P)@ ôD}z {!d:6T؟>}) j'9!@*,Y"@| D}H@PJgToٲEtZ #55AH4͛7#42D}{ r~O9f_'AfTd Zx@-$o C?Ry">}̟?_h9 D}x @_^-ZOUX@UFb PW9 L&c V}dTDRNQC>h”@@Q]DHvKF!FDžj"  (xWȧZxٻW\=~ ׅfϞmCqNj@b ӕ4#PkK—n{x,\@da T{}c `wC \}r<'=W!I3 4u'zsm%cTۣ^{@hiDo)_ꌚe%`@sj@ xW~rUΘNyeqD-XB9ܹse2k֬P@I#OZ@ø*'M$8v 6C @HtF@H7 ͛?|paz@ @7K 𛚶'QH#< pB&8A!OLW$@g7,lS+,#55AO>@ 1Jps eaF{PW؟:uJ446B(KQ_ !@ T:t=}d28|')B~Pywu]{=/xP\9P~zʗV?~|Y}zz!V`Vҧn@'{#ňAsd_SeNڵ o};  -CS]w+Pv>"|1'~ʹk#}x h=<,"}BcZl.N'cϒ >-=M;!3S?#篼"?b|~i*T8p@?.+WSCQ% '0vXٱcT{n @ ie$x鍯&j¾!l\Ĕ>H?*5axb[>OМ4Lkiy @ԧi1Gc '>'}Ό6aZ?!hF*w-:a_!X`XwCCU_?,a6olΪׁ*I@O}z6@ W2&U ~ҤIx+0 OxVC@MR@ #}gg -EQ1 P͓#G^@ r(. 
$zkIvwM׻n+1Rmmm#e| d2k }r"l.A(KQ_ !f81sN9tЈE91o믿.'O,LC9>d 6_,G굈ٳgŋ\> JQ*n*@:H>l߾]zZO|Bz.i*֥uD,L~y쵣F!)C8P@R8 06ǷB@28Aoș3gS?d40ߊo|f+_\tE2fA'Uڵc?ud4V@'O|@K@EѨW_KM717>oK/ {P"M6oc!*@a~0:.*pNs=rQ1mжhŽtm{?G`ժUmg9~8X D1" @ lH\x~V>+WArw6imE>+V,nV<#< JVҧn$[-P5hGE {wsmSi MWZ$@'WiZLyUkQa㺒gݻWǍg1%\b\ǡ+w!fj0 D}@`)i'P*<$콂~2a+Xg^_D@'iJQמnD1W<^ k<رcZZD`JDt#@t s_~Yt8Їd…jĞ={ !7<Hu[(轱rJ<Ц@VZIh@-`d6F& D_Ot8ĒfL,-h@ 67[:Kxgy{+[n]ji->萮.y/LwP$>]K - {]U^w]<Ѻu"kާI Vݷa@ 7A|=AϽ(Ç7omfGQ>E.O|e&M|#6^O6@@ :X|*:}%H'GZJ~{U8p}`+۷: - @aP@"ZGWN@@=*ϟoKC"  GDD@ h}cMOo B@GB<_~@)a{}S]u@ @SMb@O {ca5"?}Zjk@Li5%Pts̩Y#. eɒ%h"d25\A@U_vrGN>*`|%{Yq.R?~L0n?ZuPik(Ack.D} NQ_OdϾ '' ,ZO>Y*=GME<>Y})!@MJ:fB ܹsryD~믿^>я5QO?M }D޿:%8Q l@r0P69}IK H hpws\Q؎\tE6ǽ H$4ic^THۋq@<੏G?a%RO`$ {kR=>L^j͛#o/B&v`] >|Xv)ﷵ!E[Vk䫯껱*IgYl̚5~?hNs~ҤIF!r `  'B~ڵ6\Cc]o{+z<_^|2n8+pouSN[ni=,-}l!)Huƴ{q*М7PgmJ/~ ;VőcNJK.$z ˖-;u@ؗ!@.p9i4fʕV;|ژxoa={6 /b~̘1 Uիz.}H %OhҬ S/+֑YE5ϟR?t*5.bf gfYpaUQv WGO;vo\HQ#L iY5K׭&7]ebk 2pƍ an*w a_7:.@j SnܹsmbBJoU4S:Uz H @d,7:}>2]!C[ /^,6lhj.^"AFO}5:K$ɓ'[/$A *M&<"5:(3=اi)!z\;O=L>zbg<džN)ٳ'66cht ^iؠ%A(GQ_ K`޽`ĶE.`k׮%&ҁXw%C 0Rp 4Ggggz L& &@ F'M4  qW  } ~PaI=c̆ %>HD}ոzf,ii$r}bA)qqh,f6B("^gϞm=:>O!@ nNBq1H̆O>@@VXaD؇M = Aj&!hDw/k%ÇΝ;e֌Ǐ~H"V` pa:ed֬YEyz %6r~ wk$ G-ge+uuбL\/ T!8*Uȯ]bteQMi{N߶mׯ[n%Lh}Q{d<,VB]'N(8\tqaMS^{:[&E˻ |&/{v/x6I HQ?K,T- Pq*l½S 4T$gYreꅽ a'ޟ~iuc*Ub0FA~CIcad޼yơ]@{BPD?*G%W^}~C&Uﰊwݪ]=*p'NhU̧!EۯN&e+ q[.ʆ,d'@^zY5?˗/5k… kL(G ^wUݻ׆QN\~G(3+'}Y`~GDQ֦ -U  W:EƍPbuH:^i׮]mq*F]y]`l 胑}p {4n/x6IoQ?w\*WM: h|}gg:t?u#{1֥q{GB^E NmZR5C@^'}RηzkfZD}{"২{LvQ d@tqƉJ@34B7o i&y衇xdfn:uoFoWpUV`caSOИV 9W$4K@:SW)ot}P!}=:Ygv;pvZ?5QJTAE:@ h Ƹ ( {}Ё з*3t[R X:x1鬘o$E+[-T@Xm_r7IKߪt$W'g&~#AO*U\ӊ+6}O |ui[ٳgU8}+Oo()(MiֶUxY&a.S[qz׭[cC,{ ~~5_l9w`Mc^gv҇ b#~T1O&L_J.NEHS @ >P} Lz$J+|Ϙ @&w}ٺm۶" lݺB)a4 H0. ~,ԳR ӏ&bٲev= ^Rd2M4;s~*rD+nj;5/1W9z@ڵk*U ҤgQ?އ;R:x[現[R͆7O ,aǎamܸcJ^stD}hVI#Rtɒ%V%+? 
˖-;@q&a{;ᄏ&::SogtjT׏b h1 q3PرQ[:y}TSOŭ !Ǐ3jABWWTG@Soj'EhL/_jzY:"l!^V}5fVl$iT 3UU޿NT~R, jiO]8+\?sl%7_?>~84lB= ^S1B Z:7轤Q6HW+UUqCɞ={-~ʩL@?c^Em6L̻6p8SKDW9EgА 'vvvvʡCv 6e!kշNía@Gŋ˂ (<ވw^;㚆_zJ檥Z?~UƎ7DcI.>n8 7ee-շSRI|6WqO?pBStC~\OQ_?31~|*g֋T7]شig*E\̋8?}:IŽRT_wu6TDn(ݪyYf*McrpL8`~Qꀰ}%8M41aވ Nk7*TO W:CY&*n$3$eWѮ G^n{ kDǏrHMf`7I$E`ʕVDj`#ޏё℮ |2SNmUN2%QjK^z%;@GۢmRFLζ:D}u>M~|(I#04'"oz' ~hF;L[ԫ_}Gz]ro9ݪ8qPC‡\^I.E]TIJӪ~lUX/>MǏrf̘a$^ &믿d5R {?~GBcZӎ8c=c^D}{ &׏Ri^6 cC{66 4"'C@*P .@IDAT h\'iD]A%}c ` J TPa:~WA `~ WJ(^QeD3fE[;%&e2;ےN}v`JKR\ 5QFic]pE7mkܼ wTꝟ7o8qŠ}=dHAmN'#X~|(Ǐ2@vaٹsg x:uG.0lٲB|= P@ +@-BLE@? ?_v]FSϚ&O* sbm69uꔬ_^nT2эH=+WSH]e1S]k G9~M]}ɒ%V̫@sX?ʦdPQ*Ҷl½uQ a)+~N#X~|(Ǐ2E6-S1<͖#իWsjq~j[ctnN_` B%BL;v*U1:x.\F"@'>yˣG9~( АB&%u׹7n r8VDE)cS|Q+N :/X^~eJ3#Gaܹ6TB=$I@;;;СCM;.ʊVFF|-w,>rNsYn>wApmׁ֋/ 6W %C0x!@ hhjP?<~H_jEUaO@4 gڴi3Y2OU'Џ< , z5< I熞>}:>7@H;v}p޳gOH5R   RE@UڵkWm޽{eCC iT  P N%Nj Hci,Ocχf~#A@8pаuIOOOD, $L,}Zhʶ^P0㉍[ )x+]X ҷ#\swV~* S4r$:뭸2erGmG* 3eg*;$]c{tgdsae-譃;6-ii @@1?bF;z]vd q9y5rMwI`tgf̡wo4 @@_OF]rzUwdd@g<_F$Umiب W>T@ԧi, @J@\g6Q/j8Nkyس՟%]C3__0ӼU/3ov+bz4@@W=}tWr  @,2Kdp(6Z n+l9\I2}ޗ1/zF6&AmX~Srp[BJ[Lͼbl T6mLkM?V(?a-nxeӏ&<#<P12g8@'95&>H @ "OPgҔ,ZHv^@ NA$;yd9yFR+[Qg#l&ߒяA#(K?˻Zmx*P!n:驐 @ <xcMM #3fȁdҤIuލVY7.H te kҏ#X!h8[eY|9sWGv.vunFJ}LnG! =cmEl@&}wԩ.$7 D}S8̙#z\25(,"-M1)fˋ_fɮJez[%ݺN_UM0ׯ~L}r~pzĤ03CZb+VyE=V{. |WJG>[o$]݈Vٳ;v͛ k2M@bСCO[qsΰ͠>>u]N j*6ѹsʆ "EeFeI}KMLYAnoX^'YZoiQW-Y>1*{@Iq7@ ̊nA''NiӦڵk婧_CX#7(KƟn5 lOAu{K`?_oϘ+= u/p:g=zO /o #Wz})"zߓA|Jy?tMlfxza>̌Q]T79m;2**;}iOIT;ﴞ{R]g+#sx3&(DrC;zEtuƳ\vvt{31C_]ڶ7̞ⶎhİ ؞!dkҷP&\Gk>v-P q.ؖH}xb5k P>ŝO! &$kgőTM=XK&̢0SH㉠7KW{V2fut xwn3fjFRIlPf+ cAEug̔3C,xC`7gC+ pEgŸ|᭪dMa8LX'_uR?C6jƍbc̆@<X 0m'+<Z$+SLײR9:edu9+ԷwKf+ 35 qL3ҡgHt;`o%|-SP!n:1T @hfq-$3f5u6 S?J;~SPo-[dɜ>}J!@ ouꄀ9s[o+Wz @j'9!+VȼyD  @ OfҪp+ivmrw b}GJUpU)3%6ӋxX}ĉ2m4YvЯN:U!@H<)l/Ν;UՄ}C!x–?ctQE{O/^,;vZ~|kD&p2p$c#7 ,@ԋi&ѣUCtj. 
Ė>]i&{ާm >w?x`av :ulذ!vm`@!@L?)={twwXzxaYd8p@&MTbN@Go>L`QT1:>:}%zǏo7Y{}@RBO}J:fƟ4;c ٽ{;_jos|&?џctQT%cO>-7n,+1UO]D*J1E/,#J?F N_` иyN;vl5J $)-Jz=$OJ&Hdt&~SN@C @ YOZtKBs"$ @ 6@ctǏ7v1WAzF p L-D}$!F S4D}:S 4E)/!m6Yl @ $~h@u@N*7p1Ki>Gz꩔>S hQ[>)'ЈD3f̐ΆCT\R:&uK,[oU/^VƧixWX _~˖-|r(x H{y`Ba,8L&S<M@ȍ?AnBG30a3g#A/*ήZ[*rN z-Hxk<}w=ehTBZOHG K`Ŋ2oLG"G^hr"(_ P3[4w\ٹsgj54n8};Lԗ!ą@#1mիWm&wygiC*[ځx諒Htk-5~"N{$b׿n7ëj&!=~zm1LjLAJ_?E?CCЂJD} KԻvL_ڵK3C@9:&CZp!r8 =zGdӦMQ9u:ꋡ *DoYf cLjtfBE}:8o:bhǎn ֧: N@q%5G@߼[{ѣGo%9Q  }b9{l;Zc`5 5KrT+7G:=u]WO]E}ޤKoII%oa9QMP. sKq,SH@E }n_^UWϾiJ*5Fѱ/>񐟬;Q5)#OY\L;vL.b,Gu$k, <:P\57X*tRϻϳ>k'=Pb^k&n}ڬf r=ZHQBZnܴ>?*8`ÈO@SPq/R(Ul]zKq~ uzu넺9sPߵ g>L2ڧn@ԧiq8&lذ`o>[>Yv=~zj$z]ro9ݪ\&*Ľ u=aCew #!#^#15@1c޽ IC D}HA ʧPOיn\*OJ=.[@/Q5!@ h,}e GO}x (^&I"F@JO}U< @@ GX@ Up @'~a! @@WI@ D>} @JQ_'!@ }B @*D}U< @@ GX@ Up @'~a! @@WI@ D>}6mT w  d@'iE <#裏VmÇjNB <m&%Y#S;&cǎ lkk?sm&+VHȱ-ݶ=zMQzojҭۯ C@Fמ@+ ̚5K.\(s_~)[na7ah:B^?O/>)tT85SNɌ32}t+Txj idݢ'ջyNٟ˂o^=~E2ҙ-xp/ʅ^(\p>B@ MOSo4itww[o1GxbJϟgoaK8Ÿ~]sq KyR'j*A;wu?~=tPu'Ϝ1n?1N->˘1c zc|a: iHwA`8${k׮u։zI'[oPO= %@M|)Pפzޓ'bUGEwR\2&A@pǖ!(ٳ΄㦸 „ONBWA+˹oŚ@P}|xm.)-{ Do()(teGꫯ}% .6:;d\r[d{>~Ld· iO PD}mcNݑ޾l[R1>܉PMRyxozr ѸoȻ] + ]GxafD} 0DQ?Ăp^=Ĝ+'MmI>Vur?4FE~Ǝ(7MKh\i!v$qS< T%@٪x8N'Rt"msL yUԫ^ }ӱqlꗇAk׾*2xiZ+_#A@xᱦpybiR~ڥ: b;Is}Ov?Wrզ{7$@Hoэ4''1⺳RS"{\TG=91{'?ܚB^m~/ʿ9/O?Mj߲n)k2aܯzuǟ~R>rײsk䢱;@PSQ8a<ڧ*.=wm9ڠ69wfK䊫ʻ>_\?sX O=r[*׿1wGX^8ke^H <|иפ$wc|z]=,sTm&~'}Ư1s[}w[w- KQ.oj jx_rTS*tg2z1?T;O}݊6$74T1If JK%/Ez^QVՉ6zoL ?V{gUqoWG d @ǥ"l^0|>Y&!'Ia>Ê`qswog/?h/>(w/T:A$@ 5L$H /_~ӽGRͬso6>s:Bmmmy3h @ǧ 'TkJ>B SM {헧d;EE@[_ۍ /qȩ"3ېQNO'_M5O,φJ"@/*W z;}^'*b[[DFЛps|?L%׷~J/ EIFKEo1VBfKoNWqVݶllW7a7x>mio=cz蛲6¾]B@J Sqlz\ TDZUѰ篝Η㴼ۖWPsfЧKo11by6cc >@ 8A… <1ca:9\80 _zRF?ɘ3˨_ț?a6ĺQeԯL؄-?|Tl' ć>>}zKss)-X+}w[w<.[+MXț?\M&m,{2'Fk22by3w;_1^ENka? EQii@AoMqUMmUrǽy"g ..c~?eW7h("Ai OC/'NiK56䦔ۆlRM̙3r1N6]r @i. 
G86[3or1v[k 4<)0 ȥ;fn[ =>eNsB 2U@P:+ƅl~!sl)zO @>P$tK-XeBh쨈ϛ yk~o3K~siy_Q }˥l%ñM`snI pV |) 3ϡH2kW)ZH8]28𢴽ʒM[Q_a.wsJJ+ GQ[JB ^lt1Q`QF]UR敎FL@Xd$Xya-1v[(쫠׏[LE}0(iuvn AHD}B qi+s;YٕWx.yW$@h D}kS>@Qk%OY;oec=#븹3}w[wܳ?\v!$>ݚFYJ+Tx-;^%m91XٕWxFQMSsc鶮dmHox#mwSvU:nl(t"6@i%Ok'ݱRX3}w[w}w1-^z +ǯqݜwo" L Fߤf!ys>UWtW#Kh=. 1/<gKM~ce+ zqǫב+]cuxkp_120EeȥaҀ2ynAڮ*dLIO}TN}6#VuBu4[@SC\ \=xLlϔ{bH6}KyO9_Շ5暬|uyWaO.GwȲ=r6LȲۏȧ7U/-Tg;LWo͍y 옺UejSvW:% @S_?3rNFYtKִ90/ʍ%y6;+|<ϵˍ e0ۻ iwˍqLf eHK.-=澏tNG+3&5_7ӈ;3=Q|8~෇QZ:79+W;M2knK>  2uB S[5pX9c.WϬsiXvpghvyaw.06ͫg xޝV\G|^f:AqkʫtY*˺;k>{~/K}5#Ҧa߱^kod=?7<s\zn^G wٖ᭯|o_67nvr͟g@a & KpE!/W+LͷYdwVn3 /Pg"*"jRÔH9:vbˊk%\u̔~Jϝ̚w4ytu J 9!=_Usd2 dIפyP]:֘DI(~[\=JMζ6o@e(ɥo#GSj<$@`Rz9hXF߼MveZ\\_-ie17K0oU7˻_oWp-d_+LFQ%p\YzγߵO}w[ ~׈ҷx-}O3P?]z92pQ(V"r*|&^$Ǒ̐Bλf-kW+KYIN^ B^ 9duP8e}?H'Ccň!H,켿sfHYsNnzj##R*X, fO* @DB}DLJWre[ťV+ߍdxw@>vǙnRw.i׷աzW.y\[k!Zv|?r{dKߺs[zߍzۅr{{:F!OG% ?i,/$;$%؇i{cb(211!~Ȉ'% @w~WjMAbe#]A罖BW&ɅKp"u|ߒw~{[~Rk۷yګʽ;vSol[~iӪC8CMyKw֥ \1ZiyԐ:R#̫op/ -F%K]L?L-+ C'37ޕz >=N_s,]7ZΟ\Ο6˽~l[F?qujg)9+x}NEgi{PďM/jҖ  @]@ʔ.`R:Şi,u+m{˝>ꐍzGN!@ivRjv7ޏ?{x>8Si.Rj=]卯/xrO vzD'@-5wY]r}]}fV@omL۽ ;m}|ieXג @ti--YZi5ۺ=e7neg:moN;$Guԋ Y3٣G;駇'һ[s?s^=ugWڽ :mw/e?ަ\Ep ` ߹jX ɤ46v.7:Zols0@R ԧL`@6kX|z?LuhBEn[ɗb/@! 
@B}D^ʙW#v[i3]ib;%b ԤT#1rl*$/"gͳRv~/2B.@`p\)">2 }\-IN/  D GĤt ({ZX3RZ^|x=lY7A]rRcɍۨ|}ֳyQ 0羼J?5/,2K+3x!?77 da]8ڜ~UPv znHmE?0yzo7e:!`C̩"@_ [.kOJ"}}͇w?Nz23SO]fB]""2uUfj+n~Dr]ˈᩩoچCz@P)h7~O'`Vnrr4NŢn&P̜pt>9گRI ~t^^Gw!>{ tCP ULE 1J܈z&Mu]LLL gF-iDO^D?wZkk&^zO@_PV1[i rYT 0]K :bkkqQ3s]ϓCWkF>itrCAP5 E k:FG굴@5YN;;}4UP^*SFJr4Rvs'V@gSug}Y@NP?p]?̀l4K+f[pT> r{ee-Wvp./?l7 td7Ny dKPl@K_Pvm#Ҫ!i/EB9IL7F*R[G[ROɣd ;dwvF'_j> }ոyu;:^׵혈2BT]~nUk_Cy{Zhe2_F5J}oM[.Я5y2ʛ3{?}rn]k6J?~!g;Co=NJBU~әoycKj\]j{rR^¡|𧽺ƒ\Coߔk/Ю!\mTݶYa-kڲ4+@!;F /[ +.nlM9e~\d8/2?|E?i9= n\)YX(5]D9FGRZr#r#K4,~ˇm gNp-[o޺B{-۟@ gA`F*G.My!;2?Qъ xMq&05» h>UDÍj(c;Ҳ)5h<{[->!}I9,0o!^,|Z[ymw<@!@F?p+kh{8Q {r{*37\\>ߚAeG[Crkm 750궞~dQ;!k}wT]j\?x"17eyXhy ^<䈼i> ϖj ~p4r~!|$=\/>=5p7F5ZCpW=-exm ]Ig2~,C*L?gJ~bZrŒ}ٻn;!G2<yU6ޖhw5ćAF_/L@@&AswO>O?$#7kR/Fȹ=+-[aXk/Y&;:'꺞^᥯ޞ/IT yaLڑ %z Rwo )xW}ԏ]wЪmky~&y/pjޟ՘{ZZԂ>M] @'uڃdTWy$ˮ2+|Xf {[ O Z&|.]i{O/-:MNNJZ^G7dխ=5m>*K˴L^2T %+\N.lMRYwt^ vs R\IVPK[o䓣OIK@ zR[=G_@Mk=7!ܜa> 5$F ~4*z4IDATէ}5١ё}}䗟nc?,JC?PLy= x'`fנMYfYE@\BDUg# ?^GoMv,1Uky 0¼v!zW:Ziia^C|#7}nAF`ߪpk¹YXj ajYc@.&@Ge@t`5g:c" SytF䷾*gF-ki3,[ϳ؂u^Cl!J{M0kpo[KkO۲PoJۮعٱZէ<@!@H X0z4"?׏e~zJ&˲~uTJ<†4o{0{mT^}8kge˝KK w+= 򶏕ZҺ0rcezن @ ?bq6}L&WdyTVGeHŅY)OLɵqSW9诺nmu?"AƜn-]wTǻ+6[ҖDp{pel]lC@nRgt [:Ҫ_ޖ5ܔ]H+}pT!YJeBNrqX1)M]/Qa) 5nnn;ؕJEmɢDMnLhy}(y[0\yMf,tUSx=<󶅯 @h ;;;틑Z#u}nWDv*9Y;Q5''E9yYO2? 
ƅ5 suڌtG#6׺Ԍm@@/1W"`5ȇ-Zi^˺.$ GbuF-[OzאΓ^+mJ@@ r|&Vjpק[V:zoEh𶧆r v 6ƦՄ#6*u¬# @LB}LMJExXT--[i-[CK av ܭA>  @BPa@g}]ou0[HmapVX=  @Zi' -8-!]m2  >Mmں2 Nkapm(@@ _<"  I{*@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  UP@@H_P9-"  U~`jZIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/images/ironic_standalone_with_ibmc_driver.svg0000644000175000017500000040336300000000000027513 0ustar00coreycorey00000000000000 API API Conductor Conductor do node deploy User User Create ibmc driver node Set driver_info (ibmc_address, ibmc_username, ibmc_password, etc) Set instance_info(image_source, root_gb, etc.) Validate power, management and vendor interfaces Create bare metal node network port Set provision_state, optionally pass configdrive DHCP DHCP TFTP TFTP Validate power, management and vendor interfaces Node Node Set PXE boot devicethrough iBMC REBOOT through iBMC Prepare PXEenvironment fordeployment Run agent ramdisk Send PXE DHCP request Offer IP to node Send PXE image and agent image Send IPA a command to expose disks via iSCSI iSCSI attach Copies user image and configdrive, if presend iSCSI detach Install boot loader if requested Set boot device either to PXE or to disk Collect ramdisk logs POWER OFF POWER ON Mark node as ACTIVE 1 2 1 2 2 2 1 IBMC management interface IBMC power interface ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/images/logical_architecture.png0000644000175000017500000011230500000000000024552 0ustar00coreycorey00000000000000PNG  IHDRMsRGBgAMA a pHYs+ZIDATx^o]}'G3ƥLIlj.[MGjXāJ$p)v@P\yQ}Q21!~a3(ȃ[.2v@IGkἸQ2Z{u9ph|Zϟom|Ⱦ/ @ @ z 
@ @!ޅ@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a @ @!5@ @)a^ :x;v>mnqdBɓ +ݺx o*Fʌc e#w* @B|~I=y~~}_ e{Yhغ}Ζ6UX= Y_WC?`v;- lkyYms6~z.ҟtBَ,nv0]㭳>Vob?LhMe}fun9L @ B``!~v}gl3g'[_og7綘7?_7&vV~}rO_5F*#kA}g sfȞx"riOv_s nWIԧo>`Km ΅Cl)9ŇuyVUp @Mn%/etxO=^8+(GmrfA~.7[.7'koIs S(=hgՖF׌ _v8Xlct6]K4 @ V+o$7WO]Wߚ=Ȯxjt0SYئJ @@#[Ϟt>yJ|g}zn|,'  @6n%~to:Z-3YZ @ ЖC|[%@ @ l;}Z'@ @m mj @4( 7) @ Цߦ  @ @@B|"@ @m mj @4( 7) @ Цߦ  @ @@B|"@ @m mj @4( 7) @ Цߦ  @ @@B|"@ @m mj @4( 7) @ Цߦ  @ @@B|"@ @m mj @4( 7) @ Цߦ  @ @@B|"@ @m mj @4( 7) @ Цߦ  @ @@B|"@ @m mj @4( 7) @ Цߦ  @ @@B|S!E @}U99 o|#$@ @ XoXnUe| @ @``B|-5M @ -kpͱ- W @ B*0tM @ @@B|ũ44G @  D NIS @ P1 M4C @ F LU @ У_Ͱf5t @A@EN%@ @].4 @@@G 4N!@ @-q }%  @ @!~ Bt c؂̡ @ Р_3XJ9 @K cS J @ @@ 7o<}'^XG篆sӟGjZ @ 5Ia;1Fv @)]J8&J1p @D į(Bӡ7O?m//~Ĥ|v~_ZXdsצ! @ @t_p<~!u+_u%7=v @K@G!>7'{Yw}${(^g&}-AMviu{w @) t L|Ų> ي}ë'ڇnugy}Ȟ~ow 46u4 @, gBO»zȶ2}ǧ!+J/S~gs2J|WS>M @ s~Wn/Uo/gB};o2O?_́mCss< @J| wɏT/vV܋必?v/slwtZ'@ @mF?pkE~c  @ (WKos%gT @.0?0;W8 @* Vy  @Mw/ۿPA @Fֽ^~@ @/6۽XM @͗F @ @@, iKUy+G @ @OAxtKf @ @@ hK]u;g @ @ Ax!%ð @ Ж`B%²9K- @ @IAxKbMH @ɇxa%l۳2 @$L9 @ @ E!ӛ;Qw @ 0CMEO%ȧdE` @!pFU| _sk/#@ @!$rWc @ Ю߮uad] @ @`B|E7i @* @ @_vq-U @ R[o%@ @GuA @ -zZ&  @ @ B!lZmy @HH@oXUV| @ @ R!lz]ٮR#@ @Ⱦ5f;WmFe @p.7 98}{sYwMT;bno9lSlj{vn49ƦjTj|ަy%d /+b?;!JAh 'w+:(»? ;#j w . wX!Cl] Љ@ N и' N<f @`X~VM͆ on)1jbDZV. @ _A* @V@[o uO B?b*GUnM\@OOC~~J`[!~[1 @ @uK§T-c%@@uI+!+i @ @B|M@ @ @CDlOpMWsuC @ uO @:;  @+ t> @H@Z7 @ @_W @&?q qŨ ?) @" @ @0  @ @ @ @0  @ @ @ ^B8u@8pT0 - fG,=߷'ƴ(u_q~Q.jZw̮9T⫹9HM'ng,؟h'npwJ\ey^a'fJsșg/U?ٙ* ķʫq/kh@i;z6uF1>r*tᔭ]+'I=·Y9…-u}l8ZGsF\@w @xpa "8~'|grlJSs[h½ƅ٪t bN-o럞wlg+.3v}VPnw>Ock{)cjkҭ3G|lعpBjwnOvmq=L~??ƪ3@ΧGg3'2 /$|}N½Y;Ymv[=[緍wv}pjzѳob:م1LyYg&lxb^~EvS s>yr~uc.yuiT+9pb}Z? 
痶 yPl/xv`w[݄dz>:; Ź-1]xbvqŭWu5r;(,YuѢ8\çcۊ?Y1Evǧ[g*<1ydX׍y1 ue :_`gRGDm{,m_~4l;k| IWK~[}fN j5-;7fok|R[aȅD~Z-ֿS_0q,o1cئ%@`BX+o @`L;:ּE~w'Cwb~fVWXOC_;=.{h%o.Awbo=n;+͓CQ0ڹL0얉wKԪ6![eyP7"X1:c(s9q 㪷 @ ϶g_xXlK?zna,+ mY_>^ o,ϖk?;ާ;o}ѕˬ:L3}V~:97{$ïygݘak<' 0t!~6? @`"pU=6h~elupol]wguh [/2s?tm^1=N/Lv6=s K]uϽשբ>nuư l׍O3 䵗Ǡω?w'#(2?nUV-' `%>"" @.yۥl' ohNr+  @ -g~6U=w~K~Vɓ#@  !|0 [s @ bǵ8TM @ @@B|E7< @ @@D>bb,|Gwl $N*)s}ar/} @ 5 M8j:C>jqosbV ! /=ۛ9t! WT1 9bNc:BۍOxk|}C- @&-5SEBȖE)\{ Z%oha~ @n)"AdqvXj%2%o6r _E9 @Y!~ &ֆp[E֏Gxo^|z @n³PPZ]/\!\qj(IvFNp|knۜ媖TeSa @-!~P`zqҮvS)8WE*W2koTٞyw]7aTDZoT @`?'4?ye^~Of1AJ@Ϥj^f~nj&no%[UA50C S}[߯  @c}s $i]G!: @`lnoH۶fbj, 6{R?F @@[ ˉK[/fUfnE] @:хxpA @]FF`o}uO1: @F]TΩԣ+z NNl @FvcW\CP& @ oQ'hB8k֩ x3% WZ2Z @@vir, @%0v )M6uk[ڳ2 @@wl @4)0$ @ @@V86M+q ?hx|`xL'?}<f?6  @#tY-M@C'tЏ.b8쥘X÷?xx %0o%udzpZ6cǗVӜQ @HQ`!>b3\ 7L~p%a)pϤ[<6WcN럙=yJuďb87W?q<D.NuB_mzo/~)_ _艝~~;|-%_?ݟΩJxr6CC!|gdӐ'σpiP7}7 i{XܣG?/f};_)~6ق ܯ~ṛ[Wm9hs  @XYuӏI y?b5P';dW^ gA>W'}7oo,;w3A/UZ>;ɓ簦o\Zq}O?d[i @ ZA)^]_X~z$ЁcU΂΂4OyUt#p}䃳;]̭.0h]?~lbX/V]  ]Ow%X? 
ojNVIPl?Oo;}eI' @\|w~''/>-M{}5:9.]G!j?ܟ?ܹoeo-]}4%V \=,<{ ɏO~U<R46[~zscm_7s|+`%_hZJ|Ӣ#МߜZr ͛ݢ]IF@I!IMmhV`P酠f/ @ @@\Z⻻7om4[v_ 1pg=+|:+qh @`Zň?6:{B}' @O!O}} 0*VQᬘ79{CdVOG@O͔#VCК*_N{9'G @Ws3&WؽYh7ܳᡧ'Nxxd:3&- )_+WnχoM~8_FEt% w% d+gLGw?`ojcRxݹ':_)oCso,of|ߺ6`xBV@4F#c_ }^ g?Vϯ϶go,p8,nXY;1kbX#[Rm^)v eGf]/wͳS/LͮKggIׁ  @E!+o}q'|b@?6 5?kiC \N`_$`\6K0pܡᥥc9tN~L`tSW۹h0|x3$Ws#Y0߄ٳ(^`'@F( ķPv(+V?/<0zxb8Lۿν4\VI[c* i>] ['ڕp1[YW].[1`)互.G/, y"(L}ūG rw߄?gzlp]o1 wmۦǼ?m: 4'p7i}i;Bok5hzxgZ^<7'7IK`s; PEXq/Ӱ ;X|S|v @Q *ǰowx5[Yσ|^f}xzsFqd4Wasm_=hk!qe+_<#hw9b~1+lW90ESV}tؑpjr @ *wdb+_dY5w}|}?<\ fI &?~2^sWxeﴛ'} ewtU^sT2SY`y ; =g➅B<c#@b8p#mPuƓ?Ԫϯg: '˞<@}>'vohWNVFfox|k!^uYeW) Ͻx_{.k`U@Lߔ>c9_q:N#0(+ sg+>m~>>Bn5E |>b77?qsx͏?+f>"~C!@,l+}xcﻳW~nV[ªpQ&YP  @B|WDavEs>{Xk[S L?SJ|L @ : n)+{~7^b/2{⻺b=Uxzr6aXFb@C ~*Ic%6' @l]" @ @ !>B& @d k;Zeݝu=_$@╖ @N~3 hU@oW @ @9xYHx6Y5e;fuufD @x3'@/7k @D`!~+b}\Pɾ?{= @Q`!>Fpc"@`7.=>ə @@V2]g5q4~Ծ @|Ĝk@"@ @`nd_c Scfs(V{OnpܾJƫz8X+n|1!]VɃyMdG(g){ M \ B}$(9UZ|UZwI5 @I@SuG@w1 @ bՌ_ @( H @@sB|sZ\@@G @F!~# @ @?  @)8'*fݬ'mPk @tN]@BG* X+c\B]YV"@ 0z!~Ԍ @ A * @ $ Ļ$# &B @>BKc h @l]" ɗ @() ėrX|u1a yҰ&d6 @7 c̟hKmj@}!z{@% @ s< @CRɑCIM @B #>R( @- -jYYO @ fF5j~T6Y @.cp @t, w |y+G @ 0!~uNn|r%3` @:;@vv^&@ @`<BxjL$d @Ν>1ttk7 + Dr)i @Q'!_C3 @ @}!}c=l]" @(' ėsuԪZi;cX*a @ ķ\"nȫtm^oSc @@@e|:z] j @-æ[n' 0'ɦ~z^HM 0j!W U`J+ ĪT=o&H 0H!j8D-.˾(C4>hb  Ѝq *B| l)|Iv֓$f @ χP;EË,񮪾yUĹ  @`B|ER #Xo=IDJ б1Vw|-ўk`G k @Q&!WF4[OR @# >\k;O=a̽|,8fH (pF<7z`idot!~>Ƹ5 {H?fu8{:S$@ bG\C/U޷>o0| @c_V5J\ߴj$׼>zDaS_~O`S7h[J|'P]@`fШ0R 0KA+|-}M((:B|= Ѕ߅>Tp3pl1̵V .)7Ub#w]_CV滾ߟ  @@[Vd>pV_FбC} u^X @f^XXOֈnCa3 @ чx+l/M.yW6.6]+]>6.mE Y`!/u[7m|9bc} uCf́ ؅UQmGx>$0?k\ @) .wӛrC;goE|5C{ʯsJC$01?> yp/CjN @8FȱT[QT+g @CMJb C75G @Fq=Kۜ*ɩ @ >űLتZoW^y9 @`;AxLow1l{4m;^Ռ[57g @llG7q&lCmeH~>^޵l @@ CbdK @EAxE&@7fgslQ+ @ hޏ{e< @` Xr߯  @M xJS!мB|4d󧏇õ;߻u~8_ }G @ bB|[N -2ܞ? @ 1pZcǗV[7V~y?-jgߞ?g @! 
U WֲGoL{&ݚ䱹w]tT^8l5ug['|}>:ٺ_+קAn/ſ$8>o49Mm(p­\Z|x-lcOi@c-ސ1N @B Ӌ^W~rj,O|w%vdw}|p4|;~Yiux<+V?&_>?Jj|.ax7NEu'_Yp{̓W/O gJyvVM/ھ7g__ c_B}O0"U?6o?9*t},ԿD4I;j|N, 6yn%~qE8`G540ijbe|cF Z>N5+oʯ/Ue\B_*8~Çj5tdyo7S*n܁HN wrtL#-L˃*oH "P^ $@z)|7N?= @I_^>zx8ܞ D!`Kw?eޞUlkY޼ƶg3X/ t} =w>z!@o>⮏HM@4:<9{P|ql#{|VU7m_K8vzYu{{;g++x xJ.S6Fmj Y`A|!ayl`?O[>y|WM޼xha._DH[n뗢wb_ ze:N]׋a}C- 03mU1YUcYȝ+vf"zŭC&/2}==HlewTR7;t+U{szGq/;UD @ ^!i~h~[<.ۗg}}lZ \9w~wy˵~ˍQ/ (]C>یN-Z,,D`Ʉ4B}> Bm,ntW`˹b|D?<$M87,lI6U`!~R=: WbxnSvkW·g+ON?L,[æ~zś7C0  x#b3mT&0˾ϯlU}}//x=?R14]qf4Zk7N`F?ʚ d)yhniM @@d$ C !VsW+ ]]KCӨXuڴj{n5gjV[mq6Ti5gU|fKM7tk[k d>_ @,F@ E!>Ū3f MA>͓1J|EY?$20#GVp%@ u|52 c໯:H uݳege7yի٪k~c+_oi4jMv^cwHUJ|3nx~nxTz:_"@_ު#c?^q qŨD# oWmVo9oWG @ |*vI`lImH[AczXƤ8-ƯFou~{ rj $[8+1mMM^jKe[ V)-hT@oSc-MPDU}P _"R @@ZɆxa" MҨQ:{uXCg 5C@LTB NdC|FE`y`#([bS/;geG @@zIxA" N}Oэ1ӛl_w>J7z!@!>Ut&0l)|W>- (]?'@`ɇ.j|c[_zy՘)kijI W ?ҙ9xL|xc$@ A>*S[2hvDѬgJ CW |Jlc`83sOӰV -0+ ڕu >nb]pi.]"eV盺R w|[n?s7MNs^\O<`<0\\(so2śl[A~2CCvϦ=<ٹO<8w0 SٶlK?BIо;TsJ8 ] W/09G^W'o  W]DDz7 ypH:hul]t/03 \LqC/nn1Wc[4oߝi}=׮OCb{i/Pӽ6= |5RbLlۿ0}Cnon :?wឝd{WW&ATv?} ?m$RLϋXWbWA̳ݘ : ]|{Z&@K>_~=<-m~|fԽ@-]-~vKY^Zpǭ ~G_ 륫׌~f *0?кش:_v)7?qsxfA~nƗ_ z'qqG=soݬT eǭ{#GEq.3| 0^71L?f7[*ϝ>Q{uye輁__rMu',!dJupD>vy!۹7<&p䡑óOk)̺;gi=h2߂?=O?nK i U`4!>/ _2j@jM!W5aOOӚM1C̣МZ}ޏS_ WO>8dJ{fcw|QM#v<ةr_}zFfoX̌F^wl 4 ??+UM K0W/>!_᎛><@ri%`>2}~gXܗw}r} *SO>/a % :{g*XӜgNG~u6o\\~#pFa<|c3ݻwy7 @&}DZUOϝx =]|^i0cyμv%\,ݕU1Տq$%0WJP]}rIul\~ǣ5FRU;~m+OfwK썀l+}1+?DW`—q= z;k>m£+oe--}C5b_7fѯc!0w -~k]wJ{帟WrowC?k]} K~c|z\elh;=7McS-_U~_y[G @ !~JeUz,LibȷT6U\u+}SXngd=uor)U<mqn~æ tpY'A´؝Zi k5!-ן@lX:3Gf!ο|pJ!Т7:Z4`- 5Yyߢ%@`"PFM!t|쳩hwG`M/ϨG“IO|ofOоzxgss';dYGi_ٛwt fxnietҏy%XAlV#m~j [ x m`EyS^Ny[ȡ(Pf=UÇ>6 ]wdG|~4p價 1ٖGS1y;gIkRX?v mn;IE@4|40QPWSg8Hmn/xHdu{;O.NN~x^xaq;p~vJdzбl|nol9~q9F( B@oU>lLS$$PVR;<_<.;qswnw?鹭{W7,/Y msdmWDoVa$@{%߳_Yr_ɶ{_Onpkϝ椝^/^xerxRlϟx?_?v!i XO~FOF֭+uVd,+ jdbE>̏k&>o(Pv=b~sӘc8쥱L< $' 'W2&@@uxH]I@Vz㭐[##C- -j  jՔ zp5Lg @I!IMm @gM/j3  й7U:'!$$fH}]- W@p_VGm+xD Pf;-ƿn>+Ŋp!b{O?wD(z'@`xʊ{7 )a @:;'! 
%7w # C arN"@`* Ļ U /Gv1 {+&p穧' [!yV{v}XiLv^g*׆{⻽F`!~-ǎ^J`/&ؗqc yՖ*Z4XʦqB|U!w{6B6Z@}PI(ߏ)W*|ޖpukPIWݑ{[B *B|5 Z@%7^T/ħ:9-v\[5!ķ @U!'cx_FӺ!6EZK=ۼC|:u:z@ ]ORJpo2W~E:bJz5Y5hW.#^aW )|B)}?nʼQ @$`%~L6׉@}]l.NXu~f{{ԽU:n^sVYO1V}sb^Ū}(NʼQ}WQ^ 0L+ìY- 5*U/2atȡJ|Eu 6բߖlz5ЌV]@0)_cUB|^}wՂҕt{M4c~ |c |Zp_mmo#6US$뭞D=?g +xW`}e/lO/+U!fPMdg +㨳Y[ q X.FUC@x݋g{/ v _ `{ڲ_U.ު(Ķ8*B|5D- į.؃˶{pMbP_-xmWT_Bvuv4.lR[_ '+>ζ-FI^o!?4m]j,`%~j|0)o^=UZ%ܶrzYʥ_Ǝ"Ї߇>˓5 寁6%!šm6S(Œq!\S~j(}}Q~{Ρy}ڷuF!>@?S$m]cnW___/B_{o_I5 k֙]>_w'c1UHB`Xl7zj6|rպͳ˼f&@?Cjrl6Bk(xʒo,aeǬz"I, ď =U䘙cscZMl^L7bV2i:!b . 8M<Խ:~+)o}Ig @] h:XXu/Afmi ƹ'z5xGR1'Vr\V7 % } =nP&kU|UGBb'02aBpcZM7}- u_{F, @ v!> ߎbTYpW,~%A_#01ϽM-pF5nOELLe.. me]7b+ _*ﵴ6j#r T+7q |nŽK7jbkLȽS% U!#)So2s, @`WvzWCV+S~yoUX:uS?wTϺ{޹n  0F!~UOl|7T(MB|{lL@=k4-`;}Ӣ#0<#) x@W@_ OfOnd\m s){vS{Q׮G(' ėsr@tf; 듭|Gg/0)ߥ۽mݛ c'0>}y} gl*{7o do a}ͅ{;vzfW @66T@-ܕx/wj|`V܇10*~7?]OB0~w @@!c-0Y^}W?>;gqEJX^Z+~ߝvWt$P_*|}C- @ esO<|c'0h7A`d'_X^g1u!j YJkn `S @ A X 2E+ Ļ* 0b2~ @ ѕĀ Џm c  0 2 @! &AzV9 Еߕ~ Ua(P xQ_) Y|Fy~*|"@9 HJf @!@ F?b* 0H!~e5)l(}-AJ@JZ?Y*|= @!DM'jٮk{?]Zv1 @`H^*|%4 @D@w! @`dVGVp%@% Ü֕w]#P @!g @{a) @5!5Z  @ .㪇 @UԜӹ-ݐsƹ^w% @@wB|wz"@@ouVϝ>۸uL ( Ļ"JnuV @!g @7: Z @+xFRV)v\jG%_ @@B|z @@gwF# ZP]^K\@OcUfγYϘZ>j  @!Gt, x6αXZ K% Оߞ  ЛUuLhU@oWm XE˯_lg KMx @G[aL\VGnR9*|*2N @BfΈL@ ݮ Jh)B ЌߌVzLS9js}JJ?J`"6  Еt% ۛ9#bA5ZV'  @ z!> ܨo{ Vc @qJDy`ZaхP7 9 @@"B|"2>tϰ n팜 k뼓8k Gs* @ q!>fڱw0#lV=͆ ýcc_E\M @"#-a#0UΫ VZ?wDhFN?f!Xi.+y @8qݬ3pcwUkhE ЖߖvH)4d.j>2 @!!!Hͤ/s@ylW>Hkd @@~X\,HpJ @`Bl]W]ƙ] XJZ? @4ȾQSSOm=WzX~{{)CzU^ PC̳T- -jub xSRfeRo31N?*# HJfUAd& Cl^M͈ PU@*<$`uK%@ @@@M @! ďfI@"V)a @z{-<5A I@$Az@% @ !>"" ?! @ ! Em @A bj* yRD@B|z @Zs @J9 @, \ 0n㮿 @s<HavNhhƚ!@+ t>05A?""  @- ďfO@V#(! @)a 0N-̈́ еߵ @ @@E!" 
PE*|5 @Bk-a}uf&  @#GVp%@}2a=ٝyRhz @(% ėbr zp6 @Ⱦ0Qs$@`8wzjɼqѭϩzªS>_?wDUHPX-av]eMSoUԆuNk`ճ%c6 @Vs< TYeoj@Mm>S [^wFl @ e+)W D`~)1ۥh|}]/{\|34" @m!ma @c$jblX~L˟DX}Jdн  @܌ .D@jMu>ϹO .S3#!@ VƸ P@Mc_Vd>(SEbq@ku gŘR ilU49- @e2J!@|ʗɨN,sϨ g @ !>B 9/תЮZs2{PEs |Niy(+#+D& GV!0bV*_) Y"@6 |ʪ>jןת.w?*#sޟႲ@Ep7g @GkCsx_-?y#-_}vdz<ݘ n|2I7z!PE@~CO ,s6*fc~L6W--jq< Xs6hJ@oJR;F( /P|RB?N|Z#@ ۊ9PjԶٶge N@w} `8jq794- 7-=H[57g/ _##$@a êhU@˯_ٳʲRǼ9K- @MB&!'@ 1#7Il[_@oXUG NsO90"@ .J 4\=YX?wew|^Mv᧳O|p~~}N\1͆6Ԙkc4 @@|B||51"8|cy]v+퇎'yI~<;ھO@&FD` /cÉ3~{/?ޯ7}`ϼM~4޹ wk: @@B|Z#@`;-™oVaOKsᅅm o; @hW.N@jUx[gu3=ݶOnpe+ xZ5Ϻmw}ZȷӻE`M!`%S _lt?`-S#@H@Z7$'"@ G@4UG3k=~u!j 0$!~H4 @? Z @4% 7% @ в2  @ @@SB|S!0=Uܗk/߿~ִZFKHQ@OjL Y_ < MO>}uWƒdKi~>|Íg_t_ PI@$C\4#>}c+8qÁ/:L"@` NsȌLpL-p~믄'U<ς?nc4Vmt++ _;ig?+V2ܣG?/f};Jm~W}_4ݪj;yEs'h @@!c/.O)4ObK|(V~b?uɱ^^h@[vތ4?9iG“]>;ɓ|u07e?ӭwihJ!R⹧m~]Gg'AJxS!;O.N~^xarN^U,k7?= "@& C z^o̷׿j߷f+fA>'gy~~wl#ޙ\۳l$_,q%|{eྞkק!~WnqW_z1|=o"{ r@B-sC6?f+Gݧ~e/ӟeS8Y>O{v~w2W&ATv?%H(AdO2B}Snm/gvC~'s7w>r'׬7{ oLkn/V_5t @ J!>ʲ nqssWl|Krqq'TJl5t)'n=,|pwc;ڴt/pF}z$@ F[UnUOy?õWi}m'?W|_+pKNtg? @ H@P1M@]uϿ  @cPes$@ @A(I_ f+e @) )W  @ @`TBm^ B/ @88`p~Jw @RS @ @hі  @K;>0<3w|{}?pED* GZ"Ч;ގ'?ׯ~~h 7gD @c!cp @y*yjVC;ܕA V.i @`Bn s/9>{lð޿ь@';93#x @[x%.nztӏy̑.㸧bt @@B|: @ ?)HY@OzNem7÷NG{a;?rxxhӖt&n @@!S @@Sy0ݸq D-Eܣ Wk j@B|:# AJc\c-ۺ_Vq㮏э[@w͞-߯ 4) 7- x]Z#@gg?z-h\@oT @`_!Ak-)} )UX  @mmO A^oL7X] 0XAҲmV @` @A ͖{Z#'bq?K/ x6Sco4 PGs.F 0 /^}# @@B|#@ @l4 @ еߵ @ @@E!" @ @@B|#@ @pN#@ 0TI| g] @] @ @ !>B&/y @   @ H  @ @ H } ;}"//} @{~_) Y"@ @{x @HD@OPI>lS_NVD_Uy @ @c!cp @ UVθ  PN*|9'G[@'@@B|B2Tl! oP= =@ @7Ⱦ? 0fs}s'@`hcVq%@@kSSX@CB|C!@XʏM@NrM`* Ļ @¼1!otc"л{ c6mlY6hMm}ocWMm+iۧEu^˺U6˚5h [8 @ @R>b @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! 
@ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)! @ @@B|50 @ PJ@/  @ п  @K19 @/ _# @ @RL"@ @  @ @@)ej}|IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/images/sample_trace.svg0000644000175000017500000016123300000000000023054 0ustar00coreycorey00000000000000 image/svg+xml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/images/sample_trace_details.svg0000644000175000017500000024445500000000000024571 0ustar00coreycorey00000000000000 image/svg+xml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/images/states.svg0000644000175000017500000011642400000000000021722 0ustar00coreycorey00000000000000 Ironic states enroll enroll verifying verifying enroll->verifying manage (via API) verifying->enroll fail manageable manageable verifying->manageable done cleaning cleaning manageable->cleaning provide (via API) manageable->cleaning clean (via API) inspecting inspecting manageable->inspecting inspect (via API) adopting adopting manageable->adopting adopt (via API) cleaning->manageable manage available available cleaning->available done clean failed clean failed cleaning->clean failed fail clean wait clean wait cleaning->clean wait wait inspecting->manageable done inspect failed inspect failed inspecting->inspect failed fail inspect wait inspect wait inspecting->inspect wait wait active active adopting->active done adopt failed adopt failed adopting->adopt failed fail available->manageable manage (via API) deploying deploying available->deploying active (via API) deploying->active done deploy failed deploy failed deploying->deploy failed fail wait call-back wait call-back deploying->wait call-back wait active->deploying rebuild (via API) deleting deleting 
active->deleting deleted (via API) rescuing rescuing active->rescuing rescue (via API) deleting->cleaning clean error error deleting->error error rescue rescue rescuing->rescue done rescue wait rescue wait rescuing->rescue wait wait rescue failed rescue failed rescuing->rescue failed fail error->deploying rebuild (via API) error->deleting deleted (via API) rescue->deleting deleted (via API) rescue->rescuing rescue (via API) unrescuing unrescuing rescue->unrescuing unrescue (via API) unrescuing->active done unrescue failed unrescue failed unrescuing->unrescue failed fail deploy failed->deploying rebuild (via API) deploy failed->deploying active (via API) deploy failed->deleting deleted (via API) wait call-back->deploying resume wait call-back->deleting deleted (via API) wait call-back->deploy failed fail clean failed->manageable manage (via API) clean wait->cleaning resume clean wait->clean failed fail clean wait->clean failed abort (via API) inspect failed->manageable manage (via API) inspect failed->inspecting inspect (via API) inspect wait->manageable done inspect wait->inspect failed fail inspect wait->inspect failed abort (via API) adopt failed->manageable manage (via API) adopt failed->adopting adopt (via API) rescue wait->deleting deleted (via API) rescue wait->rescuing resume rescue wait->rescue failed fail rescue wait->rescue failed abort (via API) rescue failed->deleting deleted (via API) rescue failed->rescuing rescue (via API) rescue failed->unrescuing unrescue (via API) unrescue failed->deleting deleted (via API) unrescue failed->rescuing rescue (via API) unrescue failed->unrescuing unrescue (via API) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/index.rst0000644000175000017500000000461300000000000020266 0ustar00coreycorey00000000000000================================== Welcome to Ironic's documentation! 
================================== Introduction ============ Ironic is an OpenStack project which provisions bare metal (as opposed to virtual) machines. It may be used independently or as part of an OpenStack Cloud, and integrates with the OpenStack Identity (keystone), Compute (nova), Network (neutron), Image (glance), and Object (swift) services. The Bare Metal service manages hardware through both common (eg. PXE and IPMI) and vendor-specific remote management protocols. It provides the cloud operator with a unified interface to a heterogeneous fleet of servers while also providing the Compute service with an interface that allows physical servers to be managed as though they were virtual machines. This documentation is continually updated and may not represent the state of the project at any specific prior release. To access documentation for a previous release of ironic, append the OpenStack release name to the URL; for example, the ``ocata`` release is available at https://docs.openstack.org/ironic/ocata/. Installation Guide ================== .. toctree:: :maxdepth: 2 install/index Upgrade Guide ============= .. toctree:: :maxdepth: 2 admin/upgrade-guide admin/upgrade-to-hardware-types User Guide ========== .. toctree:: :maxdepth: 2 user/index Administrator Guide =================== .. toctree:: :maxdepth: 2 admin/index Configuration Guide =================== .. toctree:: :maxdepth: 2 configuration/index Bare Metal API References ========================= Ironic's REST API has changed since its first release, and continues to evolve to meet the changing needs of the community. Here we provide a conceptual guide as well as more detailed reference documentation. .. toctree:: :maxdepth: 1 API Concept Guide API Reference (latest) API Version History Command References ================== Here are references for commands not elsewhere documented. .. toctree:: :maxdepth: 2 cli/index Contributor Guide ================= .. 
toctree:: :maxdepth: 2 contributor/index Release Notes ============= `Release Notes `_ .. only:: html Indices and tables ================== * :ref:`genindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1903992 ironic-14.0.1.dev163/doc/source/install/0000755000175000017500000000000000000000000020067 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/advanced.rst0000644000175000017500000000060000000000000022362 0ustar00coreycorey00000000000000.. _advanced: Advanced features ================= .. include:: include/local-boot-partition-images.inc .. include:: include/root-device-hints.inc .. include:: include/kernel-boot-parameters.inc .. include:: include/boot-mode.inc .. include:: include/disk-label.inc .. include:: include/trusted-boot.inc .. include:: include/notifications.inc .. include:: include/console.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configdrive.rst0000644000175000017500000001450100000000000023121 0ustar00coreycorey00000000000000.. _configdrive: Enabling the configuration drive (configdrive) ============================================== The Bare Metal service supports exposing a configuration drive image to the instances. The configuration drive is used to store instance-specific metadata and is present to the instance as a disk partition labeled ``config-2``. The configuration drive has a maximum size of 64MB. One use case for using the configuration drive is to expose a networking configuration when you do not use DHCP to assign IP addresses to instances. The configuration drive is usually used in conjunction with the Compute service, but the Bare Metal service also offers a standalone way of using it. 
The following sections will describe both methods. When used with Compute service ------------------------------ To enable the configuration drive for a specific request, pass ``--config-drive true`` parameter to the :command:`nova boot` command, for example:: nova boot --config-drive true --flavor baremetal --image test-image instance-1 It's also possible to enable the configuration drive automatically on all instances by configuring the ``OpenStack Compute service`` to always create a configuration drive by setting the following option in the ``/etc/nova/nova.conf`` file, for example:: [DEFAULT] ... force_config_drive=True In some cases, you may wish to pass a user customized script when deploying an instance. To do this, pass ``--user-data /path/to/file`` to the :command:`nova boot` command. When used standalone -------------------- When used without the Compute service, the operator needs to create a configuration drive and provide the file or HTTP URL to the Bare Metal service. For the format of the configuration drive, Bare Metal service expects a ``gzipped`` and ``base64`` encoded ISO 9660 [#]_ file with a ``config-2`` label. The :python-ironicclient-doc:`openstack baremetal client ` can generate a configuration drive in the `expected format`_. Just pass a directory path containing the files that will be injected into it via the ``--config-drive`` parameter of the ``openstack baremetal node deploy`` command, for example:: openstack baremetal node deploy $node_identifier --config-drive /dir/configdrive_files Starting with the Stein release and `ironicclient` 2.7.0, you can request building a configdrive on the server side by providing a JSON with keys ``meta_data``, ``user_data`` and ``network_data`` (all optional), e.g.: .. 
code-block:: bash openstack baremetal node deploy $node_identifier \ --config-drive '{"meta_data": {"hostname": "server1.cluster"}}' Configuration drive storage in an object store ---------------------------------------------- Under normal circumstances, the configuration drive can be stored in the Bare Metal service when the size is less than 64KB. Optionally, if the size is larger than 64KB there is support to store it in a swift endpoint. Both swift and radosgw use swift-style APIs. The following option in ``/etc/ironic/ironic.conf`` enables swift as an object store backend to store config drive. This uses the Identity service to establish a session between the Bare Metal service and the Object Storage service. :: [deploy] ... configdrive_use_object_store = True Use the following options in ``/etc/ironic/ironic.conf`` to enable radosgw. Credentials in the swift section are needed because radosgw will not use the Identity service and relies on radosgw's username and password authentication instead. :: [deploy] ... configdrive_use_object_store = True [swift] ... username = USERNAME password = PASSWORD auth_url = http://RADOSGW_IP:8000/auth/v1 If the :ref:`direct-deploy` is being used, edit ``/etc/glance/glance-api.conf`` to store the instance images in respective object store (radosgw or swift) as well:: [glance_store] ... swift_store_user = USERNAME swift_store_key = PASSWORD swift_store_auth_address = http://RADOSGW_OR_SWIFT_IP:PORT/auth/v1 Accessing the configuration drive data -------------------------------------- When the configuration drive is enabled, the Bare Metal service will create a partition on the instance disk and write the configuration drive image onto it. The configuration drive must be mounted before use. This is performed automatically by many tools, such as cloud-init and cloudbase-init. 
To mount it manually on a Linux distribution that supports accessing devices by labels, simply run the following:: mkdir -p /mnt/config mount /dev/disk/by-label/config-2 /mnt/config If the guest OS doesn't support accessing devices by labels, you can use other tools such as ``blkid`` to identify which device corresponds to the configuration drive and mount it, for example:: CONFIG_DEV=$(blkid -t LABEL="config-2" -odevice) mkdir -p /mnt/config mount $CONFIG_DEV /mnt/config .. [#] A configuration drive could also be a data block with a VFAT filesystem on it instead of ISO 9660. But it's unlikely that it would be needed since ISO 9660 is widely supported across operating systems. Cloud-init integration ---------------------- The configuration drive can be especially useful when used with `cloud-init `_, but in order to use it we should follow some rules: * ``Cloud-init`` data should be organized in the `expected format`_. * Since the Bare Metal service uses a disk partition as the configuration drive, it will only work with `cloud-init version >= 0.7.5 `_. * ``Cloud-init`` has a collection of data source modules, so when building the image with `disk-image-builder`_ we have to define ``DIB_CLOUD_INIT_DATASOURCES`` environment variable and set the appropriate sources to enable the configuration drive, for example:: DIB_CLOUD_INIT_DATASOURCES="ConfigDrive, OpenStack" disk-image-create -o fedora-cloud-image fedora baremetal For more information see `how to configure cloud-init data sources `_. .. _`expected format`: https://docs.openstack.org/nova/latest/user/vendordata.html .. _disk-image-builder: https://docs.openstack.org/diskimage-builder/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configure-cleaning.rst0000644000175000017500000000212000000000000024353 0ustar00coreycorey00000000000000.. 
_configure-cleaning: Configure the Bare Metal service for cleaning ============================================= .. note:: If you configured the Bare Metal service to do :ref:`automated_cleaning` (which is enabled by default), you will need to set the ``cleaning_network`` configuration option. #. Note the network UUID (the `id` field) of the network you created in :ref:`configure-networking` or another network you created for cleaning: .. code-block:: console $ openstack network list #. Configure the cleaning network UUID via the ``cleaning_network`` option in the Bare Metal service configuration file (``/etc/ironic/ironic.conf``). In the following, replace ``NETWORK_UUID`` with the UUID you noted in the previous step: .. code-block:: ini [neutron] cleaning_network = NETWORK_UUID #. Restart the Bare Metal service's ironic-conductor: .. code-block:: console Fedora/RHEL7/CentOS7/SUSE: sudo systemctl restart openstack-ironic-conductor Ubuntu: sudo service ironic-conductor restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configure-compute.rst0000644000175000017500000001277100000000000024264 0ustar00coreycorey00000000000000Configure the Compute service to use the Bare Metal service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Compute service needs to be configured to use the Bare Metal service's driver. The configuration file for the Compute service is typically located at ``/etc/nova/nova.conf``. .. note:: As of the Newton release, it is possible to have multiple nova-compute services running the ironic virtual driver (in nova) to provide redundancy. Bare metal nodes are mapped to the services via a hash ring. If a service goes down, the available bare metal nodes are remapped to different services. Once active, a node will stay mapped to the same nova-compute even when it goes down. 
The node is unable to be managed through the Compute API until the service responsible returns to an active state. The following configuration file must be modified on the Compute service's controller nodes and compute nodes. #. Change these configuration options in the Compute service configuration file (for example, ``/etc/nova/nova.conf``): .. code-block:: ini [default] # Defines which driver to use for controlling virtualization. # Enable the ironic virt driver for this compute instance. compute_driver=ironic.IronicDriver # Amount of memory in MB to reserve for the host so that it is always # available to host processes. # It is impossible to reserve any memory on bare metal nodes, so set # this to zero. reserved_host_memory_mb=0 [filter_scheduler] # Enables querying of individual hosts for instance information. # Not possible for bare metal nodes, so set it to False. track_instance_changes=False [scheduler] # This value controls how often (in seconds) the scheduler should # attempt to discover new hosts that have been added to cells. # If negative (the default), no automatic discovery will occur. # As each bare metal node is represented by a separate host, it has # to be discovered before the Compute service can deploy on it. # The value here has to be carefully chosen based on a compromise # between the enrollment speed and the load on the Compute scheduler. # The recommended value of 2 minutes matches how often the Compute # service polls the Bare Metal service for node information. discover_hosts_in_cells_interval=120 .. note:: The alternative to setting the ``discover_hosts_in_cells_interval`` option is to run the following command on any Compute controller node after each node is enrolled:: nova-manage cell_v2 discover_hosts --by-service #. Consider enabling the following option on controller nodes: .. 
code-block:: ini [filter_scheduler] # Enabling this option is beneficial as it reduces re-scheduling events # for ironic nodes when scheduling is based on resource classes, # especially for mixed hypervisor case with host_subset_size = 1. # However enabling it will also make packing of VMs on hypervisors # less dense even when scheduling weights are completely disabled. #shuffle_best_same_weighed_hosts = false #. Carefully consider the following option: .. code-block:: ini [compute] # This option will cause nova-compute to set itself to a disabled state # if a certain number of consecutive build failures occur. This will # prevent the scheduler from continuing to send builds to a compute # service that is consistently failing. In the case of bare metal # provisioning, however, a compute service is rarely the cause of build # failures. Furthermore, bare metal nodes, managed by a disabled # compute service, will be remapped to a different one. That may cause # the second compute service to also be disabled, and so on, until no # compute services are active. # If this is not the desired behavior, consider increasing this value or # setting it to 0 to disable this behavior completely. #consecutive_build_service_disable_threshold = 10 #. Change these configuration options in the ``ironic`` section. Replace: - ``IRONIC_PASSWORD`` with the password you chose for the ``ironic`` user in the Identity Service - ``IRONIC_NODE`` with the hostname or IP address of the ironic-api node - ``IDENTITY_IP`` with the IP of the Identity server .. 
code-block:: ini [ironic] # Ironic authentication type auth_type=password # Keystone API endpoint auth_url=http://IDENTITY_IP:5000/v3 # Ironic keystone project name project_name=service # Ironic keystone admin name username=ironic # Ironic keystone admin password password=IRONIC_PASSWORD # Ironic keystone project domain # or set project_domain_id project_domain_name=Default # Ironic keystone user domain # or set user_domain_id user_domain_name=Default #. On the Compute service's controller nodes, restart the ``nova-scheduler`` process: .. code-block:: console Fedora/RHEL7/CentOS7/SUSE: sudo systemctl restart openstack-nova-scheduler Ubuntu: sudo service nova-scheduler restart #. On the Compute service's compute nodes, restart the ``nova-compute`` process: .. code-block:: console Fedora/RHEL7/CentOS7/SUSE: sudo systemctl restart openstack-nova-compute Ubuntu: sudo service nova-compute restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configure-glance-images.rst0000644000175000017500000000546400000000000025305 0ustar00coreycorey00000000000000.. _image-requirements: Add images to the Image service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Build or download the user images as described in :doc:`creating-images`. #. Add the user images to the Image service Load all the images created in the below steps into the Image service, and note the image UUIDs in the Image service for each one as it is generated. For *partition images*: - Add the kernel and ramdisk images to the Image service: .. code-block:: console $ openstack image create my-kernel --public \ --disk-format aki --container-format aki --file my-image.vmlinuz Store the image uuid obtained from the above step as ``MY_VMLINUZ_UUID``. .. 
code-block:: console $ openstack image create my-image.initrd --public \ --disk-format ari --container-format ari --file my-image.initrd Store the image UUID obtained from the above step as ``MY_INITRD_UUID``. - Add the *my-image* to the Image service which is going to be the OS that the user is going to run. Also associate the above created images with this OS image. These two operations can be done by executing the following command: .. code-block:: console $ openstack image create my-image --public \ --disk-format qcow2 --container-format bare --property \ kernel_id=$MY_VMLINUZ_UUID --property \ ramdisk_id=$MY_INITRD_UUID --file my-image.qcow2 For *whole disk images*, skip uploading and configuring kernel and ramdisk images completely, proceed directly to uploading the main image: .. code-block:: console $ openstack image create my-whole-disk-image --public \ --disk-format qcow2 --container-format bare \ --file my-whole-disk-image.qcow2 .. warning:: The kernel/initramfs pair must not be set for whole disk images, otherwise they'll be mistaken for partition images. #. Build or download the deploy images The deploy images are used initially for preparing the server (creating disk partitions) before the actual OS can be deployed. There are several methods to build or download deploy images, please read the :ref:`deploy-ramdisk` section. #. Add the deploy images to the Image service Add the deployment kernel and ramdisk images to the Image service: .. code-block:: console $ openstack image create deploy-vmlinuz --public \ --disk-format aki --container-format aki \ --file ironic-python-agent.vmlinuz Store the image UUID obtained from the above step as ``DEPLOY_VMLINUZ_UUID``. .. code-block:: console $ openstack image create deploy-initrd --public \ --disk-format ari --container-format ari \ --file ironic-python-agent.initramfs Store the image UUID obtained from the above step as ``DEPLOY_INITRD_UUID``. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configure-glance-swift.rst0000644000175000017500000000577300000000000025177 0ustar00coreycorey00000000000000.. _image-store: Configure the Image service for temporary URLs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some drivers of the Baremetal service (in particular, any drivers using :ref:`direct-deploy` or :ref:`ansible-deploy` interfaces, and some virtual media drivers) require target user images to be available over clean HTTP(S) URL with no authentication involved (neither username/password-based, nor token-based). When using the Baremetal service integrated in OpenStack, this can be achieved by specific configuration of the Image service and Object Storage service as described below. #. Configure the Image service to have object storage as a backend for storing images. For more details, please refer to the Image service configuration guide. .. note:: When using Ceph+RadosGW for Object Storage service, images stored in Image service must be available over Object Storage service as well. #. Enable TempURLs for the Object Storage account used by the Image service for storing images in the Object Storage service. #. Check if TempURLs are enabled: .. code-block:: shell # executed under credentials of the user used by Image service # to access Object Storage service $ openstack object store account show +------------+---------------------------------------+ | Field | Value | +------------+---------------------------------------+ | Account | AUTH_bc39f1d9dcf9486899088007789ae643 | | Bytes | 536661727 | | Containers | 1 | | Objects | 19 | | properties | Temp-Url-Key='secret' | +------------+---------------------------------------+ #. If property ``Temp-Url-Key`` is set, note its value. #. If property ``Temp-Url-Key`` is not set, you have to configure it (``secret`` is used in the example below for the value): .. 
code-block:: shell $ openstack object store account set --property Temp-Url-Key=secret #. Optionally, configure the ironic-conductor service. The default configuration assumes that: #. the Object Storage service is implemented by :swift-doc:`swift <>`, #. the Object Storage service URL is available from the service catalog, #. the project, used by the Image service to access the Object Storage, is the same as the project, used by the Bare Metal service to access it, #. the container, used by the Image service, is called ``glance``. If any of these assumptions do not hold, you may want to change your configuration file (typically located at ``/etc/ironic/ironic.conf``), for example: .. code-block:: ini [glance] swift_endpoint_url = http://openstack/swift swift_account = AUTH_bc39f1d9dcf9486899088007789ae643 swift_container = glance swift_temp_url_key = secret #. (Re)start the ironic-conductor service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configure-identity.rst0000644000175000017500000000672700000000000024445 0ustar00coreycorey00000000000000Configure the Identity service for the Bare Metal service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Create the Bare Metal service user (for example, ``ironic``). The service uses this to authenticate with the Identity service. Use the ``service`` tenant and give the user the ``admin`` role: .. code-block:: console $ openstack user create --password IRONIC_PASSWORD \ --email ironic@example.com ironic $ openstack role add --project service --user ironic admin #. You must register the Bare Metal service with the Identity service so that other OpenStack services can locate it. To register the service: .. code-block:: console $ openstack service create --name ironic --description \ "Ironic baremetal provisioning service" baremetal #. 
Use the ``id`` property that is returned from the Identity service when registering the service (above), to create the endpoint, and replace ``IRONIC_NODE`` with your Bare Metal service's API node: .. code-block:: console $ openstack endpoint create --region RegionOne \ baremetal admin http://$IRONIC_NODE:6385 $ openstack endpoint create --region RegionOne \ baremetal public http://$IRONIC_NODE:6385 $ openstack endpoint create --region RegionOne \ baremetal internal http://$IRONIC_NODE:6385 #. You may delegate limited privileges related to the Bare Metal service to your Users by creating Roles with the OpenStack Identity service. By default, the Bare Metal service expects the "baremetal_admin" and "baremetal_observer" Roles to exist, in addition to the default "admin" Role. There is no negative consequence if you choose not to create these Roles. They can be created with the following commands: .. code-block:: console $ openstack role create baremetal_admin $ openstack role create baremetal_observer If you choose to customize the names of Roles used with the Bare Metal service, do so by changing the "is_member", "is_observer", and "is_admin" policy settings in ``/etc/ironic/policy.json``. More complete documentation on managing Users and Roles within your OpenStack deployment are outside the scope of this document, but may be found :keystone-doc:`here `. #. You can further restrict access to the Bare Metal service by creating a separate "baremetal" Project, so that Bare Metal resources (Nodes, Ports, etc) are only accessible to members of this Project: .. code-block:: console $ openstack project create baremetal At this point, you may grant read-only access to the Bare Metal service API without granting any other access by issuing the following commands: .. 
code-block:: console $ openstack user create \ --domain default --project-domain default --project baremetal \ --password PASSWORD USERNAME $ openstack role add \ --user-domain default --project-domain default --project baremetal \ --user USERNAME baremetal_observer #. Further documentation is available elsewhere for the ``openstack`` :python-openstackclient-doc:`command-line client ` and the :keystone-doc:`Identity ` service. A :doc:`policy.json.sample ` file, which enumerates the service's default policies, is provided for your convenience with the Bare Metal Service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configure-integration.rst0000644000175000017500000000062500000000000025126 0ustar00coreycorey00000000000000========================================= Integration with other OpenStack services ========================================= .. toctree:: :maxdepth: 1 configure-identity configure-compute configure-networking configure-ipv6-networking configure-glance-swift enabling-https configure-cleaning configure-tenant-networks.rst configure-glance-images configure-nova-flavors ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configure-ipmi.rst0000644000175000017500000000612500000000000023542 0ustar00coreycorey00000000000000Configuring IPMI support ------------------------ Installing ipmitool command ~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable one of the drivers that use IPMI_ protocol for power and management actions (for example, ``ipmi``), the ``ipmitool`` command must be present on the service node(s) where ``ironic-conductor`` is running. On most distros, it is provided as part of the ``ipmitool`` package. Source code is available at http://ipmitool.sourceforge.net/. .. 
warning:: Certain distros, notably Mac OS X and SLES, install ``openipmi`` instead of ``ipmitool`` by default. This driver is not compatible with ``openipmi`` as it relies on error handling options not provided by this tool. Please refer to the :doc:`/admin/drivers/ipmitool` for information on how to use IPMItool-based drivers. Validation and troubleshooting ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Check that you can connect to, and authenticate with, the IPMI controller in your bare metal server by running ``ipmitool``:: ipmitool -I lanplus -H -U -P chassis power status where ```` is the IP of the IPMI controller you want to access. This is not the bare metal node's main IP. The IPMI controller should have its own unique IP. If the above command doesn't return the power status of the bare metal server, check that - ``ipmitool`` is installed and is available via the ``$PATH`` environment variable. - The IPMI controller on your bare metal server is turned on. - The IPMI controller credentials and IP address passed in the command are correct. - The conductor node has a route to the IPMI controller. This can be checked by just pinging the IPMI controller IP from the conductor node. IPMI configuration ~~~~~~~~~~~~~~~~~~ If there are slow or unresponsive BMCs in the environment, the ``min_command_interval`` configuration option in the ``[ipmi]`` section may need to be raised. The default is fairly conservative, as setting this timeout too low can cause older BMCs to crash and require a hard-reset. .. _ipmi-sensor-data: Collecting sensor data ~~~~~~~~~~~~~~~~~~~~~~ Bare Metal service supports sending IPMI sensor data to Telemetry with certain hardware types, such as ``ipmi``, ``ilo`` and ``irmc``. By default, support for sending IPMI sensor data to Telemetry is disabled. If you want to enable it, you should make the following two changes in ``ironic.conf``: .. 
code-block:: ini [conductor] send_sensor_data = true [oslo_messaging_notifications] driver = messagingv2 If you want to customize the sensor types which will be sent to Telemetry, change the ``send_sensor_data_types`` option. For example, the below settings will send information about temperature, fan, voltage from sensors to the Telemetry service: .. code-block:: ini send_sensor_data_types=Temperature,Fan,Voltage Supported sensor types are defined by the Telemetry service, currently these are ``Temperature``, ``Fan``, ``Voltage``, ``Current``. Special value ``All`` (the default) designates all supported sensor types. .. _IPMI: https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configure-ipv6-networking.rst0000644000175000017500000001337300000000000025660 0ustar00coreycorey00000000000000Configuring services for bare metal provisioning using IPv6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Use of IPv6 addressing for baremetal provisioning requires additional configuration. This page covers the IPv6 specifics only. Please refer to :doc:`/install/configure-tenant-networks` and :doc:`/install/configure-networking` for general networking configuration. Configure ironic PXE driver for provisioning using IPv6 addressing ================================================================== The ironic PXE driver operates in either IPv4 or IPv6 mode (IPv4 is the default). To enable IPv6 mode, set the ``[pxe]/ip_version`` option in the Bare Metal Service's configuration file (``/etc/ironic/ironic.conf``) to ``6``. .. Note:: Support for dual mode IPv4 and IPv6 operations is planned for a future version of ironic. Provisioning with IPv6 stateless addressing ------------------------------------------- When using stateless addressing DHCPv6 does not provide addresses to the client. 
DHCPv6 however provides other configuration via DHCPv6 options such as the bootfile-url and bootfile-parameters. Once the PXE driver is set to operate in IPv6 mode no further configuration is required in the Baremetal Service. Creating networks and subnets in the Networking Service ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When creating the Baremetal Service network(s) and subnet(s) in the Networking Service's, subnets should have ``ipv6-address-mode`` set to ``dhcpv6-stateless`` and ``ip-version`` set to ``6``. Depending on whether a router in the Networking Service is providing RA's (Router Advertisements) or not, the ``ipv6-ra-mode`` for the subnet(s) should either be set to ``dhcpv6-stateless`` or be left unset. .. Note:: If ``ipv6-ra-mode`` is left unset, an external router on the network is expected to provide RA's with the appropriate flags set for automatic addressing and other configuration. Provisioning with IPv6 stateful addressing ------------------------------------------ When using stateful addressing DHCPv6 is providing both addresses and other configuration via DHCPv6 options such as the bootfile-url and bootfile- parameters. The "identity-association" (IA) construct used by DHCPv6 is challenging when booting over the network. Firmware, and ramdisks typically end up using different DUID/IAID combinations and it is not always possible for one chain- booting stage to release its address before giving control to the next step. In case the DHCPv6 server is configured with static reservations only the result is that booting will fail because the DHCPv6 server has no addresses available. To get past this issue either configure the DHCPv6 server with multiple address reservations for each host, or use a dynamic range. .. Note:: Support for multiple address reservations requires dnsmasq version 2.81 or later. Some distributions may backport this feature to earlier dnsmasq version as part of the packaging, check the distributions release notes. 
If a different (not dnsmasq) DHCPv6 server backend is used with the Networking service, use of multiple address reservations might not work. Using the ``flat`` network interface ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Due to the "identity-association" challenges with DHCPv6 provisioning using the ``flat`` network interface is not recommended. When ironic operates with the ``flat`` network interface the server instance port is used for provisioning and other operations. Ironic will not use multiple address reservations in this scenario. Because of this **it will not work in most cases**. Using the ``neutron`` network interface ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When using the ``neutron`` network interface the Baremetal Service will allocate multiple IPv6 addresses (4 addresses per port by default) on the service networks used for provisioning, cleaning, rescue and introspection. The number of addresses allocated can be controlled via the ``[neutron]/dhcpv6_stateful_address_count`` option in the Bare Metal Service's configuration file (``/etc/ironic/ironic.conf``). Using multiple address reservations ensures that the DHCPv6 server can lease addresses to each step. To enable IPv6 provisioning on neutron *flat* provider networks with no switch management, the ``local_link_connection`` field of baremetal ports must be set to ``{'network_type': 'unmanaged'}``. The following example shows how to set the local_link_connection for operation on unmanaged networks:: openstack baremetal port set \ --local-link-connection network_type=unmanaged The use of multiple IPv6 addresses must also be enabled in the Networking Service's dhcp agent configuration (``/etc/neutron/dhcp_agent.ini``) by setting the option ``[DEFAULT]/dnsmasq_enable_addr6_list`` to ``True`` (default ``False`` in Ussuri release). .. Note:: Support for multiple IPv6 address reservations in the dnsmasq backend was added to the Networking Service Ussuri release. It was also backported to the stable Train release. 
Creating networks and subnets in the Networking Service ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When creating the ironic service network(s) and subnet(s) in the Networking Service, subnets should have ``ipv6-address-mode`` set to ``dhcpv6-stateful`` and ``ip-version`` set to ``6``. Depending on whether a router in the Networking Service is providing RA's (Router Advertisements) or not, the ``ipv6-ra-mode`` for the subnet(s) should be set to either ``dhcpv6-stateful`` or be left unset. .. Note:: If ``ipv6-ra-mode`` is left unset, an external router on the network is expected to provide RA's with the appropriate flags set for managed addressing and other configuration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configure-iscsi.rst0000644000175000017500000000025000000000000023707 0ustar00coreycorey00000000000000Configuring iSCSI-based drivers ------------------------------- Ensure that the ``qemu-img`` and ``iscsiadm`` tools are installed on the **ironic-conductor** host(s). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configure-networking.rst0000644000175000017500000001227400000000000024775 0ustar00coreycorey00000000000000.. _configure-networking: Configure the Networking service for bare metal provisioning ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You need to configure Networking so that the bare metal server can communicate with the Networking service for DHCP, PXE boot and other requirements. This section covers configuring Networking for a single flat network for bare metal provisioning. It is recommended to use the baremetal ML2 mechanism driver and L2 agent for proper integration with the Networking service. 
Documentation regarding installation and configuration of the baremetal mechanism driver and L2 agent is available :networking-baremetal-doc:`here `. For use with :neutron-doc:`routed networks ` the baremetal ML2 components are required. .. Note:: When the baremetal ML2 components are *not* used, ports in the Networking service will have status: ``DOWN``, and binding_vif_type: ``binding_failed``. This was always the status for Bare Metal service ``flat`` network interface ports prior to the introduction of the baremetal ML2 integration. For a non-routed network, bare metal servers can still be deployed and are functional, despite this port binding state in the Networking service. You will also need to provide Bare Metal service with the MAC address(es) of each node that it is provisioning; Bare Metal service in turn will pass this information to Networking service for DHCP and PXE boot configuration. An example of this is shown in the :ref:`enrollment` section. #. Install the networking-baremetal ML2 mechanism driver and L2 agent in the Networking service. #. Edit ``/etc/neutron/plugins/ml2/ml2_conf.ini`` and modify these: .. code-block:: ini [ml2] type_drivers = flat tenant_network_types = flat mechanism_drivers = openvswitch,baremetal [ml2_type_flat] flat_networks = physnet1 [securitygroup] firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver enable_security_group = True [ovs] bridge_mappings = physnet1:br-eth2 # Replace eth2 with the interface on the neutron node which you # are using to connect to the bare metal server #. Restart the ``neutron-server`` service, to load the new configuration. #. Create and edit ``/etc/neutron/plugins/ml2/ironic_neutron_agent.ini`` and add the required configuration. For example: .. 
code-block:: ini [ironic] project_domain_name = Default project_name = service user_domain_name = Default password = password username = ironic auth_url = http://identity-server.example.com/identity auth_type = password region_name = RegionOne #. Make sure the ``ironic-neutron-agent`` service is started. #. If neutron-openvswitch-agent runs with ``ovs_neutron_plugin.ini`` as the input config-file, edit ``ovs_neutron_plugin.ini`` to configure the bridge mappings by adding the [ovs] section described in the previous step, and restart the neutron-openvswitch-agent. #. Add the integration bridge to Open vSwitch: .. code-block:: console $ ovs-vsctl add-br br-int #. Create the br-eth2 network bridge to handle communication between the OpenStack services (and the Bare Metal services) and the bare metal nodes using eth2. Replace eth2 with the interface on the network node which you are using to connect to the Bare Metal service: .. code-block:: console $ ovs-vsctl add-br br-eth2 $ ovs-vsctl add-port br-eth2 eth2 #. Restart the Open vSwitch agent: .. code-block:: console # service neutron-plugin-openvswitch-agent restart #. On restarting the Networking service Open vSwitch agent, the veth pair between the bridges br-int and br-eth2 is automatically created. Your Open vSwitch bridges should look something like this after following the above steps: .. code-block:: console $ ovs-vsctl show Bridge br-int fail_mode: secure Port "int-br-eth2" Interface "int-br-eth2" type: patch options: {peer="phy-br-eth2"} Port br-int Interface br-int type: internal Bridge "br-eth2" Port "phy-br-eth2" Interface "phy-br-eth2" type: patch options: {peer="int-br-eth2"} Port "eth2" Interface "eth2" Port "br-eth2" Interface "br-eth2" type: internal ovs_version: "2.3.0" #. Create the flat network on which you are going to launch the instances: .. 
code-block:: console $ openstack network create --project $TENANT_ID sharednet1 --share \ --provider-network-type flat --provider-physical-network physnet1 #. Create the subnet on the newly created network: .. code-block:: console $ openstack subnet create $SUBNET_NAME --network sharednet1 \ --subnet-range $NETWORK_CIDR --ip-version 4 --gateway $GATEWAY_IP \ --allocation-pool start=$START_IP,end=$END_IP --dhcp ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configure-nova-flavors.rst0000644000175000017500000001130500000000000025215 0ustar00coreycorey00000000000000.. _flavor-creation: Create flavors for use with the Bare Metal service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You'll need to create a special bare metal flavor in the Compute service. The flavor is mapped to the bare metal node through the node's ``resource_class`` field (available starting with Bare Metal API version 1.21). A flavor can request *exactly one* instance of a bare metal resource class. Note that when creating the flavor, it's useful to add the ``RAM_MB`` and ``CPU`` properties as a convenience to users, although they are not used for scheduling. The ``DISK_GB`` property is also not used for scheduling, but is still used to determine the root partition size. #. Change these to match your hardware: .. code-block:: console $ RAM_MB=1024 $ CPU=2 $ DISK_GB=100 #. Create the bare metal flavor by executing the following command: .. code-block:: console $ openstack flavor create --ram $RAM_MB --vcpus $CPU --disk $DISK_GB \ my-baremetal-flavor .. note:: You can add ``--id `` to specify an ID for the flavor. See the :python-openstackclient-doc:`docs on this command ` for other options that may be specified. After creation, associate each flavor with one custom resource class. 
The name of a custom resource class that corresponds to a node's resource class (in the Bare Metal service) is: * the bare metal node's resource class all upper-cased * prefixed with ``CUSTOM_`` * all punctuation replaced with an underscore For example, if the resource class is named ``baremetal-small``, associate the flavor with this custom resource class via: .. code-block:: console $ openstack flavor set --property resources:CUSTOM_BAREMETAL_SMALL=1 my-baremetal-flavor Another set of flavor properties must be used to disable scheduling based on standard properties for a bare metal flavor: .. code-block:: console $ openstack flavor set --property resources:VCPU=0 my-baremetal-flavor $ openstack flavor set --property resources:MEMORY_MB=0 my-baremetal-flavor $ openstack flavor set --property resources:DISK_GB=0 my-baremetal-flavor Example ------- If you want to define a class of nodes called ``baremetal.with-GPU``, start with tagging some nodes with it: .. code-block:: console $ openstack --os-baremetal-api-version 1.21 baremetal node set $NODE_UUID \ --resource-class baremetal.with-GPU .. warning:: It is possible to **add** a resource class to ``active`` nodes, but it is not possible to **replace** an existing resource class on them. Then you can update your flavor to request the resource class instead of the standard properties: .. code-block:: console $ openstack flavor set --property resources:CUSTOM_BAREMETAL_WITH_GPU=1 my-baremetal-flavor $ openstack flavor set --property resources:VCPU=0 my-baremetal-flavor $ openstack flavor set --property resources:MEMORY_MB=0 my-baremetal-flavor $ openstack flavor set --property resources:DISK_GB=0 my-baremetal-flavor Note how ``baremetal.with-GPU`` in the node's ``resource_class`` field becomes ``CUSTOM_BAREMETAL_WITH_GPU`` in the flavor's properties. .. 
_scheduling-traits: Scheduling based on traits -------------------------- Starting with the Queens release, the Compute service supports scheduling based on qualitative attributes using traits. Starting with Bare Metal REST API version 1.37, it is possible to assign a list of traits to each bare metal node. Traits assigned to a bare metal node will be assigned to the corresponding resource provider in the Compute service placement API. When creating a flavor in the Compute service, required traits may be specified via flavor properties. The Compute service will then schedule instances only to bare metal nodes with all of the required traits. Traits can be either standard or custom. Standard traits are listed in the `os_traits library `_. Custom traits must meet the following requirements: * prefixed with ``CUSTOM_`` * contain only upper case characters A to Z, digits 0 to 9, or underscores * no longer than 255 characters in length A bare metal node can have a maximum of 50 traits. Example ^^^^^^^ To add the standard trait ``HW_CPU_X86_VMX`` and a custom trait ``CUSTOM_TRAIT1`` to a node: .. code-block:: console $ openstack --os-baremetal-api-version 1.37 baremetal node add trait \ $NODE_UUID CUSTOM_TRAIT1 HW_CPU_X86_VMX Then, update the flavor to require these traits: .. code-block:: console $ openstack flavor set --property trait:CUSTOM_TRAIT1=required my-baremetal-flavor $ openstack flavor set --property trait:HW_CPU_X86_VMX=required my-baremetal-flavor ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configure-pxe.rst0000644000175000017500000004147300000000000023405 0ustar00coreycorey00000000000000Configuring PXE and iPXE ======================== DHCP server setup ----------------- A DHCP server is required by PXE/iPXE client. You need to follow steps below. #. 
Set the ``[dhcp]/dhcp_provider`` to ``neutron`` in the Bare Metal Service's configuration file (``/etc/ironic/ironic.conf``): .. note:: Refer :doc:`/install/configure-tenant-networks` for details. The ``dhcp_provider`` configuration is already set by the configuration defaults, and when you create subnet, DHCP is also enabled if you do not add any dhcp options at "openstack subnet create" command. #. Enable DHCP in the subnet of PXE network. #. Set the ip address range in the subnet for DHCP. .. note:: Refer :doc:`/install/configure-networking` for details about the two precedent steps. #. Connect the openstack DHCP agent to the external network through the OVS bridges and the interface ``eth2``. .. note:: Refer :doc:`/install/configure-networking` for details. You do not require this part if br-int, br-eth2 and eth2 are already connected. #. Configure the host ip at ``br-eth2``. If it locates at ``eth2``, do below:: ip addr del 192.168.2.10/24 dev eth2 ip addr add 192.168.2.10/24 dev br-eth2 .. note:: Replace eth2 with the interface on the network node which you are using to connect to the Bare Metal service. TFTP server setup ----------------- In order to deploy instances via PXE, a TFTP server needs to be set up on the Bare Metal service nodes which run the ``ironic-conductor``. #. Make sure the tftp root directory exist and can be written to by the user the ``ironic-conductor`` is running as. For example:: sudo mkdir -p /tftpboot sudo chown -R ironic /tftpboot #. Install tftp server: Ubuntu:: sudo apt-get install xinetd tftpd-hpa RHEL7/CentOS7:: sudo yum install tftp-server xinetd Fedora:: sudo dnf install tftp-server xinetd SUSE:: sudo zypper install tftp xinetd #. Using xinetd to provide a tftp server setup to serve ``/tftpboot``. 
Create or edit ``/etc/xinetd.d/tftp`` as below:: service tftp { protocol = udp port = 69 socket_type = dgram wait = yes user = root server = /usr/sbin/in.tftpd server_args = -v -v -v -v -v --map-file /tftpboot/map-file /tftpboot disable = no # This is a workaround for Fedora, where TFTP will listen only on # IPv6 endpoint, if IPv4 flag is not used. flags = IPv4 } and restart the ``xinetd`` service: Ubuntu:: sudo service xinetd restart Fedora/RHEL7/CentOS7/SUSE:: sudo systemctl restart xinetd .. note:: In certain environments the network's MTU may cause TFTP UDP packets to get fragmented. Certain PXE firmwares struggle to reconstruct the fragmented packets which can cause significant slow down or even prevent the server from PXE booting. In order to avoid this, TFTPd provides an option to limit the packet size so that it they do not get fragmented. To set this additional option in the server_args above:: --blocksize #. Create a map file in the tftp boot directory (``/tftpboot``):: echo 're ^(/tftpboot/) /tftpboot/\2' > /tftpboot/map-file echo 're ^/tftpboot/ /tftpboot/' >> /tftpboot/map-file echo 're ^(^/) /tftpboot/\1' >> /tftpboot/map-file echo 're ^([^/]) /tftpboot/\1' >> /tftpboot/map-file UEFI PXE - Grub setup --------------------- In order to deploy instances with PXE on bare metal nodes which support UEFI, perform these additional steps on the ironic conductor node to configure the PXE UEFI environment. #. Install Grub2 and shim packages: Ubuntu (16.04LTS and later):: sudo apt-get install grub-efi-amd64-signed shim-signed RHEL7/CentOS7:: sudo yum install grub2-efi shim Fedora:: sudo dnf install grub2-efi shim SUSE:: sudo zypper install grub2-x86_64-efi shim #. 
Copy grub and shim boot loader images to ``/tftpboot`` directory: Ubuntu (16.04LTS and later):: sudo cp /usr/lib/shim/shim.efi.signed /tftpboot/bootx64.efi sudo cp /usr/lib/grub/x86_64-efi-signed/grubnetx64.efi.signed /tftpboot/grubx64.efi Fedora:: sudo cp /boot/efi/EFI/fedora/shim.efi /tftpboot/bootx64.efi sudo cp /boot/efi/EFI/fedora/grubx64.efi /tftpboot/grubx64.efi RHEL7/CentOS7:: sudo cp /boot/efi/EFI/centos/shim.efi /tftpboot/bootx64.efi sudo cp /boot/efi/EFI/centos/grubx64.efi /tftpboot/grubx64.efi SUSE:: sudo cp /usr/lib64/efi/shim.efi /tftpboot/bootx64.efi sudo cp /usr/lib/grub2/x86_64-efi/grub.efi /tftpboot/grubx64.efi #. Create master grub.cfg: Ubuntu: Create grub.cfg under ``/tftpboot/grub`` directory:: GRUB_DIR=/tftpboot/grub Fedora: Create grub.cfg under ``/tftpboot/EFI/fedora`` directory:: GRUB_DIR=/tftpboot/EFI/fedora RHEL7/CentOS7: Create grub.cfg under ``/tftpboot/EFI/centos`` directory:: GRUB_DIR=/tftpboot/EFI/centos SUSE: Create grub.cfg under ``/tftpboot/boot/grub`` directory:: GRUB_DIR=/tftpboot/boot/grub Create directory ``GRUB_DIR``:: sudo mkdir -p $GRUB_DIR This file is used to redirect grub to baremetal node specific config file. It redirects it to specific grub config file based on DHCP IP assigned to baremetal node. .. literalinclude:: ../../../ironic/drivers/modules/master_grub_cfg.txt Change the permission of grub.cfg:: sudo chmod 644 $GRUB_DIR/grub.cfg #. Update the bare metal node with ``boot_mode:uefi`` capability in node's properties field. See :ref:`boot_mode_support` for details. #. Make sure that bare metal node is configured to boot in UEFI boot mode and boot device is set to network/pxe. .. note:: Some drivers, e.g. ``ilo``, ``irmc`` and ``redfish``, support automatic setting of the boot mode during deployment. This step is not required for them. Please check :doc:`../admin/drivers` for information on whether your driver requires manual UEFI configuration. 
Legacy BIOS - Syslinux setup ---------------------------- In order to deploy instances with PXE on bare metal using Legacy BIOS boot mode, perform these additional steps on the ironic conductor node. #. Install the syslinux package with the PXE boot images: Ubuntu (16.04LTS and later):: sudo apt-get install syslinux-common pxelinux RHEL7/CentOS7:: sudo yum install syslinux-tftpboot Fedora:: sudo dnf install syslinux-tftpboot SUSE:: sudo zypper install syslinux #. Copy the PXE image to ``/tftpboot``. The PXE image might be found at [1]_: Ubuntu (16.04LTS and later):: sudo cp /usr/lib/PXELINUX/pxelinux.0 /tftpboot RHEL7/CentOS7/SUSE:: sudo cp /usr/share/syslinux/pxelinux.0 /tftpboot #. If whole disk images need to be deployed via PXE-netboot, copy the chain.c32 image to ``/tftpboot`` to support it: Ubuntu (16.04LTS and later):: sudo cp /usr/lib/syslinux/modules/bios/chain.c32 /tftpboot Fedora:: sudo cp /boot/extlinux/chain.c32 /tftpboot RHEL7/CentOS7/SUSE:: sudo cp /usr/share/syslinux/chain.c32 /tftpboot/ #. If the version of syslinux is **greater than** 4 we also need to make sure that we copy the library modules into the ``/tftpboot`` directory [2]_ [1]_. For example, for Ubuntu run:: sudo cp /usr/lib/syslinux/modules/*/ldlinux.* /tftpboot #. Update the bare metal node with ``boot_mode:bios`` capability in node's properties field. See :ref:`boot_mode_support` for details. #. Make sure that bare metal node is configured to boot in Legacy BIOS boot mode and boot device is set to network/pxe. .. [1] On **Fedora/RHEL** the ``syslinux-tftpboot`` package already installs the library modules and PXE image at ``/tftpboot``. If the TFTP server is configured to listen to a different directory you should copy the contents of ``/tftpboot`` to the configured directory .. 
[2] http://www.syslinux.org/wiki/index.php/Library_modules iPXE setup ---------- If you will be using iPXE to boot instead of PXE, iPXE needs to be set up on the Bare Metal service node(s) where ``ironic-conductor`` is running. #. Make sure these directories exist and can be written to by the user the ``ironic-conductor`` is running as. For example:: sudo mkdir -p /tftpboot sudo mkdir -p /httpboot sudo chown -R ironic /tftpboot sudo chown -R ironic /httpboot #. Create a map file in the tftp boot directory (``/tftpboot``):: echo 'r ^([^/]) /tftpboot/\1' > /tftpboot/map-file echo 'r ^(/tftpboot/) /tftpboot/\2' >> /tftpboot/map-file .. _HTTP server: #. Set up TFTP and HTTP servers. These servers should be running and configured to use the local /tftpboot and /httpboot directories respectively, as their root directories. (Setting up these servers is outside the scope of this install guide.) These root directories need to be mounted locally to the ``ironic-conductor`` services, so that the services can access them. The Bare Metal service's configuration file (/etc/ironic/ironic.conf) should be edited accordingly to specify the TFTP and HTTP root directories and server addresses. For example: .. code-block:: ini [pxe] # Ironic compute node's tftp root path. (string value) tftp_root=/tftpboot # IP address of Ironic compute node's tftp server. (string # value) tftp_server=192.168.0.2 [deploy] # Ironic compute node's http root path. (string value) http_root=/httpboot # Ironic compute node's HTTP server URL. Example: # http://192.1.2.3:8080 (string value) http_url=http://192.168.0.2:8080 #. Install the iPXE package with the boot images: Ubuntu:: apt-get install ipxe RHEL7/CentOS7:: yum install ipxe-bootimgs Fedora:: dnf install ipxe-bootimgs .. note:: SUSE does not provide a package containing iPXE boot images. 
If you are using SUSE or if the packaged version of the iPXE boot image doesn't work, you can download a prebuilt one from http://boot.ipxe.org or build one image from source, see http://ipxe.org/download for more information. #. Copy the iPXE boot image (``undionly.kpxe`` for **BIOS** and ``ipxe.efi`` for **UEFI**) to ``/tftpboot``. The binary might be found at: Ubuntu:: cp /usr/lib/ipxe/{undionly.kpxe,ipxe.efi} /tftpboot Fedora/RHEL7/CentOS7:: cp /usr/share/ipxe/{undionly.kpxe,ipxe.efi} /tftpboot #. Enable/Configure iPXE in the Bare Metal Service's configuration file (/etc/ironic/ironic.conf): .. code-block:: ini [pxe] # Enable iPXE boot. (boolean value) ipxe_enabled=True # Neutron bootfile DHCP parameter. (string value) pxe_bootfile_name=undionly.kpxe # Bootfile DHCP parameter for UEFI boot mode. (string value) uefi_pxe_bootfile_name=ipxe.efi # Template file for PXE configuration. (string value) pxe_config_template=$pybasedir/drivers/modules/ipxe_config.template # Template file for PXE configuration for UEFI boot loader. # (string value) uefi_pxe_config_template=$pybasedir/drivers/modules/ipxe_config.template .. note:: The ``[pxe]ipxe_enabled`` option has been deprecated and will be removed in the T* development cycle. Users should instead consider use of the ``ipxe`` boot interface. The same default use of iPXE functionality can be achieved by setting the ``[DEFAULT]default_boot_interface`` option to ``ipxe``. #. It is possible to configure the Bare Metal service in such a way that nodes will boot into the deploy image directly from Object Storage. Doing this avoids having to cache the images on the ironic-conductor host and serving them via the ironic-conductor's `HTTP server`_. This can be done if: #. the Image Service is used for image storage; #. the images in the Image Service are internally stored in Object Storage; #. the Object Storage supports generating temporary URLs for accessing objects stored in it. 
Both the OpenStack Swift and RADOS Gateway provide support for this. * See :doc:`/admin/radosgw` on how to configure the Bare Metal Service with RADOS Gateway as the Object Storage. Configure this by setting the ``[pxe]/ipxe_use_swift`` configuration option to ``True`` as follows: .. code-block:: ini [pxe] # Download deploy images directly from swift using temporary # URLs. If set to false (default), images are downloaded to # the ironic-conductor node and served over its local HTTP # server. Applicable only when 'ipxe_enabled' option is set to # true. (boolean value) ipxe_use_swift=True Although the `HTTP server`_ still has to be deployed and configured (as it will serve iPXE boot script and boot configuration files for nodes), such configuration will shift some load from ironic-conductor hosts to the Object Storage service which can be scaled horizontally. Note that when SSL is enabled on the Object Storage service you have to ensure that iPXE firmware on the nodes can indeed boot from generated temporary URLs that use HTTPS protocol. #. Restart the ``ironic-conductor`` process: Fedora/RHEL7/CentOS7/SUSE:: sudo systemctl restart openstack-ironic-conductor Ubuntu:: sudo service ironic-conductor restart PXE multi-architecture setup ---------------------------- It is possible to deploy servers of different architecture by one conductor. To use this feature, architecture-specific boot and template files must be configured using the configuration options ``[pxe]pxe_bootfile_name_by_arch`` and ``[pxe]pxe_config_template_by_arch`` respectively, in the Bare Metal service's configuration file (/etc/ironic/ironic.conf). These two options are dictionary values; the key is the architecture and the value is the boot (or config template) file. A node's ``cpu_arch`` property is used as the key to get the appropriate boot file and template file. 
If the node's ``cpu_arch`` is not in the dictionary, the configuration options (in [pxe] group) ``pxe_bootfile_name``, ``pxe_config_template``, ``uefi_pxe_bootfile_name`` and ``uefi_pxe_config_template`` will be used instead. In the following example, since 'x86' and 'x86_64' keys are not in the ``pxe_bootfile_name_by_arch`` or ``pxe_config_template_by_arch`` options, x86 and x86_64 nodes will be deployed by 'pxelinux.0' or 'bootx64.efi', depending on the node's ``boot_mode`` capability ('bios' or 'uefi'). However, aarch64 nodes will be deployed by 'grubaa64.efi', and ppc64 nodes by 'bootppc64':: [pxe] # Bootfile DHCP parameter. (string value) pxe_bootfile_name=pxelinux.0 # On ironic-conductor node, template file for PXE # configuration. (string value) pxe_config_template = $pybasedir/drivers/modules/pxe_config.template # Bootfile DHCP parameter for UEFI boot mode. (string value) uefi_pxe_bootfile_name=bootx64.efi # On ironic-conductor node, template file for PXE # configuration for UEFI boot loader. (string value) uefi_pxe_config_template=$pybasedir/drivers/modules/pxe_grub_config.template # Bootfile DHCP parameter per node architecture. (dict value) pxe_bootfile_name_by_arch=aarch64:grubaa64.efi,ppc64:bootppc64 # On ironic-conductor node, template file for PXE # configuration per node architecture. For example: # aarch64:/opt/share/grubaa64_pxe_config.template (dict value) pxe_config_template_by_arch=aarch64:pxe_grubaa64_config.template,ppc64:pxe_ppc64_config.template .. note:: The grub implementation may vary on different architecture, you may need to tweak the pxe config template for a specific arch. For example, grubaa64.efi shipped with CentoOS7 does not support ``linuxefi`` and ``initrdefi`` commands, you'll need to switch to use ``linux`` and ``initrd`` command instead. PXE timeouts tuning ------------------- Because of its reliance on UDP-based protocols (DHCP and TFTP), PXE is particularly vulnerable to random failures during the booting stage. 
If the deployment ramdisk never calls back to the bare metal conductor, the build will be aborted, and the node will be moved to the ``deploy failed`` state, after the deploy callback timeout. This timeout can be changed via the :oslo.config:option:`conductor.deploy_callback_timeout` configuration option. Starting with the Train release, the Bare Metal service can retry PXE boot if it takes too long. The timeout is defined via :oslo.config:option:`pxe.boot_retry_timeout` and must be smaller than the ``deploy_callback_timeout``, otherwise it will have no effect. For example, the following configuration sets the overall timeout to 60 minutes, allowing two retries after 20 minutes: .. code-block:: ini [conductor] deploy_callback_timeout = 3600 [pxe] boot_retry_timeout = 1200 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/configure-tenant-networks.rst0000644000175000017500000001542000000000000025745 0ustar00coreycorey00000000000000.. _configure-tenant-networks: Configure tenant networks ========================= Below is an example flow of how to set up the Bare Metal service so that node provisioning will happen in a multi-tenant environment (which means using the ``neutron`` network interface as stated above): #. Network interfaces can be enabled on ironic-conductor by adding them to the ``enabled_network_interfaces`` configuration option under the ``default`` section of the configuration file:: [DEFAULT] ... enabled_network_interfaces=noop,flat,neutron Keep in mind that, ideally, all ironic-conductors should have the same list of enabled network interfaces, but it may not be the case during ironic-conductor upgrades. This may cause problems if one of the ironic-conductors dies and some node that is taken over is mapped to an ironic-conductor that does not support the node's network interface. 
Any actions that involve calling the node's driver will fail until that network interface is installed and enabled on that ironic-conductor. #. It is recommended to set the default network interface via the ``default_network_interface`` configuration option under the ``default`` section of the configuration file:: [DEFAULT] ... default_network_interface=neutron This default value will be used for all nodes that don't have a network interface explicitly specified in the creation request. If this configuration option is not set, the default network interface is determined by looking at the ``[dhcp]dhcp_provider`` configuration option value. If it is ``neutron``, then ``flat`` network interface becomes the default, otherwise ``noop`` is the default. #. Define a provider network in the Networking service, which we shall refer to as the "provisioning" network. Using the ``neutron`` network interface requires that ``provisioning_network`` and ``cleaning_network`` configuration options are set to valid identifiers (UUID or name) of networks in the Networking service. If these options are not set correctly, cleaning or provisioning will fail to start. There are two ways to set these values: - Under the ``neutron`` section of ironic configuration file: .. code-block:: ini [neutron] cleaning_network = $CLEAN_UUID_OR_NAME provisioning_network = $PROVISION_UUID_OR_NAME - Under ``provisioning_network`` and ``cleaning_network`` keys of the node's ``driver_info`` field as ``driver_info['provisioning_network']`` and ``driver_info['cleaning_network']`` respectively. .. note:: If these ``provisioning_network`` and ``cleaning_network`` values are not specified in node's `driver_info` then ironic falls back to the configuration in the ``neutron`` section. Please refer to :doc:`configure-cleaning` for more information about cleaning. .. warning:: Please make sure that the Bare Metal service has exclusive access to the provisioning and cleaning networks. 
Spawning instances by non-admin users in these networks and getting access to the Bare Metal service's control plane is a security risk. For this reason, the provisioning and cleaning networks should be configured as non-shared networks in the ``admin`` tenant. .. note:: When using the ``flat`` network interface, bare metal instances are normally spawned onto the "provisioning" network. This is not supported with the ``neutron`` interface and the deployment will fail. Please ensure a different network is chosen in the Networking service when a bare metal instance is booted from the Compute service. .. note:: The "provisioning" and "cleaning" networks may be the same network or distinct networks. To ensure that communication between the Bare Metal service and the deploy ramdisk works, it is important to ensure that security groups are disabled for these networks, *or* that the default security groups allow: * DHCP * TFTP * egress port used for the Bare Metal service (6385 by default) * ingress port used for ironic-python-agent (9999 by default) * if using :ref:`iscsi-deploy`, the ingress port used for iSCSI (3260 by default) * if using :ref:`direct-deploy`, the egress port used for the Object Storage service (typically 80 or 443) * if using iPXE, the egress port used for the HTTP server running on the ironic-conductor nodes (typically 80). #. This step is optional and applicable only if you want to use security groups during provisioning and/or cleaning of the nodes. If not specified, default security groups are used. #. Define security groups in the Networking service, to be used for provisioning and/or cleaning networks. #. Add the list of these security group UUIDs under the ``neutron`` section of ironic-conductor's configuration file as shown below:: [neutron] ... 
cleaning_network=$CLEAN_UUID_OR_NAME cleaning_network_security_groups=[$LIST_OF_CLEAN_SECURITY_GROUPS] provisioning_network=$PROVISION_UUID_OR_NAME provisioning_network_security_groups=[$LIST_OF_PROVISION_SECURITY_GROUPS] Multiple security groups may be applied to a given network, hence, they are specified as a list. The same security group(s) could be used for both provisioning and cleaning networks. .. warning:: If security groups are configured as described above, do not set the "port_security_enabled" flag to False for the corresponding Networking service's network or port. This will cause the deploy to fail. For example: if ``provisioning_network_security_groups`` configuration option is used, ensure that "port_security_enabled" flag for the provisioning network is set to True. This flag is set to True by default; make sure not to override it by manually setting it to False. #. Install and configure a compatible ML2 mechanism driver which supports bare metal provisioning for your switch. See :neutron-doc:`ML2 plugin configuration manual ` for details. #. Restart the ironic-conductor and ironic-api services after the modifications: - Fedora/RHEL7/CentOS7:: sudo systemctl restart openstack-ironic-api sudo systemctl restart openstack-ironic-conductor - Ubuntu:: sudo service ironic-api restart sudo service ironic-conductor restart #. Make sure that the ironic-conductor is reachable over the provisioning network by trying to download a file from a TFTP server on it, from some non-control-plane server in that network:: tftp $TFTP_IP -c get $FILENAME where FILENAME is the file located at the TFTP server. See :ref:`multitenancy` for required node configuration. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/creating-images.rst0000644000175000017500000000713600000000000023667 0ustar00coreycorey00000000000000Create user images for the Bare Metal service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Bare Metal provisioning requires two sets of images: the deploy images and the user images. The :ref:`deploy images ` are used by the Bare Metal service to prepare the bare metal server for actual OS deployment. Whereas the user images are installed on the bare metal server to be used by the end user. There are two types of user images: *partition images* contain only the contents of the root partition. Additionally, two more images are used together with them: an image with a kernel and with an initramfs. .. warning:: To use partition images with local boot, Grub2 must be installed on them. *whole disk images* contain a complete partition table with one or more partitions. .. warning:: The kernel/initramfs pair must not be used with whole disk images, otherwise they'll be mistaken for partition images. Building user images ^^^^^^^^^^^^^^^^^^^^ disk-image-builder ------------------ The `disk-image-builder`_ can be used to create user images required for deployment and the actual OS which the user is going to run. - Install diskimage-builder package (use virtualenv, if you don't want to install anything globally): .. code-block:: console # pip install diskimage-builder - Build the image your users will run (Ubuntu image has been taken as an example): - Partition images .. code-block:: console $ disk-image-create ubuntu baremetal dhcp-all-interfaces grub2 -o my-image - Whole disk images .. code-block:: console $ disk-image-create ubuntu vm dhcp-all-interfaces -o my-image The partition image command creates ``my-image.qcow2``, ``my-image.vmlinuz`` and ``my-image.initrd`` files. 
The ``grub2`` element in the partition image creation command is only needed if local boot will be used to deploy ``my-image.qcow2``, otherwise the images ``my-image.vmlinuz`` and ``my-image.initrd`` will be used for PXE booting after deploying the bare metal with ``my-image.qcow2``. For whole disk images only the main image is used. If you want to use Fedora image, replace ``ubuntu`` with ``fedora`` in the chosen command. .. _disk-image-builder: https://docs.openstack.org/diskimage-builder/latest/ Virtual machine --------------- Virtual machine software can also be used to build user images. There are different software options available, qemu-kvm is usually a good choice on linux platform, it supports emulating many devices and even building images for architectures other than the host machine by software emulation. VirtualBox is another good choice for non-linux host. The procedure varies depending on the software used, but the steps for building an image are similar, the user creates a virtual machine, and installs the target system just like what is done for a real hardware. The system can be highly customized like partition layout, drivers or software shipped, etc. Usually libvirt and its management tools are used to make interaction with qemu-kvm easier, for example, to create a virtual machine with ``virt-install``:: $ virt-install --name centos8 --ram 4096 --vcpus=2 -f centos8.qcow2 \ > --cdrom CentOS-8-x86_64-1905-dvd1.iso Graphic frontend like ``virt-manager`` can also be utilized. The disk file can be used as user image after the system is set up and powered off. The path of the disk file varies depending on the software used, usually it's stored in a user-selected part of the local file system. For qemu-kvm or GUI frontend building upon it, it's typically stored at ``/var/lib/libvirt/images``. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/deploy-ramdisk.rst0000644000175000017500000000247000000000000023550 0ustar00coreycorey00000000000000.. _deploy-ramdisk: Building or downloading a deploy ramdisk image ============================================== Ironic depends on having an image with the :ironic-python-agent-doc:`ironic-python-agent (IPA) <>` service running on it for controlling and deploying bare metal nodes. Two kinds of images are published on every commit from every branch of :ironic-python-agent-doc:`ironic-python-agent (IPA) <>` * DIB_ images are suitable for production usage and can be downloaded from https://tarballs.openstack.org/ironic-python-agent/dib/files/. * For Train and older use CentOS 7 images. * For Ussuri and newer use CentOS 8 images. .. warning:: CentOS 7 master images are no longer updated and must not be used. * TinyIPA_ images are suitable for CI and testing environments and can be downloaded from https://tarballs.openstack.org/ironic-python-agent/tinyipa/files/. Building from source -------------------- Check the ironic-python-agent-builder_ project for information on how to build ironic-python-agent ramdisks. .. _DIB: https://docs.openstack.org/ironic-python-agent-builder/latest/admin/dib.html .. _TinyIPA: https://docs.openstack.org/ironic-python-agent-builder/latest/admin/tinyipa.html .. _ironic-python-agent-builder: https://docs.openstack.org/ironic-python-agent-builder/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/enabling-drivers.rst0000644000175000017500000002425200000000000024061 0ustar00coreycorey00000000000000Enabling drivers and hardware types =================================== Introduction ------------ The Bare Metal service delegates actual hardware management to **drivers**. 
*Drivers*, also called *hardware types*, consist of *hardware interfaces*: sets of functionality dealing with some aspect of bare metal provisioning in a vendor-specific way. There are generic **hardware types** (eg. ``redfish`` and ``ipmi``), and vendor-specific ones (eg. ``ilo`` and ``irmc``). .. note:: Starting with the Rocky release, the terminologies *driver*, *dynamic driver*, and *hardware type* have the same meaning in the scope of Bare Metal service. .. _enable-hardware-types: Enabling hardware types ----------------------- Hardware types are enabled in the configuration file of the **ironic-conductor** service by setting the ``enabled_hardware_types`` configuration option, for example: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish Due to the driver's dynamic nature, they also require configuring enabled hardware interfaces. .. note:: All available hardware types and interfaces are listed in setup.cfg_ file in the source code tree. .. _enable-hardware-interfaces: Enabling hardware interfaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are several types of hardware interfaces: bios manages configuration of the BIOS settings of a bare metal node. This interface is vendor-specific and can be enabled via the ``enabled_bios_interfaces`` option: .. code-block:: ini [DEFAULT] enabled_hardware_types = enabled_bios_interfaces = See :doc:`/admin/bios` for details. boot manages booting of both the deploy ramdisk and the user instances on the bare metal node. See :doc:`/admin/interfaces/boot` for details. Boot interface implementations are often vendor specific, and can be enabled via the ``enabled_boot_interfaces`` option: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,ilo enabled_boot_interfaces = pxe,ilo-virtual-media Boot interfaces with ``pxe`` in their name require :doc:`configure-pxe`. There are also a few hardware-specific boot interfaces - see :doc:`/admin/drivers` for their required configuration. 
console manages access to the serial console of a bare metal node. See :doc:`/admin/console` for details. deploy defines how the image gets transferred to the target disk. See :doc:`/admin/interfaces/deploy` for an explanation of the difference between supported deploy interfaces ``direct`` and ``iscsi``. The deploy interfaces can be enabled as follows: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish enabled_deploy_interfaces = iscsi,direct Additionally, * the ``iscsi`` deploy interface requires :doc:`configure-iscsi` * the ``direct`` deploy interface requires the Object Storage service or an HTTP service inspect implements fetching hardware information from nodes. Can be implemented out-of-band (via contacting the node's BMC) or in-band (via booting a ramdisk on a node). The latter implementation is called ``inspector`` and uses a separate service called :ironic-inspector-doc:`ironic-inspector <>`. Example: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,ilo,irmc enabled_inspect_interfaces = ilo,irmc,inspector See :doc:`/admin/inspection` for more details. management provides additional hardware management actions, like getting or setting boot devices. This interface is usually vendor-specific, and its name often matches the name of the hardware type (with ``ipmitool`` being a notable exception). For example: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish,ilo,irmc enabled_management_interfaces = ipmitool,redfish,ilo,irmc Using ``ipmitool`` requires :doc:`configure-ipmi`. See :doc:`/admin/drivers` for the required configuration of each driver. network connects/disconnects bare metal nodes to/from virtual networks. See :doc:`configure-tenant-networks` for more details. power runs power actions on nodes. Similar to the management interface, it is usually vendor-specific, and its name often matches the name of the hardware type (with ``ipmitool`` being again an exception). For example: .. 
code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish,ilo,irmc enabled_power_interfaces = ipmitool,redfish,ilo,irmc Using ``ipmitool`` requires :doc:`configure-ipmi`. See :doc:`/admin/drivers` for the required configuration of each driver. raid manages building and tearing down RAID on nodes. Similar to inspection, it can be implemented either out-of-band or in-band (via ``agent`` implementation). See :doc:`/admin/raid` for details. For example: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish,ilo,irmc enabled_raid_interfaces = agent,no-raid storage manages the interaction with a remote storage subsystem, such as the Block Storage service, and helps facilitate booting from a remote volume. This interface ensures that volume target and connector information is updated during the lifetime of a deployed instance. See :doc:`/admin/boot-from-volume` for more details. This interface defaults to a ``noop`` driver as it is considered an "opt-in" interface which requires additional configuration by the operator to be usable. For example: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,irmc enabled_storage_interfaces = cinder,noop vendor is a place for vendor extensions to be exposed in API. See :doc:`/contributor/vendor-passthru` for details. .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish,ilo,irmc enabled_vendor_interfaces = ipmitool,no-vendor Here is a complete configuration example, enabling two generic protocols, IPMI and Redfish, with a few additional features: .. 
code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish enabled_boot_interfaces = pxe enabled_console_interfaces = ipmitool-socat,no-console enabled_deploy_interfaces = iscsi,direct enabled_inspect_interfaces = inspector enabled_management_interfaces = ipmitool,redfish enabled_network_interfaces = flat,neutron enabled_power_interfaces = ipmitool,redfish enabled_raid_interfaces = agent enabled_storage_interfaces = cinder,noop enabled_vendor_interfaces = ipmitool,no-vendor Note that some interfaces have implementations named ``no-`` where ```` is the interface type. These implementations do nothing and return errors when used from API. Hardware interfaces in multi-conductor environments ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When enabling hardware types and their interfaces, make sure that for every enabled hardware type, the whole set of enabled interfaces matches for all conductors. However, different conductors can have different hardware types enabled. For example, you can have two conductors with the following configuration respectively: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi enabled_deploy_interfaces = direct enabled_power_interfaces = ipmitool enabled_management_interfaces = ipmitool .. code-block:: ini [DEFAULT] enabled_hardware_types = redfish enabled_deploy_interfaces = iscsi enabled_power_interfaces = redfish enabled_management_interfaces = redfish But you cannot have two conductors with the following configuration respectively: .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish enabled_deploy_interfaces = direct enabled_power_interfaces = ipmitool,redfish enabled_management_interfaces = ipmitool,redfish .. code-block:: ini [DEFAULT] enabled_hardware_types = redfish enabled_deploy_interfaces = iscsi enabled_power_interfaces = redfish enabled_management_interfaces = redfish This is because the ``redfish`` hardware type will have different enabled *deploy* interfaces on these conductors. 
It would have been fine, if the second conductor had ``enabled_deploy_interfaces = direct`` instead of ``iscsi``. This situation is not detected by the Bare Metal service, but it can cause inconsistent behavior in the API, when node functionality will depend on which conductor it gets assigned to. .. note:: We don't treat this as an error, because such *temporary* inconsistency is inevitable during a rolling upgrade or a configuration update. Configuring interface defaults ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When an operator does not provide an explicit value for one of the interfaces (when creating a node or updating its driver), the default value is calculated as described in :ref:`hardware_interfaces_defaults`. It is also possible to override the defaults for any interfaces by setting one of the options named ``default__interface``, where ```` is the interface name. For example: .. code-block:: ini [DEFAULT] default_deploy_interface = direct default_network_interface = neutron This configuration forces the default *deploy* interface to be ``direct`` and the default *network* interface to be ``neutron`` for all hardware types. The defaults are calculated and set on a node when creating it or updating its hardware type. Thus, changing these configuration options has no effect on existing nodes. .. warning:: The default interface implementation must be configured the same way across all conductors in the cloud, except maybe for a short period of time during an upgrade or configuration update. Otherwise the default implementation will depend on which conductor handles which node, and this mapping is not predictable or even persistent. .. warning:: These options should be used with care. If a hardware type does not support the provided default implementation, its users will have to always provide an explicit value for this interface when creating a node. .. 
_setup.cfg: https://opendev.org/openstack/ironic/src/branch/master/setup.cfg ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/enabling-https.rst0000644000175000017500000000630200000000000023541 0ustar00coreycorey00000000000000.. _enabling-https: Enabling HTTPS -------------- .. _EnableHTTPSinSwift: Enabling HTTPS in Swift ======================= The drivers using virtual media use swift for storing boot images and node configuration information (contains sensitive information for Ironic conductor to provision bare metal hardware). By default, HTTPS is not enabled in swift. HTTPS is required to encrypt all communication between swift and Ironic conductor and swift and bare metal (via virtual media). It can be enabled in one of the following ways: * `Using an SSL termination proxy `_ * :swift-doc:`Using native SSL support in swift ` (recommended only for testing purpose by swift). .. _EnableHTTPSinGlance: Enabling HTTPS in Image service =============================== Ironic drivers usually use Image service during node provisioning. By default, image service does not use HTTPS, but it is required for secure communication. It can be enabled by making the following changes to ``/etc/glance/glance-api.conf``: #. :glance-doc:`Configuring SSL support ` #. Restart the glance-api service:: Fedora/RHEL7/CentOS7/SUSE: sudo systemctl restart openstack-glance-api Debian/Ubuntu: sudo service glance-api restart See the :glance-doc:`Glance <>` documentation, for more details on the Image service. Enabling HTTPS communication between Image service and Object storage ===================================================================== This section describes the steps needed to enable secure HTTPS communication between Image service and Object storage when Object storage is used as the Backend. 
To enable secure HTTPS communication between Image service and Object storage follow these steps: #. :ref:`EnableHTTPSinSwift` #. :glance-doc:`Configure Swift Storage Backend ` #. :ref:`EnableHTTPSinGlance` Enabling HTTPS communication between Image service and Bare Metal service ========================================================================= This section describes the steps needed to enable secure HTTPS communication between Image service and Bare Metal service. To enable secure HTTPS communication between Bare Metal service and Image service follow these steps: #. Edit ``/etc/ironic/ironic.conf``:: [glance] ... glance_cafile=/path/to/certfile .. note:: 'glance_cafile' is an optional path to a CA certificate bundle to be used to validate the SSL certificate served by Image service. #. If not using the keystone service catalog for the Image service API endpoint discovery, also edit the ``endpoint_override`` option to point to HTTPS URL of image service (replace ```` with hostname[:port][path] of the Image service endpoint):: [glance] ... endpoint_override = https:// #. Restart ironic-conductor service:: Fedora/RHEL7/CentOS7/SUSE: sudo systemctl restart openstack-ironic-conductor Debian/Ubuntu: sudo service ironic-conductor restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/enrollment.rst0000644000175000017500000010135400000000000023004 0ustar00coreycorey00000000000000.. _enrollment: Enrollment ========== After all the services have been properly configured, you should enroll your hardware with the Bare Metal service, and confirm that the Compute service sees the available hardware. The nodes will be visible to the Compute service once they are in the ``available`` provision state. .. note:: After enrolling nodes with the Bare Metal service, the Compute service will not be immediately notified of the new resources. 
The Compute service's resource tracker syncs periodically, and so any changes made directly to the Bare Metal service's resources will become visible in the Compute service only after the next run of that periodic task. More information is in the :ref:`troubleshooting-install` section. .. note:: Any bare metal node that is visible to the Compute service may have a workload scheduled to it, if both the ``power`` and ``management`` interfaces pass the ``validate`` check. If you wish to exclude a node from the Compute service's scheduler, for instance so that you can perform maintenance on it, you can set the node to "maintenance" mode. For more information see the :ref:`maintenance_mode` section. Choosing a driver ----------------- When enrolling a node, the most important information to supply is *driver*. See :doc:`enabling-drivers` for a detailed explanation of bare metal drivers, hardware types and interfaces. The ``driver list`` command can be used to list all drivers enabled on all hosts: .. code-block:: console openstack baremetal driver list +---------------------+-----------------------+ | Supported driver(s) | Active host(s) | +---------------------+-----------------------+ | ipmi | localhost.localdomain | +---------------------+-----------------------+ The specific driver to use should be picked based on actual hardware capabilities and expected features. See :doc:`/admin/drivers` for more hints on that. Each driver has a list of *driver properties* that need to be specified via the node's ``driver_info`` field, in order for the driver to operate on node. This list consists of the properties of the hardware interfaces that the driver uses. These driver properties are available with the ``driver property list`` command: .. 
code-block:: console $ openstack baremetal driver property list ipmi +----------------------+-------------------------------------------------------------------------------------------------------------+ | Property | Description | +----------------------+-------------------------------------------------------------------------------------------------------------+ | ipmi_address | IP address or hostname of the node. Required. | | ipmi_password | password. Optional. | | ipmi_username | username; default is NULL user. Optional. | | ... | ... | | deploy_kernel | UUID (from Glance) of the deployment kernel. Required. | | deploy_ramdisk | UUID (from Glance) of the ramdisk that is mounted at boot time. Required. | +----------------------+-------------------------------------------------------------------------------------------------------------+ The properties marked as required must be supplied either during node creation or shortly after. Some properties may only be required for certain features. Note on API versions -------------------- Starting with API version 1.11, the Bare Metal service added a new initial provision state of ``enroll`` to its state machine. When this or later API version is used, new nodes get this state instead of ``available``. Existing automation tooling that use an API version lower than 1.11 are not affected, since the initial provision state is still ``available``. However, using API version 1.11 or above may break existing automation tooling with respect to node creation. The default API version used by (the most recent) python-ironicclient is 1.9, but it may change in the future and should not be relied on. In the examples below we will use version 1.11 of the Bare metal API. This gives us the following advantages: * Explicit power credentials validation before leaving the ``enroll`` state. * Running node cleaning before entering the ``available`` state. * Not exposing half-configured nodes to the scheduler. 
To set the API version for all commands, you can set the environment variable ``IRONIC_API_VERSION``. For the OpenStackClient baremetal plugin, set the ``OS_BAREMETAL_API_VERSION`` variable to the same value. For example: .. code-block:: console $ export IRONIC_API_VERSION=1.11 $ export OS_BAREMETAL_API_VERSION=1.11 Enrollment process ------------------ Creating a node ~~~~~~~~~~~~~~~ This section describes the main steps to enroll a node and make it available for provisioning. Some steps are shown separately for illustration purposes, and may be combined if desired. #. Create a node in the Bare Metal service with the ``node create`` command. At a minimum, you must specify the driver name (for example, ``ipmi``). This command returns the node UUID along with other information about the node. The node's provision state will be ``enroll``: .. code-block:: console $ export OS_BAREMETAL_API_VERSION=1.11 $ openstack baremetal node create --driver ipmi +--------------+--------------------------------------+ | Property | Value | +--------------+--------------------------------------+ | uuid | dfc6189f-ad83-4261-9bda-b27258eb1987 | | driver_info | {} | | extra | {} | | driver | ipmi | | chassis_uuid | | | properties | {} | | name | None | +--------------+--------------------------------------+ $ openstack baremetal node show dfc6189f-ad83-4261-9bda-b27258eb1987 +------------------------+--------------------------------------+ | Property | Value | +------------------------+--------------------------------------+ | target_power_state | None | | extra | {} | | last_error | None | | maintenance_reason | None | | provision_state | enroll | | uuid | dfc6189f-ad83-4261-9bda-b27258eb1987 | | console_enabled | False | | target_provision_state | None | | provision_updated_at | None | | maintenance | False | | power_state | None | | driver | ipmi | | properties | {} | | instance_uuid | None | | name | None | | driver_info | {} | | ... | ... 
| +------------------------+--------------------------------------+ A node may also be referred to by a logical name as well as its UUID. A name can be assigned to the node during its creation by adding the ``-n`` option to the ``node create`` command or by updating an existing node with the ``node set`` command. See `Logical Names`_ for examples. #. Starting with API version 1.31 (and ``python-ironicclient`` 1.13), you can pick which hardware interface to use with nodes that use hardware types. Each interface is represented by a node field called ``_interface`` where ```` in the interface type, e.g. ``boot``. See :doc:`enabling-drivers` for details on hardware interfaces. An interface can be set either separately: .. code-block:: console $ openstack baremetal --os-baremetal-api-version 1.31 node set $NODE_UUID \ --deploy-interface direct \ --raid-interface agent or set during node creation: .. code-block:: console $ openstack baremetal --os-baremetal-api-version 1.31 node create --driver ipmi \ --deploy-interface direct \ --raid-interface agent If no value is provided for some interfaces, `Defaults for hardware interfaces`_ are used instead. #. Update the node ``driver_info`` with the required driver properties, so that the Bare Metal service can manage the node: .. code-block:: console $ openstack baremetal node set $NODE_UUID \ --driver-info ipmi_username=$USER \ --driver-info ipmi_password=$PASS \ --driver-info ipmi_address=$ADDRESS .. note:: If IPMI is running on a port other than 623 (the default). The port must be added to ``driver_info`` by specifying the ``ipmi_port`` value. Example: .. code-block:: console $ openstack baremetal node set $NODE_UUID --driver-info ipmi_port=$PORT_NUMBER You may also specify all ``driver_info`` parameters during node creation by passing the **--driver-info** option multiple times: .. 
code-block:: console $ openstack baremetal node create --driver ipmi \ --driver-info ipmi_username=$USER \ --driver-info ipmi_password=$PASS \ --driver-info ipmi_address=$ADDRESS See `Choosing a driver`_ above for details on driver properties. #. Specify a deploy kernel and ramdisk compatible with the node's driver, for example: .. code-block:: console $ openstack baremetal node set $NODE_UUID \ --driver-info deploy_kernel=$DEPLOY_VMLINUZ_UUID \ --driver-info deploy_ramdisk=$DEPLOY_INITRD_UUID See :doc:`configure-glance-images` for details. #. Optionally you can specify the provisioning and/or cleaning network UUID or name in the node's ``driver_info``. The ``neutron`` network interface requires both ``provisioning_network`` and ``cleaning_network``, while the ``flat`` network interface requires the ``cleaning_network`` to be set either in the configuration or on the nodes. For example: .. code-block:: console $ openstack baremetal node set $NODE_UUID \ --driver-info cleaning_network=$CLEAN_UUID_OR_NAME \ --driver-info provisioning_network=$PROVISION_UUID_OR_NAME See :doc:`configure-tenant-networks` for details. #. You must also inform the Bare Metal service of the network interface cards which are part of the node by creating a port with each NIC's MAC address. These MAC addresses are passed to the Networking service during instance provisioning and used to configure the network appropriately: .. code-block:: console $ openstack baremetal port create $MAC_ADDRESS --node $NODE_UUID .. _enrollment-scheduling: Adding scheduling information ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Assign a *resource class* to the node. A *resource class* should represent a class of hardware in your data center, that corresponds to a Compute flavor. For example, let's split hardware into these three groups: #. nodes with a lot of RAM and powerful CPU for computational tasks, #. nodes with powerful GPU for OpenCL computing, #. smaller nodes for development and testing. 
We can define three resource classes to reflect these hardware groups, named ``large-cpu``, ``large-gpu`` and ``small`` respectively. Then, for each node in each of the hardware groups, we'll set their ``resource_class`` appropriately via: .. code-block:: console $ openstack --os-baremetal-api-version 1.21 baremetal node set $NODE_UUID \ --resource-class $CLASS_NAME The ``--resource-class`` argument can also be used when creating a node: .. code-block:: console $ openstack --os-baremetal-api-version 1.21 baremetal node create \ --driver $DRIVER --resource-class $CLASS_NAME To use resource classes for scheduling you need to update your flavors as described in :doc:`configure-nova-flavors`. .. note:: This is not required for standalone deployments, only for those using the Compute service for provisioning bare metal instances. #. Update the node's properties to match the actual hardware of the node: .. code-block:: console $ openstack baremetal node set $NODE_UUID \ --property cpus=$CPU_COUNT \ --property memory_mb=$RAM_MB \ --property local_gb=$DISK_GB As above, these can also be specified at node creation by passing the **--property** option to ``node create`` multiple times: .. code-block:: console $ openstack baremetal node create --driver ipmi \ --driver-info ipmi_username=$USER \ --driver-info ipmi_password=$PASS \ --driver-info ipmi_address=$ADDRESS \ --property cpus=$CPU_COUNT \ --property memory_mb=$RAM_MB \ --property local_gb=$DISK_GB These values can also be discovered during `Hardware Inspection`_. .. warning:: The value provided for the ``local_gb`` property must match the size of the root device you're going to deploy on. By default **ironic-python-agent** picks the smallest disk which is not smaller than 4 GiB. If you override this logic by using root device hints (see :ref:`root-device-hints`), the ``local_gb`` value should match the size of picked target disk. #. 
If you wish to perform more advanced scheduling of the instances based on hardware capabilities, you may add metadata to each node that will be exposed to the Compute scheduler (see: :nova-doc:`ComputeCapabilitiesFilter `). A full explanation of this is outside of the scope of this document. It can be done through the special ``capabilities`` member of node properties: .. code-block:: console $ openstack baremetal node set $NODE_UUID \ --property capabilities=key1:val1,key2:val2 Some capabilities can also be discovered during `Hardware Inspection`_. #. If you wish to perform advanced scheduling of instances based on qualitative attributes of bare metal nodes, you may add traits to each bare metal node that will be exposed to the Compute scheduler (see: :ref:`scheduling-traits` for a more in-depth discussion of traits in the Bare Metal service). For example, to add the standard trait ``HW_CPU_X86_VMX`` and a custom trait ``CUSTOM_TRAIT1`` to a node: .. code-block:: console $ openstack baremetal node add trait $NODE_UUID \ CUSTOM_TRAIT1 HW_CPU_X86_VMX Validating node information ~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. To check if Bare Metal service has the minimum information necessary for a node's driver to be functional, you may ``validate`` it: .. code-block:: console $ openstack baremetal node validate $NODE_UUID +------------+--------+--------+ | Interface | Result | Reason | +------------+--------+--------+ | boot | True | | | console | True | | | deploy | True | | | inspect | True | | | management | True | | | network | True | | | power | True | | | raid | True | | | storage | True | | +------------+--------+--------+ If the node fails validation, each driver interface will return information as to why it failed: .. 
code-block:: console $ openstack baremetal node validate $NODE_UUID +------------+--------+-------------------------------------------------------------------------------------------------------------------------------------+ | Interface | Result | Reason | +------------+--------+-------------------------------------------------------------------------------------------------------------------------------------+ | boot | True | | | console | None | not supported | | deploy | False | Cannot validate iSCSI deploy. Some parameters were missing in node's instance_info. Missing are: ['root_gb', 'image_source'] | | inspect | True | | | management | False | Missing the following IPMI credentials in node's driver_info: ['ipmi_address']. | | network | True | | | power | False | Missing the following IPMI credentials in node's driver_info: ['ipmi_address']. | | raid | None | not supported | | storage | True | | +------------+--------+-------------------------------------------------------------------------------------------------------------------------------------+ When using the Compute Service with the Bare Metal service, it is safe to ignore the deploy interface's validation error due to lack of image information. You may continue the enrollment process. This information will be set by the Compute Service just before deploying, when an instance is requested: .. code-block:: console $ openstack baremetal node validate $NODE_UUID +------------+--------+------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Interface | Result | Reason | +------------+--------+------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | boot | False | Cannot validate image information for node because one or more parameters are missing from its instance_info. 
Missing are: ['ramdisk', 'kernel', 'image_source'] | | console | True | | | deploy | False | Cannot validate image information for node because one or more parameters are missing from its instance_info. Missing are: ['ramdisk', 'kernel', 'image_source'] | | inspect | True | | | management | True | | | network | True | | | power | True | | | raid | None | not supported | | storage | True | | +------------+--------+------------------------------------------------------------------------------------------------------------------------------------------------------------------+ Making node available for deployment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In order for nodes to be available for deploying workloads on them, nodes must be in the ``available`` provision state. To do this, nodes created with API version 1.11 and above must be moved from the ``enroll`` state to the ``manageable`` state and then to the ``available`` state. This section can be safely skipped, if API version 1.10 or earlier is used (which is the case by default). After creating a node and before moving it from its initial provision state of ``enroll``, basic power and port information needs to be configured on the node. The Bare Metal service needs this information because it verifies that it is capable of controlling the node when transitioning the node from ``enroll`` to ``manageable`` state. To move a node from ``enroll`` to ``manageable`` provision state: .. code-block:: console $ openstack baremetal --os-baremetal-api-version 1.11 node manage $NODE_UUID $ openstack baremetal node show $NODE_UUID +------------------------+--------------------------------------------------------------------+ | Property | Value | +------------------------+--------------------------------------------------------------------+ | ... | ... | | provision_state | manageable | <- verify correct state | uuid | 0eb013bb-1e4b-4f4c-94b5-2e7468242611 | | ... | ... 
| +------------------------+--------------------------------------------------------------------+ .. note:: Since it is an asynchronous call, the response for ``openstack baremetal node manage`` will not indicate whether the transition succeeded or not. You can check the status of the operation via ``openstack baremetal node show``. If it was successful, ``provision_state`` will be in the desired state. If it failed, there will be information in the node's ``last_error``. When a node is moved from the ``manageable`` to ``available`` provision state, the node will go through automated cleaning if configured to do so (see :ref:`configure-cleaning`). To move a node from ``manageable`` to ``available`` provision state: .. code-block:: console $ openstack baremetal --os-baremetal-api-version 1.11 node provide $NODE_UUID $ openstack baremetal node show $NODE_UUID +------------------------+--------------------------------------------------------------------+ | Property | Value | +------------------------+--------------------------------------------------------------------+ | ... | ... | | provision_state | available | < - verify correct state | uuid | 0eb013bb-1e4b-4f4c-94b5-2e7468242611 | | ... | ... | +------------------------+--------------------------------------------------------------------+ For more details on the Bare Metal service's state machine, see the :doc:`/contributor/states` documentation. Mapping nodes to Compute cells ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the Compute service is used for scheduling, and the ``discover_hosts_in_cells_interval`` was not set as described in :doc:`configure-compute`, then log into any controller node and run the following command to map the new node(s) to Compute cells:: nova-manage cell_v2 discover_hosts Logical names ------------- A node may also be referred to by a logical name as well as its UUID. 
Names can be assigned either during its creation by adding the ``-n`` option to the ``node create`` command or by updating an existing node with the ``node set`` command. Node names must be unique, and conform to: - rfc952_ - rfc1123_ - wiki_hostname_ The node is named 'example' in the following examples: .. code-block:: console $ openstack baremetal node create --driver ipmi --name example or .. code-block:: console $ openstack baremetal node set $NODE_UUID --name example Once assigned a logical name, a node can then be referred to by name or UUID interchangeably: .. code-block:: console $ openstack baremetal node create --driver ipmi --name example +--------------+--------------------------------------+ | Property | Value | +--------------+--------------------------------------+ | uuid | 71e01002-8662-434d-aafd-f068f69bb85e | | driver_info | {} | | extra | {} | | driver | ipmi | | chassis_uuid | | | properties | {} | | name | example | +--------------+--------------------------------------+ $ openstack baremetal node show example +------------------------+--------------------------------------+ | Property | Value | +------------------------+--------------------------------------+ | target_power_state | None | | extra | {} | | last_error | None | | updated_at | 2015-04-24T16:23:46+00:00 | | ... | ... | | instance_info | {} | +------------------------+--------------------------------------+ .. _rfc952: https://tools.ietf.org/html/rfc952 .. _rfc1123: https://tools.ietf.org/html/rfc1123 .. _wiki_hostname: https://en.wikipedia.org/wiki/Hostname .. _hardware_interfaces_defaults: Defaults for hardware interfaces -------------------------------- For *hardware types*, users can request one of enabled implementations when creating or updating a node as explained in `Creating a node`_. When no value is provided for a certain interface when creating a node, or changing a node's hardware type, the default value is used. 
You can use the driver details command to list the current enabled and default interfaces for a hardware type (for your deployment): .. code-block:: console $ openstack baremetal --os-baremetal-api-version 1.31 driver show ipmi +-------------------------------+----------------+ | Field | Value | +-------------------------------+----------------+ | default_boot_interface | pxe | | default_console_interface | no-console | | default_deploy_interface | iscsi | | default_inspect_interface | no-inspect | | default_management_interface | ipmitool | | default_network_interface | flat | | default_power_interface | ipmitool | | default_raid_interface | no-raid | | default_vendor_interface | no-vendor | | enabled_boot_interfaces | pxe | | enabled_console_interfaces | no-console | | enabled_deploy_interfaces | iscsi, direct | | enabled_inspect_interfaces | no-inspect | | enabled_management_interfaces | ipmitool | | enabled_network_interfaces | flat, noop | | enabled_power_interfaces | ipmitool | | enabled_raid_interfaces | no-raid, agent | | enabled_vendor_interfaces | no-vendor | | hosts | ironic-host-1 | | name | ipmi | | type | dynamic | +-------------------------------+----------------+ The defaults are calculated as follows: #. If the ``default__interface`` configuration option (where ```` is the interface name) is set, its value is used as the default. If this implementation is not compatible with the node's hardware type, an error is returned to a user. An explicit value has to be provided for the node's ``_interface`` field in this case. #. Otherwise, the first supported implementation that is enabled by an operator is used as the default. A list of supported implementations is calculated by taking the intersection between the implementations supported by the node's hardware type and implementations enabled by the ``enabled__interfaces`` option (where ```` is the interface name). The calculation preserves the order of items, as provided by the hardware type. 
If the list of supported implementations is not empty, the first one is used. Otherwise, an error is returned to a user. In this case, an explicit value has to be provided for the ``_interface`` field. See :doc:`enabling-drivers` for more details on configuration. Example ~~~~~~~ Consider the following configuration (shortened for simplicity): .. code-block:: ini [DEFAULT] enabled_hardware_types = ipmi,redfish enabled_console_interfaces = no-console,ipmitool-shellinabox enabled_deploy_interfaces = iscsi,direct enabled_management_interfaces = ipmitool,redfish enabled_power_interfaces = ipmitool,redfish default_deploy_interface = direct A new node is created with the ``ipmi`` driver and no interfaces specified: .. code-block:: console $ export OS_BAREMETAL_API_VERSION=1.31 $ openstack baremetal node create --driver ipmi +--------------+--------------------------------------+ | Property | Value | +--------------+--------------------------------------+ | uuid | dfc6189f-ad83-4261-9bda-b27258eb1987 | | driver_info | {} | | extra | {} | | driver | ipmi | | chassis_uuid | | | properties | {} | | name | None | +--------------+--------------------------------------+ Then the defaults for the interfaces that will be used by the node in this example are calculated as follows: deploy An explicit value of ``direct`` is provided for ``default_deploy_interface``, so it is used. power No default is configured. The ``ipmi`` hardware type supports only ``ipmitool`` power. The intersection between supported power interfaces and values provided in the ``enabled_power_interfaces`` option has only one item: ``ipmitool``. It is used. console No default is configured. The ``ipmi`` hardware type supports the following console interfaces: ``ipmitool-socat``, ``ipmitool-shellinabox`` and ``no-console`` (in this order). Of these three, only two are enabled: ``no-console`` and ``ipmitool-shellinabox`` (order does not matter). 
The intersection contains ``ipmitool-shellinabox`` and ``no-console``. The first item is used, and it is ``ipmitool-shellinabox``. management Following the same calculation as *power*, the ``ipmitool`` management interface is used. Hardware Inspection ------------------- The Bare Metal service supports hardware inspection that simplifies enrolling nodes - please see :doc:`/admin/inspection` for details. Tenant Networks and Port Groups ------------------------------- See :doc:`/admin/multitenancy` and :doc:`/admin/portgroups`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/get_started.rst0000644000175000017500000001414100000000000023127 0ustar00coreycorey00000000000000=========================== Bare Metal service overview =========================== The Bare Metal service, codenamed ``ironic``, is a collection of components that provides support to manage and provision physical machines. Bare Metal service components ----------------------------- The Bare Metal service includes the following components: ironic-api A RESTful API that processes application requests by sending them to the ironic-conductor over `remote procedure call (RPC)`_. Can be run through WSGI_ or as a separate process. ironic-conductor Adds/edits/deletes nodes; powers on/off nodes with IPMI or other vendor-specific protocol; provisions/deploys/cleans bare metal nodes. ironic-conductor uses :doc:`drivers ` to execute operations on hardware. ironic-python-agent A python service which is run in a temporary ramdisk to provide ironic-conductor and ironic-inspector services with remote access, in-band hardware control, and hardware introspection. Additionally, the Bare Metal service has certain external dependencies, which are very similar to other OpenStack services: - A database to store hardware information and state. You can set the database back-end type and location. 
A simple approach is to use the same database back end as the Compute service. Another approach is to use a separate database back-end to further isolate bare metal resources (and associated metadata) from users. - An :oslo.messaging-doc:`oslo.messaging <>` compatible queue, such as RabbitMQ. It may use the same implementation as that of the Compute service, but that is not a requirement. Used to implement RPC between ironic-api and ironic-conductor. Deployment architecture ----------------------- The Bare Metal RESTful API service is used to enroll hardware that the Bare Metal service will manage. A cloud administrator usually registers it, specifying their attributes such as MAC addresses and IPMI credentials. There can be multiple instances of the API service. The *ironic-conductor* process does the bulk of the work. For security reasons, it is advisable to place it on an isolated host, since it is the only service that requires access to both the data plane and IPMI control plane. There can be multiple instances of the conductor service to support various class of drivers and also to manage fail over. Instances of the conductor service should be on separate nodes. Each conductor can itself run many drivers to operate heterogeneous hardware. This is depicted in the following figure. .. figure:: ../images/deployment_architecture_2.png :alt: Deployment Architecture The API exposes a list of supported drivers and the names of conductor hosts servicing them. Interaction with OpenStack components ------------------------------------- The Bare Metal service may, depending upon configuration, interact with several other OpenStack services. 
This includes: - the OpenStack Telemetry module (``ceilometer``) for consuming the IPMI metrics - the OpenStack Identity service (``keystone``) for request authentication and to locate other OpenStack services - the OpenStack Image service (``glance``) from which to retrieve images and image meta-data - the OpenStack Networking service (``neutron``) for DHCP and network configuration - the OpenStack Compute service (``nova``) works with the Bare Metal service and acts as a user-facing API for instance management, while the Bare Metal service provides the admin/operator API for hardware management. The OpenStack Compute service also provides scheduling facilities (matching flavors <-> images <-> hardware), tenant quotas, IP assignment, and other services which the Bare Metal service does not, in and of itself, provide. - the OpenStack Object Storage (``swift``) provides temporary storage for the configdrive, user images, deployment logs and inspection data. Logical architecture -------------------- The diagram below shows the logical architecture. It shows the basic components that form the Bare Metal service, the relation of the Bare Metal service with other OpenStack services and the logical flow of a boot instance request resulting in the provisioning of a physical server. .. figure:: ../images/logical_architecture.png :alt: Logical Architecture A user's request to boot an instance is passed to the Compute service via the Compute API and the Compute Scheduler. The Compute service uses the *ironic virt driver* to hand over this request to the Bare Metal service, where the request passes from the Bare Metal API, to the Conductor, to a Driver to successfully provision a physical server for the user. 
Just as the Compute service talks to various OpenStack services like Image, Network, Object Store etc to provision a virtual machine instance, here the Bare Metal service talks to the same OpenStack services for image, network and other resource needs to provision a bare metal instance. See :ref:`understanding-deployment` for a more detailed breakdown of a typical deployment process. Associated projects ------------------- Optionally, one may wish to utilize the following associated projects for additional functionality: :python-ironicclient-doc:`python-ironicclient <>` A command-line interface (CLI) and python bindings for interacting with the Bare Metal service. :ironic-ui-doc:`ironic-ui <>` Horizon dashboard, providing graphical interface (GUI) for the Bare Metal API. :ironic-inspector-doc:`ironic-inspector <>` An associated service which performs in-band hardware introspection by PXE booting unregistered hardware into the ironic-python-agent ramdisk. diskimage-builder_ A related project to help facilitate the creation of ramdisks and machine images, such as those running the ironic-python-agent. :bifrost-doc:`bifrost <>` A set of Ansible playbooks that automates the task of deploying a base image onto a set of known hardware using ironic in a standalone mode. .. _remote procedure call (RPC): https://en.wikipedia.org/wiki/Remote_procedure_call .. _WSGI: https://en.wikipedia.org/wiki/Web_Server_Gateway_Interface .. 
_diskimage-builder: https://docs.openstack.org/diskimage-builder/latest/ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1943994 ironic-14.0.1.dev163/doc/source/install/include/0000755000175000017500000000000000000000000021512 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/include/boot-mode.inc0000644000175000017500000000663200000000000024101 0ustar00coreycorey00000000000000.. _boot_mode_support: Boot mode support ----------------- Some of the bare metal hardware types (namely, ``redfish``, ``ilo`` and generic ``ipmi``) support setting boot mode (Legacy BIOS or UEFI). .. note:: Setting boot mode support in generic ``ipmi`` driver is coupled with setting boot device. That makes boot mode support in the ``ipmi`` driver incomplete. .. note:: In this chapter we will distinguish *ironic node* from *bare metal node*. The difference is that *ironic node* refers to a logical node, as it is configured in ironic, while *bare metal node* indicates the hardware machine that ironic is managing. 
The following rules apply in order when ironic manages node boot mode: * If the hardware type (or bare metal node) does not implement reading current boot mode of the bare metal node, then ironic assumes that boot mode is not set on the bare metal node * If boot mode is not set on ironic node and bare metal node boot mode is unknown (not set, can't be read etc.), ironic node boot mode is set to the value of the `[deploy]/default_boot_mode` option * If boot mode is set on a bare metal node, but is not set on ironic node, bare metal node boot mode is set on ironic node * If boot mode is set on ironic node, but is not set on the bare metal node, ironic node boot mode is attempted to be set on the bare metal node (failure to set boot mode on the bare metal node will not fail ironic node deployment) * If different boot modes appear on to be set ironic node and on the bare metal node, ironic node boot mode is attempted to be set on the bare metal node (failure to set boot mode on the bare metal node will fail ironic node deployment) .. warning:: If a bare metal node does not support setting boot mode, then the operator needs to make sure that boot mode configuration is consistent between ironic node and the bare metal node. The boot modes can be configured in the Bare Metal service in the following way: * Only one boot mode (either ``uefi`` or ``bios``) can be configured for the node. * If the operator wants a node to boot always in ``uefi`` mode or ``bios`` mode, then they may use ``capabilities`` parameter within ``properties`` field of an bare metal node. The operator must manually set the appropriate boot mode on the bare metal node. 
To configure a node in ``uefi`` mode, then set ``capabilities`` as below:: openstack baremetal node set --property capabilities='boot_mode:uefi' Nodes having ``boot_mode`` set to ``uefi`` may be requested by adding an ``extra_spec`` to the Compute service flavor:: nova flavor-key ironic-test-3 set capabilities:boot_mode="uefi" nova boot --flavor ironic-test-3 --image test-image instance-1 If ``capabilities`` is used in ``extra_spec`` as above, nova scheduler (``ComputeCapabilitiesFilter``) will match only bare metal nodes which have the ``boot_mode`` set appropriately in ``properties/capabilities``. It will filter out rest of the nodes. The above facility for matching in the Compute service can be used in heterogeneous environments where there is a mix of ``uefi`` and ``bios`` machines, and operator wants to provide a choice to the user regarding boot modes. If the flavor doesn't contain ``boot_mode`` and ``boot_mode`` is configured for bare metal nodes, then nova scheduler will consider all nodes and user may get either ``bios`` or ``uefi`` machine. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/include/common-configure.inc0000644000175000017500000000122100000000000025450 0ustar00coreycorey00000000000000The Bare Metal service is configured via its configuration file. This file is typically located at ``/etc/ironic/ironic.conf``. Although some configuration options are mentioned here, it is recommended that you review all the :doc:`/configuration/sample-config` so that the Bare Metal service is configured for your needs. It is possible to set up an ironic-api and an ironic-conductor services on the same host or different hosts. Users also can add new ironic-conductor hosts to deal with an increasing number of bare metal nodes. But the additional ironic-conductor services should be at the same version as that of existing ironic-conductor services. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/include/common-prerequisites.inc0000644000175000017500000000220200000000000026373 0ustar00coreycorey00000000000000Install and configure prerequisites ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Bare Metal service is a collection of components that provides support to manage and provision physical machines. You can configure these components to run on separate nodes or the same node. In this guide, the components run on one node, typically the Compute Service's compute node. It assumes that the Identity, Image, Compute, and Networking services have already been set up. Set up the database for Bare Metal ---------------------------------- The Bare Metal service stores information in a database. This guide uses the MySQL database that is used by other OpenStack services. #. In MySQL, create an ``ironic`` database that is accessible by the ``ironic`` user. Replace ``IRONIC_DBPASSWORD`` with a suitable password: .. code-block:: console # mysql -u root -p mysql> CREATE DATABASE ironic CHARACTER SET utf8; mysql> GRANT ALL PRIVILEGES ON ironic.* TO 'ironic'@'localhost' \ IDENTIFIED BY 'IRONIC_DBPASSWORD'; mysql> GRANT ALL PRIVILEGES ON ironic.* TO 'ironic'@'%' \ IDENTIFIED BY 'IRONIC_DBPASSWORD'; ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/include/configure-ironic-api-mod_wsgi.inc0000644000175000017500000000464500000000000030035 0ustar00coreycorey00000000000000Configuring ironic-api behind mod_wsgi -------------------------------------- Bare Metal service comes with an example file for configuring the ``ironic-api`` service to run behind Apache with mod_wsgi. #. 
Install the apache service: RHEL7/CentOS7:: sudo yum install httpd Fedora:: sudo dnf install httpd Debian/Ubuntu:: apt-get install apache2 SUSE:: zypper install apache2 #. Download the ``etc/apache2/ironic`` file from the `Ironic project tree `_ and copy it to the apache sites: Fedora/RHEL7/CentOS7:: sudo cp etc/apache2/ironic /etc/httpd/conf.d/ironic.conf Debian/Ubuntu:: sudo cp etc/apache2/ironic /etc/apache2/sites-available/ironic.conf SUSE:: sudo cp etc/apache2/ironic /etc/apache2/vhosts.d/ironic.conf #. Edit the recently copied ``/ironic.conf``: #. Modify the ``WSGIDaemonProcess``, ``APACHE_RUN_USER`` and ``APACHE_RUN_GROUP`` directives to set the user and group values to an appropriate user on your server. #. Modify the ``WSGIScriptAlias`` directive to point to the automatically generated ``ironic-api-wsgi`` script that is located in `IRONIC_BIN` directory. #. Modify the ``Directory`` directive to set the path to the Ironic API code. #. Modify the ``ErrorLog`` and ``CustomLog`` to redirect the logs to the right directory (on Red Hat systems this is usually under /var/log/httpd). #. Enable the apache ``ironic`` in site and reload: Fedora/RHEL7/CentOS7:: sudo systemctl reload httpd Debian/Ubuntu:: sudo a2ensite ironic sudo service apache2 reload SUSE:: sudo systemctl reload apache2 .. note:: The file ``ironic-api-wsgi`` is automatically generated by pbr and is available in `IRONIC_BIN` directory. It should not be modified. Configure another WSGI container -------------------------------- A slightly different approach has to be used for WSGI containers that cannot use ``ironic-api-wsgi``. For example, for *gunicorn*: .. code-block:: console gunicorn -b 0.0.0.0:6385 'ironic.api.wsgi:initialize_wsgi_app(argv=[])' If you want to pass a configuration file, use: .. 
code-block:: console gunicorn -b 0.0.0.0:6385 \ 'ironic.api.wsgi:initialize_wsgi_app(argv=["ironic-api", "--config-file=/path/to/_ironic.conf"])' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/include/configure-ironic-api.inc0000644000175000017500000000724500000000000026226 0ustar00coreycorey00000000000000Configuring ironic-api service ------------------------------ #. The Bare Metal service stores information in a database. This guide uses the MySQL database that is used by other OpenStack services. Configure the location of the database via the ``connection`` option. In the following, replace ``IRONIC_DBPASSWORD`` with the password of your ``ironic`` user, and replace ``DB_IP`` with the IP address where the DB server is located: .. code-block:: ini [database] # The SQLAlchemy connection string used to connect to the # database (string value) connection=mysql+pymysql://ironic:IRONIC_DBPASSWORD@DB_IP/ironic?charset=utf8 #. Configure the ironic-api service to use the RabbitMQ message broker by setting the following option. Replace ``RPC_*`` with appropriate address details and credentials of RabbitMQ server: .. code-block:: ini [DEFAULT] # A URL representing the messaging driver to use and its full # configuration. (string value) transport_url = rabbit://RPC_USER:RPC_PASSWORD@RPC_HOST:RPC_PORT/ Alternatively, you can use JSON RPC for interactions between ironic-conductor and ironic-api. Enable it in the configuration and provide the keystone credentials to use for authentication: .. 
code-block:: ini [DEFAULT] rpc_transport = json-rpc [json_rpc] # Authentication type to load (string value) auth_type = password # Authentication URL (string value) auth_url=https://IDENTITY_IP:5000/ # Username (string value) username=ironic # User's password (string value) password=IRONIC_PASSWORD # Project name to scope to (string value) project_name=service # Domain ID containing project (string value) project_domain_id=default # User's domain id (string value) user_domain_id=default If you use port other than the default 8089 for JSON RPC, you have to configure it, for example: .. code-block:: ini [json_rpc] port = 9999 #. Configure the ironic-api service to use these credentials with the Identity service. Replace ``PUBLIC_IDENTITY_IP`` with the public IP of the Identity server, ``PRIVATE_IDENTITY_IP`` with the private IP of the Identity server and replace ``IRONIC_PASSWORD`` with the password you chose for the ``ironic`` user in the Identity service: .. code-block:: ini [DEFAULT] # Authentication strategy used by ironic-api: one of # "keystone" or "noauth". "noauth" should not be used in a # production environment because all authentication will be # disabled. (string value) auth_strategy=keystone [keystone_authtoken] # Authentication type to load (string value) auth_type=password # Complete public Identity API endpoint (string value) www_authenticate_uri=http://PUBLIC_IDENTITY_IP:5000 # Complete admin Identity API endpoint. (string value) auth_url=http://PRIVATE_IDENTITY_IP:5000 # Service username. (string value) username=ironic # Service account password. (string value) password=IRONIC_PASSWORD # Service tenant name. (string value) project_name=service # Domain name containing project (string value) project_domain_name=Default # User's domain name (string value) user_domain_name=Default #. Create the Bare Metal service database tables: .. code-block:: bash $ ironic-dbsync --config-file /etc/ironic/ironic.conf create_schema #. 
Restart the ironic-api service: Fedora/RHEL7/CentOS7/SUSE:: sudo systemctl restart openstack-ironic-api Ubuntu:: sudo service ironic-api restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/include/configure-ironic-conductor.inc0000644000175000017500000001736400000000000027460 0ustar00coreycorey00000000000000Configuring ironic-conductor service ------------------------------------ #. Replace ``HOST_IP`` with IP of the conductor host. .. code-block:: ini [DEFAULT] # IP address of this host. If unset, will determine the IP # programmatically. If unable to do so, will use "127.0.0.1". # (string value) my_ip=HOST_IP .. note:: If a conductor host has multiple IPs, ``my_ip`` should be set to the IP which is on the same network as the bare metal nodes. #. Configure the location of the database. Ironic-conductor should use the same configuration as ironic-api. Replace ``IRONIC_DBPASSWORD`` with the password of your ``ironic`` user, and replace DB_IP with the IP address where the DB server is located: .. code-block:: ini [database] # The SQLAlchemy connection string to use to connect to the # database. (string value) connection=mysql+pymysql://ironic:IRONIC_DBPASSWORD@DB_IP/ironic?charset=utf8 #. Configure the ironic-conductor service to use the RabbitMQ message broker by setting the following option. Ironic-conductor should use the same configuration as ironic-api. Replace ``RPC_*`` with appropriate address details and credentials of RabbitMQ server: .. code-block:: ini [DEFAULT] # A URL representing the messaging driver to use and its full # configuration. (string value) transport_url = rabbit://RPC_USER:RPC_PASSWORD@RPC_HOST:RPC_PORT/ Alternatively, you can use JSON RPC for interactions between ironic-conductor and ironic-api. 
Enable it in the configuration and provide the keystone credentials to use for authenticating incoming requests (can be the same as for the API): .. code-block:: ini [DEFAULT] rpc_transport = json-rpc [keystone_authtoken] # Authentication type to load (string value) auth_type=password # Complete public Identity API endpoint (string value) www_authenticate_uri=http://PUBLIC_IDENTITY_IP:5000 # Complete admin Identity API endpoint. (string value) auth_url=http://PRIVATE_IDENTITY_IP:5000 # Service username. (string value) username=ironic # Service account password. (string value) password=IRONIC_PASSWORD # Service tenant name. (string value) project_name=service # Domain name containing project (string value) project_domain_name=Default # User's domain name (string value) user_domain_name=Default You can optionally change the host and the port the JSON RPC service will bind to, for example: .. code-block:: ini [json_rpc] host_ip = 192.168.0.10 port = 9999 .. warning:: Hostnames of ironic-conductor machines must be resolvable by ironic-api services when JSON RPC is used. #. Configure credentials for accessing other OpenStack services. In order to communicate with other OpenStack services, the Bare Metal service needs to use service users to authenticate to the OpenStack Identity service when making requests to other services. These users' credentials have to be configured in each configuration file section related to the corresponding service: * ``[neutron]`` - to access the OpenStack Networking service * ``[glance]`` - to access the OpenStack Image service * ``[swift]`` - to access the OpenStack Object Storage service * ``[cinder]`` - to access the OpenStack Block Storage service * ``[inspector]`` - to access the OpenStack Bare Metal Introspection service * ``[service_catalog]`` - a special section holding credentials the Bare Metal service will use to discover its own API URL endpoint as registered in the OpenStack Identity service catalog. 
For simplicity, you can use the same service user for all services. For backward compatibility, this should be the same user configured in the ``[keystone_authtoken]`` section for the ironic-api service (see "Configuring ironic-api service"). However, this is not necessary, and you can create and configure separate service users for each service. Under the hood, Bare Metal service uses ``keystoneauth`` library together with ``Authentication plugin``, ``Session`` and ``Adapter`` concepts provided by it to instantiate service clients. Please refer to `Keystoneauth documentation`_ for supported plugins, their available options as well as Session- and Adapter-related options for authentication, connection and endpoint discovery respectively. In the example below, authentication information for user to access the OpenStack Networking service is configured to use: * Networking service is deployed in the Identity service region named ``RegionTwo``, with only its ``public`` endpoint interface registered in the service catalog. * HTTPS connection with specific CA SSL certificate when making requests * the same service user as configured for ironic-api service * dynamic ``password`` authentication plugin that will discover appropriate version of Identity service API based on other provided options - replace ``IDENTITY_IP`` with the IP of the Identity server, and replace ``IRONIC_PASSWORD`` with the password you chose for the ``ironic`` user in the Identity service .. 
code-block:: ini [neutron] # Authentication type to load (string value) auth_type = password # Authentication URL (string value) auth_url=https://IDENTITY_IP:5000/ # Username (string value) username=ironic # User's password (string value) password=IRONIC_PASSWORD # Project name to scope to (string value) project_name=service # Domain ID containing project (string value) project_domain_id=default # User's domain id (string value) user_domain_id=default # PEM encoded Certificate Authority to use when verifying # HTTPs connections. (string value) cafile=/opt/stack/data/ca-bundle.pem # The default region_name for endpoint URL discovery. (string # value) region_name = RegionTwo # List of interfaces, in order of preference, for endpoint # URL. (list value) valid_interfaces=public By default, in order to communicate with another service, the Bare Metal service will attempt to discover an appropriate endpoint for that service via the Identity service's service catalog. The relevant configuration options from that service group in the Bare Metal service configuration file are used for this purpose. If you want to use a different endpoint for a particular service, specify this via the ``endpoint_override`` configuration option of that service group, in the Bare Metal service's configuration file. Taking the previous Networking service example, this would be .. code-block:: ini [neutron] ... endpoint_override = (Replace `` with actual address of a specific Networking service endpoint.) #. Configure enabled drivers and hardware types as described in :doc:`/install/enabling-drivers`. A. If you enabled any driver that uses :ref:`direct-deploy`, Swift backend for the Image service must be installed and configured, see :ref:`image-store`. Ceph Object Gateway (RADOS Gateway) is also supported as the Image service's backend, see :ref:`radosgw support`. #. Configure the network for ironic-conductor service to perform node cleaning, see :ref:`cleaning` from the admin guide. #. 
Restart the ironic-conductor service: Fedora/RHEL7/CentOS7/SUSE:: sudo systemctl restart openstack-ironic-conductor Ubuntu:: sudo service ironic-conductor restart .. _Keystoneauth documentation: https://docs.openstack.org/keystoneauth/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/include/console.inc0000644000175000017500000000021400000000000023644 0ustar00coreycorey00000000000000Configuring node web console ---------------------------- See :ref:`console`. .. TODO(dtantsur): move the installation documentation here ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/include/disk-label.inc0000644000175000017500000000477500000000000024231 0ustar00coreycorey00000000000000.. _choosing_the_disk_label: Choosing the disk label ----------------------- .. note:: The term ``disk label`` is historically used in Ironic and was taken from `parted `_. Apparently everyone seems to have a different word for ``disk label`` - these are all the same thing: disk type, partition table, partition map and so on... Ironic allows operators to choose which disk label they want their bare metal node to be deployed with when Ironic is responsible for partitioning the disk; therefore choosing the disk label does not apply when the image being deployed is a ``whole disk image``. There are some edge cases where someone may want to choose a specific disk label for the images being deployed, including but not limited to: * For machines in ``bios`` boot mode with disks larger than 2 terabytes it's recommended to use a ``gpt`` disk label. That's because a capacity beyond 2 terabytes is not addressable by using the MBR partitioning type. But, although GPT claims to be backward compatible with legacy BIOS systems `that's not always the case `_. 
* Operators may want to force the partitioning to be always MBR (even if the machine is deployed with boot mode ``uefi``) to avoid breakage of applications and tools running on those instances. The disk label can be configured in two ways; when Ironic is used with the Compute service or in standalone mode. The following bullet points and sections will describe both methods: * When no disk label is provided Ironic will configure it according to the boot mode (see :ref:`boot_mode_support`); ``bios`` boot mode will use ``msdos`` and ``uefi`` boot mode will use ``gpt``. * Only one disk label - either ``msdos`` or ``gpt`` - can be configured for the node. When used with Compute service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When Ironic is used with the Compute service the disk label should be set to node's ``properties/capabilities`` field and also to the flavor which will request such capability, for example:: openstack baremetal node set --property capabilities='disk_label:gpt' As for the flavor:: nova flavor-key baremetal set capabilities:disk_label="gpt" When used in standalone mode ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When used without the Compute service, the disk label should be set directly to the node's ``instance_info`` field, as below:: openstack baremetal node set --instance-info capabilities='{"disk_label": "gpt"}' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/include/kernel-boot-parameters.inc0000644000175000017500000000743600000000000026601 0ustar00coreycorey00000000000000.. _kernel-boot-parameters: Appending kernel parameters to boot instances --------------------------------------------- The Bare Metal service supports passing custom kernel parameters to boot instances to fit users' requirements. The way to append the kernel parameters is depending on how to boot instances. 
Network boot ~~~~~~~~~~~~ Currently, the Bare Metal service supports assigning unified kernel parameters to PXE booted instances by: * Modifying the ``[pxe]/pxe_append_params`` configuration option, for example:: [pxe] pxe_append_params = quiet splash * Copying a template from shipped templates to another place, for example:: https://opendev.org/openstack/ironic/src/branch/master/ironic/drivers/modules/pxe_config.template Making the modifications and pointing to the custom template via the configuration options: ``[pxe]/pxe_config_template`` and ``[pxe]/uefi_pxe_config_template``. Local boot ~~~~~~~~~~ For local boot instances, users can make use of configuration drive (see :ref:`configdrive`) to pass a custom script to append kernel parameters when creating an instance. This is more flexible and can vary per instance. Here is an example for grub2 with ubuntu, users can customize it to fit their use case: .. code:: python #!/usr/bin/env python import os # Default grub2 config file in Ubuntu grub_file = '/etc/default/grub' # Add parameters here to pass to instance. kernel_parameters = ['quiet', 'splash'] grub_cmd = 'GRUB_CMDLINE_LINUX' old_grub_file = grub_file+'~' os.rename(grub_file, old_grub_file) cmdline_existed = False with open(grub_file, 'w') as writer, \ open(old_grub_file, 'r') as reader: for line in reader: key = line.split('=')[0] if key == grub_cmd: #If there is already some value: if line.strip()[-1] == '"': line = line.strip()[:-1] + ' ' + ' '.join(kernel_parameters) + '"' cmdline_existed = True writer.write(line) if not cmdline_existed: line = grub_cmd + '=' + '"' + ' '.join(kernel_parameters) + '"' writer.write(line) os.remove(old_grub_file) os.system('update-grub') os.system('reboot') Console ~~~~~~~ In order to change default console configuration in the Bare Metal service configuration file (``[pxe]`` section in ``/etc/ironic/ironic.conf``), include the serial port terminal and serial speed. 
Serial speed must be the same as the serial configuration in the BIOS settings, so that the operating system boot process can be seen in the serial console or web console. Following examples represent possible parameters for serial and web console respectively. * Node serial console. The console parameter ``console=ttyS0,115200n8`` uses ``ttyS0`` for console output at ``115200bps, 8bit, non-parity``, e.g.:: [pxe] # Additional append parameters for baremetal PXE boot. pxe_append_params = nofb nomodeset vga=normal console=ttyS0,115200n8 * For node web console configuration is similar with the addition of ``ttyX`` parameter, see example:: [pxe] # Additional append parameters for baremetal PXE boot. pxe_append_params = nofb nomodeset vga=normal console=tty0 console=ttyS0,115200n8 For detailed information on how to add consoles see the reference documents `kernel params`_ and `serial console`_. In case of local boot the Bare Metal service is not able to control kernel boot parameters. To configure console locally, follow 'Local boot' section above. .. _`kernel params`: https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html .. _`serial console`: https://www.kernel.org/doc/html/latest/admin-guide/serial-console.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/include/local-boot-partition-images.inc0000644000175000017500000000410700000000000027514 0ustar00coreycorey00000000000000.. _local-boot-partition-images: Local boot with partition images -------------------------------- The Bare Metal service supports local boot with partition images, meaning that after the deployment the node's subsequent reboots won't happen via PXE or Virtual Media. Instead, it will boot from a local boot loader installed on the disk. .. note:: Whole disk images, on the contrary, support only local boot, and use it by default. 
It's important to note that in order for this to work the image being deployed with Bare Metal service **must** contain ``grub2`` installed within it. Enabling the local boot is different when Bare Metal service is used with Compute service and without it. The following sections will describe both methods. .. _ironic-python-agent: https://docs.openstack.org/ironic-python-agent/latest/ Enabling local boot with Compute service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable local boot we need to set a capability on the bare metal node, for example:: openstack baremetal node set --property capabilities="boot_option:local" Nodes having ``boot_option`` set to ``local`` may be requested by adding an ``extra_spec`` to the Compute service flavor, for example:: nova flavor-key baremetal set capabilities:boot_option="local" .. note:: If the node is configured to use ``UEFI``, Bare Metal service will create an ``EFI partition`` on the disk and switch the partition table format to ``gpt``. The ``EFI partition`` will be used later by the boot loader (which is installed from the deploy ramdisk). .. _local-boot-without-compute: Enabling local boot without Compute ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since adding ``capabilities`` to the node's properties is only used by the nova scheduler to perform more advanced scheduling of instances, we need a way to enable local boot when Compute is not present. 
To do that we can simply specify the capability via the ``instance_info`` attribute of the node, for example:: openstack baremetal node set --instance-info capabilities='{"boot_option": "local"}' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/include/notifications.inc0000644000175000017500000000144400000000000025061 0ustar00coreycorey00000000000000Notifications ------------- The Bare Metal service supports the emission of notifications, which are messages sent on a message broker (like RabbitMQ or anything else supported by the `oslo messaging library `_) that indicate various events which occur, such as when a node changes power states. These can be consumed by an external service reading from the message bus. For example, `Searchlight `_ is an OpenStack service that uses notifications to index (and make searchable) resources from the Bare Metal service. Notifications are disabled by default. For a complete list of available notifications and instructions for how to enable them, see the :doc:`/admin/notifications`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/include/root-device-hints.inc0000644000175000017500000000723300000000000025555 0ustar00coreycorey00000000000000.. _root-device-hints: Specifying the disk for deployment (root device hints) ------------------------------------------------------ The Bare Metal service supports passing hints to the deploy ramdisk about which disk it should pick for the deployment. The list of supported hints is: * model (STRING): device identifier * vendor (STRING): device vendor * serial (STRING): disk serial number * size (INT): size of the device in GiB .. 
note:: A node's 'local_gb' property is often set to a value 1 GiB less than the actual disk size to account for partitioning (this is how DevStack, TripleO and Ironic Inspector work, to name a few). However, in this case ``size`` should be the actual size. For example, for a 128 GiB disk ``local_gb`` will be 127, but size hint will be 128. * wwn (STRING): unique storage identifier * wwn_with_extension (STRING): unique storage identifier with the vendor extension appended * wwn_vendor_extension (STRING): unique vendor storage identifier * rotational (BOOLEAN): whether it's a rotational device or not. This hint makes it easier to distinguish HDDs (rotational) and SSDs (not rotational) when choosing which disk Ironic should deploy the image onto. * hctl (STRING): the SCSI address (Host, Channel, Target and Lun), e.g '1:0:0:0' * name (STRING): the device name, e.g /dev/md0 .. warning:: The root device hint name should only be used for devices with constant names (e.g RAID volumes). For SATA, SCSI and IDE disk controllers this hint is not recommended because the order in which the device nodes are added in Linux is arbitrary, resulting in devices like /dev/sda and /dev/sdb `switching around at boot time `_. To associate one or more hints with a node, update the node's properties with a ``root_device`` key, for example:: openstack baremetal node set --property root_device='{"wwn": "0x4000cca77fc4dba1"}' That will guarantee that Bare Metal service will pick the disk device that has the ``wwn`` equal to the specified wwn value, or fail the deployment if it can not be found. .. note:: Starting with the Ussuri release, root device hints can be specified per-instance, see :doc:`/install/standalone`. The hints can have an operator at the beginning of the value string. If no operator is specified the default is ``==`` (for numerical values) and ``s==`` (for string values). The supported operators are: * For numerical values: * ``=`` equal to or greater than. 
This is equivalent to ``>=`` and is supported for `legacy reasons `_ * ``==`` equal to * ``!=`` not equal to * ``>=`` greater than or equal to * ``>`` greater than * ``<=`` less than or equal to * ``<`` less than * For strings (as python comparisons): * ``s==`` equal to * ``s!=`` not equal to * ``s>=`` greater than or equal to * ``s>`` greater than * ``s<=`` less than or equal to * ``s<`` less than * ```` substring * For collections: * ```` all elements contained in collection * ```` find one of these Examples are: * Finding a disk larger or equal to 60 GiB and non-rotational (SSD):: openstack baremetal node set --property root_device='{"size": ">= 60", "rotational": false}' * Finding a disk whose vendor is ``samsung`` or ``winsys``:: openstack baremetal node set --property root_device='{"vendor": " samsung winsys"}' .. note:: If multiple hints are specified, a device must satisfy all the hints. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/include/trusted-boot.inc0000644000175000017500000000554400000000000024650 0ustar00coreycorey00000000000000.. _trusted-boot: Trusted boot with partition image --------------------------------- The Bare metal service supports trusted boot with partition images. This means at the end of the deployment process, when the node is rebooted with the new user image, ``trusted boot`` will be performed. It will measure the node's BIOS, boot loader, Option ROM and the Kernel/Ramdisk, to determine whether a bare metal node deployed by Ironic should be trusted. It's important to note that in order for this to work the node being deployed **must** have Intel `TXT`_ hardware support. The image being deployed with Ironic must have ``oat-client`` installed within it. The following will describe how to enable ``trusted boot`` and boot with PXE and Nova: #. 
Create a customized user image with ``oat-client`` installed:: disk-image-create -u fedora baremetal oat-client -o $TRUST_IMG For more information on creating customized images, see :ref:`image-requirements`. #. Enable VT-x, VT-d, TXT and TPM on the node. This can be done manually through the BIOS. Depending on the platform, several reboots may be needed. #. Enroll the node and update the node capability value:: openstack baremetal node create --driver ipmi openstack baremetal node set $NODE_UUID --property capabilities={'trusted_boot':true} #. Create a special flavor:: nova flavor-key $TRUST_FLAVOR_UUID set 'capabilities:trusted_boot'=true #. Prepare `tboot`_ and mboot.c32 and put them into tftp_root or http_root directory on all nodes with the ironic-conductor processes:: Ubuntu: cp /usr/lib/syslinux/mboot.c32 /tftpboot/ Fedora: cp /usr/share/syslinux/mboot.c32 /tftpboot/ *Note: The actual location of mboot.c32 varies among different distribution versions.* tboot can be downloaded from https://sourceforge.net/projects/tboot/files/latest/download #. Install an OAT Server. An `OAT Server`_ should be running and configured correctly. #. Boot an instance with Nova:: nova boot --flavor $TRUST_FLAVOR_UUID --image $TRUST_IMG --user-data $TRUST_SCRIPT trusted_instance *Note* that the node will be measured during ``trusted boot`` and the hash values saved into `TPM`_. An example of TRUST_SCRIPT can be found in `trust script example`_. #. Verify the result via OAT Server. This is outside the scope of Ironic. At the moment, users can manually verify the result by following the `manual verify steps`_. .. _`TXT`: http://en.wikipedia.org/wiki/Trusted_Execution_Technology .. _`tboot`: https://sourceforge.net/projects/tboot .. _`TPM`: http://en.wikipedia.org/wiki/Trusted_Platform_Module .. _`OAT Server`: https://github.com/OpenAttestation/OpenAttestation/wiki .. _`trust script example`: https://wiki.openstack.org/wiki/Bare-metal-trust#Trust_Script_Example .. 
_`manual verify steps`: https://wiki.openstack.org/wiki/Bare-metal-trust#Manual_verify_result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/index.rst0000644000175000017500000000127500000000000021735 0ustar00coreycorey00000000000000===================================== Bare Metal Service Installation Guide ===================================== The Bare Metal service is a collection of components that provides support to manage and provision physical machines. This chapter assumes a working setup of OpenStack following the `OpenStack Installation Guides `_. It contains the following sections: .. toctree:: :maxdepth: 2 get_started.rst refarch/index install.rst creating-images.rst deploy-ramdisk.rst configure-integration.rst setup-drivers.rst enrollment.rst standalone.rst configdrive.rst advanced.rst troubleshooting.rst next-steps.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/install-obs.rst0000644000175000017500000000222600000000000023052 0ustar00coreycorey00000000000000.. _install-obs: ============================================================ Install and configure for openSUSE and SUSE Linux Enterprise ============================================================ This section describes how to install and configure the Bare Metal service for openSUSE Leap 42.2 and SUSE Linux Enterprise Server 12 SP2. .. note:: Installation of the Bare Metal service on openSUSE and SUSE Linux Enterprise Server is not officially supported. Nevertheless, installation should be possible. .. include:: include/common-prerequisites.inc Install and configure components ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Install from packages .. code-block:: console # zypper install openstack-ironic-api openstack-ironic-conductor python-ironicclient #. Enable services .. 
code-block:: console # systemctl enable openstack-ironic-api openstack-ironic-conductor # systemctl start openstack-ironic-api openstack-ironic-conductor .. include:: include/common-configure.inc .. include:: include/configure-ironic-api.inc .. include:: include/configure-ironic-api-mod_wsgi.inc .. include:: include/configure-ironic-conductor.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/install-rdo.rst0000644000175000017500000000216500000000000023055 0ustar00coreycorey00000000000000.. _install-rdo: ============================================================= Install and configure for Red Hat Enterprise Linux and CentOS ============================================================= This section describes how to install and configure the Bare Metal service for Red Hat Enterprise Linux 7 and CentOS 7. .. include:: include/common-prerequisites.inc Install and configure components ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Install from packages - Using ``dnf`` .. code-block:: console # dnf install openstack-ironic-api openstack-ironic-conductor python-ironicclient - Using ``yum`` .. code-block:: console # yum install openstack-ironic-api openstack-ironic-conductor python-ironicclient #. Enable services .. code-block:: console # systemctl enable openstack-ironic-api openstack-ironic-conductor # systemctl start openstack-ironic-api openstack-ironic-conductor .. include:: include/common-configure.inc .. include:: include/configure-ironic-api.inc .. include:: include/configure-ironic-api-mod_wsgi.inc .. include:: include/configure-ironic-conductor.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/install-ubuntu.rst0000644000175000017500000000134700000000000023614 0ustar00coreycorey00000000000000.. 
_install-ubuntu: ================================ Install and configure for Ubuntu ================================ This section describes how to install and configure the Bare Metal service for Ubuntu 14.04 (LTS). .. include:: include/common-prerequisites.inc Install and configure components ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Install from packages (using apt-get) .. code-block:: console # apt-get install ironic-api ironic-conductor python-ironicclient #. Enable services Services are enabled by default on Ubuntu. .. include:: include/common-configure.inc .. include:: include/configure-ironic-api.inc .. include:: include/configure-ironic-api-mod_wsgi.inc .. include:: include/configure-ironic-conductor.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/install.rst0000644000175000017500000000054300000000000022271 0ustar00coreycorey00000000000000.. _install: Install and configure the Bare Metal service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Bare Metal service, code-named ironic. Note that installation and configuration vary by distribution. .. toctree:: :maxdepth: 2 install-rdo.rst install-ubuntu.rst install-obs.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/next-steps.rst0000644000175000017500000000016300000000000022733 0ustar00coreycorey00000000000000.. _next-steps: ========== Next steps ========== Your OpenStack environment now includes the Bare Metal service. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1943994 ironic-14.0.1.dev163/doc/source/install/refarch/0000755000175000017500000000000000000000000021501 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/refarch/common.rst0000644000175000017500000003321500000000000023527 0ustar00coreycorey00000000000000Common Considerations ===================== This section covers considerations that are equally important to all described architectures. .. contents:: :local: .. _refarch-common-components: Components ---------- As explained in :doc:`../get_started`, the Bare Metal service has three components. * The Bare Metal API service (``ironic-api``) should be deployed in a similar way as the control plane API services. The exact location will depend on the architecture used. * The Bare Metal conductor service (``ironic-conductor``) is where most of the provisioning logic lives. The following considerations are the most important when deciding on the way to deploy it: * The conductor manages a certain proportion of nodes, distributed to it via a hash ring. This includes constantly polling these nodes for their current power state and hardware sensor data (if enabled and supported by hardware, see :ref:`ipmi-sensor-data` for an example). * The conductor needs access to the `management controller`_ of each node it manages. * The conductor co-exists with TFTP (for PXE) and/or HTTP (for iPXE) services that provide the kernel and ramdisk to boot the nodes. The conductor manages them by writing files to their root directories. * If serial console is used, the conductor launches console processes locally. If the ``nova-serialproxy`` service (part of the Compute service) is used, it has to be able to reach the conductors. Otherwise, they have to be directly accessible by the users. 
* There must be mutual connectivity between the conductor and the nodes being deployed or cleaned. See Networking_ for details. * The provisioning ramdisk which runs the ``ironic-python-agent`` service on start up. .. warning:: The ``ironic-python-agent`` service is not intended to be used or executed anywhere other than a provisioning/cleaning/rescue ramdisk. Hardware and drivers -------------------- The Bare Metal service strives to provide the best support possible for a variety of hardware. However, not all hardware is supported equally well. It depends on both the capabilities of hardware itself and the available drivers. This section covers various considerations related to the hardware interfaces. See :doc:`/install/enabling-drivers` for a detailed introduction into hardware types and interfaces before proceeding. Power and management interfaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The minimum set of capabilities that the hardware has to provide and the driver has to support is as follows: #. getting and setting the power state of the machine #. getting and setting the current boot device #. booting an image provided by the Bare Metal service (in the simplest case, support booting using PXE_ and/or iPXE_) .. note:: Strictly speaking, it is possible to make the Bare Metal service provision nodes without some of these capabilities via some manual steps. It is not the recommended way of deployment, and thus it is not covered in this guide. Once you make sure that the hardware supports these capabilities, you need to find a suitable driver. Most of enterprise-grade hardware has support for IPMI_ and thus can utilize :doc:`/admin/drivers/ipmitool`. Some newer hardware also supports :doc:`/admin/drivers/redfish`. Several vendors provide more specific drivers that usually provide additional capabilities. Check :doc:`/admin/drivers` to find the most suitable one. .. 
_refarch-common-boot: Boot interface ~~~~~~~~~~~~~~ The boot interface of a node manages booting of both the deploy ramdisk and the user instances on the bare metal node. The deploy interface orchestrates the deployment and defines how the image gets transferred to the target disk. The main alternatives are to use PXE/iPXE or virtual media - see :doc:`/admin/interfaces/boot` for a detailed explanation. If a virtual media implementation is available for the hardware, it is recommended using it for better scalability and security. Otherwise, it is recommended to use iPXE, when it is supported by target hardware. Deploy interface ~~~~~~~~~~~~~~~~ There are two deploy interfaces in-tree, ``iscsi`` and ``direct``. See :doc:`../../admin/interfaces/deploy` for explanation of the difference. With the ``iscsi`` deploy method, most of the deployment operations happen on the conductor. If the Object Storage service (swift) or RadosGW is present in the environment, it is recommended to use the ``direct`` deploy method for better scalability and reliability. .. TODO(dtantsur): say something about the ansible deploy, when it's in Hardware specifications ~~~~~~~~~~~~~~~~~~~~~~~ The Bare Metal services does not impose too many restrictions on the characteristics of hardware itself. However, keep in mind that * By default, the Bare Metal service will pick the smallest hard drive that is larger than 4 GiB for deployment. Another hard drive can be used, but it requires setting :ref:`root device hints `. .. note:: This device does not have to match the boot device set in BIOS (or similar firmware). * The machines should have enough RAM to fit the deployment/cleaning ramdisk to run. The minimum varies greatly depending on the way the ramdisk was built. For example, *tinyipa*, the TinyCoreLinux-based ramdisk used in the CI, only needs 400 MiB of RAM, while ramdisks built by *diskimage-builder* may require 3 GiB or more. 
Image types ----------- The Bare Metal service can deploy two types of images: * *Whole-disk* images that contain a complete partitioning table with all necessary partitions and a bootloader. Such images are the most universal, but may be harder to build. * *Partition images* that contain only the root partition. The Bare Metal service will create the necessary partitions and install a boot loader, if needed. .. warning:: Partition images are only supported with GNU/Linux operating systems. .. warning:: If you plan on using local boot, your partition images must contain GRUB2 bootloader tools to enable ironic to set up the bootloader during deploy. Local vs network boot --------------------- The Bare Metal service supports booting user instances either using a local bootloader or using the driver's boot interface (e.g. via PXE_ or iPXE_ protocol in case of the ``pxe`` interface). Network boot cannot be used with certain architectures (for example, when no tenant networks have access to the control plane). Additional considerations are related to the ``pxe`` boot interface, and other boot interfaces based on it: * Local boot makes node's boot process independent of the Bare Metal conductor managing it. Thus, nodes are able to reboot correctly, even if the Bare Metal TFTP or HTTP service is down. * Network boot (and iPXE) must be used when booting nodes from remote volumes, if the driver does not support attaching volumes out-of-band. The default boot option for the cloud can be changed via the Bare Metal service configuration file, for example: .. code-block:: ini [deploy] default_boot_option = local This default can be overridden by setting the ``boot_option`` capability on a node. See :ref:`local-boot-partition-images` for details. .. note:: Currently, network boot is used by default. However, we plan on changing it in the future, so it's safer to set the ``default_boot_option`` explicitly. .. 
_refarch-common-networking: Networking ---------- There are several recommended network topologies to be used with the Bare Metal service. They are explained in depth in specific architecture documentation. However, several considerations are common for all of them: * There has to be a *provisioning* network, which is used by nodes during the deployment process. If allowed by the architecture, this network should not be accessible by end users, and should not have access to the internet. * There has to be a *cleaning* network, which is used by nodes during the cleaning process. * There should be a *rescuing* network, which is used by nodes during the rescue process. It can be skipped if the rescue process is not supported. .. note:: In the majority of cases, the same network should be used for cleaning, provisioning and rescue for simplicity. Unless noted otherwise, everything in these sections apply to all three networks. * The baremetal nodes must have access to the Bare Metal API while connected to the provisioning/cleaning/rescuing network. .. note:: Only two endpoints need to be exposed there:: GET /v1/lookup POST /v1/heartbeat/[a-z0-9\-]+ You may want to limit access from this network to only these endpoints, and make these endpoint not accessible from other networks. * If the ``pxe`` boot interface (or any boot interface based on it) is used, then the baremetal nodes should have untagged (access mode) connectivity to the provisioning/cleaning/rescuing networks. It allows PXE firmware, which does not support VLANs, to communicate with the services required for provisioning. .. note:: It depends on the *network interface* whether the Bare Metal service will handle it automatically. Check the networking documentation for the specific architecture. Sometimes it may be necessary to disable the spanning tree protocol delay on the switch - see :ref:`troubleshooting-stp`. 
* The Baremetal nodes need to have access to any services required for provisioning/cleaning/rescue, while connected to the provisioning/cleaning/rescuing network. This may include: * a TFTP server for PXE boot and also an HTTP server when iPXE is enabled * either an HTTP server or the Object Storage service in case of the ``direct`` deploy interface and some virtual media boot interfaces * The Baremetal Conductors need to have access to the booted baremetal nodes during provisioning/cleaning/rescue. A conductor communicates with an internal API, provided by **ironic-python-agent**, to conduct actions on nodes. .. _refarch-common-ha: HA and Scalability ------------------ ironic-api ~~~~~~~~~~ The Bare Metal API service is stateless, and thus can be easily scaled horizontally. It is recommended to deploy it as a WSGI application behind e.g. Apache or another WSGI container. .. note:: This service accesses the ironic database for reading entities (e.g. in response to ``GET /v1/nodes`` request) and in rare cases for writing. ironic-conductor ~~~~~~~~~~~~~~~~ High availability ^^^^^^^^^^^^^^^^^ The Bare Metal conductor service utilizes the active/active HA model. Every conductor manages a certain subset of nodes. The nodes are organized in a hash ring that tries to keep the load spread more or less uniformly across the conductors. When a conductor is considered offline, its nodes are taken over by other conductors. As a result of this, you need at least 2 conductor hosts for an HA deployment. Performance ^^^^^^^^^^^ Conductors can be resource intensive, so it is recommended (but not required) to keep all conductors separate from other services in the cloud. 
The minimum required number of conductors in a deployment depends on several factors: * the performance of the hardware where the conductors will be running, * the speed and reliability of the `management controller`_ of the bare metal nodes (for example, handling slower controllers may require having less nodes per conductor), * the frequency, at which the management controllers are polled by the Bare Metal service (see the ``sync_power_state_interval`` option), * the bare metal driver used for nodes (see `Hardware and drivers`_ above), * the network performance, * the maximum number of bare metal nodes that are provisioned simultaneously (see the ``max_concurrent_builds`` option for the Compute service). We recommend a target of **100** bare metal nodes per conductor for maximum reliability and performance. There is some tolerance for a larger number per conductor. However, it was reported [1]_ [2]_ that reliability degrades when handling approximately 300 bare metal nodes per conductor. Disk space ^^^^^^^^^^ Each conductor needs enough free disk space to cache images it uses. Depending on the combination of the deploy interface and the boot option, the space requirements are different: * The deployment kernel and ramdisk are always cached during the deployment. * The ``iscsi`` deploy method requires caching of the whole instance image locally during the deployment. The image has to be converted to the raw format, which may increase the required amount of disk space, as well as the CPU load. .. note:: This is not a concern for the ``direct`` deploy interface, as in this case the deployment ramdisk downloads the image and either streams it to the disk or caches it in memory. * When network boot is used, the instance image kernel and ramdisk are cached locally while the instance is active. .. note:: All images may be stored for some time after they are no longer needed. This is done to speed up simultaneous deployments of many similar images. 
The caching can be configured via the ``image_cache_size`` and ``image_cache_ttl`` configuration options in the ``pxe`` group. .. [1] http://lists.openstack.org/pipermail/openstack-dev/2017-June/118033.html .. [2] http://lists.openstack.org/pipermail/openstack-dev/2017-June/118327.html Other services ~~~~~~~~~~~~~~ When integrating with other OpenStack services, more considerations may need to be applied. This is covered in other parts of this guide. .. _PXE: https://en.wikipedia.org/wiki/Preboot_Execution_Environment .. _iPXE: https://en.wikipedia.org/wiki/IPXE .. _IPMI: https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface .. _management controller: https://en.wikipedia.org/wiki/Out-of-band_management ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/refarch/index.rst0000644000175000017500000000072700000000000023350 0ustar00coreycorey00000000000000Reference Deploy Architectures ============================== This section covers the way we recommend the Bare Metal service to be deployed and managed. It is assumed that a reader has already gone through :doc:`/user/index`. It may be also useful to try :ref:`deploy_devstack` first to get better familiar with the concepts used in this guide. .. toctree:: :maxdepth: 2 common Scenarios --------- .. toctree:: :maxdepth: 2 small-cloud-trusted-tenants ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/refarch/small-cloud-trusted-tenants.rst0000644000175000017500000002306400000000000027616 0ustar00coreycorey00000000000000Small cloud with trusted tenants ================================ Story ----- As an operator I would like to build a small cloud with both virtual and bare metal instances or add bare metal provisioning to my existing small or medium scale single-site OpenStack cloud. 
The expected number of bare metal machines is less than 100, and the rate of provisioning and unprovisioning is expected to be low. All users of my cloud are trusted by me to not conduct malicious actions towards each other or the cloud infrastructure itself. As a user I would like to occasionally provision bare metal instances through the Compute API by selecting an appropriate Compute flavor. I would like to be able to boot them from images provided by the Image service or from volumes provided by the Volume service. Components ---------- This architecture assumes `an OpenStack installation`_ with the following components participating in the bare metal provisioning: * The :nova-doc:`Compute service <>` manages bare metal instances. * The :neutron-doc:`Networking service <>` provides DHCP for bare metal instances. * The :glance-doc:`Image service <>` provides images for bare metal instances. The following services can be optionally used by the Bare Metal service: * The :cinder-doc:`Volume service <>` provides volumes to boot bare metal instances from. * The :ironic-inspector-doc:`Bare Metal Introspection service <>` simplifies enrolling new bare metal machines by conducting in-band introspection. Node roles ---------- An OpenStack installation in this guide has at least these three types of nodes: * A *controller* node hosts the control plane services. * A *compute* node runs the virtual machines and hosts a subset of Compute and Networking components. * A *block storage* node provides persistent storage space for both virtual and bare metal nodes. The *compute* and *block storage* nodes are configured as described in the installation guides of the :nova-doc:`Compute service <>` and the :cinder-doc:`Volume service <>` respectively. The *controller* nodes host the Bare Metal service components. Networking ---------- The networking architecture will highly depend on the exact operating requirements. 
This guide expects the following existing networks: *control plane*, *storage* and *public*. Additionally, two more networks will be needed specifically for bare metal provisioning: *bare metal* and *management*. .. TODO(dtantsur): describe the storage network? .. TODO(dtantsur): a nice picture to illustrate the layout Control plane network ~~~~~~~~~~~~~~~~~~~~~ The *control plane network* is the network where OpenStack control plane services provide their public API. The Bare Metal API will be served to the operators and to the Compute service through this network. Public network ~~~~~~~~~~~~~~ The *public network* is used in a typical OpenStack deployment to create floating IPs for outside access to instances. Its role is the same for a bare metal deployment. .. note:: Since, as explained below, bare metal nodes will be put on a flat provider network, it is also possible to organize direct access to them, without using floating IPs and bypassing the Networking service completely. Bare metal network ~~~~~~~~~~~~~~~~~~ The *Bare metal network* is a dedicated network for bare metal nodes managed by the Bare Metal service. This architecture uses :ref:`flat bare metal networking `, in which both tenant traffic and technical traffic related to the Bare Metal service operation flow through this one network. Specifically, this network will serve as the *provisioning*, *cleaning* and *rescuing* network. It will also be used for introspection via the Bare Metal Introspection service. See :ref:`common networking considerations ` for an in-depth explanation of the networks used by the Bare Metal service. DHCP and boot parameters will be provided on this network by the Networking service's DHCP agents. For booting from volumes this network has to have a route to the *storage network*. Management network ~~~~~~~~~~~~~~~~~~ *Management network* is an independent network on which BMCs of the bare metal nodes are located. 
The ``ironic-conductor`` process needs access to this network. The tenants of the bare metal nodes must not have access to it. .. note:: The :ref:`direct deploy interface ` and certain :doc:`/admin/drivers` require the *management network* to have access to the Object storage service backend. Controllers ----------- A *controller* hosts the OpenStack control plane services as described in the `control plane design guide`_. While this architecture allows using *controllers* in a non-HA configuration, it is recommended to have at least three of them for HA. See :ref:`refarch-common-ha` for more details. Bare Metal services ~~~~~~~~~~~~~~~~~~~ The following components of the Bare Metal service are installed on a *controller* (see :ref:`components of the Bare Metal service `): * The Bare Metal API service either as a WSGI application or the ``ironic-api`` process. Typically, a load balancer, such as HAProxy, spreads the load between the API instances on the *controllers*. The API has to be served on the *control plane network*. Additionally, it has to be exposed to the *bare metal network* for the ramdisk callback API. * The ``ironic-conductor`` process. These processes work in active/active HA mode as explained in :ref:`refarch-common-ha`, thus they can be installed on all *controllers*. Each will handle a subset of bare metal nodes. The ``ironic-conductor`` processes have to have access to the following networks: * *control plane* for interacting with other services * *management* for contacting node's BMCs * *bare metal* for contacting deployment, cleaning or rescue ramdisks * TFTP and HTTP service for booting the nodes. Each ``ironic-conductor`` process has to have a matching TFTP and HTTP service. They should be exposed only to the *bare metal network* and must not be behind a load balancer. * The ``nova-compute`` process (from the Compute service). 
These processes work in active/active HA mode when dealing with bare metal nodes, thus they can be installed on all *controllers*. Each will handle a subset of bare metal nodes. .. note:: There is no 1-1 mapping between ``ironic-conductor`` and ``nova-compute`` processes, as they communicate only through the Bare Metal API service. * The :networking-baremetal-doc:`networking-baremetal <>` ML2 plugin should be loaded into the Networking service to assist with binding bare metal ports. The :ironic-neutron-agent-doc:`ironic-neutron-agent <>` service should be started as well. * If the Bare Metal introspection is used, its ``ironic-inspector`` process has to be installed on all *controllers*. Each such process works as both Bare Metal Introspection API and conductor service. A load balancer should be used to spread the API load between *controllers*. The API has to be served on the *control plane network*. Additionally, it has to be exposed to the *bare metal network* for the ramdisk callback API. .. TODO(dtantsur): a nice picture to illustrate the above Shared services ~~~~~~~~~~~~~~~ A *controller* also hosts two services required for the normal operation of OpenStack: * Database service (MySQL/MariaDB is typically used, but other enterprise-grade database solutions can be used as well). All Bare Metal service components need access to the database service. * Message queue service (RabbitMQ is typically used, but other enterprise-grade message queue brokers can be used as well). Both Bare Metal API (WSGI application or ``ironic-api`` process) and the ``ironic-conductor`` processes need access to the message queue service. The Bare Metal Introspection service does not need it. .. note:: These services are required for all OpenStack services. If you're adding the Bare Metal service to your cloud, you may reuse the existing database and messaging queue services. 
Bare metal nodes ---------------- Each bare metal node must be capable of booting from network, virtual media or other boot technology supported by the Bare Metal service as explained in :ref:`refarch-common-boot`. Each node must have one NIC on the *bare metal network*, and this NIC (and **only** it) must be configured to be able to boot from network. This is usually done in the *BIOS setup* or a similar firmware configuration utility. There is no need to alter the boot order, as it is managed by the Bare Metal service. Other NICs, if present, will not be managed by OpenStack. The NIC on the *bare metal network* should have untagged connectivity to it, since PXE firmware usually does not support VLANs - see :ref:`refarch-common-networking` for details. Storage ------- If your hardware **and** its bare metal :doc:`driver ` support booting from remote volumes, please check the driver documentation for information on how to enable it. It may include routing *management* and/or *bare metal* networks to the *storage network*. In case of the standard :ref:`pxe-boot`, booting from remote volumes is done via iPXE. In that case, the Volume storage backend must support iSCSI_ protocol, and the *bare metal network* has to have a route to the *storage network*. See :doc:`/admin/boot-from-volume` for more details. .. _an OpenStack installation: https://docs.openstack.org/arch-design/use-cases/use-case-general-compute.html .. _control plane design guide: https://docs.openstack.org/arch-design/design-control-plane.html .. _iSCSI: https://en.wikipedia.org/wiki/ISCSI ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/setup-drivers.rst0000644000175000017500000000030500000000000023433 0ustar00coreycorey00000000000000Set up the drivers for the Bare Metal service ============================================= .. 
toctree:: :maxdepth: 1 enabling-drivers configure-pxe configure-ipmi configure-iscsi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/standalone.rst0000644000175000017500000002347000000000000022757 0ustar00coreycorey00000000000000 Using Bare Metal service as a standalone service ================================================ It is possible to use the Bare Metal service without other OpenStack services. You should make the following changes to ``/etc/ironic/ironic.conf``: #. To disable usage of Identity service tokens:: [DEFAULT] ... auth_strategy=noauth #. If you want to disable the Networking service, you should have your network pre-configured to serve DHCP and TFTP for machines that you're deploying. To disable it, change the following lines:: [dhcp] ... dhcp_provider=none .. note:: If you disabled the Networking service and the driver that you use is supported by at most one conductor, PXE boot will still work for your nodes without any manual config editing. This is because you know all the DHCP options that will be used for deployment and can set up your DHCP server appropriately. If you have multiple conductors per driver, it would be better to use Networking since it will do all the dynamically changing configurations for you. #. If you want to disable using a messaging broker between conductor and API processes, switch to JSON RPC instead: .. code-block:: ini [DEFAULT] rpc_transport = json-rpc If you don't use Image service, it's possible to provide images to Bare Metal service via a URL. .. note:: At the moment, only two types of URLs are acceptable instead of Image service UUIDs: HTTP(S) URLs (for example, "http://my.server.net/images/img") and file URLs (file:///images/img). 
There are however some limitations for different hardware interfaces: * If you're using :ref:`direct-deploy`, you have to provide the Bare Metal service with the MD5 checksum of your instance image. To compute it, you can use the following command:: md5sum image.qcow2 ed82def8730f394fb85aef8a208635f6 image.qcow2 * :ref:`direct-deploy` requires the instance image be accessible through a HTTP(s) URL. Steps to start a deployment are pretty similar to those when using Compute: #. To use the :python-ironicclient-doc:`openstack baremetal CLI `, set up these environment variables. Since no authentication strategy is being used, the value none must be set for OS_AUTH_TYPE. OS_ENDPOINT is the URL of the ironic-api process. For example:: export OS_AUTH_TYPE=none export OS_ENDPOINT=http://localhost:6385/ #. Create a node in Bare Metal service. At minimum, you must specify the driver name (for example, ``ipmi``). You can also specify all the required driver parameters in one command. This will return the node UUID:: openstack baremetal node create --driver ipmi \ --driver-info ipmi_address=ipmi.server.net \ --driver-info ipmi_username=user \ --driver-info ipmi_password=pass \ --driver-info deploy_kernel=file:///images/deploy.vmlinuz \ --driver-info deploy_ramdisk=http://my.server.net/images/deploy.ramdisk +--------------+--------------------------------------------------------------------------+ | Property | Value | +--------------+--------------------------------------------------------------------------+ | uuid | be94df40-b80a-4f63-b92b-e9368ee8d14c | | driver_info | {u'deploy_ramdisk': u'http://my.server.net/images/deploy.ramdisk', | | | u'deploy_kernel': u'file:///images/deploy.vmlinuz', u'ipmi_address': | | | u'ipmi.server.net', u'ipmi_username': u'user', u'ipmi_password': | | | u'******'} | | extra | {} | | driver | ipmi | | chassis_uuid | | | properties | {} | +--------------+--------------------------------------------------------------------------+ Note that here 
deploy_kernel and deploy_ramdisk contain links to images instead of Image service UUIDs. #. As in case of Compute service, you can also provide ``capabilities`` to node properties, but they will be used only by Bare Metal service (for example, boot mode). Although you don't need to add properties like ``memory_mb``, ``cpus`` etc. as Bare Metal service will require UUID of a node you're going to deploy. #. Then create a port to inform Bare Metal service of the network interface cards which are part of the node by creating a port with each NIC's MAC address. In this case, they're used for naming of PXE configs for a node:: openstack baremetal port create $MAC_ADDRESS --node $NODE_UUID #. You also need to specify image information in the node's ``instance_info`` (see :doc:`creating-images`): * ``image_source`` - URL of the whole disk or root partition image, mandatory. For :ref:`direct-deploy` only HTTP(s) links are accepted, while :ref:`iscsi-deploy` also accepts links to local files (prefixed with ``file://``). * ``root_gb`` - size of the root partition, required for partition images. .. note:: Older versions of the Bare Metal service used to require a positive integer for ``root_gb`` even for whole-disk images. You may want to set it for compatibility. * ``image_checksum`` - MD5 checksum of the image specified by ``image_source``, only required for :ref:`direct-deploy`. .. note:: Additional checksum support exists via the ``image_os_hash_algo`` and ``image_os_hash_value`` fields. They may be used instead of the ``image_checksum`` field. Starting with the Stein release of ironic-python-agent can also be a URL to a checksums file, e.g. one generated with: .. code-block:: shell cd /path/to/http/root md5sum *.img > checksums * ``kernel``, ``ramdisk`` - HTTP(s) or file URLs of the kernel and initramfs of the target OS. Must be added **only** for partition images. 
For example:: openstack baremetal node set $NODE_UUID \ --instance-info image_source=$IMG \ --instance-info image_checksum=$MD5HASH \ --instance-info kernel=$KERNEL \ --instance-info ramdisk=$RAMDISK \ --instance-info root_gb=10 With a whole disk image:: openstack baremetal node set $NODE_UUID \ --instance-info image_source=$IMG \ --instance-info image_checksum=$MD5HASH #. :ref:`Boot mode ` can be specified per instance:: openstack baremetal node set $NODE_UUID \ --instance-info deploy_boot_mode=uefi Otherwise, the ``boot_mode`` capability from the node's ``properties`` will be used. .. warning:: The two settings must not contradict each other. .. note:: The ``boot_mode`` capability is only used in the node's ``properties``, not in ``instance_info`` like most other capabilities. Use the separate ``instance_info/deploy_boot_mode`` field instead. #. To override the :ref:`boot option ` used for this instance, set the ``boot_option`` capability:: openstack baremetal node set $NODE_UUID \ --instance-info capabilities='{"boot_option": "local"}' #. Starting with the Ussuri release, you can set :ref:`root device hints ` per instance:: openstack baremetal node set $NODE_UUID \ --instance-info root_device='{"wwn": "0x4000cca77fc4dba1"}' This setting overrides any previous setting in ``properties`` and will be removed on undeployment. #. Validate that all parameters are correct:: openstack baremetal node validate $NODE_UUID +------------+--------+----------------------------------------------------------------+ | Interface | Result | Reason | +------------+--------+----------------------------------------------------------------+ | boot | True | | | console | False | Missing 'ipmi_terminal_port' parameter in node's driver_info. | | deploy | True | | | inspect | True | | | management | True | | | network | True | | | power | True | | | raid | True | | | storage | True | | +------------+--------+----------------------------------------------------------------+ #. 
Now you can start the deployment, run:: openstack baremetal node deploy $NODE_UUID For iLO drivers, fields that should be provided are: * ``ilo_deploy_iso`` under ``driver_info``; * ``ilo_boot_iso``, ``image_source``, ``root_gb`` under ``instance_info``. .. note:: The Bare Metal service tracks content changes for non-Glance images by checking their modification date and time. For example, for HTTP image, if 'Last-Modified' header value from response to a HEAD request to "http://my.server.net/images/deploy.ramdisk" is greater than cached image modification time, Ironic will re-download the content. For "file://" images, the file system modification time is used. Other references ---------------- * :ref:`local-boot-without-compute` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/install/troubleshooting.rst0000644000175000017500000001767000000000000024063 0ustar00coreycorey00000000000000.. _troubleshooting-install: =============== Troubleshooting =============== Once all the services are running and configured properly, and a node has been enrolled with the Bare Metal service and is in the ``available`` provision state, the Compute service should detect the node as an available resource and expose it to the scheduler. .. note:: There is a delay, and it may take up to a minute (one periodic task cycle) for the Compute service to recognize any changes in the Bare Metal service's resources (both additions and deletions). In addition to watching ``nova-compute`` log files, you can see the available resources by looking at the list of Compute hypervisors. The resources reported therein should match the bare metal node properties, and the Compute service flavor. 
Here is an example set of commands to compare the resources in Compute service and Bare Metal service:: $ openstack baremetal node list +--------------------------------------+---------------+-------------+--------------------+-------------+ | UUID | Instance UUID | Power State | Provisioning State | Maintenance | +--------------------------------------+---------------+-------------+--------------------+-------------+ | 86a2b1bb-8b29-4964-a817-f90031debddb | None | power off | available | False | +--------------------------------------+---------------+-------------+--------------------+-------------+ $ openstack baremetal node show 86a2b1bb-8b29-4964-a817-f90031debddb +------------------------+----------------------------------------------------------------------+ | Property | Value | +------------------------+----------------------------------------------------------------------+ | instance_uuid | None | | properties | {u'memory_mb': u'1024', u'cpu_arch': u'x86_64', u'local_gb': u'10', | | | u'cpus': u'1'} | | maintenance | False | | driver_info | { [SNIP] } | | extra | {} | | last_error | None | | created_at | 2014-11-20T23:57:03+00:00 | | target_provision_state | None | | driver | ipmi | | updated_at | 2014-11-21T00:47:34+00:00 | | instance_info | {} | | chassis_uuid | 7b49bbc5-2eb7-4269-b6ea-3f1a51448a59 | | provision_state | available | | reservation | None | | power_state | power off | | console_enabled | False | | uuid | 86a2b1bb-8b29-4964-a817-f90031debddb | +------------------------+----------------------------------------------------------------------+ $ nova hypervisor-list +--------------------------------------+--------------------------------------+-------+---------+ | ID | Hypervisor hostname | State | Status | +--------------------------------------+--------------------------------------+-------+---------+ | 584cfdc8-9afd-4fbb-82ef-9ff25e1ad3f3 | 86a2b1bb-8b29-4964-a817-f90031debddb | up | enabled | 
+--------------------------------------+--------------------------------------+-------+---------+ $ nova hypervisor-show 584cfdc8-9afd-4fbb-82ef-9ff25e1ad3f3 +-------------------------+--------------------------------------+ | Property | Value | +-------------------------+--------------------------------------+ | cpu_info | baremetal cpu | | current_workload | 0 | | disk_available_least | - | | free_disk_gb | 10 | | free_ram_mb | 1024 | | host_ip | [ SNIP ] | | hypervisor_hostname | 86a2b1bb-8b29-4964-a817-f90031debddb | | hypervisor_type | ironic | | hypervisor_version | 1 | | id | 1 | | local_gb | 10 | | local_gb_used | 0 | | memory_mb | 1024 | | memory_mb_used | 0 | | running_vms | 0 | | service_disabled_reason | - | | service_host | my-test-host | | service_id | 6 | | state | up | | status | enabled | | vcpus | 1 | | vcpus_used | 0 | +-------------------------+--------------------------------------+ .. _maintenance_mode: Maintenance mode ---------------- Maintenance mode may be used if you need to take a node out of the resource pool. Putting a node in maintenance mode will prevent Bare Metal service from executing periodic tasks associated with the node. This will also prevent Compute service from placing a tenant instance on the node by not exposing the node to the nova scheduler. Nodes can be placed into maintenance mode with the following command. :: $ openstack baremetal node maintenance set $NODE_UUID A maintenance reason may be included with the optional ``--reason`` command line option. This is a free form text field that will be displayed in the ``maintenance_reason`` section of the ``node show`` command. :: $ openstack baremetal node maintenance set $UUID --reason "Need to add ram." 
$ openstack baremetal node show $UUID +------------------------+--------------------------------------+ | Property | Value | +------------------------+--------------------------------------+ | target_power_state | None | | extra | {} | | last_error | None | | updated_at | 2015-04-27T15:43:58+00:00 | | maintenance_reason | Need to add ram. | | ... | ... | | maintenance | True | | ... | ... | +------------------------+--------------------------------------+ To remove maintenance mode and clear any ``maintenance_reason`` use the following command. :: $ openstack baremetal node maintenance unset $NODE_UUID ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1943994 ironic-14.0.1.dev163/doc/source/user/0000755000175000017500000000000000000000000017377 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/doc/source/user/index.rst0000644000175000017500000004224400000000000021246 0ustar00coreycorey00000000000000.. _user-guide: ============================= Bare Metal Service User Guide ============================= Ironic is an OpenStack project which provisions bare metal (as opposed to virtual) machines. It may be used independently or as part of an OpenStack Cloud, and integrates with the OpenStack Identity (keystone), Compute (nova), Network (neutron), Image (glance) and Object (swift) services. When the Bare Metal service is appropriately configured with the Compute and Network services, it is possible to provision both virtual and physical machines through the Compute service's API. However, the set of instance actions is limited, arising from the different characteristics of physical servers and switch hardware. For example, live migration can not be performed on a bare metal instance. The community maintains reference drivers that leverage open-source technologies (eg. 
PXE and IPMI) to cover a wide range of hardware. Ironic's pluggable driver architecture also allows hardware vendors to write and contribute drivers that may improve performance or add functionality not provided by the community drivers. .. TODO: the remainder of this file needs to be cleaned up still Why Provision Bare Metal ======================== Here are a few use-cases for bare metal (physical server) provisioning in cloud; there are doubtless many more interesting ones: - High-performance computing clusters - Computing tasks that require access to hardware devices which can't be virtualized - Database hosting (some databases run poorly in a hypervisor) - Single tenant, dedicated hardware for performance, security, dependability and other regulatory requirements - Or, rapidly deploying a cloud infrastructure Conceptual Architecture ======================= The following diagram shows the relationships and how all services come into play during the provisioning of a physical server. (Note that Ceilometer and Swift can be used with Ironic, but are missing from this diagram.) .. figure:: ../images/conceptual_architecture.png :alt: ConceptualArchitecture Key Technologies for Bare Metal Hosting ======================================= Preboot Execution Environment (PXE) ----------------------------------- PXE is part of the Wired for Management (WfM) specification developed by Intel and Microsoft. The PXE enables system's BIOS and network interface card (NIC) to bootstrap a computer from the network in place of a disk. Bootstrapping is the process by which a system loads the OS into local memory so that it can be executed by the processor. This capability of allowing a system to boot over a network simplifies server deployment and server management for administrators. 
Dynamic Host Configuration Protocol (DHCP) ------------------------------------------ DHCP is a standardized networking protocol used on Internet Protocol (IP) networks for dynamically distributing network configuration parameters, such as IP addresses for interfaces and services. Using PXE, the BIOS uses DHCP to obtain an IP address for the network interface and to locate the server that stores the network bootstrap program (NBP). Network Bootstrap Program (NBP) ------------------------------- NBP is equivalent to GRUB (GRand Unified Bootloader) or LILO (LInux LOader) - loaders which are traditionally used in local booting. Like the boot program in a hard drive environment, the NBP is responsible for loading the OS kernel into memory so that the OS can be bootstrapped over a network. Trivial File Transfer Protocol (TFTP) ------------------------------------- TFTP is a simple file transfer protocol that is generally used for automated transfer of configuration or boot files between machines in a local environment. In a PXE environment, TFTP is used to download NBP over the network using information from the DHCP server. Intelligent Platform Management Interface (IPMI) ------------------------------------------------ IPMI is a standardized computer system interface used by system administrators for out-of-band management of computer systems and monitoring of their operation. It is a method to manage systems that may be unresponsive or powered off by using only a network connection to the hardware rather than to an operating system. .. _understanding-deployment: Understanding Bare Metal Deployment =================================== What happens when a boot instance request comes in? The below diagram walks through the steps involved during the provisioning of a bare metal instance. 
These pre-requisites must be met before the deployment process: * Dependent packages to be configured on the Bare Metal service node(s) where ironic-conductor is running like tftp-server, ipmi, syslinux etc for bare metal provisioning. * Nova must be configured to make use of the bare metal service endpoint and compute driver should be configured to use ironic driver on the Nova compute node(s). * Flavors to be created for the available hardware. Nova must know the flavor to boot from. * Images to be made available in Glance. Listed below are some image types required for successful bare metal deployment: - bm-deploy-kernel - bm-deploy-ramdisk - user-image - user-image-vmlinuz - user-image-initrd * Hardware to be enrolled via Ironic RESTful API service. Deploy Process -------------- This describes a typical ironic node deployment using PXE and the Ironic Python Agent (IPA). Depending on the ironic driver interfaces used, some of the steps might be marginally different, however the majority of them will remain the same. #. A boot instance request comes in via the Nova API, through the message queue to the Nova scheduler. #. Nova scheduler applies filters and finds the eligible hypervisor. The nova scheduler also uses the flavor's ``extra_specs``, such as ``cpu_arch``, to match the target physical node. #. Nova compute manager claims the resources of the selected hypervisor. #. Nova compute manager creates (unbound) tenant virtual interfaces (VIFs) in the Networking service according to the network interfaces requested in the nova boot request. A caveat here is, the MACs of the ports are going to be randomly generated, and will be updated when the VIF is attached to some node to correspond to the node network interface card's (or bond's) MAC. #. A spawn task is created by the nova compute which contains all the information such as which image to boot from etc. It invokes the ``driver.spawn`` from the virt layer of Nova compute. 
During the spawn process, the virt driver does the following: #. Updates the target ironic node with the information about deploy image, instance UUID, requested capabilities and various flavor properties. #. Validates node's power and deploy interfaces, by calling the ironic API. #. Attaches the previously created VIFs to the node. Each neutron port can be attached to any ironic port or port group, with port groups having higher priority than ports. On ironic side, this work is done by the network interface. Attachment here means saving the VIF identifier into ironic port or port group and updating VIF MAC to match the port's or port group's MAC, as described in bullet point 4. #. Generates config drive, if requested. #. Nova's ironic virt driver issues a deploy request via the Ironic API to the Ironic conductor servicing the bare metal node. #. Virtual interfaces are plugged in and Neutron API updates DHCP port to set PXE/TFTP options. In case of using ``neutron`` network interface, ironic creates separate provisioning ports in the Networking service, while in case of ``flat`` network interface, the ports created by nova are used both for provisioning and for deployed instance networking. #. The ironic node's boot interface prepares (i)PXE configuration and caches deploy kernel and ramdisk. #. The ironic node's management interface issues commands to enable network boot of a node. #. The ironic node's deploy interface caches the instance image (in case of ``iscsi`` deploy interface), and kernel and ramdisk if needed (it is needed in case of netboot for example). #. The ironic node's power interface instructs the node to power on. #. The node boots the deploy ramdisk. #. Depending on the exact driver used, either the conductor copies the image over iSCSI to the physical node (:ref:`iscsi-deploy`) or the deploy ramdisk downloads the image from a temporary URL (:ref:`direct-deploy`). 
The temporary URL can be generated by Swift API-compatible object stores, for example Swift itself or RadosGW. The image deployment is done. #. The node's boot interface switches pxe config to refer to instance images (or, in case of local boot, sets boot device to disk), and asks the ramdisk agent to soft power off the node. If the soft power off by the ramdisk agent fails, the bare metal node is powered off via IPMI/BMC call. #. The deploy interface triggers the network interface to remove provisioning ports if they were created, and binds the tenant ports to the node if not already bound. Then the node is powered on. .. note:: There are 2 power cycles during bare metal deployment; the first time the node is powered-on when ramdisk is booted, the second time after the image is deployed. #. The bare metal node's provisioning state is updated to ``active``. Below is the diagram that describes the above process. .. graphviz:: digraph "Deployment Steps" { node [shape=box, style=rounded, fontsize=10]; edge [fontsize=10]; /* cylinder shape works only in graphviz 2.39+ */ { rank=same; node [shape=cylinder]; "Nova DB"; "Ironic DB"; } { rank=same; "Nova API"; "Ironic API"; } { rank=same; "Nova Message Queue"; "Ironic Message Queue"; } { rank=same; "Ironic Conductor"; "TFTP Server"; } { rank=same; "Deploy Interface"; "Boot Interface"; "Power Interface"; "Management Interface"; } { rank=same; "Glance"; "Neutron"; } "Bare Metal Nodes" [shape=box3d]; "Nova API" -> "Nova Message Queue" [label=" 1"]; "Nova Message Queue" -> "Nova Conductor" [dir=both]; "Nova Message Queue" -> "Nova Scheduler" [label=" 2"]; "Nova Conductor" -> "Nova DB" [dir=both, label=" 3"]; "Nova Message Queue" -> "Nova Compute" [dir=both]; "Nova Compute" -> "Neutron" [label=" 4"]; "Nova Compute" -> "Nova Ironic Virt Driver" [label=5]; "Nova Ironic Virt Driver" -> "Ironic API" [label=6]; "Ironic API" -> "Ironic Message Queue"; "Ironic Message Queue" -> "Ironic Conductor" [dir=both]; "Ironic API" -> "Ironic 
DB" [dir=both]; "Ironic Conductor" -> "Ironic DB" [dir=both, label=16]; "Ironic Conductor" -> "Boot Interface" [label="8, 14"]; "Ironic Conductor" -> "Management Interface" [label=" 9"]; "Ironic Conductor" -> "Deploy Interface" [label=10]; "Deploy Interface" -> "Network Interface" [label="7, 15"]; "Ironic Conductor" -> "Power Interface" [label=11]; "Ironic Conductor" -> "Glance"; "Network Interface" -> "Neutron"; "Power Interface" -> "Bare Metal Nodes"; "Management Interface" -> "Bare Metal Nodes"; "TFTP Server" -> "Bare Metal Nodes" [label=12]; "Ironic Conductor" -> "Bare Metal Nodes" [style=dotted, label=13]; "Boot Interface" -> "TFTP Server"; } The following two examples describe what ironic is doing in more detail, leaving out the actions performed by nova and some of the more advanced options. .. _iscsi-deploy-example: Example 1: PXE Boot and iSCSI Deploy Process -------------------------------------------- This process is how :ref:`iscsi-deploy` works. .. seqdiag:: :scale: 75 diagram { Nova; API; Conductor; Neutron; HTTPStore; "TFTP/HTTPd"; Node; activation = none; span_height = 1; edge_length = 250; default_note_color = white; default_fontsize = 14; Nova -> API [label = "Set instance_info\n(image_source,\nroot_gb, etc.)"]; Nova -> API [label = "Validate power and deploy\ninterfaces"]; Nova -> API [label = "Plug VIFs to the node"]; Nova -> API [label = "Set provision_state,\noptionally pass configdrive"]; API -> Conductor [label = "do_node_deploy()"]; Conductor -> Conductor [label = "Validate power and deploy interfaces"]; Conductor -> HTTPStore [label = "Store configdrive if configdrive_use_swift \noption is set"]; Conductor -> Node [label = "POWER OFF"]; Conductor -> Neutron [label = "Attach provisioning network to port(s)"]; Conductor -> Neutron [label = "Update DHCP boot options"]; Conductor -> Conductor [label = "Prepare PXE\nenvironment for\ndeployment"]; Conductor -> Node [label = "Set PXE boot device \nthrough the BMC"]; Conductor -> Conductor [label 
= "Cache deploy\nkernel, ramdisk,\ninstance images"]; Conductor -> Node [label = "REBOOT"]; Node -> Neutron [label = "DHCP request"]; Neutron -> Node [label = "next-server = Conductor"]; Node -> Node [label = "Runs agent\nramdisk"]; Node -> API [label = "lookup()"]; API -> Node [label = "Pass UUID"]; Node -> API [label = "Heartbeat (UUID)"]; API -> Conductor [label = "Heartbeat"]; Conductor -> Node [label = "Send IPA a command to expose disks via iSCSI"]; Conductor -> Node [label = "iSCSI attach"]; Conductor -> Node [label = "Copies user image and configdrive, if present"]; Conductor -> Node [label = "iSCSI detach"]; Conductor -> Conductor [label = "Delete instance\nimage from cache"]; Conductor -> Node [label = "Install boot loader, if requested"]; Conductor -> Neutron [label = "Update DHCP boot options"]; Conductor -> Conductor [label = "Prepare PXE\nenvironment for\ninstance image"]; Conductor -> Node [label = "Set boot device either to PXE or to disk"]; Conductor -> Node [label = "Collect ramdisk logs"]; Conductor -> Node [label = "POWER OFF"]; Conductor -> Neutron [label = "Detach provisioning network\nfrom port(s)"]; Conductor -> Neutron [label = "Bind tenant port"]; Conductor -> Node [label = "POWER ON"]; Conductor -> Conductor [label = "Mark node as\nACTIVE"]; } (From a `talk`_ and `slides`_) .. _direct-deploy-example: Example 2: PXE Boot and Direct Deploy Process --------------------------------------------- This process is how :ref:`direct-deploy` works. .. 
seqdiag:: :scale: 75 diagram { Nova; API; Conductor; Neutron; HTTPStore; "TFTP/HTTPd"; Node; activation = none; edge_length = 250; span_height = 1; default_note_color = white; default_fontsize = 14; Nova -> API [label = "Set instance_info\n(image_source,\nroot_gb, etc.)"]; Nova -> API [label = "Validate power and deploy\ninterfaces"]; Nova -> API [label = "Plug VIFs to the node"]; Nova -> API [label = "Set provision_state,\noptionally pass configdrive"]; API -> Conductor [label = "do_node_deploy()"]; Conductor -> Conductor [label = "Validate power and deploy interfaces"]; Conductor -> HTTPStore [label = "Store configdrive if configdrive_use_swift \noption is set"]; Conductor -> Node [label = "POWER OFF"]; Conductor -> Neutron [label = "Attach provisioning network to port(s)"]; Conductor -> Neutron [label = "Update DHCP boot options"]; Conductor -> Conductor [label = "Prepare PXE\nenvironment for\ndeployment"]; Conductor -> Node [label = "Set PXE boot device \nthrough the BMC"]; Conductor -> Conductor [label = "Cache deploy\nand instance\nkernel and ramdisk"]; Conductor -> Node [label = "REBOOT"]; Node -> Neutron [label = "DHCP request"]; Neutron -> Node [label = "next-server = Conductor"]; Node -> Node [label = "Runs agent\nramdisk"]; Node -> API [label = "lookup()"]; API -> Node [label = "Pass UUID"]; Node -> API [label = "Heartbeat (UUID)"]; API -> Conductor [label = "Heartbeat"]; Conductor -> Node [label = "Continue deploy asynchronously: Pass image, disk info"]; Node -> HTTPStore [label = "Downloads image, writes to disk, \nwrites configdrive if present"]; === Heartbeat periodically === Conductor -> Node [label = "Is deploy done?"]; Node -> Conductor [label = "Still working..."]; === ... 
=== Node -> Conductor [label = "Deploy is done"]; Conductor -> Node [label = "Install boot loader, if requested"]; Conductor -> Neutron [label = "Update DHCP boot options"]; Conductor -> Conductor [label = "Prepare PXE\nenvironment for\ninstance image\nif needed"]; Conductor -> Node [label = "Set boot device either to PXE or to disk"]; Conductor -> Node [label = "Collect ramdisk logs"]; Conductor -> Node [label = "POWER OFF"]; Conductor -> Neutron [label = "Detach provisioning network\nfrom port(s)"]; Conductor -> Neutron [label = "Bind tenant port"]; Conductor -> Node [label = "POWER ON"]; Conductor -> Conductor [label = "Mark node as\nACTIVE"]; } (From a `talk`_ and `slides`_) .. _talk: https://www.openstack.org/summit/vancouver-2015/summit-videos/presentation/isn-and-039t-it-ironic-the-bare-metal-cloud .. _slides: http://www.slideshare.net/devananda1/isnt-it-ironic-managing-a-bare-metal-cloud-osl-tes-2015 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/driver-requirements.txt0000644000175000017500000000123500000000000021132 0ustar00coreycorey00000000000000# This file lists all python libraries which are utilized by drivers, # but not listed in global-requirements. # It is intended to help package maintainers to discover additional # python projects they should package as optional dependencies for Ironic. 
# These are available on pypi proliantutils>=2.9.1 pysnmp>=4.3.0,<5.0.0 python-scciclient>=0.8.0 python-dracclient>=3.1.0,<4.0.0 python-xclarityclient>=0.1.6 # The Redfish hardware type uses the Sushy library sushy>=3.2.0 # Ansible-deploy interface ansible>=2.7 # HUAWEI iBMC hardware type uses the python-ibmcclient library python-ibmcclient>=0.1.0 # Dell EMC iDRAC sushy OEM extension sushy-oem-idrac<=0.1.0 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586538406.146399 ironic-14.0.1.dev163/etc/0000755000175000017500000000000000000000000015127 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1943994 ironic-14.0.1.dev163/etc/apache2/0000755000175000017500000000000000000000000016432 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/etc/apache2/ironic0000644000175000017500000000257100000000000017645 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is an example Apache2 configuration file for using the # Ironic API through mod_wsgi. This version assumes you are # running devstack to configure the software, and PBR has generated # and installed the ironic-api-wsgi script while installing ironic. 
Listen 6385 WSGIDaemonProcess ironic user=stack group=stack threads=10 display-name=%{GROUP} WSGIScriptAlias / /usr/local/bin/ironic-api-wsgi SetEnv APACHE_RUN_USER stack SetEnv APACHE_RUN_GROUP stack WSGIProcessGroup ironic ErrorLog /var/log/apache2/ironic_error.log LogLevel info CustomLog /var/log/apache2/ironic_access.log combined WSGIProcessGroup ironic WSGIApplicationGroup %{GLOBAL} AllowOverride All Require all granted ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1943994 ironic-14.0.1.dev163/etc/ironic/0000755000175000017500000000000000000000000016412 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/etc/ironic/README-ironic.conf.txt0000644000175000017500000000040400000000000022313 0ustar00coreycorey00000000000000To generate the sample ironic.conf file, run the following command from the top level of the repo: tox -egenconfig For a pre-generated example of the latest ironic.conf, see: https://docs.openstack.org/ironic/latest/configuration/sample-config.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/etc/ironic/README-policy.yaml.txt0000644000175000017500000000040400000000000022344 0ustar00coreycorey00000000000000To generate the sample policy.yaml file, run the following command from the top level of the repo: tox -egenpolicy For a pre-generated example of the latest policy.yaml, see: https://docs.openstack.org/ironic/latest/configuration/sample-policy.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/etc/ironic/api_audit_map.conf.sample0000644000175000017500000000134300000000000023336 0ustar00coreycorey00000000000000[DEFAULT] # default target endpoint type # should match the endpoint type defined in service catalog 
target_endpoint_type = None # possible end path of API requests # path of api requests for CADF target typeURI # Just need to include top resource path to identify class # of resources. Ex: Log audit event for API requests # path containing "nodes" keyword and node uuid. [path_keywords] nodes = node drivers = driver chassis = chassis ports = port states = state power = None provision = None maintenance = None validate = None boot_device = None supported = None console = None vendor_passthru = vendor_passthru # map endpoint type defined in service catalog to CADF typeURI [service_endpoints] baremetal = service/compute/baremetal ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/etc/ironic/rootwrap.conf0000644000175000017500000000165000000000000021140 0ustar00coreycorey00000000000000# Configuration for ironic-rootwrap # This file should be owned by (and only writable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writable by root ! filters_path=/etc/ironic/rootwrap.d,/usr/share/ironic/rootwrap # List of directories to search executables in, in case filters do not # explicitly specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, user0, user1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. 
# INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1943994 ironic-14.0.1.dev163/etc/ironic/rootwrap.d/0000755000175000017500000000000000000000000020511 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/etc/ironic/rootwrap.d/ironic-images.filters0000644000175000017500000000032400000000000024630 0ustar00coreycorey00000000000000# ironic-rootwrap command filters to manipulate images # This file should be owned by (and only-writable by) the root user [Filters] # ironic/common/images.py: 'qemu-img' qemu-img: CommandFilter, qemu-img, root ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/etc/ironic/rootwrap.d/ironic-utils.filters0000644000175000017500000000047000000000000024525 0ustar00coreycorey00000000000000# ironic-rootwrap command filters for disk manipulation # This file should be owned by (and only-writable by) the root user [Filters] # ironic/drivers/modules/deploy_utils.py iscsiadm: CommandFilter, iscsiadm, root # ironic/common/utils.py mount: CommandFilter, mount, root umount: CommandFilter, umount, root ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1943994 ironic-14.0.1.dev163/ironic/0000755000175000017500000000000000000000000015637 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/__init__.py0000644000175000017500000000000000000000000017736 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1943994 
ironic-14.0.1.dev163/ironic/api/0000755000175000017500000000000000000000000016410 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/__init__.py0000644000175000017500000000115500000000000020523 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan request = pecan.request response = pecan.response del pecan ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/app.py0000644000175000017500000001170300000000000017544 0ustar00coreycorey00000000000000# -*- encoding: utf-8 -*- # Copyright © 2012 New Dream Network, LLC (DreamHost) # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import keystonemiddleware.audit as audit_middleware from oslo_config import cfg import oslo_middleware.cors as cors_middleware from oslo_middleware import healthcheck from oslo_middleware import http_proxy_to_wsgi import osprofiler.web as osprofiler_web import pecan from ironic.api import config from ironic.api.controllers import base from ironic.api import hooks from ironic.api import middleware from ironic.api.middleware import auth_token from ironic.api.middleware import json_ext from ironic.common import exception from ironic.conf import CONF class IronicCORS(cors_middleware.CORS): """Ironic-specific CORS class We're adding the Ironic-specific version headers to the list of simple headers in order that a request bearing those headers might be accepted by the Ironic REST API. """ simple_headers = cors_middleware.CORS.simple_headers + [ 'X-Auth-Token', base.Version.max_string, base.Version.min_string, base.Version.string ] def get_pecan_config(): # Set up the pecan configuration filename = config.__file__.replace('.pyc', '.py') return pecan.configuration.conf_from_file(filename) def setup_app(pecan_config=None, extra_hooks=None): app_hooks = [hooks.ConfigHook(), hooks.DBHook(), hooks.ContextHook(pecan_config.app.acl_public_routes), hooks.RPCHook(), hooks.NoExceptionTracebackHook(), hooks.PublicUrlHook()] if extra_hooks: app_hooks.extend(extra_hooks) if not pecan_config: pecan_config = get_pecan_config() pecan.configuration.set_config(dict(pecan_config), overwrite=True) app = pecan.make_app( pecan_config.app.root, debug=CONF.pecan_debug, static_root=pecan_config.app.static_root if CONF.pecan_debug else None, force_canonical=getattr(pecan_config.app, 'force_canonical', True), hooks=app_hooks, wrap_app=middleware.ParsableErrorMiddleware, # NOTE(dtantsur): enabling this causes weird issues with nodes named # as if they had a known mime extension, e.g. "mynode.1". 
We do # simulate the same behaviour for .json extensions for backward # compatibility through JsonExtensionMiddleware. guess_content_type_from_ext=False, ) if CONF.audit.enabled: try: app = audit_middleware.AuditMiddleware( app, audit_map_file=CONF.audit.audit_map_file, ignore_req_list=CONF.audit.ignore_req_list ) except (EnvironmentError, OSError, audit_middleware.PycadfAuditApiConfigError) as e: raise exception.InputFileError( file_name=CONF.audit.audit_map_file, reason=e ) if CONF.auth_strategy == "keystone": app = auth_token.AuthTokenMiddleware( app, {"oslo_config_config": cfg.CONF}, public_api_routes=pecan_config.app.acl_public_routes) if CONF.profiler.enabled: app = osprofiler_web.WsgiMiddleware(app) # NOTE(pas-ha) this registers oslo_middleware.enable_proxy_headers_parsing # option, when disabled (default) this is noop middleware app = http_proxy_to_wsgi.HTTPProxyToWSGI(app, CONF) # add in the healthcheck middleware if enabled # NOTE(jroll) this is after the auth token middleware as we don't want auth # in front of this, and WSGI works from the outside in. Requests to # /healthcheck will be handled and returned before the auth middleware # is reached. if CONF.healthcheck.enabled: app = healthcheck.Healthcheck(app, CONF) # Create a CORS wrapper, and attach ironic-specific defaults that must be # included in all CORS responses. 
app = IronicCORS(app, CONF) cors_middleware.set_defaults( allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'], expose_headers=[base.Version.max_string, base.Version.min_string, base.Version.string] ) app = json_ext.JsonExtensionMiddleware(app) return app class VersionSelectorApplication(object): def __init__(self): pc = get_pecan_config() self.v1 = setup_app(pecan_config=pc) def __call__(self, environ, start_response): return self.v1(environ, start_response) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/config.py0000644000175000017500000000251100000000000020226 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Server Specific Configurations # See https://pecan.readthedocs.org/en/latest/configuration.html#server-configuration # noqa server = { 'port': '6385', 'host': '0.0.0.0' } # Pecan Application Configurations # See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration # noqa app = { 'root': 'ironic.api.controllers.root.RootController', 'modules': ['ironic.api'], 'static_root': '%(confdir)s/public', 'debug': False, 'acl_public_routes': [ '/', '/v1', # IPA ramdisk methods '/v1/lookup', '/v1/heartbeat/[a-z0-9\\-]+', ], } # WSME Configurations # See https://wsme.readthedocs.org/en/latest/integrate.html#configuration wsme = { 'debug': False, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1983993 ironic-14.0.1.dev163/ironic/api/controllers/0000755000175000017500000000000000000000000020756 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/__init__.py0000644000175000017500000000000000000000000023055 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/base.py0000644000175000017500000001053100000000000022242 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import functools from webob import exc import wsme from ironic.common.i18n import _ class AsDictMixin(object): """Mixin class adding an as_dict() method.""" def as_dict(self): """Render this object as a dict of its fields.""" def _attr_as_pod(attr): """Return an attribute as a Plain Old Data (POD) type.""" if isinstance(attr, list): return [_attr_as_pod(item) for item in attr] # Recursively evaluate objects that support as_dict(). try: return attr.as_dict() except AttributeError: return attr return dict((k, _attr_as_pod(getattr(self, k))) for k in self.fields if hasattr(self, k) and getattr(self, k) != wsme.Unset) class Base(AsDictMixin): """Base type for complex types""" def __init__(self, **kw): for key, value in kw.items(): if hasattr(self, key): setattr(self, key, value) def unset_fields_except(self, except_list=None): """Unset fields so they don't appear in the message body. :param except_list: A list of fields that won't be touched. """ if except_list is None: except_list = [] for k in self.as_dict(): if k not in except_list: setattr(self, k, wsme.Unset) class APIBase(Base): created_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is created""" updated_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is updated""" @functools.total_ordering class Version(object): """API Version object.""" string = 'X-OpenStack-Ironic-API-Version' """HTTP Header string carrying the requested version""" min_string = 'X-OpenStack-Ironic-API-Minimum-Version' """HTTP response header""" max_string = 'X-OpenStack-Ironic-API-Maximum-Version' """HTTP response header""" def __init__(self, headers, default_version, latest_version): """Create an API Version object from the supplied headers. 
:param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :raises: webob.HTTPNotAcceptable """ (self.major, self.minor) = Version.parse_headers( headers, default_version, latest_version) def __repr__(self): return '%s.%s' % (self.major, self.minor) @staticmethod def parse_headers(headers, default_version, latest_version): """Determine the API version requested based on the headers supplied. :param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :returns: a tuple of (major, minor) version numbers :raises: webob.HTTPNotAcceptable """ version_str = headers.get(Version.string, default_version) if version_str.lower() == 'latest': parse_str = latest_version else: parse_str = version_str try: version = tuple(int(i) for i in parse_str.split('.')) except ValueError: version = () if len(version) != 2: raise exc.HTTPNotAcceptable(_( "Invalid value for %s header") % Version.string) return version def __gt__(self, other): return (self.major, self.minor) > (other.major, other.minor) def __eq__(self, other): return (self.major, self.minor) == (other.major, other.minor) def __ne__(self, other): return not self.__eq__(other) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/link.py0000644000175000017500000000373300000000000022273 0ustar00coreycorey00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from wsme import types as wtypes from ironic import api from ironic.api.controllers import base def build_url(resource, resource_args, bookmark=False, base_url=None): if base_url is None: base_url = api.request.public_url template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s' # FIXME(lucasagomes): I'm getting a 404 when doing a GET on # a nested resource that the URL ends with a '/'. # https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs template += '%(args)s' if resource_args.startswith('?') else '/%(args)s' return template % {'url': base_url, 'res': resource, 'args': resource_args} class Link(base.Base): """A link representation.""" href = str """The url of a link.""" rel = str """The name of a link.""" type = str """Indicates the type of document/link.""" @staticmethod def make_link(rel_name, url, resource, resource_args, bookmark=False, type=wtypes.Unset): href = build_url(resource, resource_args, bookmark=bookmark, base_url=url) return Link(href=href, rel=rel_name, type=type) @classmethod def sample(cls): sample = cls(href="http://localhost:6385/chassis/" "eaaca217-e7d8-47b4-bb41-3f99f20eed89", rel="bookmark") return sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/root.py0000644000175000017500000000456600000000000022326 0ustar00coreycorey00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file 
except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from pecan import rest from ironic.api.controllers import base from ironic.api.controllers import v1 from ironic.api.controllers import version from ironic.api import expose class Root(base.Base): name = str """The name of the API""" description = str """Some information about this API""" versions = [version.Version] """Links to all the versions available in this API""" default_version = version.Version """A link to the default version of the API""" @staticmethod def convert(): root = Root() root.name = "OpenStack Ironic API" root.description = ("Ironic is an OpenStack project which aims to " "provision baremetal machines.") root.default_version = version.default_version() root.versions = [root.default_version] return root class RootController(rest.RestController): _versions = [version.ID_VERSION1] """All supported API versions""" _default_version = version.ID_VERSION1 """The default API version""" v1 = v1.Controller() @expose.expose(Root) def get(self): # NOTE: The reason why convert() it's being called for every # request is because we need to get the host url from # the request object to make the links. return Root.convert() @pecan.expose() def _route(self, args, request=None): """Overrides the default routing behavior. It redirects the request to the default version of the ironic API if the version number is not specified in the url. 
""" if args[0] and args[0] not in self._versions: args = [self._default_version] + args return super(RootController, self)._route(args, request) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1983993 ironic-14.0.1.dev163/ironic/api/controllers/v1/0000755000175000017500000000000000000000000021304 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/__init__.py0000644000175000017500000002761700000000000023432 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Version 1 of the Ironic API Specification can be found at doc/source/webapi/v1.rst """ import pecan from pecan import rest from webob import exc from ironic import api from ironic.api.controllers import base from ironic.api.controllers import link from ironic.api.controllers.v1 import allocation from ironic.api.controllers.v1 import chassis from ironic.api.controllers.v1 import conductor from ironic.api.controllers.v1 import deploy_template from ironic.api.controllers.v1 import driver from ironic.api.controllers.v1 import event from ironic.api.controllers.v1 import node from ironic.api.controllers.v1 import port from ironic.api.controllers.v1 import portgroup from ironic.api.controllers.v1 import ramdisk from ironic.api.controllers.v1 import utils from ironic.api.controllers.v1 import versions from ironic.api.controllers.v1 import volume from ironic.api.controllers import version from ironic.api import expose from ironic.common.i18n import _ BASE_VERSION = versions.BASE_VERSION def min_version(): return base.Version( {base.Version.string: versions.min_version_string()}, versions.min_version_string(), versions.max_version_string()) def max_version(): return base.Version( {base.Version.string: versions.max_version_string()}, versions.min_version_string(), versions.max_version_string()) class MediaType(base.Base): """A media type representation.""" base = str type = str def __init__(self, base, type): self.base = base self.type = type class V1(base.Base): """The representation of the version 1 of the API.""" id = str """The ID of the version, also acts as the release number""" media_types = [MediaType] """An array of supported media types for this version""" links = [link.Link] """Links that point to a specific URL for this version and documentation""" chassis = [link.Link] """Links to the chassis resource""" nodes = [link.Link] """Links to the nodes resource""" ports = [link.Link] """Links to the ports resource""" portgroups = [link.Link] """Links to the 
portgroups resource""" drivers = [link.Link] """Links to the drivers resource""" volume = [link.Link] """Links to the volume resource""" lookup = [link.Link] """Links to the lookup resource""" heartbeat = [link.Link] """Links to the heartbeat resource""" conductors = [link.Link] """Links to the conductors resource""" allocations = [link.Link] """Links to the allocations resource""" deploy_templates = [link.Link] """Links to the deploy_templates resource""" version = version.Version """Version discovery information.""" events = [link.Link] """Links to the events resource""" @staticmethod def convert(): v1 = V1() v1.id = "v1" v1.links = [link.Link.make_link('self', api.request.public_url, 'v1', '', bookmark=True), link.Link.make_link('describedby', 'https://docs.openstack.org', '/ironic/latest/contributor/', 'webapi.html', bookmark=True, type='text/html') ] v1.media_types = [MediaType('application/json', 'application/vnd.openstack.ironic.v1+json')] v1.chassis = [link.Link.make_link('self', api.request.public_url, 'chassis', ''), link.Link.make_link('bookmark', api.request.public_url, 'chassis', '', bookmark=True) ] v1.nodes = [link.Link.make_link('self', api.request.public_url, 'nodes', ''), link.Link.make_link('bookmark', api.request.public_url, 'nodes', '', bookmark=True) ] v1.ports = [link.Link.make_link('self', api.request.public_url, 'ports', ''), link.Link.make_link('bookmark', api.request.public_url, 'ports', '', bookmark=True) ] if utils.allow_portgroups(): v1.portgroups = [ link.Link.make_link('self', api.request.public_url, 'portgroups', ''), link.Link.make_link('bookmark', api.request.public_url, 'portgroups', '', bookmark=True) ] v1.drivers = [link.Link.make_link('self', api.request.public_url, 'drivers', ''), link.Link.make_link('bookmark', api.request.public_url, 'drivers', '', bookmark=True) ] if utils.allow_volume(): v1.volume = [ link.Link.make_link('self', api.request.public_url, 'volume', ''), link.Link.make_link('bookmark', api.request.public_url, 
'volume', '', bookmark=True) ] if utils.allow_ramdisk_endpoints(): v1.lookup = [link.Link.make_link('self', api.request.public_url, 'lookup', ''), link.Link.make_link('bookmark', api.request.public_url, 'lookup', '', bookmark=True) ] v1.heartbeat = [link.Link.make_link('self', api.request.public_url, 'heartbeat', ''), link.Link.make_link('bookmark', api.request.public_url, 'heartbeat', '', bookmark=True) ] if utils.allow_expose_conductors(): v1.conductors = [link.Link.make_link('self', api.request.public_url, 'conductors', ''), link.Link.make_link('bookmark', api.request.public_url, 'conductors', '', bookmark=True) ] if utils.allow_allocations(): v1.allocations = [link.Link.make_link('self', api.request.public_url, 'allocations', ''), link.Link.make_link('bookmark', api.request.public_url, 'allocations', '', bookmark=True) ] if utils.allow_expose_events(): v1.events = [link.Link.make_link('self', api.request.public_url, 'events', ''), link.Link.make_link('bookmark', api.request.public_url, 'events', '', bookmark=True) ] if utils.allow_deploy_templates(): v1.deploy_templates = [ link.Link.make_link('self', api.request.public_url, 'deploy_templates', ''), link.Link.make_link('bookmark', api.request.public_url, 'deploy_templates', '', bookmark=True) ] v1.version = version.default_version() return v1 class Controller(rest.RestController): """Version 1 API controller root.""" nodes = node.NodesController() ports = port.PortsController() portgroups = portgroup.PortgroupsController() chassis = chassis.ChassisController() drivers = driver.DriversController() volume = volume.VolumeController() lookup = ramdisk.LookupController() heartbeat = ramdisk.HeartbeatController() conductors = conductor.ConductorsController() allocations = allocation.AllocationsController() events = event.EventsController() deploy_templates = deploy_template.DeployTemplatesController() @expose.expose(V1) def get(self): # NOTE: The reason why convert() it's being called for every # request is because 
we need to get the host url from # the request object to make the links. return V1.convert() def _check_version(self, version, headers=None): if headers is None: headers = {} # ensure that major version in the URL matches the header if version.major != BASE_VERSION: raise exc.HTTPNotAcceptable(_( "Mutually exclusive versions requested. Version %(ver)s " "requested but not supported by this service. The supported " "version range is: [%(min)s, %(max)s].") % {'ver': version, 'min': versions.min_version_string(), 'max': versions.max_version_string()}, headers=headers) # ensure the minor version is within the supported range if version < min_version() or version > max_version(): raise exc.HTTPNotAcceptable(_( "Version %(ver)s was requested but the minor version is not " "supported by this service. The supported version range is: " "[%(min)s, %(max)s].") % {'ver': version, 'min': versions.min_version_string(), 'max': versions.max_version_string()}, headers=headers) @pecan.expose() def _route(self, args, request=None): v = base.Version(api.request.headers, versions.min_version_string(), versions.max_version_string()) # Always set the min and max headers api.response.headers[base.Version.min_string] = ( versions.min_version_string()) api.response.headers[base.Version.max_string] = ( versions.max_version_string()) # assert that requested version is supported self._check_version(v, api.response.headers) api.response.headers[base.Version.string] = str(v) api.request.version = v return super(Controller, self)._route(args, request) __all__ = ('Controller',) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/allocation.py0000644000175000017500000006046200000000000024013 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from http import client as http_client from ironic_lib import metrics_utils from oslo_utils import uuidutils import pecan from webob import exc as webob_exc import wsme from wsme import types as wtypes from ironic import api from ironic.api.controllers import base from ironic.api.controllers import link from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import notification_utils as notify from ironic.api.controllers.v1 import types from ironic.api.controllers.v1 import utils as api_utils from ironic.api import expose from ironic.common import exception from ironic.common.i18n import _ from ironic.common import policy from ironic.common import states as ir_states from ironic import objects METRICS = metrics_utils.get_metrics_logger(__name__) def hide_fields_in_newer_versions(obj): # if requested version is < 1.60, hide owner field if not api_utils.allow_allocation_owner(): obj.owner = wsme.Unset class Allocation(base.APIBase): """API representation of an allocation. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a allocation. 
""" uuid = types.uuid """Unique UUID for this allocation""" extra = {str: types.jsontype} """This allocation's meta data""" node_uuid = wsme.wsattr(types.uuid, readonly=True) """The UUID of the node this allocation belongs to""" node = wsme.wsattr(str) """The node to backfill the allocation for (POST only)""" name = wsme.wsattr(str) """The logical name for this allocation""" links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated allocation links""" state = wsme.wsattr(str, readonly=True) """The current state of the allocation""" last_error = wsme.wsattr(str, readonly=True) """Last error that happened to this allocation""" resource_class = wsme.wsattr(wtypes.StringType(max_length=80)) """Requested resource class for this allocation""" owner = wsme.wsattr(str) """Owner of allocation""" # NOTE(dtantsur): candidate_nodes is a list of UUIDs on the database level, # but the API level also accept names, converting them on fly. candidate_nodes = wsme.wsattr([str]) """Candidate nodes for this allocation""" traits = wsme.wsattr([str]) """Requested traits for the allocation""" def __init__(self, **kwargs): self.fields = [] fields = list(objects.Allocation.fields) # NOTE: node_uuid is not part of objects.Allocation.fields # because it's an API-only attribute fields.append('node_uuid') for field in fields: # Skip fields we do not expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @staticmethod def _convert_with_links(allocation, url): """Add links to the allocation.""" # This field is only used in POST, never return it. 
allocation.node = wsme.Unset allocation.links = [ link.Link.make_link('self', url, 'allocations', allocation.uuid), link.Link.make_link('bookmark', url, 'allocations', allocation.uuid, bookmark=True) ] return allocation @classmethod def convert_with_links(cls, rpc_allocation, fields=None, sanitize=True): """Add links to the allocation.""" allocation = Allocation(**rpc_allocation.as_dict()) if rpc_allocation.node_id: try: allocation.node_uuid = objects.Node.get_by_id( api.request.context, rpc_allocation.node_id).uuid except exception.NodeNotFound: allocation.node_uuid = None else: allocation.node_uuid = None if fields is not None: api_utils.check_for_invalid_fields(fields, allocation.fields) # Make the default values consistent between POST and GET API if allocation.candidate_nodes is None: allocation.candidate_nodes = [] if allocation.traits is None: allocation.traits = [] allocation = cls._convert_with_links(allocation, api.request.host_url) if not sanitize: return allocation allocation.sanitize(fields) return allocation def sanitize(self, fields=None): """Removes sensitive and unrequested data. Will only keep the fields specified in the ``fields`` parameter. 
:param fields: list of fields to preserve, or ``None`` to preserve them all :type fields: list of str """ hide_fields_in_newer_versions(self) if fields is not None: self.unset_fields_except(fields) @classmethod def sample(cls): """Return a sample of the allocation.""" sample = cls(uuid='a594544a-2daf-420c-8775-17a8c3e0852f', node_uuid='7ae81bb3-dec3-4289-8d6c-da80bd8001ae', name='node1-allocation-01', state=ir_states.ALLOCATING, last_error=None, resource_class='baremetal', traits=['CUSTOM_GPU'], candidate_nodes=[], extra={'foo': 'bar'}, created_at=datetime.datetime(2000, 1, 1, 12, 0, 0), updated_at=datetime.datetime(2000, 1, 1, 12, 0, 0), owner=None) return cls._convert_with_links(sample, 'http://localhost:6385') class AllocationCollection(collection.Collection): """API representation of a collection of allocations.""" allocations = [Allocation] """A list containing allocation objects""" def __init__(self, **kwargs): self._type = 'allocations' @staticmethod def convert_with_links(rpc_allocations, limit, url=None, fields=None, **kwargs): collection = AllocationCollection() collection.allocations = [ Allocation.convert_with_links(p, fields=fields, sanitize=False) for p in rpc_allocations ] collection.next = collection.get_next(limit, url=url, fields=fields, **kwargs) for item in collection.allocations: item.sanitize(fields=fields) return collection @classmethod def sample(cls): """Return a sample of the allocation.""" sample = cls() sample.allocations = [Allocation.sample()] return sample class AllocationPatchType(types.JsonPatchType): _api_base = Allocation class AllocationsController(pecan.rest.RestController): """REST controller for allocations.""" invalid_sort_key_list = ['extra', 'candidate_nodes', 'traits'] @pecan.expose() def _route(self, args, request=None): if not api_utils.allow_allocations(): msg = _("The API version does not allow allocations") if api.request.method == "GET": raise webob_exc.HTTPNotFound(msg) else: raise 
webob_exc.HTTPMethodNotAllowed(msg) return super(AllocationsController, self)._route(args, request) def _get_allocations_collection(self, node_ident=None, resource_class=None, state=None, owner=None, marker=None, limit=None, sort_key='id', sort_dir='asc', resource_url=None, fields=None): """Return allocations collection. :param node_ident: UUID or name of a node. :param marker: Pagination marker for large data sets. :param limit: Maximum number of resources to return in a single result. :param sort_key: Column to sort results by. Default: id. :param sort_dir: Direction to sort. "asc" or "desc". Default: asc. :param resource_url: Optional, URL to the allocation resource. :param fields: Optional, a list with a specified set of fields of the resource to be returned. :param owner: project_id of owner to filter by """ limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) if sort_key in self.invalid_sort_key_list: raise exception.InvalidParameterValue( _("The sort_key value %(key)s is an invalid field for " "sorting") % {'key': sort_key}) marker_obj = None if marker: marker_obj = objects.Allocation.get_by_uuid(api.request.context, marker) if node_ident: try: node_uuid = api_utils.get_rpc_node(node_ident).uuid except exception.NodeNotFound as exc: exc.code = http_client.BAD_REQUEST raise else: node_uuid = None possible_filters = { 'node_uuid': node_uuid, 'resource_class': resource_class, 'state': state, 'owner': owner } filters = {} for key, value in possible_filters.items(): if value is not None: filters[key] = value allocations = objects.Allocation.list(api.request.context, limit=limit, marker=marker_obj, sort_key=sort_key, sort_dir=sort_dir, filters=filters) return AllocationCollection.convert_with_links(allocations, limit, url=resource_url, fields=fields, sort_key=sort_key, sort_dir=sort_dir) def _check_allowed_allocation_fields(self, fields): """Check if fetching a particular field of an allocation is allowed. 
Check if the required version is being requested for fields that are only allowed to be fetched in a particular API version. :param fields: list or set of fields to check :raises: NotAcceptable if a field is not allowed """ if fields is None: return if 'owner' in fields and not api_utils.allow_allocation_owner(): raise exception.NotAcceptable() @METRICS.timer('AllocationsController.get_all') @expose.expose(AllocationCollection, types.uuid_or_name, str, str, types.uuid, int, str, str, types.listtype, str) def get_all(self, node=None, resource_class=None, state=None, marker=None, limit=None, sort_key='id', sort_dir='asc', fields=None, owner=None): """Retrieve a list of allocations. :param node: UUID or name of a node, to get only allocations for that node. :param resource_class: Filter by requested resource class. :param state: Filter by allocation state. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param fields: Optional, a list with a specified set of fields of the resource to be returned. :param owner: Filter by owner. """ owner = api_utils.check_list_policy('allocation', owner) self._check_allowed_allocation_fields(fields) if owner is not None and not api_utils.allow_allocation_owner(): raise exception.NotAcceptable() return self._get_allocations_collection(node, resource_class, state, owner, marker, limit, sort_key, sort_dir, fields=fields) @METRICS.timer('AllocationsController.get_one') @expose.expose(Allocation, types.uuid_or_name, types.listtype) def get_one(self, allocation_ident, fields=None): """Retrieve information about the given allocation. 
:param allocation_ident: UUID or logical name of an allocation. :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ rpc_allocation = api_utils.check_allocation_policy_and_retrieve( 'baremetal:allocation:get', allocation_ident) self._check_allowed_allocation_fields(fields) return Allocation.convert_with_links(rpc_allocation, fields=fields) def _authorize_create_allocation(self, allocation): cdict = api.request.context.to_policy_values() try: policy.authorize('baremetal:allocation:create', cdict, cdict) self._check_allowed_allocation_fields(allocation.as_dict()) except exception.HTTPForbidden: owner = cdict.get('project_id') if not owner or (allocation.owner and owner != allocation.owner): raise policy.authorize('baremetal:allocation:create_restricted', cdict, cdict) self._check_allowed_allocation_fields(allocation.as_dict()) allocation.owner = owner return allocation @METRICS.timer('AllocationsController.post') @expose.expose(Allocation, body=Allocation, status_code=http_client.CREATED) def post(self, allocation): """Create a new allocation. :param allocation: an allocation within the request body. 
""" context = api.request.context allocation = self._authorize_create_allocation(allocation) if (allocation.name and not api_utils.is_valid_logical_name(allocation.name)): msg = _("Cannot create allocation with invalid name " "'%(name)s'") % {'name': allocation.name} raise exception.Invalid(msg) if allocation.traits: for trait in allocation.traits: api_utils.validate_trait(trait) node = None if allocation.node is not wtypes.Unset: if api_utils.allow_allocation_backfill(): try: node = api_utils.get_rpc_node(allocation.node) except exception.NodeNotFound as exc: exc.code = http_client.BAD_REQUEST raise else: msg = _("Cannot set node when creating an allocation " "in this API version") raise exception.Invalid(msg) if not allocation.resource_class: if node: allocation.resource_class = node.resource_class else: msg = _("The resource_class field is mandatory when not " "backfilling") raise exception.Invalid(msg) if allocation.candidate_nodes: # Convert nodes from names to UUIDs and check their validity try: converted = api.request.dbapi.check_node_list( allocation.candidate_nodes) except exception.NodeNotFound as exc: exc.code = http_client.BAD_REQUEST raise else: # Make sure we keep the ordering of candidate nodes. 
allocation.candidate_nodes = [ converted[ident] for ident in allocation.candidate_nodes] all_dict = allocation.as_dict() # NOTE(yuriyz): UUID is mandatory for notifications payload if not all_dict.get('uuid'): if node and node.instance_uuid: # When backfilling without UUID requested, assume that the # target instance_uuid is the desired UUID all_dict['uuid'] = node.instance_uuid else: all_dict['uuid'] = uuidutils.generate_uuid() new_allocation = objects.Allocation(context, **all_dict) if node: new_allocation.node_id = node.id topic = api.request.rpcapi.get_topic_for(node) else: topic = api.request.rpcapi.get_random_topic() notify.emit_start_notification(context, new_allocation, 'create') with notify.handle_error_notification(context, new_allocation, 'create'): new_allocation = api.request.rpcapi.create_allocation( context, new_allocation, topic) notify.emit_end_notification(context, new_allocation, 'create') # Set the HTTP Location Header api.response.location = link.build_url('allocations', new_allocation.uuid) return Allocation.convert_with_links(new_allocation) def _validate_patch(self, patch): allowed_fields = ['name', 'extra'] fields = set() for p in patch: path = p['path'].split('/')[1] if path not in allowed_fields: msg = _("Cannot update %s in an allocation. Only 'name' and " "'extra' are allowed to be updated.") raise exception.Invalid(msg % p['path']) fields.add(path) self._check_allowed_allocation_fields(fields) @METRICS.timer('AllocationsController.patch') @wsme.validate(types.uuid, [AllocationPatchType]) @expose.expose(Allocation, types.uuid_or_name, body=[AllocationPatchType]) def patch(self, allocation_ident, patch): """Update an existing allocation. :param allocation_ident: UUID or logical name of an allocation. :param patch: a json PATCH document to apply to this allocation. 
""" if not api_utils.allow_allocation_update(): raise webob_exc.HTTPMethodNotAllowed(_( "The API version does not allow updating allocations")) context = api.request.context rpc_allocation = api_utils.check_allocation_policy_and_retrieve( 'baremetal:allocation:update', allocation_ident) self._validate_patch(patch) names = api_utils.get_patch_values(patch, '/name') for name in names: if name and not api_utils.is_valid_logical_name(name): msg = _("Cannot update allocation with invalid name " "'%(name)s'") % {'name': name} raise exception.Invalid(msg) allocation_dict = rpc_allocation.as_dict() allocation = Allocation(**api_utils.apply_jsonpatch(allocation_dict, patch)) # Update only the fields that have changed for field in objects.Allocation.fields: try: patch_val = getattr(allocation, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if rpc_allocation[field] != patch_val: rpc_allocation[field] = patch_val notify.emit_start_notification(context, rpc_allocation, 'update') with notify.handle_error_notification(context, rpc_allocation, 'update'): rpc_allocation.save() notify.emit_end_notification(context, rpc_allocation, 'update') return Allocation.convert_with_links(rpc_allocation) @METRICS.timer('AllocationsController.delete') @expose.expose(None, types.uuid_or_name, status_code=http_client.NO_CONTENT) def delete(self, allocation_ident): """Delete an allocation. :param allocation_ident: UUID or logical name of an allocation. 
""" context = api.request.context rpc_allocation = api_utils.check_allocation_policy_and_retrieve( 'baremetal:allocation:delete', allocation_ident) if rpc_allocation.node_id: node_uuid = objects.Node.get_by_id(api.request.context, rpc_allocation.node_id).uuid else: node_uuid = None notify.emit_start_notification(context, rpc_allocation, 'delete', node_uuid=node_uuid) with notify.handle_error_notification(context, rpc_allocation, 'delete', node_uuid=node_uuid): topic = api.request.rpcapi.get_random_topic() api.request.rpcapi.destroy_allocation(context, rpc_allocation, topic) notify.emit_end_notification(context, rpc_allocation, 'delete', node_uuid=node_uuid) class NodeAllocationController(pecan.rest.RestController): """REST controller for allocations.""" invalid_sort_key_list = ['extra', 'candidate_nodes', 'traits'] @pecan.expose() def _route(self, args, request=None): if not api_utils.allow_allocations(): raise webob_exc.HTTPNotFound(_( "The API version does not allow allocations")) return super(NodeAllocationController, self)._route(args, request) def __init__(self, node_ident): super(NodeAllocationController, self).__init__() self.parent_node_ident = node_ident self.inner = AllocationsController() @METRICS.timer('NodeAllocationController.get_all') @expose.expose(Allocation, types.listtype) def get_all(self, fields=None): cdict = api.request.context.to_policy_values() policy.authorize('baremetal:allocation:get', cdict, cdict) result = self.inner._get_allocations_collection(self.parent_node_ident, fields=fields) try: return result.allocations[0] except IndexError: raise exception.AllocationNotFound( _("Allocation for node %s was not found") % self.parent_node_ident) @METRICS.timer('NodeAllocationController.delete') @expose.expose(None, status_code=http_client.NO_CONTENT) def delete(self): context = api.request.context cdict = context.to_policy_values() policy.authorize('baremetal:allocation:delete', cdict, cdict) rpc_node = 
api_utils.get_rpc_node_with_suffix(self.parent_node_ident) allocations = objects.Allocation.list( api.request.context, filters={'node_uuid': rpc_node.uuid}) try: rpc_allocation = allocations[0] except IndexError: raise exception.AllocationNotFound( _("Allocation for node %s was not found") % self.parent_node_ident) notify.emit_start_notification(context, rpc_allocation, 'delete', node_uuid=rpc_node.uuid) with notify.handle_error_notification(context, rpc_allocation, 'delete', node_uuid=rpc_node.uuid): topic = api.request.rpcapi.get_random_topic() api.request.rpcapi.destroy_allocation(context, rpc_allocation, topic) notify.emit_end_notification(context, rpc_allocation, 'delete', node_uuid=rpc_node.uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/bios.py0000644000175000017500000001101700000000000022612 0ustar00coreycorey00000000000000# Copyright 2018 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from ironic_lib import metrics_utils
from pecan import rest
import wsme
from wsme import types as wtypes

from ironic import api
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import types
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api import expose
from ironic.common import exception
from ironic.common import policy
from ironic import objects

METRICS = metrics_utils.get_metrics_logger(__name__)


class BIOSSetting(base.APIBase):
    """API representation of a BIOS setting."""

    # Name of the BIOS setting, as reported by the driver.
    name = wsme.wsattr(str)

    # Value of the BIOS setting, exposed as a string.
    value = wsme.wsattr(str)

    # self/bookmark links; populated by _convert_with_links, not settable
    # by API clients.
    links = wsme.wsattr([link.Link], readonly=True)

    def __init__(self, **kwargs):
        """Initialize from keyword arguments.

        Only the fields of objects.BIOSSetting that are also declared on
        this API class are exposed; everything else is silently skipped.
        Missing values default to wtypes.Unset.
        """
        self.fields = []
        fields = list(objects.BIOSSetting.fields)
        for k in fields:
            if hasattr(self, k):
                self.fields.append(k)
                value = kwargs.get(k, wtypes.Unset)
                setattr(self, k, value)

    @staticmethod
    def _convert_with_links(bios, node_uuid, url):
        """Add links to the bios setting."""
        # Links are rooted at the owning node: /nodes/<node>/bios/<name>.
        name = bios.name
        bios.links = [link.Link.make_link('self', url, 'nodes',
                                          "%s/bios/%s" % (node_uuid, name)),
                      link.Link.make_link('bookmark', url, 'nodes',
                                          "%s/bios/%s" % (node_uuid, name),
                                          bookmark=True)]
        return bios

    @classmethod
    def convert_with_links(cls, rpc_bios, node_uuid):
        """Add links to the bios setting."""
        bios = BIOSSetting(**rpc_bios.as_dict())
        return cls._convert_with_links(bios, node_uuid, api.request.host_url)


class BIOSSettingsCollection(base.Base):
    """API representation of the bios settings for a node."""

    bios = [BIOSSetting]
    """Node bios settings list"""

    @staticmethod
    def collection_from_list(node_ident, bios_settings):
        """Build a collection from a list of BIOS setting objects.

        :param node_ident: UUID or name of the owning node, used to
            construct per-setting links.
        :param bios_settings: iterable of objects.BIOSSetting.
        :returns: a populated BIOSSettingsCollection.
        """
        col = BIOSSettingsCollection()
        bios_list = []
        for bios_setting in bios_settings:
            bios_list.append(BIOSSetting.convert_with_links(bios_setting,
                                                            node_ident))
        col.bios = bios_list
        return col


class NodeBiosController(rest.RestController):
    """REST controller for bios."""

    def __init__(self, node_ident=None):
        super(NodeBiosController, self).__init__()
        # UUID or logical name of the node whose BIOS settings this
        # sub-controller exposes.
        self.node_ident = node_ident
@METRICS.timer('NodeBiosController.get_all') @expose.expose(BIOSSettingsCollection) def get_all(self): """List node bios settings.""" cdict = api.request.context.to_policy_values() policy.authorize('baremetal:node:bios:get', cdict, cdict) node = api_utils.get_rpc_node(self.node_ident) settings = objects.BIOSSettingList.get_by_node_id( api.request.context, node.id) return BIOSSettingsCollection.collection_from_list(self.node_ident, settings) @METRICS.timer('NodeBiosController.get_one') @expose.expose({str: BIOSSetting}, types.name) def get_one(self, setting_name): """Retrieve information about the given bios setting. :param setting_name: Logical name of the setting to retrieve. """ cdict = api.request.context.to_policy_values() policy.authorize('baremetal:node:bios:get', cdict, cdict) node = api_utils.get_rpc_node(self.node_ident) try: setting = objects.BIOSSetting.get(api.request.context, node.id, setting_name) except exception.BIOSSettingNotFound: raise exception.BIOSSettingNotFound(node=node.uuid, name=setting_name) return {setting_name: BIOSSetting.convert_with_links(setting, node.uuid)} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/chassis.py0000644000175000017500000003443300000000000023322 0ustar00coreycorey00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime
from http import client as http_client

from ironic_lib import metrics_utils
from oslo_utils import uuidutils
from pecan import rest
import wsme
from wsme import types as wtypes

from ironic import api
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import node
from ironic.api.controllers.v1 import notification_utils as notify
from ironic.api.controllers.v1 import types
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api import expose
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import policy
from ironic import objects

METRICS = metrics_utils.get_metrics_logger(__name__)

# Fields returned by default when a collapsed (non-detail) view is requested.
_DEFAULT_RETURN_FIELDS = ('uuid', 'description')


class Chassis(base.APIBase):
    """API representation of a chassis.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of
    a chassis.
    """

    uuid = types.uuid
    """The UUID of the chassis"""

    description = wtypes.StringType(max_length=255)
    """The description of the chassis"""

    extra = {str: types.jsontype}
    """The metadata of the chassis"""

    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated chassis links"""

    nodes = wsme.wsattr([link.Link], readonly=True)
    """Links to the collection of nodes contained in this chassis"""

    def __init__(self, **kwargs):
        # Mirror only the object fields that this API class declares;
        # missing values default to wtypes.Unset.
        self.fields = []
        for field in objects.Chassis.fields:
            # Skip fields we do not expose.
if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @staticmethod def _convert_with_links(chassis, url, fields=None): if fields is None: chassis.nodes = [link.Link.make_link('self', url, 'chassis', chassis.uuid + "/nodes"), link.Link.make_link('bookmark', url, 'chassis', chassis.uuid + "/nodes", bookmark=True) ] chassis.links = [link.Link.make_link('self', url, 'chassis', chassis.uuid), link.Link.make_link('bookmark', url, 'chassis', chassis.uuid, bookmark=True) ] return chassis @classmethod def convert_with_links(cls, rpc_chassis, fields=None, sanitize=True): chassis = Chassis(**rpc_chassis.as_dict()) if fields is not None: api_utils.check_for_invalid_fields(fields, chassis.as_dict()) chassis = cls._convert_with_links(chassis, api.request.public_url, fields) if not sanitize: return chassis chassis.sanitize(fields) return chassis def sanitize(self, fields=None): """Removes sensitive and unrequested data. Will only keep the fields specified in the ``fields`` parameter. 
:param fields: list of fields to preserve, or ``None`` to preserve them all :type fields: list of str """ if fields is not None: self.unset_fields_except(fields) @classmethod def sample(cls, expand=True): time = datetime.datetime(2000, 1, 1, 12, 0, 0) sample = cls(uuid='eaaca217-e7d8-47b4-bb41-3f99f20eed89', extra={}, description='Sample chassis', created_at=time, updated_at=time) fields = None if expand else _DEFAULT_RETURN_FIELDS return cls._convert_with_links(sample, 'http://localhost:6385', fields=fields) class ChassisPatchType(types.JsonPatchType): _api_base = Chassis class ChassisCollection(collection.Collection): """API representation of a collection of chassis.""" chassis = [Chassis] """A list containing chassis objects""" def __init__(self, **kwargs): self._type = 'chassis' @staticmethod def convert_with_links(chassis, limit, url=None, fields=None, **kwargs): collection = ChassisCollection() collection.chassis = [Chassis.convert_with_links(ch, fields=fields, sanitize=False) for ch in chassis] url = url or None collection.next = collection.get_next(limit, url=url, fields=fields, **kwargs) for item in collection.chassis: item.sanitize(fields) return collection @classmethod def sample(cls): # FIXME(jroll) hack for docs build, bug #1560508 if not hasattr(objects, 'Chassis'): objects.register_all() sample = cls() sample.chassis = [Chassis.sample(expand=False)] return sample class ChassisController(rest.RestController): """REST controller for Chassis.""" nodes = node.NodesController() """Expose nodes as a sub-element of chassis""" # Set the flag to indicate that the requests to this resource are # coming from a top-level resource nodes.from_chassis = True _custom_actions = { 'detail': ['GET'], } invalid_sort_key_list = ['extra'] def _get_chassis_collection(self, marker, limit, sort_key, sort_dir, resource_url=None, fields=None, detail=None): limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: 
marker_obj = objects.Chassis.get_by_uuid(api.request.context, marker) if sort_key in self.invalid_sort_key_list: raise exception.InvalidParameterValue( _("The sort_key value %(key)s is an invalid field for sorting") % {'key': sort_key}) chassis = objects.Chassis.list(api.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) parameters = {} if detail is not None: parameters['detail'] = detail return ChassisCollection.convert_with_links(chassis, limit, url=resource_url, fields=fields, sort_key=sort_key, sort_dir=sort_dir, **parameters) @METRICS.timer('ChassisController.get_all') @expose.expose(ChassisCollection, types.uuid, int, str, str, types.listtype, types.boolean) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', fields=None, detail=None): """Retrieve a list of chassis. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ cdict = api.request.context.to_policy_values() policy.authorize('baremetal:chassis:get', cdict, cdict) api_utils.check_allow_specify_fields(fields) fields = api_utils.get_request_return_fields(fields, detail, _DEFAULT_RETURN_FIELDS) return self._get_chassis_collection(marker, limit, sort_key, sort_dir, fields=fields, detail=detail) @METRICS.timer('ChassisController.detail') @expose.expose(ChassisCollection, types.uuid, int, str, str) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of chassis with detail. :param marker: pagination marker for large data sets. 
:param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ cdict = api.request.context.to_policy_values() policy.authorize('baremetal:chassis:get', cdict, cdict) # /detail should only work against collections parent = api.request.path.split('/')[:-1][-1] if parent != "chassis": raise exception.HTTPNotFound() resource_url = '/'.join(['chassis', 'detail']) return self._get_chassis_collection(marker, limit, sort_key, sort_dir, resource_url) @METRICS.timer('ChassisController.get_one') @expose.expose(Chassis, types.uuid, types.listtype) def get_one(self, chassis_uuid, fields=None): """Retrieve information about the given chassis. :param chassis_uuid: UUID of a chassis. :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ cdict = api.request.context.to_policy_values() policy.authorize('baremetal:chassis:get', cdict, cdict) api_utils.check_allow_specify_fields(fields) rpc_chassis = objects.Chassis.get_by_uuid(api.request.context, chassis_uuid) return Chassis.convert_with_links(rpc_chassis, fields=fields) @METRICS.timer('ChassisController.post') @expose.expose(Chassis, body=Chassis, status_code=http_client.CREATED) def post(self, chassis): """Create a new chassis. :param chassis: a chassis within the request body. 
""" context = api.request.context cdict = context.to_policy_values() policy.authorize('baremetal:chassis:create', cdict, cdict) # NOTE(yuriyz): UUID is mandatory for notifications payload if not chassis.uuid: chassis.uuid = uuidutils.generate_uuid() new_chassis = objects.Chassis(context, **chassis.as_dict()) notify.emit_start_notification(context, new_chassis, 'create') with notify.handle_error_notification(context, new_chassis, 'create'): new_chassis.create() notify.emit_end_notification(context, new_chassis, 'create') # Set the HTTP Location Header api.response.location = link.build_url('chassis', new_chassis.uuid) return Chassis.convert_with_links(new_chassis) @METRICS.timer('ChassisController.patch') @wsme.validate(types.uuid, [ChassisPatchType]) @expose.expose(Chassis, types.uuid, body=[ChassisPatchType]) def patch(self, chassis_uuid, patch): """Update an existing chassis. :param chassis_uuid: UUID of a chassis. :param patch: a json PATCH document to apply to this chassis. """ context = api.request.context cdict = context.to_policy_values() policy.authorize('baremetal:chassis:update', cdict, cdict) rpc_chassis = objects.Chassis.get_by_uuid(context, chassis_uuid) chassis = Chassis( **api_utils.apply_jsonpatch(rpc_chassis.as_dict(), patch)) # Update only the fields that have changed for field in objects.Chassis.fields: try: patch_val = getattr(chassis, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if rpc_chassis[field] != patch_val: rpc_chassis[field] = patch_val notify.emit_start_notification(context, rpc_chassis, 'update') with notify.handle_error_notification(context, rpc_chassis, 'update'): rpc_chassis.save() notify.emit_end_notification(context, rpc_chassis, 'update') return Chassis.convert_with_links(rpc_chassis) @METRICS.timer('ChassisController.delete') @expose.expose(None, types.uuid, status_code=http_client.NO_CONTENT) def delete(self, chassis_uuid): """Delete a 
chassis. :param chassis_uuid: UUID of a chassis. """ context = api.request.context cdict = context.to_policy_values() policy.authorize('baremetal:chassis:delete', cdict, cdict) rpc_chassis = objects.Chassis.get_by_uuid(context, chassis_uuid) notify.emit_start_notification(context, rpc_chassis, 'delete') with notify.handle_error_notification(context, rpc_chassis, 'delete'): rpc_chassis.destroy() notify.emit_end_notification(context, rpc_chassis, 'delete') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/collection.py0000644000175000017500000000402000000000000024005 0ustar00coreycorey00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from wsme import types as wtypes from ironic import api from ironic.api.controllers import base from ironic.api.controllers import link class Collection(base.Base): next = str """A link to retrieve the next subset of the collection""" @property def collection(self): return getattr(self, self._type) @classmethod def get_key_field(cls): return 'uuid' def has_next(self, limit): """Return whether collection has more items.""" return len(self.collection) and len(self.collection) == limit def get_next(self, limit, url=None, **kwargs): """Return a link to the next subset of the collection.""" if not self.has_next(limit): return wtypes.Unset resource_url = url or self._type fields = kwargs.pop('fields', None) # NOTE(saga): If fields argument is present in kwargs and not None. It # is a list so convert it into a comma seperated string. if fields: kwargs['fields'] = ','.join(fields) q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs]) next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % { 'args': q_args, 'limit': limit, 'marker': getattr(self.collection[-1], self.get_key_field())} return link.Link.make_link('next', api.request.public_url, resource_url, next_args).href ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/conductor.py0000644000175000017500000002276300000000000023670 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime

from ironic_lib import metrics_utils
from oslo_log import log
from oslo_utils import timeutils
from pecan import rest
import wsme
from wsme import types as wtypes

from ironic import api
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import types
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api import expose
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import policy
import ironic.conf
from ironic import objects

CONF = ironic.conf.CONF
LOG = log.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)

# Fields returned by default when a collapsed (non-detail) view is requested.
_DEFAULT_RETURN_FIELDS = ('hostname', 'conductor_group', 'alive')


class Conductor(base.APIBase):
    """API representation of a bare metal conductor."""

    hostname = wsme.wsattr(str)
    """The hostname for this conductor"""

    conductor_group = wsme.wsattr(str)
    """The conductor group this conductor belongs to"""

    alive = types.boolean
    """Indicates whether this conductor is considered alive"""

    drivers = wsme.wsattr([str])
    """The drivers enabled on this conductor"""

    links = wsme.wsattr([link.Link])
    """A list containing a self link and associated conductor links"""

    def __init__(self, **kwargs):
        # Mirror only the object fields that this API class declares;
        # missing values default to wtypes.Unset.
        self.fields = []
        fields = list(objects.Conductor.fields)
        # NOTE(kaifeng): alive is not part of objects.Conductor.fields
        # because it's an API-only attribute.
        fields.append('alive')
        for field in fields:
            # Skip fields we do not expose.
if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) @staticmethod def _convert_with_links(conductor, url, fields=None): conductor.links = [link.Link.make_link('self', url, 'conductors', conductor.hostname), link.Link.make_link('bookmark', url, 'conductors', conductor.hostname, bookmark=True)] return conductor @classmethod def convert_with_links(cls, rpc_conductor, fields=None): conductor = Conductor(**rpc_conductor.as_dict()) conductor.alive = not timeutils.is_older_than( conductor.updated_at, CONF.conductor.heartbeat_timeout) if fields is not None: api_utils.check_for_invalid_fields(fields, conductor.as_dict()) conductor = cls._convert_with_links(conductor, api.request.public_url, fields=fields) conductor.sanitize(fields) return conductor def sanitize(self, fields): """Removes sensitive and unrequested data. Will only keep the fields specified in the ``fields`` parameter. :param fields: list of fields to preserve, or ``None`` to preserve them all :type fields: list of str """ if fields is not None: self.unset_fields_except(fields) @classmethod def sample(cls, expand=True): time = datetime.datetime(2000, 1, 1, 12, 0, 0) sample = cls(hostname='computer01', conductor_group='', alive=True, drivers=['ipmi'], created_at=time, updated_at=time) fields = None if expand else _DEFAULT_RETURN_FIELDS return cls._convert_with_links(sample, 'http://localhost:6385', fields=fields) class ConductorCollection(collection.Collection): """API representation of a collection of conductors.""" conductors = [Conductor] """A list containing conductor objects""" def __init__(self, **kwargs): self._type = 'conductors' # NOTE(kaifeng) Override because conductors use hostname instead of uuid. 
@classmethod def get_key_field(cls): return 'hostname' @staticmethod def convert_with_links(conductors, limit, url=None, fields=None, **kwargs): collection = ConductorCollection() collection.conductors = [Conductor.convert_with_links(c, fields=fields) for c in conductors] collection.next = collection.get_next(limit, url=url, fields=fields, **kwargs) for conductor in collection.conductors: conductor.sanitize(fields) return collection @classmethod def sample(cls): sample = cls() conductor = Conductor.sample(expand=False) sample.conductors = [conductor] return sample class ConductorsController(rest.RestController): """REST controller for conductors.""" invalid_sort_key_list = ['alive', 'drivers'] def _get_conductors_collection(self, marker, limit, sort_key, sort_dir, resource_url=None, fields=None, detail=None): limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) if sort_key in self.invalid_sort_key_list: raise exception.InvalidParameterValue( _("The sort_key value %(key)s is an invalid field for " "sorting") % {'key': sort_key}) marker_obj = None if marker: marker_obj = objects.Conductor.get_by_hostname( api.request.context, marker, online=None) conductors = objects.Conductor.list(api.request.context, limit=limit, marker=marker_obj, sort_key=sort_key, sort_dir=sort_dir) parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} if detail is not None: parameters['detail'] = detail return ConductorCollection.convert_with_links(conductors, limit, url=resource_url, fields=fields, **parameters) @METRICS.timer('ConductorsController.get_all') @expose.expose(ConductorCollection, types.name, int, str, str, types.listtype, types.boolean) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', fields=None, detail=None): """Retrieve a list of conductors. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. 
This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param fields: Optional, a list with a specified set of fields of the resource to be returned. :param detail: Optional, boolean to indicate whether retrieve a list of conductors with detail. """ cdict = api.request.context.to_policy_values() policy.authorize('baremetal:conductor:get', cdict, cdict) if not api_utils.allow_expose_conductors(): raise exception.NotFound() api_utils.check_allow_specify_fields(fields) api_utils.check_allowed_fields(fields) api_utils.check_allowed_fields([sort_key]) fields = api_utils.get_request_return_fields(fields, detail, _DEFAULT_RETURN_FIELDS) return self._get_conductors_collection(marker, limit, sort_key, sort_dir, fields=fields, detail=detail) @METRICS.timer('ConductorsController.get_one') @expose.expose(Conductor, types.name, types.listtype) def get_one(self, hostname, fields=None): """Retrieve information about the given conductor. :param hostname: hostname of a conductor. :param fields: Optional, a list with a specified set of fields of the resource to be returned. 
""" cdict = api.request.context.to_policy_values() policy.authorize('baremetal:conductor:get', cdict, cdict) if not api_utils.allow_expose_conductors(): raise exception.NotFound() api_utils.check_allow_specify_fields(fields) api_utils.check_allowed_fields(fields) conductor = objects.Conductor.get_by_hostname(api.request.context, hostname, online=None) return Conductor.convert_with_links(conductor, fields=fields) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/deploy_template.py0000644000175000017500000004232200000000000025050 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import datetime from http import client as http_client from ironic_lib import metrics_utils from oslo_log import log from oslo_utils import strutils from oslo_utils import uuidutils import pecan from pecan import rest from webob import exc as webob_exc import wsme from wsme import types as wtypes from ironic import api from ironic.api.controllers import base from ironic.api.controllers import link from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import notification_utils as notify from ironic.api.controllers.v1 import types from ironic.api.controllers.v1 import utils as api_utils from ironic.api import expose from ironic.common import exception from ironic.common.i18n import _ from ironic.conductor import steps as conductor_steps import ironic.conf from ironic import objects CONF = ironic.conf.CONF LOG = log.getLogger(__name__) METRICS = metrics_utils.get_metrics_logger(__name__) _DEFAULT_RETURN_FIELDS = ('uuid', 'name') _DEPLOY_INTERFACE_TYPE = wtypes.Enum( str, *conductor_steps.DEPLOYING_INTERFACE_PRIORITY) class DeployStepType(wtypes.Base, base.AsDictMixin): """A type describing a deployment step.""" interface = wsme.wsattr(_DEPLOY_INTERFACE_TYPE, mandatory=True) step = wsme.wsattr(str, mandatory=True) args = wsme.wsattr({str: types.jsontype}, mandatory=True) priority = wsme.wsattr(wtypes.IntegerType(0), mandatory=True) def __init__(self, **kwargs): self.fields = ['interface', 'step', 'args', 'priority'] for field in self.fields: value = kwargs.get(field, wtypes.Unset) setattr(self, field, value) def sanitize(self): """Removes sensitive data.""" if self.args != wtypes.Unset: self.args = strutils.mask_dict_password(self.args, "******") class DeployTemplate(base.APIBase): """API representation of a deploy template.""" uuid = types.uuid """Unique UUID for this deploy template.""" name = wsme.wsattr(str, mandatory=True) """The logical name for this deploy template.""" steps = wsme.wsattr([DeployStepType], 
mandatory=True) """The deploy steps of this deploy template.""" links = wsme.wsattr([link.Link]) """A list containing a self link and associated deploy template links.""" extra = {str: types.jsontype} """This deploy template's meta data""" def __init__(self, **kwargs): self.fields = [] fields = list(objects.DeployTemplate.fields) for field in fields: # Skip fields we do not expose. if not hasattr(self, field): continue value = kwargs.get(field, wtypes.Unset) if field == 'steps' and value != wtypes.Unset: value = [DeployStepType(**step) for step in value] self.fields.append(field) setattr(self, field, value) @staticmethod def validate(value): if value is None: return # The name is mandatory, but the 'mandatory' attribute support in # wtypes.wsattr allows None. if value.name is None: err = _("Deploy template name cannot be None") raise exception.InvalidDeployTemplate(err=err) # The name must also be a valid trait. api_utils.validate_trait( value.name, error_prefix=_("Deploy template name must be a valid trait")) # There must be at least one step. if not value.steps: err = _("No deploy steps specified. A deploy template must have " "at least one deploy step.") raise exception.InvalidDeployTemplate(err=err) # TODO(mgoddard): Determine the consequences of allowing duplicate # steps. # * What if one step has zero priority and another non-zero? # * What if a step that is enabled by default is included in a # template? Do we override the default or add a second invocation? # Check for duplicate steps. Each interface/step combination can be # specified at most once. counter = collections.Counter((step.interface, step.step) for step in value.steps) duplicates = {key for key, count in counter.items() if count > 1} if duplicates: duplicates = {"interface: %s, step: %s" % (interface, step) for interface, step in duplicates} err = _("Duplicate deploy steps. A deploy template cannot have " "multiple deploy steps with the same interface and step. 
" "Duplicates: %s") % "; ".join(duplicates) raise exception.InvalidDeployTemplate(err=err) return value @staticmethod def _convert_with_links(template, url, fields=None): template.links = [ link.Link.make_link('self', url, 'deploy_templates', template.uuid), link.Link.make_link('bookmark', url, 'deploy_templates', template.uuid, bookmark=True) ] return template @classmethod def convert_with_links(cls, rpc_template, fields=None, sanitize=True): """Add links to the deploy template.""" template = DeployTemplate(**rpc_template.as_dict()) if fields is not None: api_utils.check_for_invalid_fields(fields, template.as_dict()) template = cls._convert_with_links(template, api.request.public_url, fields=fields) if sanitize: template.sanitize(fields) return template def sanitize(self, fields): """Removes sensitive and unrequested data. Will only keep the fields specified in the ``fields`` parameter. :param fields: list of fields to preserve, or ``None`` to preserve them all :type fields: list of str """ if self.steps != wtypes.Unset: for step in self.steps: step.sanitize() if fields is not None: self.unset_fields_except(fields) @classmethod def sample(cls, expand=True): time = datetime.datetime(2000, 1, 1, 12, 0, 0) template_uuid = '534e73fa-1014-4e58-969a-814cc0cb9d43' template_name = 'CUSTOM_RAID1' template_steps = [{ "interface": "raid", "step": "create_configuration", "args": { "logical_disks": [{ "size_gb": "MAX", "raid_level": "1", "is_root_volume": True }], "delete_configuration": True }, "priority": 10 }] template_extra = {'foo': 'bar'} sample = cls(uuid=template_uuid, name=template_name, steps=template_steps, extra=template_extra, created_at=time, updated_at=time) fields = None if expand else _DEFAULT_RETURN_FIELDS return cls._convert_with_links(sample, 'http://localhost:6385', fields=fields) class DeployTemplatePatchType(types.JsonPatchType): _api_base = DeployTemplate class DeployTemplateCollection(collection.Collection): """API representation of a collection of 
deploy templates.""" _type = 'deploy_templates' deploy_templates = [DeployTemplate] """A list containing deploy template objects""" @staticmethod def convert_with_links(templates, limit, fields=None, **kwargs): collection = DeployTemplateCollection() collection.deploy_templates = [ DeployTemplate.convert_with_links(t, fields=fields, sanitize=False) for t in templates] collection.next = collection.get_next(limit, fields=fields, **kwargs) for template in collection.deploy_templates: template.sanitize(fields) return collection @classmethod def sample(cls): sample = cls() template = DeployTemplate.sample(expand=False) sample.deploy_templates = [template] return sample class DeployTemplatesController(rest.RestController): """REST controller for deploy templates.""" invalid_sort_key_list = ['extra', 'steps'] @pecan.expose() def _route(self, args, request=None): if not api_utils.allow_deploy_templates(): msg = _("The API version does not allow deploy templates") if api.request.method == "GET": raise webob_exc.HTTPNotFound(msg) else: raise webob_exc.HTTPMethodNotAllowed(msg) return super(DeployTemplatesController, self)._route(args, request) def _update_changed_fields(self, template, rpc_template): """Update rpc_template based on changed fields in a template.""" for field in objects.DeployTemplate.fields: try: patch_val = getattr(template, field) except AttributeError: # Ignore fields that aren't exposed in the API. continue if patch_val == wtypes.Unset: patch_val = None if rpc_template[field] != patch_val: if field == 'steps' and patch_val is not None: # Convert from DeployStepType to dict. patch_val = [s.as_dict() for s in patch_val] rpc_template[field] = patch_val @METRICS.timer('DeployTemplatesController.get_all') @expose.expose(DeployTemplateCollection, types.name, int, str, str, types.listtype, types.boolean) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', fields=None, detail=None): """Retrieve a list of deploy templates. 
:param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param fields: Optional, a list with a specified set of fields of the resource to be returned. :param detail: Optional, boolean to indicate whether retrieve a list of deploy templates with detail. """ api_utils.check_policy('baremetal:deploy_template:get') api_utils.check_allowed_fields(fields) api_utils.check_allowed_fields([sort_key]) fields = api_utils.get_request_return_fields(fields, detail, _DEFAULT_RETURN_FIELDS) limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) if sort_key in self.invalid_sort_key_list: raise exception.InvalidParameterValue( _("The sort_key value %(key)s is an invalid field for " "sorting") % {'key': sort_key}) marker_obj = None if marker: marker_obj = objects.DeployTemplate.get_by_uuid( api.request.context, marker) templates = objects.DeployTemplate.list( api.request.context, limit=limit, marker=marker_obj, sort_key=sort_key, sort_dir=sort_dir) parameters = {'sort_key': sort_key, 'sort_dir': sort_dir} if detail is not None: parameters['detail'] = detail return DeployTemplateCollection.convert_with_links( templates, limit, fields=fields, **parameters) @METRICS.timer('DeployTemplatesController.get_one') @expose.expose(DeployTemplate, types.uuid_or_name, types.listtype) def get_one(self, template_ident, fields=None): """Retrieve information about the given deploy template. :param template_ident: UUID or logical name of a deploy template. :param fields: Optional, a list with a specified set of fields of the resource to be returned. 
""" api_utils.check_policy('baremetal:deploy_template:get') api_utils.check_allowed_fields(fields) rpc_template = api_utils.get_rpc_deploy_template_with_suffix( template_ident) return DeployTemplate.convert_with_links(rpc_template, fields=fields) @METRICS.timer('DeployTemplatesController.post') @expose.expose(DeployTemplate, body=DeployTemplate, status_code=http_client.CREATED) def post(self, template): """Create a new deploy template. :param template: a deploy template within the request body. """ api_utils.check_policy('baremetal:deploy_template:create') context = api.request.context tdict = template.as_dict() # NOTE(mgoddard): UUID is mandatory for notifications payload if not tdict.get('uuid'): tdict['uuid'] = uuidutils.generate_uuid() new_template = objects.DeployTemplate(context, **tdict) notify.emit_start_notification(context, new_template, 'create') with notify.handle_error_notification(context, new_template, 'create'): new_template.create() # Set the HTTP Location Header api.response.location = link.build_url('deploy_templates', new_template.uuid) api_template = DeployTemplate.convert_with_links(new_template) notify.emit_end_notification(context, new_template, 'create') return api_template @METRICS.timer('DeployTemplatesController.patch') @wsme.validate(types.uuid, types.boolean, [DeployTemplatePatchType]) @expose.expose(DeployTemplate, types.uuid_or_name, types.boolean, body=[DeployTemplatePatchType]) def patch(self, template_ident, patch=None): """Update an existing deploy template. :param template_ident: UUID or logical name of a deploy template. :param patch: a json PATCH document to apply to this deploy template. 
""" api_utils.check_policy('baremetal:deploy_template:update') context = api.request.context rpc_template = api_utils.get_rpc_deploy_template_with_suffix( template_ident) template_dict = rpc_template.as_dict() template = DeployTemplate( **api_utils.apply_jsonpatch(template_dict, patch)) template.validate(template) self._update_changed_fields(template, rpc_template) # NOTE(mgoddard): There could be issues with concurrent updates of a # template. This is particularly true for the complex 'steps' field, # where operations such as modifying a single step could result in # changes being lost, e.g. two requests concurrently appending a step # to the same template could result in only one of the steps being # added, due to the read/modify/write nature of this patch operation. # This issue should not be present for 'simple' string fields, or # complete replacement of the steps (the only operation supported by # the openstack baremetal CLI). It's likely that this is an issue for # other resources, even those modified in the conductor under a lock. # This is due to the fact that the patch operation is always applied in # the API. Ways to avoid this include passing the patch to the # conductor to apply while holding a lock, or a collision detection # & retry mechansim using e.g. the updated_at field. notify.emit_start_notification(context, rpc_template, 'update') with notify.handle_error_notification(context, rpc_template, 'update'): rpc_template.save() api_template = DeployTemplate.convert_with_links(rpc_template) notify.emit_end_notification(context, rpc_template, 'update') return api_template @METRICS.timer('DeployTemplatesController.delete') @expose.expose(None, types.uuid_or_name, status_code=http_client.NO_CONTENT) def delete(self, template_ident): """Delete a deploy template. :param template_ident: UUID or logical name of a deploy template. 
""" api_utils.check_policy('baremetal:deploy_template:delete') context = api.request.context rpc_template = api_utils.get_rpc_deploy_template_with_suffix( template_ident) notify.emit_start_notification(context, rpc_template, 'delete') with notify.handle_error_notification(context, rpc_template, 'delete'): rpc_template.destroy() notify.emit_end_notification(context, rpc_template, 'delete') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/driver.py0000644000175000017500000004142500000000000023157 0ustar00coreycorey00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client from ironic_lib import metrics_utils from pecan import rest import wsme from ironic import api from ironic.api.controllers import base from ironic.api.controllers import link from ironic.api.controllers.v1 import types from ironic.api.controllers.v1 import utils as api_utils from ironic.api import expose from ironic.common import exception from ironic.common.i18n import _ from ironic.common import policy from ironic.drivers import base as driver_base METRICS = metrics_utils.get_metrics_logger(__name__) # Property information for drivers: # key = driver name; # value = dictionary of properties of that driver: # key = property name. # value = description of the property. # NOTE(rloo). 
This is cached for the lifetime of the API service. If one or # more conductor services are restarted with new driver versions, the API # service should be restarted. _DRIVER_PROPERTIES = {} # Vendor information for drivers: # key = driver name; # value = dictionary of vendor methods of that driver: # key = method name. # value = dictionary with the metadata of that method. # NOTE(lucasagomes). This is cached for the lifetime of the API # service. If one or more conductor services are restarted with new driver # versions, the API service should be restarted. _VENDOR_METHODS = {} # RAID (logical disk) configuration information for drivers: # key = driver name; # value = dictionary of RAID configuration information of that driver: # key = property name. # value = description of the property # NOTE(rloo). This is cached for the lifetime of the API service. If one or # more conductor services are restarted with new driver versions, the API # service should be restarted. _RAID_PROPERTIES = {} def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. 
""" if not api_utils.allow_storage_interface(): obj.default_storage_interface = wsme.Unset obj.enabled_storage_interfaces = wsme.Unset if not api_utils.allow_rescue_interface(): obj.default_rescue_interface = wsme.Unset obj.enabled_rescue_interfaces = wsme.Unset if not api_utils.allow_bios_interface(): obj.default_bios_interface = wsme.Unset obj.enabled_bios_interfaces = wsme.Unset class Driver(base.Base): """API representation of a driver.""" name = str """The name of the driver""" hosts = [str] """A list of active conductors that support this driver""" type = str """Whether the driver is classic or dynamic (hardware type)""" links = wsme.wsattr([link.Link], readonly=True) """A list containing self and bookmark links""" properties = wsme.wsattr([link.Link], readonly=True) """A list containing links to driver properties""" """Default interface for a hardware type""" default_bios_interface = str default_boot_interface = str default_console_interface = str default_deploy_interface = str default_inspect_interface = str default_management_interface = str default_network_interface = str default_power_interface = str default_raid_interface = str default_rescue_interface = str default_storage_interface = str default_vendor_interface = str """A list of enabled interfaces for a hardware type""" enabled_bios_interfaces = [str] enabled_boot_interfaces = [str] enabled_console_interfaces = [str] enabled_deploy_interfaces = [str] enabled_inspect_interfaces = [str] enabled_management_interfaces = [str] enabled_network_interfaces = [str] enabled_power_interfaces = [str] enabled_raid_interfaces = [str] enabled_rescue_interfaces = [str] enabled_storage_interfaces = [str] enabled_vendor_interfaces = [str] @staticmethod def convert_with_links(name, hosts, detail=False, interface_info=None): """Convert driver/hardware type info to an API-serializable object. :param name: name of a hardware type. :param hosts: list of conductor hostnames driver is active on. 
:param detail: boolean, whether to include detailed info, such as the 'type' field and default/enabled interfaces fields. :param interface_info: optional list of dicts of hardware interface info. :returns: API-serializable driver object. """ driver = Driver() driver.name = name driver.hosts = hosts driver.links = [ link.Link.make_link('self', api.request.public_url, 'drivers', name), link.Link.make_link('bookmark', api.request.public_url, 'drivers', name, bookmark=True) ] if api_utils.allow_links_node_states_and_driver_properties(): driver.properties = [ link.Link.make_link('self', api.request.public_url, 'drivers', name + "/properties"), link.Link.make_link('bookmark', api.request.public_url, 'drivers', name + "/properties", bookmark=True) ] if api_utils.allow_dynamic_drivers(): # NOTE(dtantsur): only dynamic drivers (based on hardware types) # are supported starting with the Rocky release. driver.type = 'dynamic' if detail: if interface_info is None: # TODO(jroll) objectify this interface_info = (api.request.dbapi .list_hardware_type_interfaces([name])) for iface_type in driver_base.ALL_INTERFACES: default = None enabled = set() for iface in interface_info: if iface['interface_type'] == iface_type: iface_name = iface['interface_name'] enabled.add(iface_name) # NOTE(jroll) this assumes the default is the same # on all conductors if iface['default']: default = iface_name default_key = 'default_%s_interface' % iface_type enabled_key = 'enabled_%s_interfaces' % iface_type setattr(driver, default_key, default) setattr(driver, enabled_key, list(enabled)) hide_fields_in_newer_versions(driver) return driver @classmethod def sample(cls): attrs = { 'name': 'sample-driver', 'hosts': ['fake-host'], 'type': 'classic', } for iface_type in driver_base.ALL_INTERFACES: attrs['default_%s_interface' % iface_type] = None attrs['enabled_%s_interfaces' % iface_type] = None sample = cls(**attrs) return sample class DriverList(base.Base): """API representation of a list of drivers.""" 
drivers = [Driver] """A list containing drivers objects""" @staticmethod def convert_with_links(hardware_types, detail=False): """Convert drivers and hardware types to an API-serializable object. :param hardware_types: dict mapping hardware type names to conductor hostnames. :param detail: boolean, whether to include detailed info, such as the 'type' field and default/enabled interfaces fields. :returns: an API-serializable driver collection object. """ collection = DriverList() collection.drivers = [] # NOTE(jroll) we return hardware types in all API versions, # but restrict type/default/enabled fields to 1.30. # This is checked in Driver.convert_with_links(), however also # checking here can save us a DB query. if api_utils.allow_dynamic_drivers() and detail: iface_info = api.request.dbapi.list_hardware_type_interfaces( list(hardware_types)) else: iface_info = [] for htname in hardware_types: interface_info = [i for i in iface_info if i['hardware_type'] == htname] collection.drivers.append( Driver.convert_with_links(htname, list(hardware_types[htname]), detail=detail, interface_info=interface_info)) return collection @classmethod def sample(cls): sample = cls() sample.drivers = [Driver.sample()] return sample class DriverPassthruController(rest.RestController): """REST controller for driver passthru. This controller allow vendors to expose cross-node functionality in the Ironic API. Ironic will merely relay the message from here to the specified driver, no introspection will be made in the message body. """ _custom_actions = { 'methods': ['GET'] } @METRICS.timer('DriverPassthruController.methods') @expose.expose(str, str) def methods(self, driver_name): """Retrieve information about vendor methods of the given driver. :param driver_name: name of the driver. :returns: dictionary with : entries. :raises: DriverNotFound if the driver name is invalid or the driver cannot be loaded. 
""" cdict = api.request.context.to_policy_values() policy.authorize('baremetal:driver:vendor_passthru', cdict, cdict) if driver_name not in _VENDOR_METHODS: topic = api.request.rpcapi.get_topic_for_driver(driver_name) ret = api.request.rpcapi.get_driver_vendor_passthru_methods( api.request.context, driver_name, topic=topic) _VENDOR_METHODS[driver_name] = ret return _VENDOR_METHODS[driver_name] @METRICS.timer('DriverPassthruController._default') @expose.expose(str, str, str, body=str) def _default(self, driver_name, method, data=None): """Call a driver API extension. :param driver_name: name of the driver to call. :param method: name of the method, to be passed to the vendor implementation. :param data: body of data to supply to the specified method. """ cdict = api.request.context.to_policy_values() policy.authorize('baremetal:driver:vendor_passthru', cdict, cdict) topic = api.request.rpcapi.get_topic_for_driver(driver_name) return api_utils.vendor_passthru(driver_name, method, topic, data=data, driver_passthru=True) class DriverRaidController(rest.RestController): _custom_actions = { 'logical_disk_properties': ['GET'] } @METRICS.timer('DriverRaidController.logical_disk_properties') @expose.expose(types.jsontype, str) def logical_disk_properties(self, driver_name): """Returns the logical disk properties for the driver. :param driver_name: Name of the driver. :returns: A dictionary containing the properties that can be mentioned for logical disks and a textual description for them. :raises: UnsupportedDriverExtension if the driver doesn't support RAID configuration. :raises: NotAcceptable, if requested version of the API is less than 1.12. :raises: DriverNotFound, if driver is not loaded on any of the conductors. 
""" cdict = api.request.context.to_policy_values() policy.authorize('baremetal:driver:get_raid_logical_disk_properties', cdict, cdict) if not api_utils.allow_raid_config(): raise exception.NotAcceptable() if driver_name not in _RAID_PROPERTIES: topic = api.request.rpcapi.get_topic_for_driver(driver_name) try: info = api.request.rpcapi.get_raid_logical_disk_properties( api.request.context, driver_name, topic=topic) except exception.UnsupportedDriverExtension as e: # Change error code as 404 seems appropriate because RAID is a # standard interface and all drivers might not have it. e.code = http_client.NOT_FOUND raise _RAID_PROPERTIES[driver_name] = info return _RAID_PROPERTIES[driver_name] class DriversController(rest.RestController): """REST controller for Drivers.""" vendor_passthru = DriverPassthruController() raid = DriverRaidController() """Expose RAID as a sub-element of drivers""" _custom_actions = { 'properties': ['GET'], } @METRICS.timer('DriversController.get_all') @expose.expose(DriverList, str, types.boolean) def get_all(self, type=None, detail=None): """Retrieve a list of drivers.""" # FIXME(deva): formatting of the auto-generated REST API docs # will break from a single-line doc string. # This is a result of a bug in sphinxcontrib-pecanwsme # https://github.com/dreamhost/sphinxcontrib-pecanwsme/issues/8 cdict = api.request.context.to_policy_values() policy.authorize('baremetal:driver:get', cdict, cdict) api_utils.check_allow_driver_detail(detail) api_utils.check_allow_filter_driver_type(type) if type not in (None, 'classic', 'dynamic'): raise exception.Invalid(_( '"type" filter must be one of "classic" or "dynamic", ' 'if specified.')) if type is None or type == 'dynamic': hw_type_dict = api.request.dbapi.get_active_hardware_type_dict() else: # NOTE(dtantsur): we don't support classic drivers starting with # the Rocky release. 
hw_type_dict = {} return DriverList.convert_with_links(hw_type_dict, detail=detail) @METRICS.timer('DriversController.get_one') @expose.expose(Driver, str) def get_one(self, driver_name): """Retrieve a single driver.""" # NOTE(russell_h): There is no way to make this more efficient than # retrieving a list of drivers using the current sqlalchemy schema, but # this path must be exposed for Pecan to route any paths we might # choose to expose below it. cdict = api.request.context.to_policy_values() policy.authorize('baremetal:driver:get', cdict, cdict) hw_type_dict = api.request.dbapi.get_active_hardware_type_dict() for name, hosts in hw_type_dict.items(): if name == driver_name: return Driver.convert_with_links(name, list(hosts), detail=True) raise exception.DriverNotFound(driver_name=driver_name) @METRICS.timer('DriversController.properties') @expose.expose(str, str) def properties(self, driver_name): """Retrieve property information of the given driver. :param driver_name: name of the driver. :returns: dictionary with : entries. :raises: DriverNotFound (HTTP 404) if the driver name is invalid or the driver cannot be loaded. """ cdict = api.request.context.to_policy_values() policy.authorize('baremetal:driver:get_properties', cdict, cdict) if driver_name not in _DRIVER_PROPERTIES: topic = api.request.rpcapi.get_topic_for_driver(driver_name) properties = api.request.rpcapi.get_driver_properties( api.request.context, driver_name, topic=topic) _DRIVER_PROPERTIES[driver_name] = properties return _DRIVER_PROPERTIES[driver_name] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/event.py0000644000175000017500000000360000000000000022776 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client from ironic_lib import metrics_utils from oslo_log import log import pecan from ironic import api from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import types from ironic.api.controllers.v1 import utils as api_utils from ironic.api import expose from ironic.common import exception from ironic.common import policy METRICS = metrics_utils.get_metrics_logger(__name__) LOG = log.getLogger(__name__) class EvtCollection(collection.Collection): """API representation of a collection of events.""" events = [types.eventtype] """A list containing event dict objects""" class EventsController(pecan.rest.RestController): """REST controller for Events.""" @pecan.expose() def _lookup(self): if not api_utils.allow_expose_events(): pecan.abort(http_client.NOT_FOUND) @METRICS.timer('EventsController.post') @expose.expose(None, body=EvtCollection, status_code=http_client.NO_CONTENT) def post(self, evts): if not api_utils.allow_expose_events(): raise exception.NotFound() cdict = api.request.context.to_policy_values() policy.authorize('baremetal:events:post', cdict, cdict) for e in evts.events: LOG.debug("Received external event: %s", e) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/node.py0000644000175000017500000031750100000000000022612 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
from http import client as http_client

from ironic_lib import metrics_utils
import jsonschema
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import uuidutils
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes

from ironic import api
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import allocation
from ironic.api.controllers.v1 import bios
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import notification_utils as notify
from ironic.api.controllers.v1 import port
from ironic.api.controllers.v1 import portgroup
from ironic.api.controllers.v1 import types
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api.controllers.v1 import versions
from ironic.api.controllers.v1 import volume
from ironic.api import expose
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import policy
from ironic.common import states as ir_states
from ironic.conductor import steps as conductor_steps
import ironic.conf
from ironic import objects

CONF = ironic.conf.CONF

LOG = log.getLogger(__name__)

# JSON schema used to validate user-supplied clean steps passed to
# PUT /v1/nodes/.../states/provision with target "clean".
_CLEAN_STEPS_SCHEMA = {
    "$schema": "http://json-schema.org/schema#",
    "title": "Clean steps schema",
    "type": "array",
    # list of clean steps
    "items": {
        "type": "object",
        # args is optional
        "required": ["interface", "step"],
        "properties": {
            "interface": {
                "description": "driver interface",
                "enum": list(conductor_steps.CLEANING_INTERFACE_PRIORITY)
                # interface value must be one of the valid interfaces
            },
            "step": {
                "description": "name of clean step",
                "type": "string",
                "minLength": 1
            },
            "args": {
                "description": "additional args",
                "type": "object",
                "properties": {}
            },
        },
        # interface, step and args are the only expected keys
        "additionalProperties": False
    }
}

METRICS = metrics_utils.get_metrics_logger(__name__)

# Vendor information for node's driver:
#   key = driver name;
#   value = dictionary of node vendor methods of that driver:
#             key = method name.
#             value = dictionary with the metadata of that method.
# NOTE(lucasagomes). This is cached for the lifetime of the API
# service. If one or more conductor services are restarted with new driver
# versions, the API service should be restarted.
_VENDOR_METHODS = {}

_DEFAULT_RETURN_FIELDS = ('instance_uuid', 'maintenance', 'power_state',
                          'provision_state', 'uuid', 'name')

# States where calling do_provisioning_action makes sense
PROVISION_ACTION_STATES = (ir_states.VERBS['manage'],
                           ir_states.VERBS['provide'],
                           ir_states.VERBS['abort'],
                           ir_states.VERBS['adopt'])

# Lazily-populated cache; see get_nodes_controller_reserved_names().
_NODES_CONTROLLER_RESERVED_WORDS = None

ALLOWED_TARGET_POWER_STATES = (ir_states.POWER_ON,
                               ir_states.POWER_OFF,
                               ir_states.REBOOT,
                               ir_states.SOFT_REBOOT,
                               ir_states.SOFT_POWER_OFF)

_NODE_DESCRIPTION_MAX_LENGTH = 4096


def get_nodes_controller_reserved_names():
    # Compute (once) and cache the controller sub-resource names that
    # would shadow node names in /v1/nodes/<name> URLs.
    global _NODES_CONTROLLER_RESERVED_WORDS
    if _NODES_CONTROLLER_RESERVED_WORDS is None:
        _NODES_CONTROLLER_RESERVED_WORDS = (
            api_utils.get_controller_reserved_names(NodesController))
    return _NODES_CONTROLLER_RESERVED_WORDS


def hide_fields_in_newer_versions(obj):
    """This method hides fields that were added in newer API versions.

    Certain node fields were introduced at certain API versions. These
    fields are only made available when the request's API version matches or
    exceeds the versions when these fields were introduced.
    """
    for field in api_utils.disallowed_fields():
        setattr(obj, field, wsme.Unset)


def reject_fields_in_newer_versions(obj):
    """When creating an object, reject fields that appear in newer versions."""
    for field in api_utils.disallowed_fields():
        if field == 'conductor_group':
            # NOTE(jroll) this is special-cased to "" and not Unset,
            # because it is used in hash ring calculations
            empty_value = ''
        elif field == 'name' and obj.name is None:
            # NOTE(dtantsur): for some reason we allow specifying name=None
            # explicitly even in old API versions..
            continue
        else:
            empty_value = wtypes.Unset
        if getattr(obj, field, empty_value) != empty_value:
            LOG.debug('Field %(field)s is not acceptable in version %(ver)s',
                      {'field': field, 'ver': api.request.version})
            raise exception.NotAcceptable()


def reject_patch_in_newer_versions(patch):
    # Reject JSON-patch operations that touch fields unavailable at the
    # requested API version; mirrors reject_fields_in_newer_versions().
    for field in api_utils.disallowed_fields():
        value = api_utils.get_patch_values(patch, '/%s' % field)
        if value:
            LOG.debug('Field %(field)s is not acceptable in version %(ver)s',
                      {'field': field, 'ver': api.request.version})
            raise exception.NotAcceptable()


def update_state_in_older_versions(obj):
    """Change provision state names for API backwards compatibility.

    :param obj: The object being returned to the API client that is
                to be updated by this method.
    """
    # if requested version is < 1.2, convert AVAILABLE to the old NOSTATE
    if (api.request.version.minor < versions.MINOR_2_AVAILABLE_STATE
            and obj.provision_state == ir_states.AVAILABLE):
        obj.provision_state = ir_states.NOSTATE
    # if requested version < 1.39, convert INSPECTWAIT to INSPECTING
    if (not api_utils.allow_inspect_wait_state()
            and obj.provision_state == ir_states.INSPECTWAIT):
        obj.provision_state = ir_states.INSPECTING


class BootDeviceController(rest.RestController):

    _custom_actions = {
        'supported': ['GET'],
    }

    def _get_boot_device(self, rpc_node, supported=False):
        """Get the current boot device or a list of supported devices.

        :param rpc_node: RPC Node object.
        :param supported: Boolean value.
            If true return a list of supported boot devices, if false
            return the current boot device. Default: False.
        :returns: The current boot device or a list of the supported boot
            devices.
        """
        topic = api.request.rpcapi.get_topic_for(rpc_node)
        if supported:
            return api.request.rpcapi.get_supported_boot_devices(
                api.request.context, rpc_node.uuid, topic)
        else:
            return api.request.rpcapi.get_boot_device(api.request.context,
                                                      rpc_node.uuid, topic)

    @METRICS.timer('BootDeviceController.put')
    @expose.expose(None, types.uuid_or_name, str, types.boolean,
                   status_code=http_client.NO_CONTENT)
    def put(self, node_ident, boot_device, persistent=False):
        """Set the boot device for a node.

        Set the boot device to use on next reboot of the node.

        :param node_ident: the UUID or logical name of a node.
        :param boot_device: the boot device, one of
            :mod:`ironic.common.boot_devices`.
        :param persistent: Boolean value. True if the boot device will
            persist to all future boots, False if not.
            Default: False.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:set_boot_device', node_ident)
        topic = api.request.rpcapi.get_topic_for(rpc_node)
        api.request.rpcapi.set_boot_device(api.request.context,
                                           rpc_node.uuid,
                                           boot_device,
                                           persistent=persistent,
                                           topic=topic)

    @METRICS.timer('BootDeviceController.get')
    @expose.expose(str, types.uuid_or_name)
    def get(self, node_ident):
        """Get the current boot device for a node.

        :param node_ident: the UUID or logical name of a node.
        :returns: a json object containing:

            :boot_device: the boot device, one of
                :mod:`ironic.common.boot_devices` or None if it is unknown.
            :persistent: Whether the boot device will persist to all future
                boots or not, None if it is unknown.
""" rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:get_boot_device', node_ident) return self._get_boot_device(rpc_node) @METRICS.timer('BootDeviceController.supported') @expose.expose(str, types.uuid_or_name) def supported(self, node_ident): """Get a list of the supported boot devices. :param node_ident: the UUID or logical name of a node. :returns: A json object with the list of supported boot devices. """ rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:get_boot_device', node_ident) boot_devices = self._get_boot_device(rpc_node, supported=True) return {'supported_boot_devices': boot_devices} class IndicatorAtComponent(object): def __init__(self, **kwargs): name = kwargs.get('name') component = kwargs.get('component') unique_name = kwargs.get('unique_name') if name and component: self.unique_name = name + '@' + component self.name = name self.component = component elif unique_name: try: index = unique_name.index('@') except ValueError: raise exception.InvalidParameterValue( _('Malformed indicator name "%s"') % unique_name) self.component = unique_name[index + 1:] self.name = unique_name[:index] self.unique_name = unique_name else: raise exception.MissingParameterValue( _('Missing indicator name "%s"')) class IndicatorState(base.APIBase): """API representation of indicator state.""" state = wsme.wsattr(wtypes.text) def __init__(self, **kwargs): self.state = kwargs.get('state') class Indicator(base.APIBase): """API representation of an indicator.""" name = wsme.wsattr(wtypes.text) component = wsme.wsattr(wtypes.text) readonly = types.BooleanType() states = wtypes.ArrayType(str) links = wsme.wsattr([link.Link], readonly=True) def __init__(self, **kwargs): self.name = kwargs.get('name') self.component = kwargs.get('component') self.readonly = kwargs.get('readonly', True) self.states = kwargs.get('states', []) @staticmethod def _convert_with_links(node_uuid, indicator, url): """Add links to the indicator.""" indicator.links = [ 
link.Link.make_link( 'self', url, 'nodes', '%s/management/indicators/%s' % ( node_uuid, indicator.name)), link.Link.make_link( 'bookmark', url, 'nodes', '%s/management/indicators/%s' % ( node_uuid, indicator.name), bookmark=True)] return indicator @classmethod def convert_with_links(cls, node_uuid, rpc_component, rpc_name, **rpc_fields): """Add links to the indicator.""" indicator = Indicator( component=rpc_component, name=rpc_name, **rpc_fields) return cls._convert_with_links( node_uuid, indicator, pecan.request.host_url) class IndicatorsCollection(wtypes.Base): """API representation of the indicators for a node.""" indicators = [Indicator] """Node indicators list""" @staticmethod def collection_from_dict(node_ident, indicators): col = IndicatorsCollection() indicator_list = [] for component, names in indicators.items(): for name, fields in names.items(): indicator_at_component = IndicatorAtComponent( component=component, name=name) indicator = Indicator.convert_with_links( node_ident, component, indicator_at_component.unique_name, **fields) indicator_list.append(indicator) col.indicators = indicator_list return col class IndicatorController(rest.RestController): @METRICS.timer('IndicatorController.put') @expose.expose(None, types.uuid_or_name, wtypes.text, wtypes.text, status_code=http_client.NO_CONTENT) def put(self, node_ident, indicator, state): """Set node hardware component indicator to the desired state. :param node_ident: the UUID or logical name of a node. :param indicator: Indicator ID (as reported by `get_supported_indicators`). :param state: Indicator state, one of mod:`ironic.common.indicator_states`. 
""" cdict = pecan.request.context.to_policy_values() policy.authorize('baremetal:node:set_indicator_state', cdict, cdict) rpc_node = api_utils.get_rpc_node(node_ident) topic = pecan.request.rpcapi.get_topic_for(rpc_node) indicator_at_component = IndicatorAtComponent(unique_name=indicator) pecan.request.rpcapi.set_indicator_state( pecan.request.context, rpc_node.uuid, indicator_at_component.component, indicator_at_component.name, state, topic=topic) @METRICS.timer('IndicatorController.get_one') @expose.expose(IndicatorState, types.uuid_or_name, wtypes.text) def get_one(self, node_ident, indicator): """Get node hardware component indicator and its state. :param node_ident: the UUID or logical name of a node. :param indicator: Indicator ID (as reported by `get_supported_indicators`). :returns: a dict with the "state" key and one of mod:`ironic.common.indicator_states` as a value. """ cdict = pecan.request.context.to_policy_values() policy.authorize('baremetal:node:get_indicator_state', cdict, cdict) rpc_node = api_utils.get_rpc_node(node_ident) topic = pecan.request.rpcapi.get_topic_for(rpc_node) indicator_at_component = IndicatorAtComponent(unique_name=indicator) state = pecan.request.rpcapi.get_indicator_state( pecan.request.context, rpc_node.uuid, indicator_at_component.component, indicator_at_component.name, topic=topic) return IndicatorState(state=state) @METRICS.timer('IndicatorController.get_all') @expose.expose(IndicatorsCollection, types.uuid_or_name, wtypes.text, ignore_extra_args=True) def get_all(self, node_ident): """Get node hardware components and their indicators. :param node_ident: the UUID or logical name of a node. :returns: A json object of hardware components (:mod:`ironic.common.components`) as keys with indicator IDs (from `get_supported_indicators`) as values. 
""" cdict = pecan.request.context.to_policy_values() policy.authorize('baremetal:node:get_indicator_state', cdict, cdict) rpc_node = api_utils.get_rpc_node(node_ident) topic = pecan.request.rpcapi.get_topic_for(rpc_node) indicators = pecan.request.rpcapi.get_supported_indicators( pecan.request.context, rpc_node.uuid, topic=topic) return IndicatorsCollection.collection_from_dict( node_ident, indicators) class InjectNmiController(rest.RestController): @METRICS.timer('InjectNmiController.put') @expose.expose(None, types.uuid_or_name, status_code=http_client.NO_CONTENT) def put(self, node_ident): """Inject NMI for a node. Inject NMI (Non Maskable Interrupt) for a node immediately. :param node_ident: the UUID or logical name of a node. :raises: NotFound if requested version of the API doesn't support inject nmi. :raises: HTTPForbidden if the policy is not authorized. :raises: NodeNotFound if the node is not found. :raises: NodeLocked if the node is locked by another conductor. :raises: UnsupportedDriverExtension if the node's driver doesn't support management or management.inject_nmi. :raises: InvalidParameterValue when the wrong driver info is specified or an invalid boot device is specified. :raises: MissingParameterValue if missing supplied info. 
        """
        if not api_utils.allow_inject_nmi():
            raise exception.NotFound()

        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:inject_nmi', node_ident)

        topic = api.request.rpcapi.get_topic_for(rpc_node)
        api.request.rpcapi.inject_nmi(api.request.context, rpc_node.uuid,
                                      topic=topic)


class NodeManagementController(rest.RestController):

    boot_device = BootDeviceController()
    """Expose boot_device as a sub-element of management"""

    inject_nmi = InjectNmiController()
    """Expose inject_nmi as a sub-element of management"""

    indicators = IndicatorController()
    """Expose indicators as a sub-element of management"""


class ConsoleInfo(base.Base):
    """API representation of the console information for a node."""

    console_enabled = types.boolean
    """The console state: if the console is enabled or not."""

    console_info = {str: types.jsontype}
    """The console information. It typically includes the url to access the
    console and the type of the application that hosts the console."""

    @classmethod
    def sample(cls):
        console = {'type': 'shellinabox', 'url': 'http://:4201'}
        return cls(console_enabled=True, console_info=console)


class NodeConsoleController(rest.RestController):

    @METRICS.timer('NodeConsoleController.get')
    @expose.expose(ConsoleInfo, types.uuid_or_name)
    def get(self, node_ident):
        """Get connection information about the console.

        :param node_ident: UUID or logical name of a node.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:get_console', node_ident)

        topic = api.request.rpcapi.get_topic_for(rpc_node)
        try:
            console = api.request.rpcapi.get_console_information(
                api.request.context, rpc_node.uuid, topic)
            console_state = True
        except exception.NodeConsoleNotEnabled:
            # A disabled console is reported, not treated as an error.
            console = None
            console_state = False

        return ConsoleInfo(console_enabled=console_state,
                           console_info=console)

    @METRICS.timer('NodeConsoleController.put')
    @expose.expose(None, types.uuid_or_name, types.boolean,
                   status_code=http_client.ACCEPTED)
    def put(self, node_ident, enabled):
        """Start and stop the node console.

        :param node_ident: UUID or logical name of a node.
        :param enabled: Boolean value; whether to enable or disable the
            console.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:set_console_state', node_ident)
        topic = api.request.rpcapi.get_topic_for(rpc_node)
        api.request.rpcapi.set_console_mode(api.request.context,
                                            rpc_node.uuid, enabled, topic)
        # Set the HTTP Location Header
        url_args = '/'.join([node_ident, 'states', 'console'])
        api.response.location = link.build_url('nodes', url_args)


class NodeStates(base.APIBase):
    """API representation of the states of a node."""

    console_enabled = types.boolean
    """Indicates whether the console access is enabled or disabled on
    the node."""

    power_state = str
    """Represent the current (not transition) power state of the node"""

    provision_state = str
    """Represent the current (not transition) provision state of the node"""

    provision_updated_at = datetime.datetime
    """The UTC date and time of the last provision state change"""

    target_power_state = str
    """The user modified desired power state of the node."""

    target_provision_state = str
    """The user modified desired provision state of the node."""

    last_error = str
    """Any error from the most recent (last) asynchronous transaction that
    started but failed to finish."""

    raid_config = wsme.wsattr({str: types.jsontype}, readonly=True)
    """Represents the RAID configuration that the node is configured with."""

    target_raid_config = wsme.wsattr({str: types.jsontype}, readonly=True)
    """The desired RAID configuration, to be used the next time the node
    is configured."""

    @staticmethod
    def convert(rpc_node):
        # Copy the state attributes from the RPC node; RAID fields are only
        # exposed when the requested API version allows them.
        attr_list = ['console_enabled', 'last_error', 'power_state',
                     'provision_state', 'target_power_state',
                     'target_provision_state', 'provision_updated_at']
        if api_utils.allow_raid_config():
            attr_list.extend(['raid_config', 'target_raid_config'])
        states = NodeStates()
        for attr in attr_list:
            setattr(states, attr, getattr(rpc_node, attr))
        update_state_in_older_versions(states)
        return states

    @classmethod
    def sample(cls):
        sample = cls(target_power_state=ir_states.POWER_ON,
                     target_provision_state=ir_states.ACTIVE,
                     last_error=None,
                     console_enabled=False,
                     provision_updated_at=None,
                     power_state=ir_states.POWER_ON,
                     provision_state=None,
                     raid_config=None,
                     target_raid_config=None)
        return sample


class NodeStatesController(rest.RestController):

    _custom_actions = {
        'power': ['PUT'],
        'provision': ['PUT'],
        'raid': ['PUT'],
    }

    console = NodeConsoleController()
    """Expose console as a sub-element of states"""

    @METRICS.timer('NodeStatesController.get')
    @expose.expose(NodeStates, types.uuid_or_name)
    def get(self, node_ident):
        """List the states of the node.

        :param node_ident: the UUID or logical_name of a node.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:get_states', node_ident)

        # NOTE(lucasagomes): All these state values come from the
        # DB. Ironic counts with a periodic task that verify the current
        # power states of the nodes and update the DB accordingly.
        return NodeStates.convert(rpc_node)

    @METRICS.timer('NodeStatesController.raid')
    @expose.expose(None, types.uuid_or_name, body=types.jsontype)
    def raid(self, node_ident, target_raid_config):
        """Set the target raid config of the node.

        :param node_ident: the UUID or logical name of a node.
        :param target_raid_config: Desired target RAID configuration of
            the node. It may be an empty dictionary as well.
        :raises: UnsupportedDriverExtension, if the node's driver doesn't
            support RAID configuration.
        :raises: InvalidParameterValue, if validation of target raid config
            fails.
        :raises: NotAcceptable, if requested version of the API is less than
            1.12.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:set_raid_state', node_ident)
        if not api_utils.allow_raid_config():
            raise exception.NotAcceptable()
        topic = api.request.rpcapi.get_topic_for(rpc_node)
        try:
            api.request.rpcapi.set_target_raid_config(
                api.request.context, rpc_node.uuid,
                target_raid_config, topic=topic)
        except exception.UnsupportedDriverExtension as e:
            # Change error code as 404 seems appropriate because RAID is a
            # standard interface and all drivers might not have it.
            e.code = http_client.NOT_FOUND
            raise

    @METRICS.timer('NodeStatesController.power')
    @expose.expose(None, types.uuid_or_name, str,
                   wtypes.IntegerType(minimum=1),
                   status_code=http_client.ACCEPTED)
    def power(self, node_ident, target, timeout=None):
        """Set the power state of the node.

        :param node_ident: the UUID or logical name of a node.
        :param target: The desired power state of the node.
        :param timeout: timeout (in seconds) positive integer (> 0) for any
            power state. ``None`` indicates to use default timeout.
        :raises: ClientSideError (HTTP 409) if a power operation is
            already in progress.
        :raises: InvalidStateRequested (HTTP 400) if the requested target
            state is not valid or if the node is in CLEANING state.
        :raises: NotAcceptable (HTTP 406) for soft reboot, soft power off or
            timeout parameter, if requested version of the API is less than
            1.27.
        :raises: Invalid (HTTP 400) if timeout value is less than 1.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:set_power_state', node_ident)

        # TODO(lucasagomes): Test if it's able to transition to the
        #                    target state from the current one
        topic = api.request.rpcapi.get_topic_for(rpc_node)

        # Soft power operations and explicit timeouts only exist from
        # API 1.27 on.
        if ((target in [ir_states.SOFT_REBOOT, ir_states.SOFT_POWER_OFF]
             or timeout) and not api_utils.allow_soft_power_off()):
            raise exception.NotAcceptable()
        # FIXME(naohirot): This check is workaround because
        # wtypes.IntegerType(minimum=1) is not effective
        if timeout is not None and timeout < 1:
            raise exception.Invalid(
                _("timeout has to be positive integer"))

        if target not in ALLOWED_TARGET_POWER_STATES:
            raise exception.InvalidStateRequested(
                action=target, node=node_ident,
                state=rpc_node.power_state)
        # Don't change power state for nodes being cleaned
        elif rpc_node.provision_state in (ir_states.CLEANWAIT,
                                          ir_states.CLEANING):
            raise exception.InvalidStateRequested(
                action=target, node=node_ident,
                state=rpc_node.provision_state)

        api.request.rpcapi.change_node_power_state(api.request.context,
                                                   rpc_node.uuid, target,
                                                   timeout=timeout,
                                                   topic=topic)
        # Set the HTTP Location Header
        url_args = '/'.join([node_ident, 'states'])
        api.response.location = link.build_url('nodes', url_args)

    def _do_provision_action(self, rpc_node, target, configdrive=None,
                             clean_steps=None, rescue_password=None):
        """Dispatch the requested provision action to the conductor RPC API.

        :param rpc_node: RPC Node object.
        :param target: the desired provision state or verb.
        :param configdrive: config drive payload; only meaningful for the
            "active"/"rebuild" targets.
        :param clean_steps: user-supplied clean steps; required for "clean".
        :param rescue_password: password for the rescue environment;
            required for "rescue".
        """
        topic = api.request.rpcapi.get_topic_for(rpc_node)
        # Note that there is a race condition. The node state(s) could change
        # by the time the RPC call is made and the TaskManager manager gets a
        # lock.
        if target in (ir_states.ACTIVE, ir_states.REBUILD):
            # Deploy and rebuild share one RPC entry point, distinguished
            # by the 'rebuild' flag.
            rebuild = (target == ir_states.REBUILD)
            api.request.rpcapi.do_node_deploy(context=api.request.context,
                                              node_id=rpc_node.uuid,
                                              rebuild=rebuild,
                                              configdrive=configdrive,
                                              topic=topic)
        elif (target == ir_states.VERBS['unrescue']):
            api.request.rpcapi.do_node_unrescue(
                api.request.context, rpc_node.uuid, topic)
        elif (target == ir_states.VERBS['rescue']):
            # A whitespace-only password is rejected as well.
            if not (rescue_password and rescue_password.strip()):
                msg = (_('A non-empty "rescue_password" is required when '
                         'setting target provision state to %s')
                       % ir_states.VERBS['rescue'])
                raise exception.ClientSideError(
                    msg, status_code=http_client.BAD_REQUEST)
            api.request.rpcapi.do_node_rescue(
                api.request.context, rpc_node.uuid, rescue_password, topic)
        elif target == ir_states.DELETED:
            api.request.rpcapi.do_node_tear_down(
                api.request.context, rpc_node.uuid, topic)
        elif target == ir_states.VERBS['inspect']:
            api.request.rpcapi.inspect_hardware(
                api.request.context, rpc_node.uuid, topic=topic)
        elif target == ir_states.VERBS['clean']:
            if not clean_steps:
                msg = (_('"clean_steps" is required when setting target '
                         'provision state to %s') % ir_states.VERBS['clean'])
                raise exception.ClientSideError(
                    msg, status_code=http_client.BAD_REQUEST)
            # Validate against _CLEAN_STEPS_SCHEMA before going over RPC.
            _check_clean_steps(clean_steps)
            api.request.rpcapi.do_node_clean(
                api.request.context, rpc_node.uuid, clean_steps, topic)
        elif target in PROVISION_ACTION_STATES:
            api.request.rpcapi.do_provisioning_action(
                api.request.context, rpc_node.uuid, target, topic)
        else:
            msg = (_('The requested action "%(action)s" could not be '
                     'understood.') % {'action': target})
            raise exception.InvalidStateRequested(message=msg)

    @METRICS.timer('NodeStatesController.provision')
    @expose.expose(None, types.uuid_or_name, str, types.jsontype,
                   types.jsontype, str,
                   status_code=http_client.ACCEPTED)
    def provision(self, node_ident, target, configdrive=None,
                  clean_steps=None, rescue_password=None):
        """Asynchronous trigger the provisioning of the node.
        This will set the target provision state of the node, and a
        background task will begin which actually applies the state change.
        This call will return a 202 (Accepted) indicating the request was
        accepted and is in progress; the client should continue to GET the
        status of this node to observe the status of the requested action.

        :param node_ident: UUID or logical name of a node.
        :param target: The desired provision state of the node or verb.
        :param configdrive: Optional. A gzipped and base64 encoded
            configdrive or a dict to build a configdrive from. Only valid when
            setting provision state to "active" or "rebuild".
        :param clean_steps: An ordered list of cleaning steps that will be
            performed on the node. A cleaning step is a dictionary with
            required keys 'interface' and 'step', and optional key 'args'. If
            specified, the value for 'args' is a keyword variable argument
            dictionary that is passed to the cleaning step method.::

              { 'interface': <driver_interface>,
                'step': <name_of_clean_step>,
                'args': {<arg1>: <value1>, ..., <argn>: <valuen>} }

            For example (this isn't a real example, this cleaning step
            doesn't exist)::

              { 'interface': 'deploy',
                'step': 'upgrade_firmware',
                'args': {'force': True} }

            This is required (and only valid) when target is "clean".
        :param rescue_password: A string representing the password to be set
            inside the rescue environment. This is required (and only valid),
            when target is "rescue".
        :raises: NodeLocked (HTTP 409) if the node is currently locked.
        :raises: ClientSideError (HTTP 409) if the node is already being
            provisioned.
        :raises: InvalidParameterValue (HTTP 400), if validation of
            clean_steps or power driver interface fails.
        :raises: InvalidStateRequested (HTTP 400) if the requested transition
            is not possible from the current state.
        :raises: NodeInMaintenance (HTTP 400), if operation cannot be
            performed because the node is in maintenance mode.
        :raises: NoFreeConductorWorker (HTTP 503) if no workers are
            available.
        :raises: NotAcceptable (HTTP 406) if the API version specified does
            not allow the requested state transition.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:set_provision_state', node_ident)

        api_utils.check_allow_management_verbs(target)

        if (target in (ir_states.ACTIVE, ir_states.REBUILD)
                and rpc_node.maintenance):
            raise exception.NodeInMaintenance(op=_('provisioning'),
                                              node=rpc_node.uuid)

        m = ir_states.machine.copy()
        m.initialize(rpc_node.provision_state)
        if not m.is_actionable_event(ir_states.VERBS.get(target, target)):
            # Normally, we let the task manager recognize and deal with
            # NodeLocked exceptions. However, that isn't done until the RPC
            # calls below.
            # In order to main backward compatibility with our API HTTP
            # response codes, we have this check here to deal with cases where
            # a node is already being operated on (DEPLOYING or such) and we
            # want to continue returning 409. Without it, we'd return 400.
            if rpc_node.reservation:
                raise exception.NodeLocked(node=rpc_node.uuid,
                                           host=rpc_node.reservation)
            raise exception.InvalidStateRequested(
                action=target, node=rpc_node.uuid,
                state=rpc_node.provision_state)

        api_utils.check_allow_configdrive(target, configdrive)

        if clean_steps and target != ir_states.VERBS['clean']:
            msg = (_('"clean_steps" is only valid when setting target '
                     'provision state to %s') % ir_states.VERBS['clean'])
            raise exception.ClientSideError(
                msg, status_code=http_client.BAD_REQUEST)

        if (rescue_password is not None
                and target != ir_states.VERBS['rescue']):
            msg = (_('"rescue_password" is only valid when setting target '
                     'provision state to %s') % ir_states.VERBS['rescue'])
            raise exception.ClientSideError(
                msg, status_code=http_client.BAD_REQUEST)

        # Aborting inspection is only allowed from API versions that
        # support it.
        if (rpc_node.provision_state == ir_states.INSPECTWAIT
                and target == ir_states.VERBS['abort']):
            if not api_utils.allow_inspect_abort():
                raise exception.NotAcceptable()

        self._do_provision_action(rpc_node, target, configdrive, clean_steps,
                                  rescue_password)

        # Set the HTTP Location
Header
        url_args = '/'.join([node_ident, 'states'])
        api.response.location = link.build_url('nodes', url_args)


def _check_clean_steps(clean_steps):
    """Ensure all necessary keys are present and correct in clean steps.

    Check that the user-specified clean steps are in the expected format and
    include the required information.

    :param clean_steps: a list of clean steps. For more details, see the
        clean_steps parameter of :func:`NodeStatesController.provision`.
    :raises: InvalidParameterValue if validation of clean steps fails.
    """
    try:
        jsonschema.validate(clean_steps, _CLEAN_STEPS_SCHEMA)
    except jsonschema.ValidationError as exc:
        raise exception.InvalidParameterValue(_('Invalid clean_steps: %s')
                                              % exc)


class Traits(base.APIBase):
    """API representation of the traits for a node."""

    traits = wtypes.ArrayType(str)
    """node traits"""

    @classmethod
    def sample(cls):
        traits = ["CUSTOM_TRAIT1", "CUSTOM_TRAIT2"]
        return cls(traits=traits)


def _get_chassis_uuid(node):
    """Return the UUID of a node's chassis, or None.

    :param node: a Node object.
    :returns: the UUID of the node's chassis, or None if the node has no
        chassis set.
    """
    if not node.chassis_id:
        return
    chassis = objects.Chassis.get_by_id(api.request.context, node.chassis_id)
    return chassis.uuid


def _make_trait_list(context, node_id, traits):
    """Return a TraitList object for the specified node and traits.

    The Trait objects will not be created in the database.

    :param context: a request context.
    :param node_id: the ID of a node.
    :param traits: a list of trait strings to add to the TraitList.
    :returns: a TraitList object.
    """
    trait_objs = [objects.Trait(context, node_id=node_id, trait=t)
                  for t in traits]
    return objects.TraitList(context, objects=trait_objs)


class NodeTraitsController(rest.RestController):

    def __init__(self, node_ident):
        super(NodeTraitsController, self).__init__()
        # The node identifier is fixed at routing time; all methods below
        # operate on this node.
        self.node_ident = node_ident

    @METRICS.timer('NodeTraitsController.get_all')
    @expose.expose(Traits)
    def get_all(self):
        """List node traits."""
        node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:traits:list', self.node_ident)
        traits = objects.TraitList.get_by_node_id(api.request.context,
                                                  node.id)
        return Traits(traits=traits.get_trait_names())

    @METRICS.timer('NodeTraitsController.put')
    @expose.expose(None, str, wtypes.ArrayType(str),
                   status_code=http_client.NO_CONTENT)
    def put(self, trait=None, traits=None):
        """Add a trait to a node.

        :param trait: String value; trait to add to a node, or None. Mutually
            exclusive with 'traits'. If not None, adds this trait to the node.
        :param traits: List of Strings; traits to set for a node, or None.
            Mutually exclusive with 'trait'. If not None, replaces the node's
            traits with this list.
        """
        context = api.request.context
        node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:traits:set', self.node_ident)

        # Exactly one of 'trait' (single add) and 'traits' (full replace)
        # must be supplied; anything else is a client error.
        if (trait and traits is not None) or not (trait or traits is not None):
            msg = _("A single node trait may be added via PUT "
                    "/v1/nodes//traits/ with no body, "
                    "or all node traits may be replaced via PUT "
                    "/v1/nodes//traits with the list of "
                    "traits specified in the request body.")
            raise exception.Invalid(msg)

        if trait:
            if api.request.body and api.request.json_body:
                # Ensure PUT nodes/uuid1/traits/trait1 with a non-empty body
                # fails.
                msg = _("No body should be provided when adding a trait")
                raise exception.Invalid(msg)
            traits = [trait]
            replace = False
            new_traits = {t.trait for t in node.traits} | {trait}
        else:
            replace = True
            new_traits = set(traits)

        for trait in traits:
            api_utils.validate_trait(trait)

        # Update the node's traits to reflect the desired state.
        node.traits = _make_trait_list(context, node.id, sorted(new_traits))
        node.obj_reset_changes()
        chassis_uuid = _get_chassis_uuid(node)
        notify.emit_start_notification(context, node, 'update',
                                       chassis_uuid=chassis_uuid)
        with notify.handle_error_notification(context, node, 'update',
                                              chassis_uuid=chassis_uuid):
            topic = api.request.rpcapi.get_topic_for(node)
            api.request.rpcapi.add_node_traits(
                context, node.id, traits, replace=replace, topic=topic)
        notify.emit_end_notification(context, node, 'update',
                                     chassis_uuid=chassis_uuid)

        if not replace:
            # For single traits, set the HTTP Location Header.
            url_args = '/'.join((self.node_ident, 'traits', trait))
            api.response.location = link.build_url('nodes', url_args)

    @METRICS.timer('NodeTraitsController.delete')
    @expose.expose(None, str, status_code=http_client.NO_CONTENT)
    def delete(self, trait=None):
        """Remove one or all traits from a node.

        :param trait: String value; trait to remove from a node, or None. If
            None, all traits are removed.
        """
        context = api.request.context
        node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:traits:delete', self.node_ident)

        if trait:
            traits = [trait]
            new_traits = {t.trait for t in node.traits} - {trait}
        else:
            # trait=None means "remove everything" over RPC.
            traits = None
            new_traits = set()

        # Update the node's traits to reflect the desired state.
node.traits = _make_trait_list(context, node.id, sorted(new_traits)) node.obj_reset_changes() chassis_uuid = _get_chassis_uuid(node) notify.emit_start_notification(context, node, 'update', chassis_uuid=chassis_uuid) with notify.handle_error_notification(context, node, 'update', chassis_uuid=chassis_uuid): topic = api.request.rpcapi.get_topic_for(node) try: api.request.rpcapi.remove_node_traits( context, node.id, traits, topic=topic) except exception.NodeTraitNotFound: # NOTE(hshiina): Internal node ID should not be exposed. raise exception.NodeTraitNotFound(node_id=node.uuid, trait=trait) notify.emit_end_notification(context, node, 'update', chassis_uuid=chassis_uuid) class Node(base.APIBase): """API representation of a bare metal node. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a node. """ _chassis_uuid = None def _get_chassis_uuid(self): return self._chassis_uuid def _set_chassis_uuid(self, value): if value in (wtypes.Unset, None): self._chassis_uuid = value elif self._chassis_uuid != value: try: chassis = objects.Chassis.get(api.request.context, value) self._chassis_uuid = chassis.uuid # NOTE(lucasagomes): Create the chassis_id attribute on-the-fly # to satisfy the api -> rpc object # conversion. 
                self.chassis_id = chassis.id
            except exception.ChassisNotFound as e:
                # Change error code because 404 (NotFound) is inappropriate
                # response for a POST request to create a Port
                e.code = http_client.BAD_REQUEST
                raise

    uuid = types.uuid
    """Unique UUID for this node"""

    instance_uuid = types.uuid
    """The UUID of the instance in nova-compute"""

    name = wsme.wsattr(str)
    """The logical name for this node"""

    power_state = wsme.wsattr(str, readonly=True)
    """Represent the current (not transition) power state of the node"""

    target_power_state = wsme.wsattr(str, readonly=True)
    """The user modified desired power state of the node."""

    last_error = wsme.wsattr(str, readonly=True)
    """Any error from the most recent (last) asynchronous transaction that
    started but failed to finish."""

    provision_state = wsme.wsattr(str, readonly=True)
    """Represent the current (not transition) provision state of the node"""

    reservation = wsme.wsattr(str, readonly=True)
    """The hostname of the conductor that holds an exclusive lock on
    the node."""

    provision_updated_at = datetime.datetime
    """The UTC date and time of the last provision state change"""

    inspection_finished_at = datetime.datetime
    """The UTC date and time when the last hardware inspection finished
    successfully."""

    inspection_started_at = datetime.datetime
    """The UTC date and time when the hardware inspection was started"""

    maintenance = types.boolean
    """Indicates whether the node is in maintenance mode."""

    maintenance_reason = wsme.wsattr(str, readonly=True)
    """Indicates reason for putting a node in maintenance mode."""

    fault = wsme.wsattr(str, readonly=True)
    """Indicates the active fault of a node."""

    target_provision_state = wsme.wsattr(str, readonly=True)
    """The user modified desired provision state of the node."""

    console_enabled = types.boolean
    """Indicates whether the console access is enabled or disabled on
    the node."""

    instance_info = {str: types.jsontype}
    """This node's instance info."""

    driver = wsme.wsattr(str, mandatory=True)
    """The driver responsible for controlling the node"""

    driver_info = {str: types.jsontype}
    """This node's driver configuration"""

    driver_internal_info = wsme.wsattr({str: types.jsontype}, readonly=True)
    """This driver's internal configuration"""

    clean_step = wsme.wsattr({str: types.jsontype}, readonly=True)
    """The current clean step"""

    deploy_step = wsme.wsattr({str: types.jsontype}, readonly=True)
    """The current deploy step"""

    raid_config = wsme.wsattr({str: types.jsontype}, readonly=True)
    """Represents the current RAID configuration of the node """

    target_raid_config = wsme.wsattr({str: types.jsontype}, readonly=True)
    """The user modified RAID configuration of the node """

    extra = {str: types.jsontype}
    """This node's meta data"""

    resource_class = wsme.wsattr(wtypes.StringType(max_length=80))
    """The resource class for the node, useful for classifying or grouping
       nodes. Used, for example, to classify nodes in Nova's placement
       engine."""

    # NOTE: properties should use a class to enforce required properties
    #       current list: arch, cpus, disk, ram, image
    properties = {str: types.jsontype}
    """The physical characteristics of this node"""

    chassis_uuid = wsme.wsproperty(types.uuid, _get_chassis_uuid,
                                   _set_chassis_uuid)
    """The UUID of the chassis this node belongs"""

    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated node links"""

    ports = wsme.wsattr([link.Link], readonly=True)
    """Links to the collection of ports on this node"""

    portgroups = wsme.wsattr([link.Link], readonly=True)
    """Links to the collection of portgroups on this node"""

    volume = wsme.wsattr([link.Link], readonly=True)
    """Links to endpoint for retrieving volume resources on this node"""

    states = wsme.wsattr([link.Link], readonly=True)
    """Links to endpoint for retrieving and setting node states"""

    boot_interface = wsme.wsattr(str)
    """The boot interface to be used for this node"""

    console_interface = wsme.wsattr(str)
    """The console interface to be used for this node"""

    deploy_interface = wsme.wsattr(str)
    """The deploy interface to be used for this node"""

    inspect_interface = wsme.wsattr(str)
    """The inspect interface to be used for this node"""

    management_interface = wsme.wsattr(str)
    """The management interface to be used for this node"""

    network_interface = wsme.wsattr(str)
    """The network interface to be used for this node"""

    power_interface = wsme.wsattr(str)
    """The power interface to be used for this node"""

    raid_interface = wsme.wsattr(str)
    """The raid interface to be used for this node"""

    rescue_interface = wsme.wsattr(str)
    """The rescue interface to be used for this node"""

    storage_interface = wsme.wsattr(str)
    """The storage interface to be used for this node"""

    vendor_interface = wsme.wsattr(str)
    """The vendor interface to be used for this node"""

    traits = wtypes.ArrayType(str)
    """The traits associated with this node"""

    bios_interface = wsme.wsattr(str)
    """The bios interface to be used for this node"""

    conductor_group = wsme.wsattr(str)
    """The conductor group to manage this node"""

    automated_clean = types.boolean
    """Indicates whether the node will perform automated clean or not."""

    protected = types.boolean
    """Indicates whether the node is protected from undeploying/rebuilding."""

    protected_reason = wsme.wsattr(str)
    """Indicates reason for protecting the node."""

    conductor = wsme.wsattr(str, readonly=True)
    """Represent the conductor currently serving the node"""

    owner = wsme.wsattr(str)
    """Field for storage of physical node owner"""

    lessee = wsme.wsattr(wtypes.text)
    """Field for storage of physical node lessee"""

    description = wsme.wsattr(wtypes.text)
    """Field for node description"""

    allocation_uuid = wsme.wsattr(types.uuid, readonly=True)
    """The UUID of the allocation this node belongs"""

    retired = types.boolean
    """Indicates whether the node is marked for retirement."""

    retired_reason = wsme.wsattr(str)
    """Indicates the reason for a node's retirement."""

    # NOTE(deva): "conductor_affinity" shouldn't be presented on the
    #             API because it's an internal value. Don't add it here.

    def __init__(self, **kwargs):
        self.fields = []
        fields = list(objects.Node.fields)
        # NOTE(lucasagomes): chassis_uuid is not part of objects.Node.fields
        #                    because it's an API-only attribute.
        fields.append('chassis_uuid')
        # NOTE(kaifeng) conductor is not part of objects.Node.fields too.
        fields.append('conductor')
        for k in fields:
            # Add fields we expose.
            if hasattr(self, k):
                self.fields.append(k)
                # TODO(jroll) is there a less hacky way to do this?
                if k == 'traits' and kwargs.get('traits') is not None:
                    value = [t['trait'] for t in kwargs['traits']['objects']]
                # NOTE(jroll) this is special-cased to "" and not Unset,
                # because it is used in hash ring calculations
                elif (k == 'conductor_group'
                      and (k not in kwargs or kwargs[k] is wtypes.Unset)):
                    value = ''
                else:
                    value = kwargs.get(k, wtypes.Unset)
                setattr(self, k, value)

        # NOTE(lucasagomes): chassis_id is an attribute created on-the-fly
        # by _set_chassis_uuid(), it needs to be present in the fields so
        # that as_dict() will contain chassis_id field when converting it
        # before saving it in the database.
        self.fields.append('chassis_id')
        if 'chassis_uuid' not in kwargs:
            setattr(self, 'chassis_uuid',
                    kwargs.get('chassis_id', wtypes.Unset))

    @staticmethod
    def _convert_with_links(node, url, fields=None, show_states_links=True,
                            show_portgroups=True, show_volume=True):
        # Sub-resource links (ports, states, portgroups, volume) are only
        # populated when the full resource is requested (fields is None);
        # the self/bookmark links for the node itself are always set.
        if fields is None:
            node.ports = [link.Link.make_link('self', url, 'nodes',
                                              node.uuid + "/ports"),
                          link.Link.make_link('bookmark', url, 'nodes',
                                              node.uuid + "/ports",
                                              bookmark=True)
                          ]
            if show_states_links:
                node.states = [link.Link.make_link('self', url, 'nodes',
                                                   node.uuid + "/states"),
                               link.Link.make_link('bookmark', url, 'nodes',
                                                   node.uuid + "/states",
                                                   bookmark=True)]
            if show_portgroups:
                node.portgroups = [
                    link.Link.make_link('self', url, 'nodes',
                                        node.uuid + "/portgroups"),
                    link.Link.make_link('bookmark', url, 'nodes',
                                        node.uuid + "/portgroups",
                                        bookmark=True)]

            if show_volume:
                node.volume = [
                    link.Link.make_link('self', url, 'nodes',
                                        node.uuid + "/volume"),
                    link.Link.make_link('bookmark', url, 'nodes',
                                        node.uuid + "/volume",
                                        bookmark=True)]

        node.links = [link.Link.make_link('self', url, 'nodes',
                                          node.uuid),
                      link.Link.make_link('bookmark', url, 'nodes',
                                          node.uuid, bookmark=True)
                      ]
        return node

    @classmethod
    def convert_with_links(cls, rpc_node, fields=None, sanitize=True):
        node = Node(**rpc_node.as_dict())

        if (api_utils.allow_expose_conductors()
                and (fields is None or 'conductor' in fields)):
            # NOTE(kaifeng) It is possible a node gets orphaned in certain
            # circumstances, set conductor to None in such case.
            try:
                host = api.request.rpcapi.get_conductor_for(rpc_node)
                node.conductor = host
            except (exception.NoValidHost, exception.TemporaryFailure):
                LOG.debug('Currently there is no conductor servicing node '
                          '%(node)s.', {'node': rpc_node.uuid})
                node.conductor = None

        if (api_utils.allow_allocations()
                and (fields is None or 'allocation_uuid' in fields)):
            node.allocation_uuid = None
            if rpc_node.allocation_id:
                try:
                    allocation = objects.Allocation.get_by_id(
                        api.request.context,
                        rpc_node.allocation_id)
                    node.allocation_uuid = allocation.uuid
                except exception.AllocationNotFound:
                    pass

        if fields is not None:
            api_utils.check_for_invalid_fields(
                fields, set(node.as_dict()) | {'allocation_uuid'})

        show_states_links = (
            api_utils.allow_links_node_states_and_driver_properties())
        show_portgroups = api_utils.allow_portgroups_subcontrollers()
        show_volume = api_utils.allow_volume()

        node = cls._convert_with_links(node, api.request.public_url,
                                       fields=fields,
                                       show_states_links=show_states_links,
                                       show_portgroups=show_portgroups,
                                       show_volume=show_volume)
        # Callers that post-process the collection (e.g. NodeCollection)
        # may defer sanitization and call node.sanitize() themselves.
        if not sanitize:
            return node

        node.sanitize(fields)

        return node

    def sanitize(self, fields):
        """Removes sensitive and unrequested data.

        Will only keep the fields specified in the ``fields`` parameter.

        :param fields:
            list of fields to preserve, or ``None`` to preserve them all
        :type fields: list of str
        """
        cdict = api.request.context.to_policy_values()
        # NOTE(deva): the 'show_password' policy setting name exists for
        #             legacy purposes and can not be changed. Changing it will
        #             cause upgrade problems for any operators who have
        #             customized the value of this field
        show_driver_secrets = policy.check("show_password", cdict, cdict)
        show_instance_secrets = policy.check("show_instance_secrets",
                                             cdict, cdict)

        if not show_driver_secrets and self.driver_info != wtypes.Unset:
            self.driver_info = strutils.mask_dict_password(
                self.driver_info, "******")

            # NOTE(derekh): mask ssh keys for the ssh power driver.
            # As this driver is deprecated masking here (opposed to strutils)
            # is simpler, and easier to backport. This can be removed along
            # with support for the ssh power driver.
            if self.driver_info.get('ssh_key_contents'):
                self.driver_info['ssh_key_contents'] = "******"

        if not show_instance_secrets and self.instance_info != wtypes.Unset:
            self.instance_info = strutils.mask_dict_password(
                self.instance_info, "******")

            # NOTE(deva): agent driver may store a swift temp_url on the
            # instance_info, which shouldn't be exposed to non-admin users.
            # Now that ironic supports additional policies, we need to hide
            # it here, based on this policy.
            # Related to bug #1613903
            if self.instance_info.get('image_url'):
                self.instance_info['image_url'] = "******"

        # The agent token is always masked, regardless of policy.
        if self.driver_internal_info.get('agent_secret_token'):
            self.driver_internal_info['agent_secret_token'] = "******"

        update_state_in_older_versions(self)
        hide_fields_in_newer_versions(self)

        if fields is not None:
            self.unset_fields_except(fields)

        # NOTE(lucasagomes): The numeric ID should not be exposed to
        #                    the user, it's internal only.
        self.chassis_id = wtypes.Unset

        show_states_links = (
            api_utils.allow_links_node_states_and_driver_properties())
        show_portgroups = api_utils.allow_portgroups_subcontrollers()
        show_volume = api_utils.allow_volume()

        if not show_volume:
            self.volume = wtypes.Unset
        if not show_portgroups:
            self.portgroups = wtypes.Unset
        if not show_states_links:
            self.states = wtypes.Unset

    @classmethod
    def sample(cls, expand=True):
        """Return a sample Node for API documentation purposes."""
        time = datetime.datetime(2000, 1, 1, 12, 0, 0)
        node_uuid = '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'
        instance_uuid = 'dcf1fbc5-93fc-4596-9395-b80572f6267b'
        name = 'database16-dc02'
        sample = cls(uuid=node_uuid, instance_uuid=instance_uuid,
                     name=name, power_state=ir_states.POWER_ON,
                     target_power_state=ir_states.NOSTATE,
                     last_error=None, provision_state=ir_states.ACTIVE,
                     target_provision_state=ir_states.NOSTATE,
                     reservation=None, driver='fake', driver_info={},
                     driver_internal_info={}, extra={},
                     properties={'memory_mb': '1024', 'local_gb': '10',
                                 'cpus': '1'},
                     updated_at=time, created_at=time,
                     provision_updated_at=time, instance_info={},
                     maintenance=False, maintenance_reason=None, fault=None,
                     inspection_finished_at=None, inspection_started_at=time,
                     console_enabled=False, clean_step={}, deploy_step={},
                     raid_config=None, target_raid_config=None,
                     network_interface='flat', resource_class='baremetal-gold',
                     boot_interface=None, console_interface=None,
                     deploy_interface=None, inspect_interface=None,
                     management_interface=None, power_interface=None,
                     raid_interface=None, vendor_interface=None,
                     storage_interface=None, traits=[], rescue_interface=None,
                     bios_interface=None, conductor_group="",
                     automated_clean=None, protected=False,
                     protected_reason=None,
                     owner=None,
                     allocation_uuid='982ddb5b-bce5-4d23-8fb8-7f710f648cd5',
                     retired=False, retired_reason=None, lessee=None)
        # NOTE(matty_dubs): The chassis_uuid getter() is based on the
        # _chassis_uuid variable:
        sample._chassis_uuid = 'edcad704-b2da-41d5-96d9-afd580ecfa12'
        fields = None if expand else _DEFAULT_RETURN_FIELDS
        return cls._convert_with_links(sample, 'http://localhost:6385',
                                       fields=fields)


class NodePatchType(types.JsonPatchType):

    _api_base = Node

    @staticmethod
    def internal_attrs():
        # Attributes that may not be modified via JSON PATCH by API clients.
        defaults = types.JsonPatchType.internal_attrs()
        # TODO(lucasagomes): Include maintenance once the endpoint
        # v1/nodes/<uuid>/maintenance do more things than updating the DB.
        return defaults + ['/console_enabled', '/last_error',
                           '/power_state', '/provision_state', '/reservation',
                           '/target_power_state', '/target_provision_state',
                           '/provision_updated_at', '/maintenance_reason',
                           '/driver_internal_info', '/inspection_finished_at',
                           '/inspection_started_at', '/clean_step',
                           '/deploy_step', '/raid_config',
                           '/target_raid_config', '/fault', '/conductor',
                           '/allocation_uuid']


class NodeCollection(collection.Collection):
    """API representation of a collection of nodes."""

    nodes = [Node]
    """A list containing nodes objects"""

    def __init__(self, **kwargs):
        self._type = 'nodes'

    @staticmethod
    def convert_with_links(nodes, limit, url=None, fields=None, **kwargs):
        collection = NodeCollection()
        # Sanitization is deferred (sanitize=False) so that the 'next' link
        # can be computed from the unsanitized nodes first.
        collection.nodes = [Node.convert_with_links(n, fields=fields,
                                                    sanitize=False)
                            for n in nodes]
        collection.next = collection.get_next(limit, url=url, fields=fields,
                                              **kwargs)

        for node in collection.nodes:
            node.sanitize(fields)

        return collection

    @classmethod
    def sample(cls):
        sample = cls()
        node = Node.sample(expand=False)
        sample.nodes = [node]
        return sample


class NodeVendorPassthruController(rest.RestController):
    """REST controller for VendorPassthru.

    This controller allow vendors to expose a custom functionality in
    the Ironic API. Ironic will merely relay the message from here to the
    appropriate driver, no introspection will be made in the message body.
    """

    _custom_actions = {
        'methods': ['GET']
    }

    @METRICS.timer('NodeVendorPassthruController.methods')
    @expose.expose(str, types.uuid_or_name)
    def methods(self, node_ident):
        """Retrieve information about vendor methods of the given node.

        :param node_ident: UUID or logical name of a node.
        :returns: dictionary with <vendor method name>:<method metadata>
                  entries.
        :raises: NodeNotFound if the node is not found.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:vendor_passthru', node_ident)

        # Raise an exception if node is not found
        if rpc_node.driver not in _VENDOR_METHODS:
            # Cache the per-driver method list on first use.
            topic = api.request.rpcapi.get_topic_for(rpc_node)
            ret = api.request.rpcapi.get_node_vendor_passthru_methods(
                api.request.context, rpc_node.uuid, topic=topic)
            _VENDOR_METHODS[rpc_node.driver] = ret

        return _VENDOR_METHODS[rpc_node.driver]

    @METRICS.timer('NodeVendorPassthruController._default')
    @expose.expose(str, types.uuid_or_name, str, body=str)
    def _default(self, node_ident, method, data=None):
        """Call a vendor extension.

        :param node_ident: UUID or logical name of a node.
        :param method: name of the method in vendor driver.
        :param data: body of data to supply to the specified method.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:vendor_passthru', node_ident)

        # Raise an exception if node is not found
        topic = api.request.rpcapi.get_topic_for(rpc_node)
        return api_utils.vendor_passthru(rpc_node.uuid, method, topic,
                                         data=data)


class NodeMaintenanceController(rest.RestController):

    def _set_maintenance(self, rpc_node, maintenance_mode, reason=None):
        """Set or clear maintenance mode on a node and emit notifications."""
        context = api.request.context
        rpc_node.maintenance = maintenance_mode
        rpc_node.maintenance_reason = reason
        notify.emit_start_notification(context, rpc_node, 'maintenance_set')
        with notify.handle_error_notification(context, rpc_node,
                                              'maintenance_set'):
            try:
                topic = api.request.rpcapi.get_topic_for(rpc_node)
            except exception.NoValidHost as e:
                e.code = http_client.BAD_REQUEST
                raise

            new_node = api.request.rpcapi.update_node(context, rpc_node,
                                                      topic=topic)
        notify.emit_end_notification(context, new_node, 'maintenance_set')

    @METRICS.timer('NodeMaintenanceController.put')
    @expose.expose(None, types.uuid_or_name, str,
                   status_code=http_client.ACCEPTED)
    def put(self, node_ident, reason=None):
        """Put the node in maintenance mode.

        :param node_ident: the UUID or logical_name of a node.
        :param reason: Optional, the reason why it's in maintenance.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:set_maintenance', node_ident)

        self._set_maintenance(rpc_node, True, reason=reason)

    @METRICS.timer('NodeMaintenanceController.delete')
    @expose.expose(None, types.uuid_or_name,
                   status_code=http_client.ACCEPTED)
    def delete(self, node_ident):
        """Remove the node from maintenance mode.

        :param node_ident: the UUID or logical name of a node.
        """
        rpc_node = api_utils.check_node_policy_and_retrieve(
            'baremetal:node:clear_maintenance', node_ident)

        self._set_maintenance(rpc_node, False)


# NOTE(vsaienko) We don't support pagination with VIFs, so we don't use
# collection.Collection here.
class VifCollection(base.Base):
    """API representation of a collection of VIFs. """

    vifs = [types.viftype]
    """A list containing VIFs objects"""

    @staticmethod
    def collection_from_list(vifs):
        col = VifCollection()
        col.vifs = [types.VifType.frombasetype(vif) for vif in vifs]
        return col


class NodeVIFController(rest.RestController):

    def __init__(self, node_ident):
        self.node_ident = node_ident

    def _get_node_and_topic(self, policy_name):
        """Return (node, RPC topic) after enforcing the given policy."""
        rpc_node = api_utils.check_node_policy_and_retrieve(
            policy_name, self.node_ident)
        try:
            return rpc_node, api.request.rpcapi.get_topic_for(rpc_node)
        except exception.NoValidHost as e:
            e.code = http_client.BAD_REQUEST
            raise

    @METRICS.timer('NodeVIFController.get_all')
    @expose.expose(VifCollection)
    def get_all(self):
        """Get a list of attached VIFs"""
        rpc_node, topic = self._get_node_and_topic('baremetal:node:vif:list')
        vifs = api.request.rpcapi.vif_list(api.request.context,
                                           rpc_node.uuid, topic=topic)
        return VifCollection.collection_from_list(vifs)

    @METRICS.timer('NodeVIFController.post')
    @expose.expose(None, body=types.viftype,
                   status_code=http_client.NO_CONTENT)
    def post(self, vif):
        """Attach a VIF to this node

        :param vif: a dictionary of information about a VIF.
            It must have an 'id' key, whose value is a unique
            identifier for that VIF.
        """
        rpc_node, topic = self._get_node_and_topic('baremetal:node:vif:attach')
        api.request.rpcapi.vif_attach(api.request.context, rpc_node.uuid,
                                      vif_info=vif, topic=topic)

    @METRICS.timer('NodeVIFController.delete')
    @expose.expose(None, types.uuid_or_name,
                   status_code=http_client.NO_CONTENT)
    def delete(self, vif_id):
        """Detach a VIF from this node

        :param vif_id: The ID of a VIF to detach
        """
        rpc_node, topic = self._get_node_and_topic('baremetal:node:vif:detach')
        api.request.rpcapi.vif_detach(api.request.context, rpc_node.uuid,
                                      vif_id=vif_id, topic=topic)


class NodesController(rest.RestController):
    """REST controller for Nodes."""

    # NOTE(lucasagomes): For future reference. If we happen
    # to need to add another sub-controller in this class let's
    # try to make it a parameter instead of an endpoint due
    # https://bugs.launchpad.net/ironic/+bug/1572651, e.g, instead of
    # v1/nodes/(ident)/detail we could have v1/nodes/(ident)?detail=True

    states = NodeStatesController()
    """Expose the state controller action as a sub-element of nodes"""

    vendor_passthru = NodeVendorPassthruController()
    """A resource used for vendors to expose a custom functionality in
    the API"""

    management = NodeManagementController()
    """Expose management as a sub-element of nodes"""

    maintenance = NodeMaintenanceController()
    """Expose maintenance as a sub-element of nodes"""

    from_chassis = False
    """A flag to indicate if the requests to this controller are coming
    from the top-level resource Chassis"""

    _custom_actions = {
        'detail': ['GET'],
        'validate': ['GET'],
    }

    # Fields that cannot be used as a sort key (JSON/dict columns).
    invalid_sort_key_list = ['properties', 'driver_info', 'extra',
                             'instance_info', 'driver_internal_info',
                             'clean_step', 'deploy_step', 'raid_config',
                             'target_raid_config', 'traits']

    # Maps the first path segment after the node ident to the
    # sub-controller that handles it (see _lookup below).
    _subcontroller_map = {
        'ports': port.PortsController,
        'portgroups': portgroup.PortgroupsController,
        'vifs': NodeVIFController,
        'volume': volume.VolumeController,
        'traits': NodeTraitsController,
        'bios': bios.NodeBiosController,
        'allocation': allocation.NodeAllocationController,
    }

    @pecan.expose()
    def _lookup(self, ident, *remainder):
        try:
            ident = types.uuid_or_name.validate(ident)
        except exception.InvalidUuidOrName as e:
            pecan.abort(http_client.BAD_REQUEST, e.args[0])
        if not remainder:
            return
        # Sub-resources gated behind API microversions return 404 when the
        # requested version does not support them.
        if ((remainder[0] == 'portgroups'
                and not api_utils.allow_portgroups_subcontrollers())
            or (remainder[0] == 'vifs'
                and not api_utils.allow_vifs_subcontroller())
            or (remainder[0] == 'bios'
                and not api_utils.allow_bios_interface())
            or (remainder[0] == 'allocation'
                and not api_utils.allow_allocations())):
            pecan.abort(http_client.NOT_FOUND)
        if remainder[0] == 'traits' and not api_utils.allow_traits():
            # NOTE(mgoddard): Returning here will ensure we exhibit the
            # behaviour of previous releases for microversions without this
            # endpoint.
            return
        subcontroller = self._subcontroller_map.get(remainder[0])
        if subcontroller:
            return subcontroller(node_ident=ident), remainder[1:]

    def _filter_by_conductor(self, nodes, conductor):
        """Return only the nodes currently mapped to the given conductor."""
        filtered_nodes = []
        for n in nodes:
            try:
                host = api.request.rpcapi.get_conductor_for(n)
                if host == conductor:
                    filtered_nodes.append(n)
            except (exception.NoValidHost, exception.TemporaryFailure):
                # NOTE(kaifeng) Node gets orphaned in case some conductor
                # offline or all conductors are offline.
                pass

        return filtered_nodes

    def _get_nodes_collection(self, chassis_uuid, instance_uuid, associated,
                              maintenance, retired, provision_state, marker,
                              limit, sort_key, sort_dir, driver=None,
                              resource_class=None, resource_url=None,
                              fields=None, fault=None, conductor_group=None,
                              detail=None, conductor=None,
                              owner=None, lessee=None, project=None,
                              description_contains=None):
        """Build a NodeCollection from the given filters and paging options."""
        if self.from_chassis and not chassis_uuid:
            raise exception.MissingParameterValue(
                _("Chassis id not specified."))

        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)

        if sort_key in self.invalid_sort_key_list:
            raise exception.InvalidParameterValue(
                _("The sort_key value %(key)s is an invalid field for "
                  "sorting") % {'key': sort_key})

        marker_obj = None
        if marker:
            marker_obj = objects.Node.get_by_uuid(api.request.context,
                                                  marker)

        # The query parameters for the 'next' URL
        parameters = {}

        if instance_uuid:
            # NOTE(rloo) if instance_uuid is specified, the other query
            # parameters are ignored. Since there can be at most one node that
            # has this instance_uuid, we do not want to generate a 'next'
            # link.
            nodes = self._get_nodes_by_instance(instance_uuid)
            # NOTE(rloo) if limit==1 and len(nodes)==1 (see
            # Collection.has_next()), a 'next' link will be generated, which
            # we don't want.
            limit = 0
        else:
            possible_filters = {
                'maintenance': maintenance,
                'chassis_uuid': chassis_uuid,
                'associated': associated,
                'provision_state': provision_state,
                'driver': driver,
                'resource_class': resource_class,
                'fault': fault,
                'conductor_group': conductor_group,
                'owner': owner,
                'lessee': lessee,
                'project': project,
                'description_contains': description_contains,
                'retired': retired,
            }
            # Only pass filters that were actually supplied by the caller.
            filters = {}
            for key, value in possible_filters.items():
                if value is not None:
                    filters[key] = value

            nodes = objects.Node.list(api.request.context, limit, marker_obj,
                                      sort_key=sort_key, sort_dir=sort_dir,
                                      filters=filters)

            # Special filtering on results based on conductor field
            if conductor:
                nodes = self._filter_by_conductor(nodes, conductor)

            parameters = {'sort_key': sort_key, 'sort_dir': sort_dir}
            if associated:
                parameters['associated'] = associated
            if maintenance:
                parameters['maintenance'] = maintenance
            if retired:
                parameters['retired'] = retired
            if detail is not None:
                parameters['detail'] = detail

        return NodeCollection.convert_with_links(nodes, limit,
                                                 url=resource_url,
                                                 fields=fields,
                                                 **parameters)

    def _get_nodes_by_instance(self, instance_uuid):
        """Retrieve a node by its instance uuid.

        It returns a list with the node, or an empty list if no node is found.
        """
        try:
            node = objects.Node.get_by_instance_uuid(api.request.context,
                                                     instance_uuid)
            return [node]
        except exception.InstanceNotFound:
            return []

    def _check_names_acceptable(self, names, error_msg):
        """Checks all node 'name's are acceptable, it does not return a value.

        This function will raise an exception for unacceptable names.

        :param names: list of node names to check
        :param error_msg: error message in case of exception.ClientSideError,
            should contain %(name)s placeholder.
        :raises: exception.NotAcceptable
        :raises: exception.ClientSideError
        """
        if not api_utils.allow_node_logical_names():
            raise exception.NotAcceptable()

        reserved_names = get_nodes_controller_reserved_names()
        for name in names:
            if not api_utils.is_valid_node_name(name):
                raise exception.ClientSideError(
                    error_msg % {'name': name},
                    status_code=http_client.BAD_REQUEST)
            if name in reserved_names:
                raise exception.ClientSideError(
                    'The word "%(name)s" is reserved and can not be used as a '
                    'node name. Reserved words are: %(reserved)s.' %
                    {'name': name,
                     'reserved': ', '.join(reserved_names)},
                    status_code=http_client.BAD_REQUEST)

    def _update_changed_fields(self, node, rpc_node):
        """Update rpc_node based on changed fields in a node. """
        # NOTE(mgoddard): Traits cannot be updated via a node PATCH.
        fields = set(objects.Node.fields) - {'traits'}
        for field in fields:
            try:
                patch_val = getattr(node, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API, except
                # chassis_id. chassis_id would have been set (instead of
                # chassis_uuid) if the node belongs to a chassis. This
                # AttributeError is raised for chassis_id only if
                # 1. the node doesn't belong to a chassis or
                # 2. the node belonged to a chassis but is now being removed
                # from the chassis.
                if (field == "chassis_id" and rpc_node[field] is not None):
                    if not api_utils.allow_remove_chassis_uuid():
                        raise exception.NotAcceptable()
                    rpc_node[field] = None
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            # conductor_group is case-insensitive, and we use it to calculate
            # the conductor to send an update to. lowercase it here instead
            # of just before saving so we calculate correctly.
            if field == 'conductor_group':
                patch_val = patch_val.lower()
            if rpc_node[field] != patch_val:
                rpc_node[field] = patch_val

    def _check_driver_changed_and_console_enabled(self, rpc_node, node_ident):
        """Checks if the driver and the console is enabled in a node.

        If it does, is necessary to prevent updating it because the new driver
        will not be able to stop a console started by the previous one.

        :param rpc_node: RPC Node object to be verified.
        :param node_ident: the UUID or logical name of a node.
        :raises: exception.ClientSideError
        """
        delta = rpc_node.obj_what_changed()
        if 'driver' in delta and rpc_node.console_enabled:
            raise exception.ClientSideError(
                _("Node %s can not update the driver while the console is "
                  "enabled. Please stop the console first.") % node_ident,
                status_code=http_client.CONFLICT)

    @METRICS.timer('NodesController.get_all')
    @expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean,
                   types.boolean, types.boolean, str, types.uuid, int, str,
                   str, str, types.listtype, str, str, str, types.boolean,
                   str, str, str, str, str)
    def get_all(self, chassis_uuid=None, instance_uuid=None, associated=None,
                maintenance=None, retired=None, provision_state=None,
                marker=None, limit=None, sort_key='id', sort_dir='asc',
                driver=None, fields=None, resource_class=None, fault=None,
                conductor_group=None, detail=None, conductor=None,
                owner=None, description_contains=None, lessee=None,
                project=None):
        """Retrieve a list of nodes.

        :param chassis_uuid: Optional UUID of a chassis, to get only nodes
                             for that chassis.
        :param instance_uuid: Optional UUID of an instance, to find the node
                              associated with that instance.
        :param associated: Optional boolean whether to return a list of
                           associated or unassociated nodes. May be combined
                           with other parameters.
        :param maintenance: Optional boolean value that indicates whether
                            to get nodes in maintenance mode ("True"), or not
                            in maintenance mode ("False").
        :param retired: Optional boolean value that indicates whether
                        to get retired nodes.
        :param provision_state: Optional string value to get only nodes in
                                that provision state.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result.
This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param driver: Optional string value to get only nodes using that driver. :param resource_class: Optional string value to get only nodes with that resource_class. :param conductor_group: Optional string value to get only nodes with that conductor_group. :param conductor: Optional string value to get only nodes managed by that conductor. :param owner: Optional string value that set the owner whose nodes are to be retrurned. :param lessee: Optional string value that set the lessee whose nodes are to be returned. :param project: Optional string value that set the project - lessee or owner - whose nodes are to be returned. :param fields: Optional, a list with a specified set of fields of the resource to be returned. :param fault: Optional string value to get only nodes with that fault. :param description_contains: Optional string value to get only nodes with description field contains matching value. 
""" project = api_utils.check_list_policy('node', project) api_utils.check_allow_specify_fields(fields) api_utils.check_allowed_fields(fields) api_utils.check_allowed_fields([sort_key]) api_utils.check_for_invalid_state_and_allow_filter(provision_state) api_utils.check_allow_specify_driver(driver) api_utils.check_allow_specify_resource_class(resource_class) api_utils.check_allow_filter_by_fault(fault) api_utils.check_allow_filter_by_conductor_group(conductor_group) api_utils.check_allow_filter_by_conductor(conductor) api_utils.check_allow_filter_by_owner(owner) api_utils.check_allow_filter_by_lessee(lessee) fields = api_utils.get_request_return_fields(fields, detail, _DEFAULT_RETURN_FIELDS) extra_args = {'description_contains': description_contains} return self._get_nodes_collection(chassis_uuid, instance_uuid, associated, maintenance, retired, provision_state, marker, limit, sort_key, sort_dir, driver=driver, resource_class=resource_class, fields=fields, fault=fault, conductor_group=conductor_group, detail=detail, conductor=conductor, owner=owner, lessee=lessee, project=project, **extra_args) @METRICS.timer('NodesController.detail') @expose.expose(NodeCollection, types.uuid, types.uuid, types.boolean, types.boolean, types.boolean, str, types.uuid, int, str, str, str, str, str, str, str, str, str, str, str) def detail(self, chassis_uuid=None, instance_uuid=None, associated=None, maintenance=None, retired=None, provision_state=None, marker=None, limit=None, sort_key='id', sort_dir='asc', driver=None, resource_class=None, fault=None, conductor_group=None, conductor=None, owner=None, description_contains=None, lessee=None, project=None): """Retrieve a list of nodes with detail. :param chassis_uuid: Optional UUID of a chassis, to get only nodes for that chassis. :param instance_uuid: Optional UUID of an instance, to find the node associated with that instance. :param associated: Optional boolean whether to return a list of associated or unassociated nodes. 
May be combined with other parameters. :param maintenance: Optional boolean value that indicates whether to get nodes in maintenance mode ("True"), or not in maintenance mode ("False"). :param retired: Optional boolean value that indicates whether to get nodes which are retired. :param provision_state: Optional string value to get only nodes in that provision state. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param driver: Optional string value to get only nodes using that driver. :param resource_class: Optional string value to get only nodes with that resource_class. :param fault: Optional string value to get only nodes with that fault. :param conductor_group: Optional string value to get only nodes with that conductor_group. :param owner: Optional string value that sets the owner whose nodes are to be returned. :param lessee: Optional string value that sets the lessee whose nodes are to be returned. :param project: Optional string value that sets the project - lessee or owner - whose nodes are to be returned. :param description_contains: Optional string value to get only nodes whose description field contains the matching value. 
""" project = api_utils.check_list_policy('node', project) api_utils.check_for_invalid_state_and_allow_filter(provision_state) api_utils.check_allow_specify_driver(driver) api_utils.check_allow_specify_resource_class(resource_class) api_utils.check_allow_filter_by_fault(fault) api_utils.check_allow_filter_by_conductor_group(conductor_group) api_utils.check_allow_filter_by_owner(owner) api_utils.check_allow_filter_by_lessee(lessee) api_utils.check_allowed_fields([sort_key]) # /detail should only work against collections parent = api.request.path.split('/')[:-1][-1] if parent != "nodes": raise exception.HTTPNotFound() api_utils.check_allow_filter_by_conductor(conductor) resource_url = '/'.join(['nodes', 'detail']) extra_args = {'description_contains': description_contains} return self._get_nodes_collection(chassis_uuid, instance_uuid, associated, maintenance, retired, provision_state, marker, limit, sort_key, sort_dir, driver=driver, resource_class=resource_class, resource_url=resource_url, fault=fault, conductor_group=conductor_group, conductor=conductor, owner=owner, lessee=lessee, project=project, **extra_args) @METRICS.timer('NodesController.validate') @expose.expose(str, types.uuid_or_name, types.uuid) def validate(self, node=None, node_uuid=None): """Validate the driver interfaces, using the node's UUID or name. Note that the 'node_uuid' interface is deprecated in favour of the 'node' interface :param node: UUID or name of a node. :param node_uuid: UUID of a node. """ if node is not None: # We're invoking this interface using positional notation, or # explicitly using 'node'. Try and determine which one. 
if (not api_utils.allow_node_logical_names() and not uuidutils.is_uuid_like(node)): raise exception.NotAcceptable() rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:validate', node_uuid or node) topic = api.request.rpcapi.get_topic_for(rpc_node) return api.request.rpcapi.validate_driver_interfaces( api.request.context, rpc_node.uuid, topic) @METRICS.timer('NodesController.get_one') @expose.expose(Node, types.uuid_or_name, types.listtype) def get_one(self, node_ident, fields=None): """Retrieve information about the given node. :param node_ident: UUID or logical name of a node. :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ if self.from_chassis: raise exception.OperationNotPermitted() rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:get', node_ident, with_suffix=True) api_utils.check_allow_specify_fields(fields) api_utils.check_allowed_fields(fields) return Node.convert_with_links(rpc_node, fields=fields) @METRICS.timer('NodesController.post') @expose.expose(Node, body=Node, status_code=http_client.CREATED) def post(self, node): """Create a new node. :param node: a node within the request body. """ if self.from_chassis: raise exception.OperationNotPermitted() context = api.request.context cdict = context.to_policy_values() policy.authorize('baremetal:node:create', cdict, cdict) if node.conductor is not wtypes.Unset: msg = _("Cannot specify conductor on node creation.") raise exception.Invalid(msg) reject_fields_in_newer_versions(node) if node.traits is not wtypes.Unset: msg = _("Cannot specify node traits on node creation. Traits must " "be set via the node traits API.") raise exception.Invalid(msg) if (node.protected is not wtypes.Unset or node.protected_reason is not wtypes.Unset): msg = _("Cannot specify protected or protected_reason on node " "creation. 
These fields can only be set for active nodes") raise exception.Invalid(msg) if (node.description is not wtypes.Unset and len(node.description) > _NODE_DESCRIPTION_MAX_LENGTH): msg = _("Cannot create node with description exceeding %s " "characters") % _NODE_DESCRIPTION_MAX_LENGTH raise exception.Invalid(msg) if node.allocation_uuid is not wtypes.Unset: msg = _("Allocation UUID cannot be specified, use allocations API") raise exception.Invalid(msg) # NOTE(deva): get_topic_for checks if node.driver is in the hash ring # and raises NoValidHost if it is not. # We need to ensure that node has a UUID before it can # be mapped onto the hash ring. if not node.uuid: node.uuid = uuidutils.generate_uuid() try: topic = api.request.rpcapi.get_topic_for(node) except exception.NoValidHost as e: # NOTE(deva): convert from 404 to 400 because client can see # list of available drivers and shouldn't request # one that doesn't exist. e.code = http_client.BAD_REQUEST raise if node.name != wtypes.Unset and node.name is not None: error_msg = _("Cannot create node with invalid name '%(name)s'") self._check_names_acceptable([node.name], error_msg) node.provision_state = api_utils.initial_node_provision_state() if not node.resource_class: node.resource_class = CONF.default_resource_class new_node = objects.Node(context, **node.as_dict()) notify.emit_start_notification(context, new_node, 'create', chassis_uuid=node.chassis_uuid) with notify.handle_error_notification(context, new_node, 'create', chassis_uuid=node.chassis_uuid): new_node = api.request.rpcapi.create_node(context, new_node, topic) # Set the HTTP Location Header api.response.location = link.build_url('nodes', new_node.uuid) api_node = Node.convert_with_links(new_node) notify.emit_end_notification(context, new_node, 'create', chassis_uuid=api_node.chassis_uuid) return api_node def _validate_patch(self, patch, reset_interfaces): if self.from_chassis: raise exception.OperationNotPermitted() reject_patch_in_newer_versions(patch) 
traits = api_utils.get_patch_values(patch, '/traits') if traits: msg = _("Cannot update node traits via node patch. Node traits " "should be updated via the node traits API.") raise exception.Invalid(msg) driver = api_utils.get_patch_values(patch, '/driver') if reset_interfaces and not driver: msg = _("The reset_interfaces parameter can only be used when " "changing the node's driver.") raise exception.Invalid(msg) description = api_utils.get_patch_values(patch, '/description') if description and len(description[0]) > _NODE_DESCRIPTION_MAX_LENGTH: msg = _("Cannot update node with description exceeding %s " "characters") % _NODE_DESCRIPTION_MAX_LENGTH raise exception.Invalid(msg) def _authorize_patch_and_get_node(self, node_ident, patch): # deal with attribute-specific policy rules policy_checks = [] generic_update = False for p in patch: if p['path'].startswith('/instance_info'): policy_checks.append('baremetal:node:update_instance_info') elif p['path'].startswith('/extra'): policy_checks.append('baremetal:node:update_extra') else: generic_update = True # always do at least one check if generic_update or not policy_checks: policy_checks.append('baremetal:node:update') return api_utils.check_multiple_node_policies_and_retrieve( policy_checks, node_ident, with_suffix=True) @METRICS.timer('NodesController.patch') @wsme.validate(types.uuid, types.boolean, [NodePatchType]) @expose.expose(Node, types.uuid_or_name, types.boolean, body=[NodePatchType]) def patch(self, node_ident, reset_interfaces=None, patch=None): """Update an existing node. :param node_ident: UUID or logical name of a node. :param reset_interfaces: whether to reset hardware interfaces to their defaults. Only valid when updating the driver field. :param patch: a json PATCH document to apply to this node. 
""" if (reset_interfaces is not None and not api_utils.allow_reset_interfaces()): raise exception.NotAcceptable() self._validate_patch(patch, reset_interfaces) context = api.request.context rpc_node = self._authorize_patch_and_get_node(node_ident, patch) remove_inst_uuid_patch = [{'op': 'remove', 'path': '/instance_uuid'}] if rpc_node.maintenance and patch == remove_inst_uuid_patch: LOG.debug('Removing instance uuid %(instance)s from node %(node)s', {'instance': rpc_node.instance_uuid, 'node': rpc_node.uuid}) # Check if node is transitioning state, although nodes in some states # can be updated. elif (rpc_node.target_provision_state and rpc_node.provision_state not in ir_states.UPDATE_ALLOWED_STATES): msg = _("Node %s can not be updated while a state transition " "is in progress.") raise exception.ClientSideError( msg % node_ident, status_code=http_client.CONFLICT) elif (rpc_node.provision_state == ir_states.INSPECTING and api_utils.allow_inspect_wait_state()): msg = _('Cannot update node "%(node)s" while it is in state ' '"%(state)s".') % {'node': rpc_node.uuid, 'state': ir_states.INSPECTING} raise exception.ClientSideError(msg, status_code=http_client.CONFLICT) elif api_utils.get_patch_values(patch, '/owner'): # check if updating a provisioned node's owner is allowed if rpc_node.provision_state == ir_states.ACTIVE: try: api_utils.check_owner_policy( 'node', 'baremetal:node:update_owner_provisioned', rpc_node['owner'], rpc_node['lessee']) except exception.HTTPForbidden: msg = _('Cannot update owner of node "%(node)s" while it ' 'is in state "%(state)s".') % { 'node': rpc_node.uuid, 'state': ir_states.ACTIVE} raise exception.ClientSideError( msg, status_code=http_client.CONFLICT) # check if node has an associated allocation with an owner if rpc_node.allocation_id: try: allocation = objects.Allocation.get_by_id( context, rpc_node.allocation_id) if allocation.owner is not None: msg = _('Cannot update owner of node "%(node)s" while ' 'it is allocated to an allocation 
with an ' ' owner.') % {'node': rpc_node.uuid} raise exception.ClientSideError( msg, status_code=http_client.CONFLICT) except exception.AllocationNotFound: pass names = api_utils.get_patch_values(patch, '/name') if len(names): error_msg = (_("Node %s: Cannot change name to invalid name ") % node_ident) error_msg += "'%(name)s'" self._check_names_acceptable(names, error_msg) node_dict = rpc_node.as_dict() # NOTE(lucasagomes): # 1) Remove chassis_id because it's an internal value and # not present in the API object # 2) Add chassis_uuid node_dict['chassis_uuid'] = node_dict.pop('chassis_id', None) node = Node(**api_utils.apply_jsonpatch(node_dict, patch)) self._update_changed_fields(node, rpc_node) # NOTE(deva): we calculate the rpc topic here in case node.driver # has changed, so that update is sent to the # new conductor, not the old one which may fail to # load the new driver. try: topic = api.request.rpcapi.get_topic_for(rpc_node) except exception.NoValidHost as e: # NOTE(deva): convert from 404 to 400 because client can see # list of available drivers and shouldn't request # one that doesn't exist. e.code = http_client.BAD_REQUEST raise self._check_driver_changed_and_console_enabled(rpc_node, node_ident) notify.emit_start_notification(context, rpc_node, 'update', chassis_uuid=node.chassis_uuid) with notify.handle_error_notification(context, rpc_node, 'update', chassis_uuid=node.chassis_uuid): new_node = api.request.rpcapi.update_node(context, rpc_node, topic, reset_interfaces) api_node = Node.convert_with_links(new_node) notify.emit_end_notification(context, new_node, 'update', chassis_uuid=api_node.chassis_uuid) return api_node @METRICS.timer('NodesController.delete') @expose.expose(None, types.uuid_or_name, status_code=http_client.NO_CONTENT) def delete(self, node_ident): """Delete a node. :param node_ident: UUID or logical name of a node. 
""" if self.from_chassis: raise exception.OperationNotPermitted() context = api.request.context rpc_node = api_utils.check_node_policy_and_retrieve( 'baremetal:node:delete', node_ident, with_suffix=True) chassis_uuid = _get_chassis_uuid(rpc_node) notify.emit_start_notification(context, rpc_node, 'delete', chassis_uuid=chassis_uuid) with notify.handle_error_notification(context, rpc_node, 'delete', chassis_uuid=chassis_uuid): try: topic = api.request.rpcapi.get_topic_for(rpc_node) except exception.NoValidHost as e: e.code = http_client.BAD_REQUEST raise api.request.rpcapi.destroy_node(context, rpc_node.uuid, topic) notify.emit_end_notification(context, rpc_node, 'delete', chassis_uuid=chassis_uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/notification_utils.py0000644000175000017500000001651100000000000025570 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib from oslo_config import cfg from oslo_log import log from oslo_messaging import exceptions as oslo_msg_exc from oslo_utils import excutils from oslo_versionedobjects import exception as oslo_vo_exc from wsme import types as wtypes from ironic.common import exception from ironic.common.i18n import _ from ironic.objects import allocation as allocation_objects from ironic.objects import chassis as chassis_objects from ironic.objects import deploy_template as deploy_template_objects from ironic.objects import fields from ironic.objects import node as node_objects from ironic.objects import notification from ironic.objects import port as port_objects from ironic.objects import portgroup as portgroup_objects from ironic.objects import volume_connector as volume_connector_objects from ironic.objects import volume_target as volume_target_objects LOG = log.getLogger(__name__) CONF = cfg.CONF CRUD_NOTIFY_OBJ = { 'allocation': (allocation_objects.AllocationCRUDNotification, allocation_objects.AllocationCRUDPayload), 'chassis': (chassis_objects.ChassisCRUDNotification, chassis_objects.ChassisCRUDPayload), 'deploytemplate': (deploy_template_objects.DeployTemplateCRUDNotification, deploy_template_objects.DeployTemplateCRUDPayload), 'node': (node_objects.NodeCRUDNotification, node_objects.NodeCRUDPayload), 'port': (port_objects.PortCRUDNotification, port_objects.PortCRUDPayload), 'portgroup': (portgroup_objects.PortgroupCRUDNotification, portgroup_objects.PortgroupCRUDPayload), 'volumeconnector': (volume_connector_objects.VolumeConnectorCRUDNotification, volume_connector_objects.VolumeConnectorCRUDPayload), 'volumetarget': (volume_target_objects.VolumeTargetCRUDNotification, volume_target_objects.VolumeTargetCRUDPayload), } def _emit_api_notification(context, obj, action, level, status, **kwargs): """Helper for emitting API notifications. :param context: request context. :param obj: resource rpc object. :param action: Action string to go in the EventType. 
:param level: Notification level. One of `ironic.objects.fields.NotificationLevel.ALL` :param status: Status to go in the EventType. One of `ironic.objects.fields.NotificationStatus.ALL` :param kwargs: kwargs to use when creating the notification payload. """ resource = obj.__class__.__name__.lower() # value wsme.Unset can be passed from API representation of resource extra_args = {k: (v if v != wtypes.Unset else None) for k, v in kwargs.items()} try: try: if action == 'maintenance_set': notification_method = node_objects.NodeMaintenanceNotification payload_method = node_objects.NodePayload elif resource not in CRUD_NOTIFY_OBJ: notification_name = payload_name = _("is not defined") raise KeyError(_("Unsupported resource: %s") % resource) else: notification_method, payload_method = CRUD_NOTIFY_OBJ[resource] notification_name = notification_method.__name__ payload_name = payload_method.__name__ finally: # Prepare our exception message just in case exception_values = {"resource": resource, "uuid": obj.uuid, "action": action, "status": status, "level": level, "notification_method": notification_name, "payload_method": payload_name} exception_message = (_("Failed to send baremetal.%(resource)s." 
"%(action)s.%(status)s notification for " "%(resource)s %(uuid)s with level " "%(level)s, notification method " "%(notification_method)s, payload method " "%(payload_method)s, error %(error)s")) payload = payload_method(obj, **extra_args) if resource == 'node': notification.mask_secrets(payload) notification_method( publisher=notification.NotificationPublisher( service='ironic-api', host=CONF.host), event_type=notification.EventType( object=resource, action=action, status=status), level=level, payload=payload).emit(context) except (exception.NotificationSchemaObjectError, exception.NotificationSchemaKeyError, exception.NotificationPayloadError, oslo_msg_exc.MessageDeliveryFailure, oslo_vo_exc.VersionedObjectsException) as e: exception_values['error'] = e LOG.warning(exception_message, exception_values) except Exception as e: exception_values['error'] = e LOG.exception(exception_message, exception_values) def emit_start_notification(context, obj, action, **kwargs): """Helper for emitting API 'start' notifications. :param context: request context. :param obj: resource rpc object. :param action: Action string to go in the EventType. :param kwargs: kwargs to use when creating the notification payload. """ _emit_api_notification(context, obj, action, fields.NotificationLevel.INFO, fields.NotificationStatus.START, **kwargs) @contextlib.contextmanager def handle_error_notification(context, obj, action, **kwargs): """Context manager to handle any error notifications. :param context: request context. :param obj: resource rpc object. :param action: Action string to go in the EventType. :param kwargs: kwargs to use when creating the notification payload. """ try: yield except Exception: with excutils.save_and_reraise_exception(): _emit_api_notification(context, obj, action, fields.NotificationLevel.ERROR, fields.NotificationStatus.ERROR, **kwargs) def emit_end_notification(context, obj, action, **kwargs): """Helper for emitting API 'end' notifications. 
:param context: request context. :param obj: resource rpc object. :param action: Action string to go in the EventType. :param kwargs: kwargs to use when creating the notification payload. """ _emit_api_notification(context, obj, action, fields.NotificationLevel.INFO, fields.NotificationStatus.END, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/port.py0000644000175000017500000010221300000000000022641 0ustar00coreycorey00000000000000# Copyright 2013 UnitedStack Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from http import client as http_client from ironic_lib import metrics_utils from oslo_log import log from oslo_utils import uuidutils from pecan import rest import wsme from wsme import types as wtypes from ironic import api from ironic.api.controllers import base from ironic.api.controllers import link from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import notification_utils as notify from ironic.api.controllers.v1 import types from ironic.api.controllers.v1 import utils as api_utils from ironic.api import expose from ironic.common import exception from ironic.common.i18n import _ from ironic.common import policy from ironic.common import states as ir_states from ironic import objects METRICS = metrics_utils.get_metrics_logger(__name__) LOG = log.getLogger(__name__) _DEFAULT_RETURN_FIELDS = ('uuid', 'address') def hide_fields_in_newer_versions(obj): # if requested version is < 1.18, hide internal_info field if not api_utils.allow_port_internal_info(): obj.internal_info = wsme.Unset # if requested version is < 1.19, hide local_link_connection and # pxe_enabled fields if not api_utils.allow_port_advanced_net_fields(): obj.pxe_enabled = wsme.Unset obj.local_link_connection = wsme.Unset # if requested version is < 1.24, hide portgroup_uuid field if not api_utils.allow_portgroups_subcontrollers(): obj.portgroup_uuid = wsme.Unset # if requested version is < 1.34, hide physical_network field. if not api_utils.allow_port_physical_network(): obj.physical_network = wsme.Unset # if requested version is < 1.53, hide is_smartnic field. if not api_utils.allow_port_is_smartnic(): obj.is_smartnic = wsme.Unset class Port(base.APIBase): """API representation of a port. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a port. 
""" _node_uuid = None _portgroup_uuid = None def _get_node_uuid(self): return self._node_uuid def _set_node_uuid(self, value): if value and self._node_uuid != value: try: # FIXME(comstud): One should only allow UUID here, but # there seems to be a bug in that tests are passing an # ID. See bug #1301046 for more details. node = objects.Node.get(api.request.context, value) self._node_uuid = node.uuid # NOTE(lucasagomes): Create the node_id attribute on-the-fly # to satisfy the api -> rpc object # conversion. self.node_id = node.id except exception.NodeNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for a POST request to create a Port e.code = http_client.BAD_REQUEST # BadRequest raise elif value == wtypes.Unset: self._node_uuid = wtypes.Unset def _get_portgroup_uuid(self): return self._portgroup_uuid def _set_portgroup_uuid(self, value): if value and self._portgroup_uuid != value: if not api_utils.allow_portgroups_subcontrollers(): self._portgroup_uuid = wtypes.Unset return try: portgroup = objects.Portgroup.get(api.request.context, value) if portgroup.node_id != self.node_id: raise exception.BadRequest(_('Port can not be added to a ' 'portgroup belonging to a ' 'different node.')) self._portgroup_uuid = portgroup.uuid # NOTE(lucasagomes): Create the portgroup_id attribute # on-the-fly to satisfy the api -> # rpc object conversion. 
self.portgroup_id = portgroup.id except exception.PortgroupNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for a POST request to create a Port e.code = http_client.BAD_REQUEST # BadRequest raise e elif value == wtypes.Unset: self._portgroup_uuid = wtypes.Unset elif value is None and api_utils.allow_portgroups_subcontrollers(): # This is to output portgroup_uuid field if API version allows this self._portgroup_uuid = None uuid = types.uuid """Unique UUID for this port""" address = wsme.wsattr(types.macaddress, mandatory=True) """MAC Address for this port""" extra = {str: types.jsontype} """This port's meta data""" internal_info = wsme.wsattr({str: types.jsontype}, readonly=True) """This port's internal information maintained by ironic""" node_uuid = wsme.wsproperty(types.uuid, _get_node_uuid, _set_node_uuid, mandatory=True) """The UUID of the node this port belongs to""" portgroup_uuid = wsme.wsproperty(types.uuid, _get_portgroup_uuid, _set_portgroup_uuid, mandatory=False) """The UUID of the portgroup this port belongs to""" pxe_enabled = types.boolean """Indicates whether pxe is enabled or disabled on the node.""" local_link_connection = types.locallinkconnectiontype """The port binding profile for the port""" physical_network = wtypes.StringType(max_length=64) """The name of the physical network to which this port is connected.""" links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated port links""" is_smartnic = types.boolean """Indicates whether this port is a Smart NIC port.""" def __init__(self, **kwargs): self.fields = [] fields = list(objects.Port.fields) # NOTE(lucasagomes): node_uuid is not part of objects.Port.fields # because it's an API-only attribute fields.append('node_uuid') # NOTE: portgroup_uuid is not part of objects.Port.fields # because it's an API-only attribute fields.append('portgroup_uuid') for field in fields: # Add fields we expose. 
if hasattr(self, field): self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) # NOTE(lucasagomes): node_id is an attribute created on-the-fly # by _set_node_uuid(), it needs to be present in the fields so # that as_dict() will contain node_id field when converting it # before saving it in the database. self.fields.append('node_id') setattr(self, 'node_uuid', kwargs.get('node_id', wtypes.Unset)) # NOTE: portgroup_id is an attribute created on-the-fly # by _set_portgroup_uuid(), it needs to be present in the fields so # that as_dict() will contain portgroup_id field when converting it # before saving it in the database. self.fields.append('portgroup_id') setattr(self, 'portgroup_uuid', kwargs.get('portgroup_id', wtypes.Unset)) @classmethod def convert_with_links(cls, rpc_port, fields=None, sanitize=True): port = Port(**rpc_port.as_dict()) port._validate_fields(fields) url = api.request.public_url port.links = [link.Link.make_link('self', url, 'ports', port.uuid), link.Link.make_link('bookmark', url, 'ports', port.uuid, bookmark=True) ] if not sanitize: return port port.sanitize(fields=fields) return port def _validate_fields(self, fields=None): if fields is not None: api_utils.check_for_invalid_fields(fields, self.as_dict()) def sanitize(self, fields=None): """Removes sensitive and unrequested data. Will only keep the fields specified in the ``fields`` parameter. 
:param fields: list of fields to preserve, or ``None`` to preserve them all :type fields: list of str """ hide_fields_in_newer_versions(self) if fields is not None: self.unset_fields_except(fields) # never expose the node_id attribute self.node_id = wtypes.Unset # never expose the portgroup_id attribute self.portgroup_id = wtypes.Unset @classmethod def sample(cls, expand=True): time = datetime.datetime(2000, 1, 1, 12, 0, 0) sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', address='fe:54:00:77:07:d9', extra={'foo': 'bar'}, internal_info={}, created_at=time, updated_at=time, pxe_enabled=True, local_link_connection={ 'switch_info': 'host', 'port_id': 'Gig0/1', 'switch_id': 'aa:bb:cc:dd:ee:ff'}, physical_network='physnet1', is_smartnic=False) # NOTE(lucasagomes): node_uuid getter() method look at the # _node_uuid variable sample._node_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' sample._portgroup_uuid = '037d9a52-af89-4560-b5a3-a33283295ba2' fields = None if expand else _DEFAULT_RETURN_FIELDS return cls._convert_with_links(sample, 'http://localhost:6385', fields=fields) class PortPatchType(types.JsonPatchType): _api_base = Port @staticmethod def internal_attrs(): defaults = types.JsonPatchType.internal_attrs() return defaults + ['/internal_info'] class PortCollection(collection.Collection): """API representation of a collection of ports.""" ports = [Port] """A list containing ports objects""" def __init__(self, **kwargs): self._type = 'ports' @staticmethod def convert_with_links(rpc_ports, limit, url=None, fields=None, **kwargs): collection = PortCollection() collection.ports = [] for rpc_port in rpc_ports: try: port = Port.convert_with_links(rpc_port, fields=fields, sanitize=False) except exception.NodeNotFound: # NOTE(dtantsur): node was deleted after we fetched the port # list, meaning that the port was also deleted. Skip it. 
LOG.debug('Skipping port %s as its node was deleted', rpc_port.uuid) continue except exception.PortgroupNotFound: # NOTE(dtantsur): port group was deleted after we fetched the # port list, it may mean that the port was deleted too, but # we don't know it. Pretend that the port group was removed. LOG.debug('Removing port group UUID from port %s as the port ' 'group was deleted', rpc_port.uuid) rpc_port.portgroup_id = None port = Port.convert_with_links(rpc_port, fields=fields, sanitize=False) collection.ports.append(port) collection.next = collection.get_next(limit, url=url, fields=fields, **kwargs) for item in collection.ports: item.sanitize(fields=fields) return collection @classmethod def sample(cls): sample = cls() sample.ports = [Port.sample(expand=False)] return sample class PortsController(rest.RestController): """REST controller for Ports.""" _custom_actions = { 'detail': ['GET'], } invalid_sort_key_list = ['extra', 'internal_info', 'local_link_connection'] advanced_net_fields = ['pxe_enabled', 'local_link_connection'] def __init__(self, node_ident=None, portgroup_ident=None): super(PortsController, self).__init__() self.parent_node_ident = node_ident self.parent_portgroup_ident = portgroup_ident def _get_ports_collection(self, node_ident, address, portgroup_ident, marker, limit, sort_key, sort_dir, resource_url=None, fields=None, detail=None, owner=None): limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Port.get_by_uuid(api.request.context, marker) if sort_key in self.invalid_sort_key_list: raise exception.InvalidParameterValue( _("The sort_key value %(key)s is an invalid field for " "sorting") % {'key': sort_key}) node_ident = self.parent_node_ident or node_ident portgroup_ident = self.parent_portgroup_ident or portgroup_ident if node_ident and portgroup_ident: raise exception.OperationNotPermitted() if portgroup_ident: # FIXME: Since all we need is the portgroup ID, 
we can # make this more efficient by only querying # for that column. This will get cleaned up # as we move to the object interface. portgroup = api_utils.get_rpc_portgroup(portgroup_ident) ports = objects.Port.list_by_portgroup_id(api.request.context, portgroup.id, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, owner=owner) elif node_ident: # FIXME(comstud): Since all we need is the node ID, we can # make this more efficient by only querying # for that column. This will get cleaned up # as we move to the object interface. node = api_utils.get_rpc_node(node_ident) ports = objects.Port.list_by_node_id(api.request.context, node.id, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, owner=owner) elif address: ports = self._get_ports_by_address(address, owner=owner) else: ports = objects.Port.list(api.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir, owner=owner) parameters = {} if detail is not None: parameters['detail'] = detail return PortCollection.convert_with_links(ports, limit, url=resource_url, fields=fields, sort_key=sort_key, sort_dir=sort_dir, **parameters) def _get_ports_by_address(self, address, owner=None): """Retrieve a port by its address. :param address: MAC address of a port, to get the port which has this MAC address. :returns: a list with the port, or an empty list if no port is found. """ try: port = objects.Port.get_by_address(api.request.context, address, owner=owner) return [port] except exception.PortNotFound: return [] def _check_allowed_port_fields(self, fields): """Check if fetching a particular field of a port is allowed. Check if the required version is being requested for fields that are only allowed to be fetched in a particular API version. 
:param fields: list or set of fields to check :raises: NotAcceptable if a field is not allowed """ if fields is None: return if (not api_utils.allow_port_advanced_net_fields() and set(fields).intersection(self.advanced_net_fields)): raise exception.NotAcceptable() if ('portgroup_uuid' in fields and not api_utils.allow_portgroups_subcontrollers()): raise exception.NotAcceptable() if ('physical_network' in fields and not api_utils.allow_port_physical_network()): raise exception.NotAcceptable() if ('is_smartnic' in fields and not api_utils.allow_port_is_smartnic()): raise exception.NotAcceptable() if ('local_link_connection/network_type' in fields and not api_utils.allow_local_link_connection_network_type()): raise exception.NotAcceptable() if (isinstance(fields, dict) and fields.get('local_link_connection') is not None): if (not api_utils.allow_local_link_connection_network_type() and 'network_type' in fields['local_link_connection']): raise exception.NotAcceptable() @METRICS.timer('PortsController.get_all') @expose.expose(PortCollection, types.uuid_or_name, types.uuid, types.macaddress, types.uuid, int, str, str, types.listtype, types.uuid_or_name, types.boolean) def get_all(self, node=None, node_uuid=None, address=None, marker=None, limit=None, sort_key='id', sort_dir='asc', fields=None, portgroup=None, detail=None): """Retrieve a list of ports. Note that the 'node_uuid' interface is deprecated in favour of the 'node' interface :param node: UUID or name of a node, to get only ports for that node. :param node_uuid: UUID of a node, to get only ports for that node. :param address: MAC address of a port, to get the port which has this MAC address. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. 
Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param fields: Optional, a list with a specified set of fields of the resource to be returned. :param portgroup: UUID or name of a portgroup, to get only ports for that portgroup. :raises: NotAcceptable, HTTPNotFound """ owner = api_utils.check_port_list_policy() api_utils.check_allow_specify_fields(fields) self._check_allowed_port_fields(fields) self._check_allowed_port_fields([sort_key]) if portgroup and not api_utils.allow_portgroups_subcontrollers(): raise exception.NotAcceptable() fields = api_utils.get_request_return_fields(fields, detail, _DEFAULT_RETURN_FIELDS) if not node_uuid and node: # We're invoking this interface using positional notation, or # explicitly using 'node'. Try and determine which one. # Make sure only one interface, node or node_uuid is used if (not api_utils.allow_node_logical_names() and not uuidutils.is_uuid_like(node)): raise exception.NotAcceptable() return self._get_ports_collection(node_uuid or node, address, portgroup, marker, limit, sort_key, sort_dir, fields=fields, detail=detail, owner=owner) @METRICS.timer('PortsController.detail') @expose.expose(PortCollection, types.uuid_or_name, types.uuid, types.macaddress, types.uuid, int, str, str, types.uuid_or_name) def detail(self, node=None, node_uuid=None, address=None, marker=None, limit=None, sort_key='id', sort_dir='asc', portgroup=None): """Retrieve a list of ports with detail. Note that the 'node_uuid' interface is deprecated in favour of the 'node' interface :param node: UUID or name of a node, to get only ports for that node. :param node_uuid: UUID of a node, to get only ports for that node. :param address: MAC address of a port, to get the port which has this MAC address. :param portgroup: UUID or name of a portgroup, to get only ports for that portgroup. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. 
This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :raises: NotAcceptable, HTTPNotFound """ owner = api_utils.check_port_list_policy() self._check_allowed_port_fields([sort_key]) if portgroup and not api_utils.allow_portgroups_subcontrollers(): raise exception.NotAcceptable() if not node_uuid and node: # We're invoking this interface using positional notation, or # explicitly using 'node'. Try and determine which one. # Make sure only one interface, node or node_uuid is used if (not api_utils.allow_node_logical_names() and not uuidutils.is_uuid_like(node)): raise exception.NotAcceptable() # NOTE(lucasagomes): /detail should only work against collections parent = api.request.path.split('/')[:-1][-1] if parent != "ports": raise exception.HTTPNotFound() resource_url = '/'.join(['ports', 'detail']) return self._get_ports_collection(node_uuid or node, address, portgroup, marker, limit, sort_key, sort_dir, resource_url, owner=owner) @METRICS.timer('PortsController.get_one') @expose.expose(Port, types.uuid, types.listtype) def get_one(self, port_uuid, fields=None): """Retrieve information about the given port. :param port_uuid: UUID of a port. :param fields: Optional, a list with a specified set of fields of the resource to be returned. :raises: NotAcceptable, HTTPNotFound """ if self.parent_node_ident or self.parent_portgroup_ident: raise exception.OperationNotPermitted() rpc_port, rpc_node = api_utils.check_port_policy_and_retrieve( 'baremetal:port:get', port_uuid) api_utils.check_allow_specify_fields(fields) self._check_allowed_port_fields(fields) return Port.convert_with_links(rpc_port, fields=fields) @METRICS.timer('PortsController.post') @expose.expose(Port, body=Port, status_code=http_client.CREATED) def post(self, port): """Create a new port. 
:param port: a port within the request body. :raises: NotAcceptable, HTTPNotFound, Conflict """ if self.parent_node_ident or self.parent_portgroup_ident: raise exception.OperationNotPermitted() context = api.request.context cdict = context.to_policy_values() policy.authorize('baremetal:port:create', cdict, cdict) pdict = port.as_dict() self._check_allowed_port_fields(pdict) if (port.is_smartnic and not types.locallinkconnectiontype .validate_for_smart_nic(port.local_link_connection)): raise exception.Invalid( "Smart NIC port must have port_id " "and hostname in local_link_connection") create_remotely = api.request.rpcapi.can_send_create_port() if (not create_remotely and pdict.get('portgroup_uuid')): # NOTE(mgoddard): In RPC API v1.41, port creation was moved to the # conductor service to facilitate validation of the physical # network field of ports in portgroups. During a rolling upgrade, # the RPCAPI will reject the create_port method, so we need to # create the port locally. If the port is a member of a portgroup, # we are unable to perform the validation and must reject the # request. raise exception.NotAcceptable() vif = api_utils.handle_post_port_like_extra_vif(pdict) if (pdict.get('portgroup_uuid') and (pdict.get('pxe_enabled') or vif)): rpc_pg = objects.Portgroup.get_by_uuid(context, pdict['portgroup_uuid']) if not rpc_pg.standalone_ports_supported: msg = _("Port group %s doesn't support standalone ports. 
" "This port cannot be created as a member of that " "port group because either 'extra/vif_port_id' " "was specified or 'pxe_enabled' was set to True.") raise exception.Conflict( msg % pdict['portgroup_uuid']) # NOTE(yuriyz): UUID is mandatory for notifications payload if not pdict.get('uuid'): pdict['uuid'] = uuidutils.generate_uuid() rpc_port = objects.Port(context, **pdict) rpc_node = objects.Node.get_by_id(context, rpc_port.node_id) notify_extra = {'node_uuid': port.node_uuid, 'portgroup_uuid': port.portgroup_uuid} notify.emit_start_notification(context, rpc_port, 'create', **notify_extra) with notify.handle_error_notification(context, rpc_port, 'create', **notify_extra): # NOTE(mgoddard): In RPC API v1.41, port creation was moved to the # conductor service to facilitate validation of the physical # network field of ports in portgroups. During a rolling upgrade, # the RPCAPI will reject the create_port method, so we need to # create the port locally. if create_remotely: topic = api.request.rpcapi.get_topic_for(rpc_node) new_port = api.request.rpcapi.create_port(context, rpc_port, topic) else: rpc_port.create() new_port = rpc_port notify.emit_end_notification(context, new_port, 'create', **notify_extra) # Set the HTTP Location Header api.response.location = link.build_url('ports', new_port.uuid) return Port.convert_with_links(new_port) @METRICS.timer('PortsController.patch') @wsme.validate(types.uuid, [PortPatchType]) @expose.expose(Port, types.uuid, body=[PortPatchType]) def patch(self, port_uuid, patch): """Update an existing port. :param port_uuid: UUID of a port. :param patch: a json PATCH document to apply to this port. 
:raises: NotAcceptable, HTTPNotFound """ if self.parent_node_ident or self.parent_portgroup_ident: raise exception.OperationNotPermitted() rpc_port, rpc_node = api_utils.check_port_policy_and_retrieve( 'baremetal:port:update', port_uuid) context = api.request.context fields_to_check = set() for field in (self.advanced_net_fields + ['portgroup_uuid', 'physical_network', 'is_smartnic', 'local_link_connection/network_type']): field_path = '/%s' % field if (api_utils.get_patch_values(patch, field_path) or api_utils.is_path_removed(patch, field_path)): fields_to_check.add(field) self._check_allowed_port_fields(fields_to_check) port_dict = rpc_port.as_dict() # NOTE(lucasagomes): # 1) Remove node_id because it's an internal value and # not present in the API object # 2) Add node_uuid port_dict['node_uuid'] = port_dict.pop('node_id', None) # NOTE(vsaienko): # 1) Remove portgroup_id because it's an internal value and # not present in the API object # 2) Add portgroup_uuid port_dict['portgroup_uuid'] = port_dict.pop('portgroup_id', None) port = Port(**api_utils.apply_jsonpatch(port_dict, patch)) api_utils.handle_patch_port_like_extra_vif(rpc_port, port, patch) if api_utils.is_path_removed(patch, '/portgroup_uuid'): rpc_port.portgroup_id = None # Update only the fields that have changed for field in objects.Port.fields: try: patch_val = getattr(port, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if rpc_port[field] != patch_val: rpc_port[field] = patch_val if (rpc_node.provision_state == ir_states.INSPECTING and api_utils.allow_inspect_wait_state()): msg = _('Cannot update port "%(port)s" on "%(node)s" while it is ' 'in state "%(state)s".') % {'port': rpc_port.uuid, 'node': rpc_node.uuid, 'state': ir_states.INSPECTING} raise exception.ClientSideError(msg, status_code=http_client.CONFLICT) notify_extra = {'node_uuid': rpc_node.uuid, 'portgroup_uuid': port.portgroup_uuid} 
notify.emit_start_notification(context, rpc_port, 'update', **notify_extra) with notify.handle_error_notification(context, rpc_port, 'update', **notify_extra): topic = api.request.rpcapi.get_topic_for(rpc_node) new_port = api.request.rpcapi.update_port(context, rpc_port, topic) api_port = Port.convert_with_links(new_port) notify.emit_end_notification(context, new_port, 'update', **notify_extra) return api_port @METRICS.timer('PortsController.delete') @expose.expose(None, types.uuid, status_code=http_client.NO_CONTENT) def delete(self, port_uuid): """Delete a port. :param port_uuid: UUID of a port. :raises: OperationNotPermitted, HTTPNotFound """ if self.parent_node_ident or self.parent_portgroup_ident: raise exception.OperationNotPermitted() rpc_port, rpc_node = api_utils.check_port_policy_and_retrieve( 'baremetal:port:delete', port_uuid) context = api.request.context portgroup_uuid = None if rpc_port.portgroup_id: portgroup = objects.Portgroup.get_by_id(context, rpc_port.portgroup_id) portgroup_uuid = portgroup.uuid notify_extra = {'node_uuid': rpc_node.uuid, 'portgroup_uuid': portgroup_uuid} notify.emit_start_notification(context, rpc_port, 'delete', **notify_extra) with notify.handle_error_notification(context, rpc_port, 'delete', **notify_extra): topic = api.request.rpcapi.get_topic_for(rpc_node) api.request.rpcapi.destroy_port(context, rpc_port, topic) notify.emit_end_notification(context, rpc_port, 'delete', **notify_extra) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/portgroup.py0000644000175000017500000006267200000000000023734 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from http import client as http_client from ironic_lib import metrics_utils from oslo_utils import uuidutils import pecan import wsme from wsme import types as wtypes from ironic import api from ironic.api.controllers import base from ironic.api.controllers import link from ironic.api.controllers.v1 import collection from ironic.api.controllers.v1 import notification_utils as notify from ironic.api.controllers.v1 import port from ironic.api.controllers.v1 import types from ironic.api.controllers.v1 import utils as api_utils from ironic.api import expose from ironic.common import exception from ironic.common.i18n import _ from ironic.common import policy from ironic.common import states as ir_states from ironic import objects METRICS = metrics_utils.get_metrics_logger(__name__) _DEFAULT_RETURN_FIELDS = ('uuid', 'address', 'name') class Portgroup(base.APIBase): """API representation of a portgroup. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a portgroup. """ _node_uuid = None def _get_node_uuid(self): return self._node_uuid def _set_node_uuid(self, value): if value and self._node_uuid != value: if not api_utils.allow_portgroups(): self._node_uuid = wtypes.Unset return try: node = objects.Node.get(api.request.context, value) self._node_uuid = node.uuid # NOTE: Create the node_id attribute on-the-fly # to satisfy the api -> rpc object # conversion. 
self.node_id = node.id except exception.NodeNotFound as e: # Change error code because 404 (NotFound) is inappropriate # response for a POST request to create a Portgroup e.code = http_client.BAD_REQUEST raise e elif value == wtypes.Unset: self._node_uuid = wtypes.Unset uuid = types.uuid """Unique UUID for this portgroup""" address = wsme.wsattr(types.macaddress) """MAC Address for this portgroup""" extra = {str: types.jsontype} """This portgroup's meta data""" internal_info = wsme.wsattr({str: types.jsontype}, readonly=True) """This portgroup's internal info""" node_uuid = wsme.wsproperty(types.uuid, _get_node_uuid, _set_node_uuid, mandatory=True) """The UUID of the node this portgroup belongs to""" name = wsme.wsattr(str) """The logical name for this portgroup""" links = wsme.wsattr([link.Link], readonly=True) """A list containing a self link and associated portgroup links""" standalone_ports_supported = types.boolean """Indicates whether ports of this portgroup may be used as single NIC ports""" mode = wsme.wsattr(str) """The mode for this portgroup. See linux bonding documentation for details: https://www.kernel.org/doc/Documentation/networking/bonding.txt""" properties = {str: types.jsontype} """This portgroup's properties""" ports = wsme.wsattr([link.Link], readonly=True) """Links to the collection of ports of this portgroup""" def __init__(self, **kwargs): self.fields = [] fields = list(objects.Portgroup.fields) # NOTE: node_uuid is not part of objects.Portgroup.fields # because it's an API-only attribute fields.append('node_uuid') for field in fields: # Skip fields we do not expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) # NOTE: node_id is an attribute created on-the-fly # by _set_node_uuid(), it needs to be present in the fields so # that as_dict() will contain node_id field when converting it # before saving it in the database. 
self.fields.append('node_id') setattr(self, 'node_uuid', kwargs.get('node_id', wtypes.Unset)) @staticmethod def _convert_with_links(portgroup, url, fields=None): """Add links to the portgroup.""" if fields is None: portgroup.ports = [ link.Link.make_link('self', url, 'portgroups', portgroup.uuid + "/ports"), link.Link.make_link('bookmark', url, 'portgroups', portgroup.uuid + "/ports", bookmark=True) ] # never expose the node_id attribute portgroup.node_id = wtypes.Unset portgroup.links = [link.Link.make_link('self', url, 'portgroups', portgroup.uuid), link.Link.make_link('bookmark', url, 'portgroups', portgroup.uuid, bookmark=True) ] return portgroup @classmethod def convert_with_links(cls, rpc_portgroup, fields=None, sanitize=True): """Add links to the portgroup.""" portgroup = Portgroup(**rpc_portgroup.as_dict()) if fields is not None: api_utils.check_for_invalid_fields(fields, portgroup.as_dict()) portgroup = cls._convert_with_links(portgroup, api.request.host_url, fields=fields) if not sanitize: return portgroup portgroup.sanitize(fields) return portgroup def sanitize(self, fields=None): """Removes sensitive and unrequested data. Will only keep the fields specified in the ``fields`` parameter. 
:param fields: list of fields to preserve, or ``None`` to preserve them all :type fields: list of str """ if fields is not None: self.unset_fields_except(fields) # never expose the node_id attribute self.node_id = wtypes.Unset @classmethod def sample(cls, expand=True): """Return a sample of the portgroup.""" sample = cls(uuid='a594544a-2daf-420c-8775-17a8c3e0852f', address='fe:54:00:77:07:d9', name='node1-portgroup-01', extra={'foo': 'bar'}, internal_info={'baz': 'boo'}, standalone_ports_supported=True, mode='active-backup', properties={}, created_at=datetime.datetime(2000, 1, 1, 12, 0, 0), updated_at=datetime.datetime(2000, 1, 1, 12, 0, 0)) # NOTE(lucasagomes): node_uuid getter() method look at the # _node_uuid variable sample._node_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' fields = None if expand else _DEFAULT_RETURN_FIELDS return cls._convert_with_links(sample, 'http://localhost:6385', fields=fields) class PortgroupPatchType(types.JsonPatchType): _api_base = Portgroup _extra_non_removable_attrs = {'/mode'} @staticmethod def internal_attrs(): defaults = types.JsonPatchType.internal_attrs() return defaults + ['/internal_info'] class PortgroupCollection(collection.Collection): """API representation of a collection of portgroups.""" portgroups = [Portgroup] """A list containing portgroup objects""" def __init__(self, **kwargs): self._type = 'portgroups' @staticmethod def convert_with_links(rpc_portgroups, limit, url=None, fields=None, **kwargs): collection = PortgroupCollection() collection.portgroups = [Portgroup.convert_with_links(p, fields=fields, sanitize=False) for p in rpc_portgroups] collection.next = collection.get_next(limit, url=url, fields=fields, **kwargs) for item in collection.portgroups: item.sanitize(fields=fields) return collection @classmethod def sample(cls): """Return a sample of the portgroup.""" sample = cls() sample.portgroups = [Portgroup.sample(expand=False)] return sample class PortgroupsController(pecan.rest.RestController): """REST 
controller for portgroups.""" _custom_actions = { 'detail': ['GET'], } invalid_sort_key_list = ['extra', 'internal_info', 'properties'] _subcontroller_map = { 'ports': port.PortsController, } @pecan.expose() def _lookup(self, ident, *remainder): if not api_utils.allow_portgroups(): pecan.abort(http_client.NOT_FOUND) try: ident = types.uuid_or_name.validate(ident) except exception.InvalidUuidOrName as e: pecan.abort(http_client.BAD_REQUEST, e.args[0]) if not remainder: return subcontroller = self._subcontroller_map.get(remainder[0]) if subcontroller: if api_utils.allow_portgroups_subcontrollers(): return subcontroller( portgroup_ident=ident, node_ident=self.parent_node_ident), remainder[1:] pecan.abort(http_client.NOT_FOUND) def __init__(self, node_ident=None): super(PortgroupsController, self).__init__() self.parent_node_ident = node_ident def _get_portgroups_collection(self, node_ident, address, marker, limit, sort_key, sort_dir, resource_url=None, fields=None, detail=None): """Return portgroups collection. :param node_ident: UUID or name of a node. :param address: MAC address of a portgroup. :param marker: Pagination marker for large data sets. :param limit: Maximum number of resources to return in a single result. :param sort_key: Column to sort results by. Default: id. :param sort_dir: Direction to sort. "asc" or "desc". Default: asc. :param resource_url: Optional, URL to the portgroup resource. :param fields: Optional, a list with a specified set of fields of the resource to be returned. 
""" limit = api_utils.validate_limit(limit) sort_dir = api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Portgroup.get_by_uuid(api.request.context, marker) if sort_key in self.invalid_sort_key_list: raise exception.InvalidParameterValue( _("The sort_key value %(key)s is an invalid field for " "sorting") % {'key': sort_key}) node_ident = self.parent_node_ident or node_ident if node_ident: # FIXME: Since all we need is the node ID, we can # make this more efficient by only querying # for that column. This will get cleaned up # as we move to the object interface. node = api_utils.get_rpc_node(node_ident) portgroups = objects.Portgroup.list_by_node_id( api.request.context, node.id, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) elif address: portgroups = self._get_portgroups_by_address(address) else: portgroups = objects.Portgroup.list(api.request.context, limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir) parameters = {} if detail is not None: parameters['detail'] = detail return PortgroupCollection.convert_with_links(portgroups, limit, url=resource_url, fields=fields, sort_key=sort_key, sort_dir=sort_dir, **parameters) def _get_portgroups_by_address(self, address): """Retrieve a portgroup by its address. :param address: MAC address of a portgroup, to get the portgroup which has this MAC address. :returns: a list with the portgroup, or an empty list if no portgroup is found. """ try: portgroup = objects.Portgroup.get_by_address(api.request.context, address) return [portgroup] except exception.PortgroupNotFound: return [] @METRICS.timer('PortgroupsController.get_all') @expose.expose(PortgroupCollection, types.uuid_or_name, types.macaddress, types.uuid, int, str, str, types.listtype, types.boolean) def get_all(self, node=None, address=None, marker=None, limit=None, sort_key='id', sort_dir='asc', fields=None, detail=None): """Retrieve a list of portgroups. 
:param node: UUID or name of a node, to get only portgroups for that node. :param address: MAC address of a portgroup, to get the portgroup which has this MAC address. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ if not api_utils.allow_portgroups(): raise exception.NotFound() cdict = api.request.context.to_policy_values() policy.authorize('baremetal:portgroup:get', cdict, cdict) api_utils.check_allowed_portgroup_fields(fields) api_utils.check_allowed_portgroup_fields([sort_key]) fields = api_utils.get_request_return_fields(fields, detail, _DEFAULT_RETURN_FIELDS) return self._get_portgroups_collection(node, address, marker, limit, sort_key, sort_dir, fields=fields, detail=detail) @METRICS.timer('PortgroupsController.detail') @expose.expose(PortgroupCollection, types.uuid_or_name, types.macaddress, types.uuid, int, str, str) def detail(self, node=None, address=None, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of portgroups with detail. :param node: UUID or name of a node, to get only portgroups for that node. :param address: MAC address of a portgroup, to get the portgroup which has this MAC address. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. This value cannot be larger than the value of max_limit in the [api] section of the ironic configuration, or only max_limit resources will be returned. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". 
Default: asc. """ if not api_utils.allow_portgroups(): raise exception.NotFound() cdict = api.request.context.to_policy_values() policy.authorize('baremetal:portgroup:get', cdict, cdict) api_utils.check_allowed_portgroup_fields([sort_key]) # NOTE: /detail should only work against collections parent = api.request.path.split('/')[:-1][-1] if parent != "portgroups": raise exception.HTTPNotFound() resource_url = '/'.join(['portgroups', 'detail']) return self._get_portgroups_collection( node, address, marker, limit, sort_key, sort_dir, resource_url=resource_url) @METRICS.timer('PortgroupsController.get_one') @expose.expose(Portgroup, types.uuid_or_name, types.listtype) def get_one(self, portgroup_ident, fields=None): """Retrieve information about the given portgroup. :param portgroup_ident: UUID or logical name of a portgroup. :param fields: Optional, a list with a specified set of fields of the resource to be returned. """ if not api_utils.allow_portgroups(): raise exception.NotFound() cdict = api.request.context.to_policy_values() policy.authorize('baremetal:portgroup:get', cdict, cdict) if self.parent_node_ident: raise exception.OperationNotPermitted() api_utils.check_allowed_portgroup_fields(fields) rpc_portgroup = api_utils.get_rpc_portgroup_with_suffix( portgroup_ident) return Portgroup.convert_with_links(rpc_portgroup, fields=fields) @METRICS.timer('PortgroupsController.post') @expose.expose(Portgroup, body=Portgroup, status_code=http_client.CREATED) def post(self, portgroup): """Create a new portgroup. :param portgroup: a portgroup within the request body. 
""" if not api_utils.allow_portgroups(): raise exception.NotFound() context = api.request.context cdict = context.to_policy_values() policy.authorize('baremetal:portgroup:create', cdict, cdict) if self.parent_node_ident: raise exception.OperationNotPermitted() if (not api_utils.allow_portgroup_mode_properties() and (portgroup.mode is not wtypes.Unset or portgroup.properties is not wtypes.Unset)): raise exception.NotAcceptable() if (portgroup.name and not api_utils.is_valid_logical_name(portgroup.name)): error_msg = _("Cannot create portgroup with invalid name " "'%(name)s'") % {'name': portgroup.name} raise exception.ClientSideError( error_msg, status_code=http_client.BAD_REQUEST) pg_dict = portgroup.as_dict() api_utils.handle_post_port_like_extra_vif(pg_dict) # NOTE(yuriyz): UUID is mandatory for notifications payload if not pg_dict.get('uuid'): pg_dict['uuid'] = uuidutils.generate_uuid() new_portgroup = objects.Portgroup(context, **pg_dict) notify.emit_start_notification(context, new_portgroup, 'create', node_uuid=portgroup.node_uuid) with notify.handle_error_notification(context, new_portgroup, 'create', node_uuid=portgroup.node_uuid): new_portgroup.create() notify.emit_end_notification(context, new_portgroup, 'create', node_uuid=portgroup.node_uuid) # Set the HTTP Location Header api.response.location = link.build_url('portgroups', new_portgroup.uuid) return Portgroup.convert_with_links(new_portgroup) @METRICS.timer('PortgroupsController.patch') @wsme.validate(types.uuid_or_name, [PortgroupPatchType]) @expose.expose(Portgroup, types.uuid_or_name, body=[PortgroupPatchType]) def patch(self, portgroup_ident, patch): """Update an existing portgroup. :param portgroup_ident: UUID or logical name of a portgroup. :param patch: a json PATCH document to apply to this portgroup. 
""" if not api_utils.allow_portgroups(): raise exception.NotFound() context = api.request.context cdict = context.to_policy_values() policy.authorize('baremetal:portgroup:update', cdict, cdict) if self.parent_node_ident: raise exception.OperationNotPermitted() if (not api_utils.allow_portgroup_mode_properties() and (api_utils.is_path_updated(patch, '/mode') or api_utils.is_path_updated(patch, '/properties'))): raise exception.NotAcceptable() rpc_portgroup = api_utils.get_rpc_portgroup_with_suffix( portgroup_ident) names = api_utils.get_patch_values(patch, '/name') for name in names: if (name and not api_utils.is_valid_logical_name(name)): error_msg = _("Portgroup %(portgroup)s: Cannot change name to" " invalid name '%(name)s'") % {'portgroup': portgroup_ident, 'name': name} raise exception.ClientSideError( error_msg, status_code=http_client.BAD_REQUEST) portgroup_dict = rpc_portgroup.as_dict() # NOTE: # 1) Remove node_id because it's an internal value and # not present in the API object # 2) Add node_uuid portgroup_dict['node_uuid'] = portgroup_dict.pop('node_id', None) portgroup = Portgroup(**api_utils.apply_jsonpatch(portgroup_dict, patch)) api_utils.handle_patch_port_like_extra_vif(rpc_portgroup, portgroup, patch) # Update only the fields that have changed for field in objects.Portgroup.fields: try: patch_val = getattr(portgroup, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if rpc_portgroup[field] != patch_val: rpc_portgroup[field] = patch_val rpc_node = objects.Node.get_by_id(context, rpc_portgroup.node_id) if (rpc_node.provision_state == ir_states.INSPECTING and api_utils.allow_inspect_wait_state()): msg = _('Cannot update portgroup "%(portgroup)s" on node ' '"%(node)s" while it is in state "%(state)s".') % { 'portgroup': rpc_portgroup.uuid, 'node': rpc_node.uuid, 'state': ir_states.INSPECTING} raise exception.ClientSideError(msg, status_code=http_client.CONFLICT) 
notify.emit_start_notification(context, rpc_portgroup, 'update', node_uuid=rpc_node.uuid) with notify.handle_error_notification(context, rpc_portgroup, 'update', node_uuid=rpc_node.uuid): topic = api.request.rpcapi.get_topic_for(rpc_node) new_portgroup = api.request.rpcapi.update_portgroup( context, rpc_portgroup, topic) api_portgroup = Portgroup.convert_with_links(new_portgroup) notify.emit_end_notification(context, new_portgroup, 'update', node_uuid=api_portgroup.node_uuid) return api_portgroup @METRICS.timer('PortgroupsController.delete') @expose.expose(None, types.uuid_or_name, status_code=http_client.NO_CONTENT) def delete(self, portgroup_ident): """Delete a portgroup. :param portgroup_ident: UUID or logical name of a portgroup. """ if not api_utils.allow_portgroups(): raise exception.NotFound() context = api.request.context cdict = context.to_policy_values() policy.authorize('baremetal:portgroup:delete', cdict, cdict) if self.parent_node_ident: raise exception.OperationNotPermitted() rpc_portgroup = api_utils.get_rpc_portgroup_with_suffix( portgroup_ident) rpc_node = objects.Node.get_by_id(api.request.context, rpc_portgroup.node_id) notify.emit_start_notification(context, rpc_portgroup, 'delete', node_uuid=rpc_node.uuid) with notify.handle_error_notification(context, rpc_portgroup, 'delete', node_uuid=rpc_node.uuid): topic = api.request.rpcapi.get_topic_for(rpc_node) api.request.rpcapi.destroy_portgroup(context, rpc_portgroup, topic) notify.emit_end_notification(context, rpc_portgroup, 'delete', node_uuid=rpc_node.uuid) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/ramdisk.py0000644000175000017500000002235500000000000023317 0ustar00coreycorey00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client from oslo_config import cfg from oslo_log import log from pecan import rest from ironic import api from ironic.api.controllers import base from ironic.api.controllers.v1 import node as node_ctl from ironic.api.controllers.v1 import types from ironic.api.controllers.v1 import utils as api_utils from ironic.api import expose from ironic.common import exception from ironic.common.i18n import _ from ironic.common import policy from ironic.common import states from ironic.common import utils from ironic import objects CONF = cfg.CONF LOG = log.getLogger(__name__) _LOOKUP_RETURN_FIELDS = ('uuid', 'properties', 'instance_info', 'driver_internal_info') def config(token): return { 'metrics': { 'backend': CONF.metrics.agent_backend, 'prepend_host': CONF.metrics.agent_prepend_host, 'prepend_uuid': CONF.metrics.agent_prepend_uuid, 'prepend_host_reverse': CONF.metrics.agent_prepend_host_reverse, 'global_prefix': CONF.metrics.agent_global_prefix }, 'metrics_statsd': { 'statsd_host': CONF.metrics_statsd.agent_statsd_host, 'statsd_port': CONF.metrics_statsd.agent_statsd_port }, 'heartbeat_timeout': CONF.api.ramdisk_heartbeat_timeout, 'agent_token': token, # Not an API version based indicator, passing as configuration # as the signifigants indicates support should also be present. 
'agent_token_required': CONF.require_agent_token, } class LookupResult(base.APIBase): """API representation of the node lookup result.""" node = node_ctl.Node """The short node representation.""" config = {str: types.jsontype} """The configuration to pass to the ramdisk.""" @classmethod def sample(cls): return cls(node=node_ctl.Node.sample(), config={'heartbeat_timeout': 600}) @classmethod def convert_with_links(cls, node): token = node.driver_internal_info.get('agent_secret_token') node = node_ctl.Node.convert_with_links(node, _LOOKUP_RETURN_FIELDS) return cls(node=node, config=config(token)) class LookupController(rest.RestController): """Controller handling node lookup for a deploy ramdisk.""" @property def lookup_allowed_states(self): if CONF.deploy.fast_track: return states.FASTTRACK_LOOKUP_ALLOWED_STATES return states.LOOKUP_ALLOWED_STATES @expose.expose(LookupResult, types.listtype, types.uuid) def get_all(self, addresses=None, node_uuid=None): """Look up a node by its MAC addresses and optionally UUID. If the "restrict_lookup" option is set to True (the default), limit the search to nodes in certain transient states (e.g. deploy wait). :param addresses: list of MAC addresses for a node. :param node_uuid: UUID of a node. :raises: NotFound if requested API version does not allow this endpoint. :raises: NotFound if suitable node was not found or node's provision state is not allowed for the lookup. :raises: IncompleteLookup if neither node UUID nor any valid MAC address was provided. 
""" if not api_utils.allow_ramdisk_endpoints(): raise exception.NotFound() cdict = api.request.context.to_policy_values() policy.authorize('baremetal:driver:ipa_lookup', cdict, cdict) # Validate the list of MAC addresses if addresses is None: addresses = [] valid_addresses = [] invalid_addresses = [] for addr in addresses: try: mac = utils.validate_and_normalize_mac(addr) valid_addresses.append(mac) except exception.InvalidMAC: invalid_addresses.append(addr) if invalid_addresses: node_log = ('' if not node_uuid else '(Node UUID: %s)' % node_uuid) LOG.warning('The following MAC addresses "%(addrs)s" are ' 'invalid and will be ignored by the lookup ' 'request %(node)s', {'addrs': ', '.join(invalid_addresses), 'node': node_log}) if not valid_addresses and not node_uuid: raise exception.IncompleteLookup() try: if node_uuid: node = objects.Node.get_by_uuid( api.request.context, node_uuid) else: node = objects.Node.get_by_port_addresses( api.request.context, valid_addresses) except exception.NotFound: # NOTE(dtantsur): we are reraising the same exception to make sure # we don't disclose the difference between nodes that are not found # at all and nodes in a wrong state by different error messages. 
raise exception.NotFound() if (CONF.api.restrict_lookup and node.provision_state not in self.lookup_allowed_states): raise exception.NotFound() if api_utils.allow_agent_token() or CONF.require_agent_token: try: topic = api.request.rpcapi.get_topic_for(node) except exception.NoValidHost as e: e.code = http_client.BAD_REQUEST raise found_node = api.request.rpcapi.get_node_with_token( api.request.context, node.uuid, topic=topic) else: found_node = node return LookupResult.convert_with_links(found_node) class HeartbeatController(rest.RestController): """Controller handling heartbeats from deploy ramdisk.""" @expose.expose(None, types.uuid_or_name, str, str, str, status_code=http_client.ACCEPTED) def post(self, node_ident, callback_url, agent_version=None, agent_token=None): """Process a heartbeat from the deploy ramdisk. :param node_ident: the UUID or logical name of a node. :param callback_url: the URL to reach back to the ramdisk. :param agent_version: The version of the agent that is heartbeating. ``None`` indicates that the agent that is heartbeating is a version before sending agent_version was introduced so agent v3.0.0 (the last release before sending agent_version was introduced) will be assumed. :raises: NodeNotFound if node with provided UUID or name was not found. :raises: InvalidUuidOrName if node_ident is not valid name or UUID. :raises: NoValidHost if RPC topic for node could not be retrieved. :raises: NotFound if requested API version does not allow this endpoint. 
""" if not api_utils.allow_ramdisk_endpoints(): raise exception.NotFound() if agent_version and not api_utils.allow_agent_version_in_heartbeat(): raise exception.InvalidParameterValue( _('Field "agent_version" not recognised')) cdict = api.request.context.to_policy_values() policy.authorize('baremetal:node:ipa_heartbeat', cdict, cdict) rpc_node = api_utils.get_rpc_node_with_suffix(node_ident) dii = rpc_node['driver_internal_info'] agent_url = dii.get('agent_url') # If we have an agent_url on file, and we get something different # we should fail because this is unexpected behavior of the agent. if agent_url is not None and agent_url != callback_url: LOG.error('Received heartbeat for node %(node)s with ' 'callback URL %(url)s. This is not expected, ' 'and the heartbeat will not be processed.', {'node': rpc_node.uuid, 'url': callback_url}) raise exception.Invalid( _('Detected change in ramdisk provided ' '"callback_url"')) # NOTE(TheJulia): If tokens are required, lets go ahead and fail the # heartbeat very early on. token_required = CONF.require_agent_token if token_required and agent_token is None: LOG.error('Agent heartbeat received for node %(node)s ' 'without an agent token.', {'node': node_ident}) raise exception.InvalidParameterValue( _('Agent token is required for heartbeat processing.')) try: topic = api.request.rpcapi.get_topic_for(rpc_node) except exception.NoValidHost as e: e.code = http_client.BAD_REQUEST raise api.request.rpcapi.heartbeat( api.request.context, rpc_node.uuid, callback_url, agent_version, agent_token, topic=topic) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/state.py0000644000175000017500000000200100000000000022767 0ustar00coreycorey00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ironic.api.controllers import base from ironic.api.controllers import link class State(base.APIBase): current = str """The current state""" target = str """The user modified desired state""" available = [str] """A list of available states it is able to transition to""" links = [link.Link] """A list containing a self link and associated state links""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/types.py0000644000175000017500000003757400000000000023042 0ustar00coreycorey00000000000000# coding: utf-8 # # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import inspect import json from oslo_log import log from oslo_utils import strutils from oslo_utils import uuidutils import wsme from wsme import types as wtypes from ironic.api.controllers import base from ironic.api.controllers.v1 import utils as v1_utils from ironic.common import exception from ironic.common.i18n import _ from ironic.common import utils LOG = log.getLogger(__name__) class MacAddressType(wtypes.UserType): """A simple MAC address type.""" basetype = str name = 'macaddress' @staticmethod def validate(value): return utils.validate_and_normalize_mac(value) @staticmethod def frombasetype(value): if value is None: return None return MacAddressType.validate(value) class UuidOrNameType(wtypes.UserType): """A simple UUID or logical name type.""" basetype = str name = 'uuid_or_name' @staticmethod def validate(value): if not (uuidutils.is_uuid_like(value) or v1_utils.is_valid_logical_name(value)): raise exception.InvalidUuidOrName(name=value) return value @staticmethod def frombasetype(value): if value is None: return None return UuidOrNameType.validate(value) class NameType(wtypes.UserType): """A simple logical name type.""" basetype = str name = 'name' @staticmethod def validate(value): if not v1_utils.is_valid_logical_name(value): raise exception.InvalidName(name=value) return value @staticmethod def frombasetype(value): if value is None: return None return NameType.validate(value) class UuidType(wtypes.UserType): """A simple UUID type.""" basetype = str name = 'uuid' @staticmethod def validate(value): if not uuidutils.is_uuid_like(value): raise exception.InvalidUUID(uuid=value) return value @staticmethod def frombasetype(value): if value is None: return None return UuidType.validate(value) class BooleanType(wtypes.UserType): """A simple boolean type.""" basetype = str name = 'boolean' @staticmethod def validate(value): try: return strutils.bool_from_string(value, strict=True) except ValueError as e: # raise Invalid to return 400 (BadRequest) in the API 
raise exception.Invalid(str(e)) @staticmethod def frombasetype(value): if value is None: return None return BooleanType.validate(value) class JsonType(wtypes.UserType): """A simple JSON type.""" basetype = str name = 'json' def __str__(self): # These are the json serializable native types return ' | '.join(map(str, (str, int, float, BooleanType, list, dict, None))) @staticmethod def validate(value): try: json.dumps(value) except TypeError: raise exception.Invalid(_('%s is not JSON serializable') % value) else: return value @staticmethod def frombasetype(value): return JsonType.validate(value) class ListType(wtypes.UserType): """A simple list type.""" basetype = str name = 'list' @staticmethod def validate(value): """Validate and convert the input to a ListType. :param value: A comma separated string of values :returns: A list of unique values (lower-cased), maintaining the same order """ items = [] for v in str(value).split(','): v_norm = v.strip().lower() if v_norm and v_norm not in items: items.append(v_norm) return items @staticmethod def frombasetype(value): if value is None: return None return ListType.validate(value) macaddress = MacAddressType() uuid_or_name = UuidOrNameType() name = NameType() uuid = UuidType() boolean = BooleanType() listtype = ListType() # Can't call it 'json' because that's the name of the stdlib module jsontype = JsonType() class JsonPatchType(base.Base): """A complex type that represents a single json-patch operation.""" path = wtypes.wsattr(wtypes.StringType(pattern='^(/[\\w-]+)+$'), mandatory=True) op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'), mandatory=True) value = wsme.wsattr(jsontype, default=wtypes.Unset) # The class of the objects being patched. Override this in subclasses. # Should probably be a subclass of ironic.api.controllers.base.APIBase. _api_base = None # Attributes that are not required for construction, but which may not be # removed if set. Override in subclasses if needed. 
_extra_non_removable_attrs = set() # Set of non-removable attributes, calculated lazily. _non_removable_attrs = None @staticmethod def internal_attrs(): """Returns a list of internal attributes. Internal attributes can't be added, replaced or removed. This method may be overwritten by derived class. """ return ['/created_at', '/id', '/links', '/updated_at', '/uuid'] @classmethod def non_removable_attrs(cls): """Returns a set of names of attributes that may not be removed. Attributes whose 'mandatory' property is True are automatically added to this set. To add additional attributes to the set, override the field _extra_non_removable_attrs in subclasses, with a set of the form {'/foo', '/bar'}. """ if cls._non_removable_attrs is None: cls._non_removable_attrs = cls._extra_non_removable_attrs.copy() if cls._api_base: fields = inspect.getmembers(cls._api_base, lambda a: not inspect.isroutine(a)) for name, field in fields: if getattr(field, 'mandatory', False): cls._non_removable_attrs.add('/%s' % name) return cls._non_removable_attrs @staticmethod def validate(patch): _path = '/' + patch.path.split('/')[1] if _path in patch.internal_attrs(): msg = _("'%s' is an internal attribute and can not be updated") raise exception.ClientSideError(msg % patch.path) if patch.path in patch.non_removable_attrs() and patch.op == 'remove': msg = _("'%s' is a mandatory attribute and can not be removed") raise exception.ClientSideError(msg % patch.path) if patch.op != 'remove': if patch.value is wsme.Unset: msg = _("'add' and 'replace' operations need a value") raise exception.ClientSideError(msg) ret = {'path': patch.path, 'op': patch.op} if patch.value is not wsme.Unset: ret['value'] = patch.value return ret class LocalLinkConnectionType(wtypes.UserType): """A type describing local link connection.""" basetype = wtypes.DictType name = 'locallinkconnection' local_link_mandatory_fields = {'port_id', 'switch_id'} smart_nic_mandatory_fields = {'port_id', 'hostname'} mandatory_fields_list 
= [local_link_mandatory_fields, smart_nic_mandatory_fields] optional_fields = {'switch_info', 'network_type'} valid_fields = set.union(optional_fields, *mandatory_fields_list) valid_network_types = {'managed', 'unmanaged'} @staticmethod def validate(value): """Validate and convert the input to a LocalLinkConnectionType. :param value: A dictionary of values to validate, switch_id is a MAC address or an OpenFlow based datapath_id, switch_info is an optional field. Required Smart NIC fields are port_id and hostname. For example:: { 'switch_id': mac_or_datapath_id(), 'port_id': 'Ethernet3/1', 'switch_info': 'switch1' } Or for Smart NIC:: { 'port_id': 'rep0-0', 'hostname': 'host1-bf' } :returns: A dictionary. :raises: Invalid if some of the keys in the dictionary being validated are unknown, invalid, or some required ones are missing. """ wtypes.DictType(str, str).validate(value) keys = set(value) # This is to workaround an issue when an API object is initialized from # RPC object, in which dictionary fields that are set to None become # empty dictionaries if not keys: return value invalid = keys - LocalLinkConnectionType.valid_fields if invalid: raise exception.Invalid(_('%s are invalid keys') % (invalid)) # If network_type is 'unmanaged', this is a network with no switch # management. i.e local_link_connection details are not required. if 'network_type' in keys: if (value['network_type'] not in LocalLinkConnectionType.valid_network_types): msg = _( 'Invalid network_type %(type)s, valid network_types are ' '%(valid_network_types)s.') % { 'type': value['network_type'], 'valid_network_types': LocalLinkConnectionType.valid_network_types} raise exception.Invalid(msg) if (value['network_type'] == 'unmanaged' and not (keys - {'network_type'})): # Only valid network_type 'unmanaged' is set, no for further # validation required. 
return value # Check any mandatory fields sets are present for mandatory_set in LocalLinkConnectionType.mandatory_fields_list: if mandatory_set <= keys: break else: msg = _('Missing mandatory keys. Required keys are ' '%(required_fields)s. Or in case of Smart NIC ' '%(smart_nic_required_fields)s. ' 'Submitted keys are %(keys)s .') % { 'required_fields': LocalLinkConnectionType.local_link_mandatory_fields, 'smart_nic_required_fields': LocalLinkConnectionType.smart_nic_mandatory_fields, 'keys': keys} raise exception.Invalid(msg) # Check switch_id is either a valid mac address or # OpenFlow datapath_id and normalize it. try: value['switch_id'] = utils.validate_and_normalize_mac( value['switch_id']) except exception.InvalidMAC: try: value['switch_id'] = utils.validate_and_normalize_datapath_id( value['switch_id']) except exception.InvalidDatapathID: raise exception.InvalidSwitchID(switch_id=value['switch_id']) except KeyError: # In Smart NIC case 'switch_id' is optional. pass return value @staticmethod def frombasetype(value): if value is None: return None return LocalLinkConnectionType.validate(value) @staticmethod def validate_for_smart_nic(value): """Validates Smart NIC field are present 'port_id' and 'hostname' :param value: local link information of type Dictionary. :return: True if both fields 'port_id' and 'hostname' are present in 'value', False otherwise. 
""" wtypes.DictType(str, str).validate(value) keys = set(value) if LocalLinkConnectionType.smart_nic_mandatory_fields <= keys: return True return False locallinkconnectiontype = LocalLinkConnectionType() class VifType(JsonType): basetype = str name = 'viftype' mandatory_fields = {'id'} @staticmethod def validate(value): super(VifType, VifType).validate(value) keys = set(value) # Check all mandatory fields are present missing = VifType.mandatory_fields - keys if missing: msg = _('Missing mandatory keys: %s') % ', '.join(list(missing)) raise exception.Invalid(msg) UuidOrNameType.validate(value['id']) return value @staticmethod def frombasetype(value): if value is None: return None return VifType.validate(value) viftype = VifType() class EventType(wtypes.UserType): """A simple Event type.""" basetype = wtypes.DictType name = 'event' def _validate_network_port_event(value): """Validate network port event fields. :param value: A event dict :returns: value :raises: Invalid if network port event not in proper format """ validators = { 'port_id': UuidType.validate, 'mac_address': MacAddressType.validate, 'status': str, 'device_id': UuidType.validate, 'binding:host_id': UuidType.validate, 'binding:vnic_type': str } keys = set(value) net_keys = set(validators) net_mandatory_fields = {'port_id', 'mac_address', 'status'} # Check all keys are valid for network port event invalid = keys.difference(EventType.mandatory_fields.union(net_keys)) if invalid: raise exception.Invalid(_('%s are invalid keys') % ', '.join(invalid)) # Check all mandatory fields for network port event is present missing = net_mandatory_fields.difference(keys) if missing: raise exception.Invalid(_('Missing mandatory keys: %s') % ', '.join(missing)) # Check all values are of expected type for key in net_keys: if key in value: try: validators[key](value[key]) except Exception as e: msg = (_('Event validation failure for %(key)s. 
' '%(message)s') % {'key': key, 'message': e}) raise exception.Invalid(msg) return value mandatory_fields = {'event'} event_validators = { 'network.bind_port': _validate_network_port_event, 'network.unbind_port': _validate_network_port_event, 'network.delete_port': _validate_network_port_event, } valid_events = set(event_validators) @staticmethod def validate(value): """Validate the input :param value: A event dict :returns: value :raises: Invalid if event not in proper format """ wtypes.DictType(str, str).validate(value) keys = set(value) # Check all mandatory fields are present missing = EventType.mandatory_fields.difference(keys) if missing: raise exception.Invalid(_('Missing mandatory keys: %s') % ', '.join(missing)) # Check event is a supported event if value['event'] not in EventType.valid_events: raise exception.Invalid( _('%(event)s is not one of valid events: %(valid_events)s.') % {'event': value['event'], 'valid_events': ', '.join(EventType.valid_events)}) return EventType.event_validators[value['event']](value) eventtype = EventType() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/utils.py0000644000175000017500000014330300000000000023022 0ustar00coreycorey00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client import inspect import re import jsonpatch import jsonschema from jsonschema import exceptions as json_schema_exc import os_traits from oslo_config import cfg from oslo_utils import uuidutils from pecan import rest from webob import static import wsme from ironic import api from ironic.api.controllers.v1 import versions from ironic.common import exception from ironic.common import faults from ironic.common.i18n import _ from ironic.common import policy from ironic.common import states from ironic.common import utils from ironic import objects CONF = cfg.CONF _JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchConflict, jsonpatch.JsonPatchException, jsonpatch.JsonPointerException, KeyError, IndexError) # Minimum API version to use for certain verbs MIN_VERB_VERSIONS = { # v1.4 added the MANAGEABLE state and two verbs to move nodes into # and out of that state. Reject requests to do this in older versions states.VERBS['manage']: versions.MINOR_4_MANAGEABLE_STATE, states.VERBS['provide']: versions.MINOR_4_MANAGEABLE_STATE, states.VERBS['inspect']: versions.MINOR_6_INSPECT_STATE, states.VERBS['abort']: versions.MINOR_13_ABORT_VERB, states.VERBS['clean']: versions.MINOR_15_MANUAL_CLEAN, states.VERBS['adopt']: versions.MINOR_17_ADOPT_VERB, states.VERBS['rescue']: versions.MINOR_38_RESCUE_INTERFACE, states.VERBS['unrescue']: versions.MINOR_38_RESCUE_INTERFACE, } V31_FIELDS = [ 'boot_interface', 'console_interface', 'deploy_interface', 'inspect_interface', 'management_interface', 'power_interface', 'raid_interface', 'vendor_interface', ] STANDARD_TRAITS = os_traits.get_traits() CUSTOM_TRAIT_REGEX = re.compile("^%s[A-Z0-9_]+$" % os_traits.CUSTOM_NAMESPACE) def validate_limit(limit): if limit is None: return CONF.api.max_limit if limit <= 0: raise exception.ClientSideError(_("Limit must be positive")) return min(CONF.api.max_limit, limit) def validate_sort_dir(sort_dir): if sort_dir not in ['asc', 'desc']: raise 
exception.ClientSideError(_("Invalid sort direction: %s. " "Acceptable values are " "'asc' or 'desc'") % sort_dir) return sort_dir def validate_trait(trait, error_prefix=_('Invalid trait')): error = exception.ClientSideError( _('%(error_prefix)s. A valid trait must be no longer than 255 ' 'characters. Standard traits are defined in the os_traits library. ' 'A custom trait must start with the prefix CUSTOM_ and use ' 'the following characters: A-Z, 0-9 and _') % {'error_prefix': error_prefix}) if not isinstance(trait, str): raise error if len(trait) > 255 or len(trait) < 1: raise error if trait in STANDARD_TRAITS: return if CUSTOM_TRAIT_REGEX.match(trait) is None: raise error def apply_jsonpatch(doc, patch): """Apply a JSON patch, one operation at a time. If the patch fails to apply, this allows us to determine which operation failed, making the error message a little less cryptic. :param doc: The JSON document to patch. :param patch: The JSON patch to apply. :returns: The result of the patch operation. :raises: PatchError if the patch fails to apply. :raises: exception.ClientSideError if the patch adds a new root attribute. """ # Prevent removal of root attributes. for p in patch: if p['op'] == 'add' and p['path'].count('/') == 1: if p['path'].lstrip('/') not in doc: msg = _('Adding a new attribute (%s) to the root of ' 'the resource is not allowed') raise exception.ClientSideError(msg % p['path']) # Apply operations one at a time, to improve error reporting. for patch_op in patch: try: doc = jsonpatch.apply_patch(doc, jsonpatch.JsonPatch([patch_op])) except _JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch_op, reason=e) return doc def get_patch_values(patch, path): """Get the patch values corresponding to the specified path. 
If there are multiple values specified for the same path, for example :: [{'op': 'add', 'path': '/name', 'value': 'abc'}, {'op': 'add', 'path': '/name', 'value': 'bca'}] return all of them in a list (preserving order) :param patch: HTTP PATCH request body. :param path: the path to get the patch values for. :returns: list of values for the specified path in the patch. """ return [p['value'] for p in patch if p['path'] == path and p['op'] != 'remove'] def is_path_removed(patch, path): """Returns whether the patch includes removal of the path (or subpath of). :param patch: HTTP PATCH request body. :param path: the path to check. :returns: True if path or subpath being removed, False otherwise. """ path = path.rstrip('/') for p in patch: if ((p['path'] == path or p['path'].startswith(path + '/')) and p['op'] == 'remove'): return True def is_path_updated(patch, path): """Returns whether the patch includes operation on path (or its subpath). :param patch: HTTP PATCH request body. :param path: the path to check. :returns: True if path or subpath being patched, False otherwise. """ path = path.rstrip('/') for p in patch: return p['path'] == path or p['path'].startswith(path + '/') def allow_node_logical_names(): # v1.5 added logical name aliases return api.request.version.minor >= versions.MINOR_5_NODE_NAME def _get_with_suffix(get_func, ident, exc_class): """Helper to get a resource taking into account API .json suffix.""" try: return get_func(ident) except exc_class: if not api.request.environ['HAS_JSON_SUFFIX']: raise # NOTE(dtantsur): strip .json prefix to maintain compatibility # with the guess_content_type_from_ext feature. Try to return it # back if the resulting resource was not found. return get_func(ident + '.json') def get_rpc_node(node_ident): """Get the RPC node from the node uuid or logical name. :param node_ident: the UUID or logical name of a node. :returns: The RPC Node. :raises: InvalidUuidOrName if the name or uuid provided is not valid. 
:raises: NodeNotFound if the node is not found. """ # Check to see if the node_ident is a valid UUID. If it is, treat it # as a UUID. if uuidutils.is_uuid_like(node_ident): return objects.Node.get_by_uuid(api.request.context, node_ident) # We can refer to nodes by their name, if the client supports it if allow_node_logical_names(): if is_valid_logical_name(node_ident): return objects.Node.get_by_name(api.request.context, node_ident) raise exception.InvalidUuidOrName(name=node_ident) # Ensure we raise the same exception as we did for the Juno release raise exception.NodeNotFound(node=node_ident) def get_rpc_node_with_suffix(node_ident): """Get the RPC node from the node uuid or logical name. If HAS_JSON_SUFFIX flag is set in the pecan environment, try also looking for node_ident with '.json' suffix. Otherwise identical to get_rpc_node. :param node_ident: the UUID or logical name of a node. :returns: The RPC Node. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: NodeNotFound if the node is not found. """ return _get_with_suffix(get_rpc_node, node_ident, exception.NodeNotFound) def get_rpc_portgroup(portgroup_ident): """Get the RPC portgroup from the portgroup UUID or logical name. :param portgroup_ident: the UUID or logical name of a portgroup. :returns: The RPC portgroup. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: PortgroupNotFound if the portgroup is not found. """ # Check to see if the portgroup_ident is a valid UUID. If it is, treat it # as a UUID. 
if uuidutils.is_uuid_like(portgroup_ident): return objects.Portgroup.get_by_uuid(api.request.context, portgroup_ident) # We can refer to portgroups by their name if utils.is_valid_logical_name(portgroup_ident): return objects.Portgroup.get_by_name(api.request.context, portgroup_ident) raise exception.InvalidUuidOrName(name=portgroup_ident) def get_rpc_portgroup_with_suffix(portgroup_ident): """Get the RPC portgroup from the portgroup UUID or logical name. If HAS_JSON_SUFFIX flag is set in the pecan environment, try also looking for portgroup_ident with '.json' suffix. Otherwise identical to get_rpc_portgroup. :param portgroup_ident: the UUID or logical name of a portgroup. :returns: The RPC portgroup. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: PortgroupNotFound if the portgroup is not found. """ return _get_with_suffix(get_rpc_portgroup, portgroup_ident, exception.PortgroupNotFound) def get_rpc_allocation(allocation_ident): """Get the RPC allocation from the allocation UUID or logical name. :param allocation_ident: the UUID or logical name of an allocation. :returns: The RPC allocation. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: AllocationNotFound if the allocation is not found. """ # Check to see if the allocation_ident is a valid UUID. If it is, treat it # as a UUID. if uuidutils.is_uuid_like(allocation_ident): return objects.Allocation.get_by_uuid(api.request.context, allocation_ident) # We can refer to allocations by their name if utils.is_valid_logical_name(allocation_ident): return objects.Allocation.get_by_name(api.request.context, allocation_ident) raise exception.InvalidUuidOrName(name=allocation_ident) def get_rpc_allocation_with_suffix(allocation_ident): """Get the RPC allocation from the allocation UUID or logical name. If HAS_JSON_SUFFIX flag is set in the pecan environment, try also looking for allocation_ident with '.json' suffix. Otherwise identical to get_rpc_allocation. 
:param allocation_ident: the UUID or logical name of an allocation. :returns: The RPC allocation. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: AllocationNotFound if the allocation is not found. """ return _get_with_suffix(get_rpc_allocation, allocation_ident, exception.AllocationNotFound) def get_rpc_deploy_template(template_ident): """Get the RPC deploy template from the UUID or logical name. :param template_ident: the UUID or logical name of a deploy template. :returns: The RPC deploy template. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: DeployTemplateNotFound if the deploy template is not found. """ # Check to see if the template_ident is a valid UUID. If it is, treat it # as a UUID. if uuidutils.is_uuid_like(template_ident): return objects.DeployTemplate.get_by_uuid(api.request.context, template_ident) # We can refer to templates by their name if utils.is_valid_logical_name(template_ident): return objects.DeployTemplate.get_by_name(api.request.context, template_ident) raise exception.InvalidUuidOrName(name=template_ident) def get_rpc_deploy_template_with_suffix(template_ident): """Get the RPC deploy template from the UUID or logical name. If HAS_JSON_SUFFIX flag is set in the pecan environment, try also looking for template_ident with '.json' suffix. Otherwise identical to get_rpc_deploy_template. :param template_ident: the UUID or logical name of a deploy template. :returns: The RPC deploy template. :raises: InvalidUuidOrName if the name or uuid provided is not valid. :raises: DeployTemplateNotFound if the deploy template is not found. """ return _get_with_suffix(get_rpc_deploy_template, template_ident, exception.DeployTemplateNotFound) def is_valid_node_name(name): """Determine if the provided name is a valid node name. Check to see that the provided node name is valid, and isn't a UUID. :param name: the node name to check. :returns: True if the name is valid, False otherwise. 
""" return is_valid_logical_name(name) and not uuidutils.is_uuid_like(name) def is_valid_logical_name(name): """Determine if the provided name is a valid hostname.""" if api.request.version.minor < versions.MINOR_10_UNRESTRICTED_NODE_NAME: return utils.is_hostname_safe(name) else: return utils.is_valid_logical_name(name) def vendor_passthru(ident, method, topic, data=None, driver_passthru=False): """Call a vendor passthru API extension. Call the vendor passthru API extension and process the method response to set the right return code for methods that are asynchronous or synchronous; Attach the return value to the response object if it's being served statically. :param ident: The resource identification. For node's vendor passthru this is the node's UUID, for driver's vendor passthru this is the driver's name. :param method: The vendor method name. :param topic: The RPC topic. :param data: The data passed to the vendor method. Defaults to None. :param driver_passthru: Boolean value. Whether this is a node or driver vendor passthru. Defaults to False. :returns: A WSME response object to be returned by the API. 
""" if not method: raise exception.ClientSideError(_("Method not specified")) if data is None: data = {} http_method = api.request.method.upper() params = (api.request.context, ident, method, http_method, data, topic) if driver_passthru: response = api.request.rpcapi.driver_vendor_passthru(*params) else: response = api.request.rpcapi.vendor_passthru(*params) status_code = http_client.ACCEPTED if response['async'] else http_client.OK return_value = response['return'] response_params = {'status_code': status_code} # Attach the return value to the response object if response.get('attach'): if isinstance(return_value, str): # If unicode, convert to bytes return_value = return_value.encode('utf-8') file_ = wsme.types.File(content=return_value) api.response.app_iter = static.FileIter(file_.file) # Since we've attached the return value to the response # object the response body should now be empty. return_value = None response_params['return_type'] = None return wsme.api.Response(return_value, **response_params) def check_for_invalid_fields(fields, object_fields): """Check for requested non-existent fields. Check if the user requested non-existent fields. :param fields: A list of fields requested by the user :object_fields: A list of fields supported by the object. :raises: InvalidParameterValue if invalid fields were requested. """ invalid_fields = set(fields) - set(object_fields) if invalid_fields: raise exception.InvalidParameterValue( _('Field(s) "%s" are not valid') % ', '.join(invalid_fields)) def check_allow_specify_fields(fields): """Check if fetching a subset of the resource attributes is allowed. Version 1.8 of the API allows fetching a subset of the resource attributes, this method checks if the required version is being requested. 
""" if (fields is not None and api.request.version.minor < versions.MINOR_8_FETCHING_SUBSET_OF_FIELDS): raise exception.NotAcceptable() VERSIONED_FIELDS = { 'driver_internal_info': versions.MINOR_3_DRIVER_INTERNAL_INFO, 'name': versions.MINOR_5_NODE_NAME, 'inspection_finished_at': versions.MINOR_6_INSPECT_STATE, 'inspection_started_at': versions.MINOR_6_INSPECT_STATE, 'clean_step': versions.MINOR_7_NODE_CLEAN, 'raid_config': versions.MINOR_12_RAID_CONFIG, 'target_raid_config': versions.MINOR_12_RAID_CONFIG, 'network_interface': versions.MINOR_20_NETWORK_INTERFACE, 'resource_class': versions.MINOR_21_RESOURCE_CLASS, 'storage_interface': versions.MINOR_33_STORAGE_INTERFACE, 'traits': versions.MINOR_37_NODE_TRAITS, 'rescue_interface': versions.MINOR_38_RESCUE_INTERFACE, 'bios_interface': versions.MINOR_40_BIOS_INTERFACE, 'fault': versions.MINOR_42_FAULT, 'deploy_step': versions.MINOR_44_NODE_DEPLOY_STEP, 'conductor_group': versions.MINOR_46_NODE_CONDUCTOR_GROUP, 'automated_clean': versions.MINOR_47_NODE_AUTOMATED_CLEAN, 'protected': versions.MINOR_48_NODE_PROTECTED, 'protected_reason': versions.MINOR_48_NODE_PROTECTED, 'conductor': versions.MINOR_49_CONDUCTORS, 'owner': versions.MINOR_50_NODE_OWNER, 'description': versions.MINOR_51_NODE_DESCRIPTION, 'allocation_uuid': versions.MINOR_52_ALLOCATION, 'events': versions.MINOR_54_EVENTS, 'retired': versions.MINOR_61_NODE_RETIRED, 'retired_reason': versions.MINOR_61_NODE_RETIRED, 'lessee': versions.MINOR_65_NODE_LESSEE, } for field in V31_FIELDS: VERSIONED_FIELDS[field] = versions.MINOR_31_DYNAMIC_INTERFACES def allow_field(field): """Check if a field is allowed in the current version.""" return api.request.version.minor >= VERSIONED_FIELDS[field] def disallowed_fields(): """Generator of fields not allowed in the current request.""" for field in VERSIONED_FIELDS: if not allow_field(field): yield field def check_allowed_fields(fields): """Check if fetching a particular field is allowed. 
This method checks if the required version is being requested for fields that are only allowed to be fetched in a particular API version. """ if fields is None: return for field in disallowed_fields(): if field in fields: raise exception.NotAcceptable() def check_allowed_portgroup_fields(fields): """Check if fetching a particular field of a portgroup is allowed. This method checks if the required version is being requested for fields that are only allowed to be fetched in a particular API version. """ if fields is None: return if (('mode' in fields or 'properties' in fields) and not allow_portgroup_mode_properties()): raise exception.NotAcceptable() def check_allow_management_verbs(verb): min_version = MIN_VERB_VERSIONS.get(verb) if min_version is not None and api.request.version.minor < min_version: raise exception.NotAcceptable() def check_for_invalid_state_and_allow_filter(provision_state): """Check if filtering nodes by provision state is allowed. Version 1.9 of the API allows filter nodes by provision state. """ if provision_state is not None: if (api.request.version.minor < versions.MINOR_9_PROVISION_STATE_FILTER): raise exception.NotAcceptable() valid_states = states.machine.states if provision_state not in valid_states: raise exception.InvalidParameterValue( _('Provision state "%s" is not valid') % provision_state) def check_allow_specify_driver(driver): """Check if filtering nodes by driver is allowed. Version 1.16 of the API allows filter nodes by driver. """ if (driver is not None and api.request.version.minor < versions.MINOR_16_DRIVER_FILTER): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_16_DRIVER_FILTER}) def check_allow_specify_resource_class(resource_class): """Check if filtering nodes by resource_class is allowed. Version 1.21 of the API allows filtering nodes by resource_class. 
""" if (resource_class is not None and api.request.version.minor < versions.MINOR_21_RESOURCE_CLASS): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_21_RESOURCE_CLASS}) def check_allow_filter_driver_type(driver_type): """Check if filtering drivers by classic/dynamic is allowed. Version 1.30 of the API allows this. """ if driver_type is not None and not allow_dynamic_drivers(): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_30_DYNAMIC_DRIVERS}) def check_allow_driver_detail(detail): """Check if getting detailed driver info is allowed. Version 1.30 of the API allows this. """ if detail is not None and not allow_dynamic_drivers(): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_30_DYNAMIC_DRIVERS}) _CONFIG_DRIVE_SCHEMA = { 'anyOf': [ { 'type': 'object', 'properties': { 'meta_data': {'type': 'object'}, 'network_data': {'type': 'object'}, 'user_data': { 'type': ['object', 'array', 'string', 'null'] }, 'vendor_data': {'type': 'object'}, }, 'additionalProperties': False }, { 'type': ['string', 'null'] } ] } def check_allow_configdrive(target, configdrive=None): if not configdrive: return allowed_targets = [states.ACTIVE] if allow_node_rebuild_with_configdrive(): allowed_targets.append(states.REBUILD) if target not in allowed_targets: msg = (_('Adding a config drive is only supported when setting ' 'provision state to %s') % ', '.join(allowed_targets)) raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) try: jsonschema.validate(configdrive, _CONFIG_DRIVE_SCHEMA) except json_schema_exc.ValidationError as e: msg = _('Invalid configdrive format: %s') % e raise 
exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) if isinstance(configdrive, dict): if not allow_build_configdrive(): msg = _('Providing a JSON object for configdrive is only supported' ' starting with API version %(base)s.%(opr)s') % { 'base': versions.BASE_VERSION, 'opr': versions.MINOR_56_BUILD_CONFIGDRIVE} raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) if ('vendor_data' in configdrive and not allow_configdrive_vendor_data()): msg = _('Providing vendor_data in configdrive is only supported' ' starting with API version %(base)s.%(opr)s') % { 'base': versions.BASE_VERSION, 'opr': versions.MINOR_59_CONFIGDRIVE_VENDOR_DATA} raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) def check_allow_filter_by_fault(fault): """Check if filtering nodes by fault is allowed. Version 1.42 of the API allows filtering nodes by fault. """ if (fault is not None and api.request.version.minor < versions.MINOR_42_FAULT): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_42_FAULT}) if fault is not None and fault not in faults.VALID_FAULTS: msg = (_('Unrecognized fault "%(fault)s" is specified, allowed faults ' 'are %(valid_faults)s') % {'fault': fault, 'valid_faults': faults.VALID_FAULTS}) raise exception.ClientSideError( msg, status_code=http_client.BAD_REQUEST) def check_allow_filter_by_conductor_group(conductor_group): """Check if filtering nodes by conductor_group is allowed. Version 1.46 of the API allows filtering nodes by conductor_group. """ if (conductor_group is not None and api.request.version.minor < versions.MINOR_46_NODE_CONDUCTOR_GROUP): raise exception.NotAcceptable(_( "Request not acceptable. 
The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_46_NODE_CONDUCTOR_GROUP}) def check_allow_filter_by_owner(owner): """Check if filtering nodes by owner is allowed. Version 1.50 of the API allows filtering nodes by owner. """ if (owner is not None and api.request.version.minor < versions.MINOR_50_NODE_OWNER): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_50_NODE_OWNER}) def check_allow_filter_by_lessee(lessee): """Check if filtering nodes by lessee is allowed. Version 1.62 of the API allows filtering nodes by lessee. """ if (lessee is not None and api.request.version.minor < versions.MINOR_65_NODE_LESSEE): raise exception.NotAcceptable(_( "Request not acceptable. The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_65_NODE_LESSEE}) def initial_node_provision_state(): """Return node state to use by default when creating new nodes. Previously the default state for new nodes was AVAILABLE. Starting with API 1.11 it is ENROLL. """ return (states.AVAILABLE if api.request.version.minor < versions.MINOR_11_ENROLL_STATE else states.ENROLL) def allow_raid_config(): """Check if RAID configuration is allowed for the node. Version 1.12 of the API allows RAID configuration for the node. """ return api.request.version.minor >= versions.MINOR_12_RAID_CONFIG def allow_soft_power_off(): """Check if Soft Power Off is allowed for the node. Version 1.27 of the API allows Soft Power Off, including Soft Reboot, for the node. """ return api.request.version.minor >= versions.MINOR_27_SOFT_POWER_OFF def allow_inject_nmi(): """Check if Inject NMI is allowed for the node. Version 1.29 of the API allows Inject NMI for the node. 
""" return api.request.version.minor >= versions.MINOR_29_INJECT_NMI def allow_links_node_states_and_driver_properties(): """Check if links are displayable. Version 1.14 of the API allows the display of links to node states and driver properties. """ return (api.request.version.minor >= versions.MINOR_14_LINKS_NODESTATES_DRIVERPROPERTIES) def allow_port_internal_info(): """Check if accessing internal_info is allowed for the port. Version 1.18 of the API exposes internal_info readonly field for the port. """ return (api.request.version.minor >= versions.MINOR_18_PORT_INTERNAL_INFO) def allow_port_advanced_net_fields(): """Check if we should return local_link_connection and pxe_enabled fields. Version 1.19 of the API added support for these new fields in port object. """ return (api.request.version.minor >= versions.MINOR_19_PORT_ADVANCED_NET_FIELDS) def allow_ramdisk_endpoints(): """Check if heartbeat and lookup endpoints are allowed. Version 1.22 of the API introduced them. """ return api.request.version.minor >= versions.MINOR_22_LOOKUP_HEARTBEAT def allow_portgroups(): """Check if we should support portgroup operations. Version 1.23 of the API added support for PortGroups. """ return (api.request.version.minor >= versions.MINOR_23_PORTGROUPS) def allow_portgroups_subcontrollers(): """Check if portgroups can be used as subcontrollers. Version 1.24 of the API added support for Portgroups as subcontrollers """ return (api.request.version.minor >= versions.MINOR_24_PORTGROUPS_SUBCONTROLLERS) def allow_remove_chassis_uuid(): """Check if chassis_uuid can be removed from node. Version 1.25 of the API added support for chassis_uuid removal """ return (api.request.version.minor >= versions.MINOR_25_UNSET_CHASSIS_UUID) def allow_portgroup_mode_properties(): """Check if mode and properties can be added to/queried from a portgroup. Version 1.26 of the API added mode and properties fields to portgroup object. 
""" return (api.request.version.minor >= versions.MINOR_26_PORTGROUP_MODE_PROPERTIES) def allow_vifs_subcontroller(): """Check if node/vifs can be used. Version 1.28 of the API added support for VIFs to be attached to Nodes. """ return (api.request.version.minor >= versions.MINOR_28_VIFS_SUBCONTROLLER) def allow_dynamic_drivers(): """Check if dynamic driver API calls are allowed. Version 1.30 of the API added support for all of the driver composition related calls in the /v1/drivers API. """ return (api.request.version.minor >= versions.MINOR_30_DYNAMIC_DRIVERS) def allow_dynamic_interfaces(): """Check if dynamic interface fields are allowed. Version 1.31 of the API added support for viewing and setting the fields in ``V31_FIELDS`` on the node object. """ return (api.request.version.minor >= versions.MINOR_31_DYNAMIC_INTERFACES) def allow_volume(): """Check if volume connectors and targets are allowed. Version 1.32 of the API added support for volume connectors and targets """ return api.request.version.minor >= versions.MINOR_32_VOLUME def allow_storage_interface(): """Check if we should support storage_interface node and driver fields. Version 1.33 of the API added support for storage interfaces. """ return (api.request.version.minor >= versions.MINOR_33_STORAGE_INTERFACE) def allow_port_physical_network(): """Check if port physical network field is allowed. Version 1.34 of the API added the physical network field to the port object. We also check whether the target version of the Port object supports the physical_network field as this may not be the case during a rolling upgrade. """ return ((api.request.version.minor >= versions.MINOR_34_PORT_PHYSICAL_NETWORK) and objects.Port.supports_physical_network()) def allow_node_rebuild_with_configdrive(): """Check if we should support node rebuild with configdrive. Version 1.35 of the API added support for node rebuild with configdrive. 
""" return (api.request.version.minor >= versions.MINOR_35_REBUILD_CONFIG_DRIVE) def allow_agent_version_in_heartbeat(): """Check if agent version is allowed to be passed into heartbeat. Version 1.36 of the API added the ability for agents to pass their version information to Ironic on heartbeat. """ return (api.request.version.minor >= versions.MINOR_36_AGENT_VERSION_HEARTBEAT) def allow_rescue_interface(): """Check if we should support rescue and unrescue operations and interface. Version 1.38 of the API added support for rescue and unrescue. """ return api.request.version.minor >= versions.MINOR_38_RESCUE_INTERFACE def allow_bios_interface(): """Check if we should support bios interface and endpoints. Version 1.40 of the API added support for bios interface. """ return api.request.version.minor >= versions.MINOR_40_BIOS_INTERFACE def get_controller_reserved_names(cls): """Get reserved names for a given controller. Inspect the controller class and return the reserved names within it. Reserved names are names that can not be used as an identifier for a resource because the names are either being used as a custom action or is the name of a nested controller inside the given class. :param cls: The controller class to be inspected. """ reserved_names = [ name for name, member in inspect.getmembers(cls) if isinstance(member, rest.RestController)] if hasattr(cls, '_custom_actions'): reserved_names += list(cls._custom_actions) return reserved_names def allow_traits(): """Check if traits are allowed for the node. Version 1.37 of the API allows traits for the node. """ return api.request.version.minor >= versions.MINOR_37_NODE_TRAITS def allow_inspect_wait_state(): """Check if inspect wait is allowed for the node. Version 1.39 of the API adds 'inspect wait' state to substitute 'inspecting' state during asynchronous hardware inspection. """ return api.request.version.minor >= versions.MINOR_39_INSPECT_WAIT def allow_inspect_abort(): """Check if inspection abort is allowed. 
Version 1.41 of the API added support for inspection abort """ return api.request.version.minor >= versions.MINOR_41_INSPECTION_ABORT def handle_post_port_like_extra_vif(p_dict): """Handle a Post request that sets .extra['vif_port_id']. This handles attach of VIFs via specifying the VIF port ID in a port or port group's extra['vif_port_id'] field. :param p_dict: a dictionary with field names/values for the port or port group :return: VIF or None """ vif = p_dict.get('extra', {}).get('vif_port_id') if vif: # TODO(rloo): in Stein cycle: if API version >= 1.28, remove # warning and support for extra[]; else (< 1.28) # still support it; continue copying to internal_info # (see bug 1722850). i.e., change the 7 lines of code # below to something like: # if not api_utils.allow_vifs_subcontroller(): # internal_info = {'tenant_vif_port_id': vif} # pg_dict['internal_info'] = internal_info if allow_vifs_subcontroller(): utils.warn_about_deprecated_extra_vif_port_id() # NOTE(rloo): this value should really be in .internal_info[..] # which is what would happen if they had used the # POST /v1/nodes//vifs API. internal_info = {'tenant_vif_port_id': vif} p_dict['internal_info'] = internal_info return vif def handle_patch_port_like_extra_vif(rpc_object, api_object, patch): """Handle a Patch request that modifies .extra['vif_port_id']. This handles attach/detach of VIFs via the VIF port ID in a port or port group's extra['vif_port_id'] field. :param rpc_object: a Port or Portgroup RPC object :param api_object: the corresponding Port or Portgroup API object :param patch: the JSON patch in the API request """ vif_list = get_patch_values(patch, '/extra/vif_port_id') vif = None if vif_list: # if specified more than once, use the last value vif = vif_list[-1] # TODO(rloo): in Stein cycle: if API version >= 1.28, remove this # warning and don't copy to internal_info; else (<1.28) still # support it; continue copying to internal_info (see bug 1722850). 
# i.e., change the 8 lines of code below to something like: # if not allow_vifs_subcontroller(): # int_info = rpc_object.internal_info.get('tenant_vif_port_id') # if (not int_info or # int_info == rpc_object.extra.get('vif_port_id')): # api_object.internal_info['tenant_vif_port_id'] = vif if allow_vifs_subcontroller(): utils.warn_about_deprecated_extra_vif_port_id() # NOTE(rloo): if the user isn't also using the REST API # 'POST nodes//vifs', we are safe to copy the # .extra[] value to the .internal_info location int_info = rpc_object.internal_info.get('tenant_vif_port_id') if (not int_info or int_info == rpc_object.extra.get('vif_port_id')): api_object.internal_info['tenant_vif_port_id'] = vif elif is_path_removed(patch, '/extra/vif_port_id'): # TODO(rloo): in Stein cycle: if API version >= 1.28, remove this # warning and don't remove from internal_info; else (<1.28) still # support it; remove from internal_info (see bug 1722850). # i.e., change the 8 lines of code below to something like: # if not allow_vifs_subcontroller(): # int_info = rpc_object.internal_info.get('tenant_vif...') # if (int_info and int_info==rpc_object.extra.get('vif_port_id')): # api_object.internal_info['tenant_vif_port_id'] = None if allow_vifs_subcontroller(): utils.warn_about_deprecated_extra_vif_port_id() # NOTE(rloo): if the user isn't also using the REST API # 'POST nodes//vifs', we are safe to remove the # .extra[] value from the .internal_info location int_info = rpc_object.internal_info.get('tenant_vif_port_id') if (int_info and int_info == rpc_object.extra.get('vif_port_id')): api_object.internal_info.pop('tenant_vif_port_id') def allow_detail_query(): """Check if passing a detail=True query string is allowed. Version 1.43 allows a user to pass the detail query string to list the resource with all the fields. 
""" return api.request.version.minor >= versions.MINOR_43_ENABLE_DETAIL_QUERY def allow_reset_interfaces(): """Check if passing a reset_interfaces query string is allowed.""" return api.request.version.minor >= versions.MINOR_45_RESET_INTERFACES def get_request_return_fields(fields, detail, default_fields): """Calculate fields to return from an API request The fields query and detail=True query can not be passed into a request at the same time. To use the detail query we need to be on a version of the API greater than 1.43. This function raises an InvalidParameterValue exception if either of these conditions are not met. If these checks pass then this function will return either the fields passed in or the default fields provided. :param fields: The fields query passed into the API request. :param detail: The detail query passed into the API request. :param default_fields: The default fields to return if fields=None and detail=None. :raises: InvalidParameterValue if there is an invalid combination of query strings or API version. :returns: 'fields' passed in value or 'default_fields' """ if detail is not None and not allow_detail_query(): raise exception.InvalidParameterValue( "Invalid query parameter ?detail=%s received." % detail) if fields is not None and detail: raise exception.InvalidParameterValue( "Can not specify ?detail=True and fields in the same request.") if fields is None and not detail: return default_fields return fields def allow_expose_conductors(): """Check if accessing conductor endpoints is allowed. Version 1.49 of the API exposed conductor endpoints and conductor field for the node. """ return api.request.version.minor >= versions.MINOR_49_CONDUCTORS def check_allow_filter_by_conductor(conductor): """Check if filtering nodes by conductor is allowed. Version 1.49 of the API allows filtering nodes by conductor. """ if conductor is not None and not allow_expose_conductors(): raise exception.NotAcceptable(_( "Request not acceptable. 
The minimal required API version " "should be %(base)s.%(opr)s") % {'base': versions.BASE_VERSION, 'opr': versions.MINOR_49_CONDUCTORS}) def allow_allocations(): """Check if accessing allocation endpoints is allowed. Version 1.52 of the API exposed allocation endpoints and allocation_uuid field for the node. """ return api.request.version.minor >= versions.MINOR_52_ALLOCATION def allow_port_is_smartnic(): """Check if port is_smartnic field is allowed. Version 1.53 of the API added is_smartnic field to the port object. """ return ((api.request.version.minor >= versions.MINOR_53_PORT_SMARTNIC) and objects.Port.supports_is_smartnic()) def allow_expose_events(): """Check if accessing events endpoint is allowed. Version 1.54 of the API added the events endpoint. """ return api.request.version.minor >= versions.MINOR_54_EVENTS def allow_deploy_templates(): """Check if accessing deploy template endpoints is allowed. Version 1.55 of the API exposed deploy template endpoints. """ return api.request.version.minor >= versions.MINOR_55_DEPLOY_TEMPLATES def check_policy(policy_name): """Check if the specified policy is authorised for this request. :policy_name: Name of the policy to check. :raises: HTTPForbidden if the policy forbids access. """ cdict = api.request.context.to_policy_values() policy.authorize(policy_name, cdict, cdict) def check_owner_policy(object_type, policy_name, owner, lessee=None): """Check if the policy authorizes this request on an object. :param: object_type: type of object being checked :param: policy_name: Name of the policy to check. :param: owner: the owner :param: lessee: the lessee :raises: HTTPForbidden if the policy forbids access. 
""" cdict = api.request.context.to_policy_values() target_dict = dict(cdict) target_dict[object_type + '.owner'] = owner if lessee: target_dict[object_type + '.lessee'] = lessee policy.authorize(policy_name, target_dict, cdict) def check_node_policy_and_retrieve(policy_name, node_ident, with_suffix=False): """Check if the specified policy authorizes this request on a node. :param: policy_name: Name of the policy to check. :param: node_ident: the UUID or logical name of a node. :param: with_suffix: whether the RPC node should include the suffix :raises: HTTPForbidden if the policy forbids access. :raises: NodeNotFound if the node is not found. :return: RPC node identified by node_ident """ try: if with_suffix: rpc_node = get_rpc_node_with_suffix(node_ident) else: rpc_node = get_rpc_node(node_ident) except exception.NodeNotFound: # don't expose non-existence of node unless requester # has generic access to policy cdict = api.request.context.to_policy_values() policy.authorize(policy_name, cdict, cdict) raise check_owner_policy('node', policy_name, rpc_node['owner'], rpc_node['lessee']) return rpc_node def check_allocation_policy_and_retrieve(policy_name, allocation_ident): """Check if the specified policy authorizes request on allocation. :param: policy_name: Name of the policy to check. :param: allocation_ident: the UUID or logical name of a node. :raises: HTTPForbidden if the policy forbids access. :raises: AllocationNotFound if the node is not found. 
:return: RPC node identified by node_ident """ try: rpc_allocation = get_rpc_allocation_with_suffix( allocation_ident) except exception.AllocationNotFound: # don't expose non-existence unless requester # has generic access to policy cdict = api.request.context.to_policy_values() policy.authorize(policy_name, cdict, cdict) raise check_owner_policy('allocation', policy_name, rpc_allocation['owner']) return rpc_allocation def check_multiple_node_policies_and_retrieve(policy_names, node_ident, with_suffix=False): """Check if the specified policies authorize this request on a node. :param: policy_names: List of policy names to check. :param: node_ident: the UUID or logical name of a node. :param: with_suffix: whether the RPC node should include the suffix :raises: HTTPForbidden if the policy forbids access. :raises: NodeNotFound if the node is not found. :return: RPC node identified by node_ident """ rpc_node = None for policy_name in policy_names: if rpc_node is None: rpc_node = check_node_policy_and_retrieve(policy_names[0], node_ident, with_suffix) else: check_owner_policy('node', policy_name, rpc_node['owner'], rpc_node['lessee']) return rpc_node def check_list_policy(object_type, owner=None): """Check if the list policy authorizes this request on an object. :param: object_type: type of object being checked :param: owner: owner filter for list query, if any :raises: HTTPForbidden if the policy forbids access. :return: owner that should be used for list query, if needed """ cdict = api.request.context.to_policy_values() try: policy.authorize('baremetal:%s:list_all' % object_type, cdict, cdict) except exception.HTTPForbidden: project_owner = cdict.get('project_id') if (not project_owner or (owner and owner != project_owner)): raise policy.authorize('baremetal:%s:list' % object_type, cdict, cdict) return project_owner return owner def check_port_policy_and_retrieve(policy_name, port_uuid): """Check if the specified policy authorizes this request on a port. 
:param: policy_name: Name of the policy to check. :param: port_uuid: the UUID of a port. :raises: HTTPForbidden if the policy forbids access. :raises: NodeNotFound if the node is not found. :return: RPC port identified by port_uuid and associated node """ context = api.request.context cdict = context.to_policy_values() try: rpc_port = objects.Port.get_by_uuid(context, port_uuid) except exception.PortNotFound: # don't expose non-existence of port unless requester # has generic access to policy policy.authorize(policy_name, cdict, cdict) raise rpc_node = objects.Node.get_by_id(context, rpc_port.node_id) target_dict = dict(cdict) target_dict['node.owner'] = rpc_node['owner'] target_dict['node.lessee'] = rpc_node['lessee'] policy.authorize(policy_name, target_dict, cdict) return rpc_port, rpc_node def check_port_list_policy(): """Check if the specified policy authorizes this request on a port. :raises: HTTPForbidden if the policy forbids access. :return: owner that should be used for list query, if needed """ cdict = api.request.context.to_policy_values() try: policy.authorize('baremetal:port:list_all', cdict, cdict) except exception.HTTPForbidden: owner = cdict.get('project_id') if not owner: raise policy.authorize('baremetal:port:list', cdict, cdict) return owner def allow_build_configdrive(): """Check if building configdrive is allowed. Version 1.56 of the API added support for building configdrive. """ return api.request.version.minor >= versions.MINOR_56_BUILD_CONFIGDRIVE def allow_configdrive_vendor_data(): """Check if configdrive can contain a vendor_data key. Version 1.59 of the API added support for configdrive vendor_data. """ return (api.request.version.minor >= versions.MINOR_59_CONFIGDRIVE_VENDOR_DATA) def allow_allocation_update(): """Check if updating an existing allocation is allowed or not. Version 1.57 of the API added support for updating an allocation. 
""" return api.request.version.minor >= versions.MINOR_57_ALLOCATION_UPDATE def allow_allocation_backfill(): """Check if backfilling allocations is allowed. Version 1.58 of the API added support for backfilling allocations. """ return api.request.version.minor >= versions.MINOR_58_ALLOCATION_BACKFILL def allow_allocation_owner(): """Check if allocation owner field is allowed. Version 1.60 of the API added the owner field to the allocation object. """ return api.request.version.minor >= versions.MINOR_60_ALLOCATION_OWNER def allow_agent_token(): """Check if agent token is available.""" return api.request.version.minor >= versions.MINOR_62_AGENT_TOKEN def allow_local_link_connection_network_type(): """Check if network_type is allowed in ports link_local_connection""" return (api.request.version.minor >= versions.MINOR_64_LOCAL_LINK_CONNECTION_NETWORK_TYPE) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/controllers/v1/versions.py0000644000175000017500000001663500000000000023541 0ustar00coreycorey00000000000000# Copyright (c) 2015 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from ironic.common import release_mappings CONF = cfg.CONF # This is the version 1 API BASE_VERSION = 1 # Here goes a short log of changes in every version. 
# Refer to doc/source/contributor/webapi-version-history.rst for a detailed # explanation of what each version contains. # # v1.0: corresponds to Juno API, not supported since Kilo # v1.1: API at the point in time when versioning support was added, # covers the following commits from Kilo cycle: # 827db7fe: Add Node.maintenance_reason # 68eed82b: Add API endpoint to set/unset the node maintenance mode # bc973889: Add sync and async support for passthru methods # e03f443b: Vendor endpoints to support different HTTP methods # e69e5309: Make vendor methods discoverable via the Ironic API # edf532db: Add logic to store the config drive passed by Nova # v1.2: Renamed NOSTATE ("None") to AVAILABLE ("available") # v1.3: Add node.driver_internal_info # v1.4: Add MANAGEABLE state # v1.5: Add logical node names # v1.6: Add INSPECT* states # v1.7: Add node.clean_step # v1.8: Add ability to return a subset of resource fields # v1.9: Add ability to filter nodes by provision state # v1.10: Logical node names support RFC 3986 unreserved characters # v1.11: Nodes appear in ENROLL state by default # v1.12: Add support for RAID # v1.13: Add 'abort' verb to CLEANWAIT # v1.14: Make the following endpoints discoverable via API: # 1. '/v1/nodes//states' # 2. '/v1/drivers//properties' # v1.15: Add ability to do manual cleaning of nodes # v1.16: Add ability to filter nodes by driver. # v1.17: Add 'adopt' verb for ADOPTING active nodes. # v1.18: Add port.internal_info. # v1.19: Add port.local_link_connection and port.pxe_enabled. # v1.20: Add node.network_interface # v1.21: Add node.resource_class # v1.22: Ramdisk lookup and heartbeat endpoints. # v1.23: Add portgroup support. # v1.24: Add subcontrollers: node.portgroup, portgroup.ports. # Add port.portgroup_uuid field. # v1.25: Add possibility to unset chassis_uuid from node. # v1.26: Add portgroup.mode and portgroup.properties. # v1.27: Add soft reboot, soft power off and timeout. 
# v1.28: Add vifs subcontroller to node
# v1.29: Add inject nmi.
# v1.30: Add dynamic driver interactions.
# v1.31: Add dynamic interfaces fields to node.
# v1.32: Add volume support.
# v1.33: Add node storage interface
# v1.34: Add physical network field to port.
# v1.35: Add ability to provide configdrive when rebuilding node.
# v1.36: Add Ironic Python Agent version support.
# v1.37: Add node traits.
# v1.38: Add rescue and unrescue provision states
# v1.39: Add inspect wait provision state.
# v1.40: Add bios.properties.
#        Add bios_interface to the node object.
# v1.41: Add inspection abort support.
# v1.42: Expose fault field to node.
# v1.43: Add detail=True flag to all API endpoints
# v1.44: Add node deploy_step field
# v1.45: reset_interfaces parameter to node's PATCH
# v1.46: Add conductor_group to the node object.
# v1.47: Add automated_clean to the node object.
# v1.48: Add protected to the node object.
# v1.49: Add conductor to the node object and /v1/conductors.
# v1.50: Add owner to the node object.
# v1.51: Add description to the node object.
# v1.52: Add allocation API.
# v1.53: Add support for Smart NIC port
# v1.54: Add events support.
# v1.55: Add deploy templates API.
# v1.56: Add support for building configdrives.
# v1.57: Add support for updating an existing allocation.
# v1.58: Add support for backfilling allocations.
# v1.59: Add support for vendor data in configdrives.
# v1.60: Add owner to the allocation object.
# v1.61: Add retired and retired_reason to the node object.
# v1.62: Add agent_token support for agent communication.
# v1.63: Add support for indicators
# v1.64: Add network_type to port.local_link_connection
# v1.65: Add lessee to the node object.
# One symbolic constant per API minor version; each maps a feature to the
# minor version that introduced it (see the change log above and
# doc/source/contributor/webapi-version-history.rst).
MINOR_0_JUNO = 0
MINOR_1_INITIAL_VERSION = 1
MINOR_2_AVAILABLE_STATE = 2
MINOR_3_DRIVER_INTERNAL_INFO = 3
MINOR_4_MANAGEABLE_STATE = 4
MINOR_5_NODE_NAME = 5
MINOR_6_INSPECT_STATE = 6
MINOR_7_NODE_CLEAN = 7
MINOR_8_FETCHING_SUBSET_OF_FIELDS = 8
MINOR_9_PROVISION_STATE_FILTER = 9
MINOR_10_UNRESTRICTED_NODE_NAME = 10
MINOR_11_ENROLL_STATE = 11
MINOR_12_RAID_CONFIG = 12
MINOR_13_ABORT_VERB = 13
MINOR_14_LINKS_NODESTATES_DRIVERPROPERTIES = 14
MINOR_15_MANUAL_CLEAN = 15
MINOR_16_DRIVER_FILTER = 16
MINOR_17_ADOPT_VERB = 17
MINOR_18_PORT_INTERNAL_INFO = 18
MINOR_19_PORT_ADVANCED_NET_FIELDS = 19
MINOR_20_NETWORK_INTERFACE = 20
MINOR_21_RESOURCE_CLASS = 21
MINOR_22_LOOKUP_HEARTBEAT = 22
MINOR_23_PORTGROUPS = 23
MINOR_24_PORTGROUPS_SUBCONTROLLERS = 24
MINOR_25_UNSET_CHASSIS_UUID = 25
MINOR_26_PORTGROUP_MODE_PROPERTIES = 26
MINOR_27_SOFT_POWER_OFF = 27
MINOR_28_VIFS_SUBCONTROLLER = 28
MINOR_29_INJECT_NMI = 29
MINOR_30_DYNAMIC_DRIVERS = 30
MINOR_31_DYNAMIC_INTERFACES = 31
MINOR_32_VOLUME = 32
MINOR_33_STORAGE_INTERFACE = 33
MINOR_34_PORT_PHYSICAL_NETWORK = 34
MINOR_35_REBUILD_CONFIG_DRIVE = 35
MINOR_36_AGENT_VERSION_HEARTBEAT = 36
MINOR_37_NODE_TRAITS = 37
MINOR_38_RESCUE_INTERFACE = 38
MINOR_39_INSPECT_WAIT = 39
MINOR_40_BIOS_INTERFACE = 40
MINOR_41_INSPECTION_ABORT = 41
MINOR_42_FAULT = 42
MINOR_43_ENABLE_DETAIL_QUERY = 43
MINOR_44_NODE_DEPLOY_STEP = 44
MINOR_45_RESET_INTERFACES = 45
MINOR_46_NODE_CONDUCTOR_GROUP = 46
MINOR_47_NODE_AUTOMATED_CLEAN = 47
MINOR_48_NODE_PROTECTED = 48
MINOR_49_CONDUCTORS = 49
MINOR_50_NODE_OWNER = 50
MINOR_51_NODE_DESCRIPTION = 51
MINOR_52_ALLOCATION = 52
MINOR_53_PORT_SMARTNIC = 53
MINOR_54_EVENTS = 54
MINOR_55_DEPLOY_TEMPLATES = 55
MINOR_56_BUILD_CONFIGDRIVE = 56
MINOR_57_ALLOCATION_UPDATE = 57
MINOR_58_ALLOCATION_BACKFILL = 58
MINOR_59_CONFIGDRIVE_VENDOR_DATA = 59
MINOR_60_ALLOCATION_OWNER = 60
MINOR_61_NODE_RETIRED = 61
MINOR_62_AGENT_TOKEN = 62
MINOR_63_INDICATORS = 63
MINOR_64_LOCAL_LINK_CONNECTION_NETWORK_TYPE = 64
MINOR_65_NODE_LESSEE = 65

# When adding another version, update:
# - MINOR_MAX_VERSION
# - doc/source/contributor/webapi-version-history.rst with a detailed
#   explanation of what changed in the new version
# - common/release_mappings.py, RELEASE_MAPPING['master']['api']

MINOR_MAX_VERSION = MINOR_65_NODE_LESSEE

# String representations of the minor and maximum versions
_MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION)
_MAX_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_MAX_VERSION)


def min_version_string():
    """Returns the minimum supported API version (as a string)"""
    return _MIN_VERSION_STRING


def max_version_string():
    """Returns the maximum supported API version (as a string).

    If the service is pinned, the maximum API version is the pinned
    version. Otherwise, it is the maximum supported API version.
    """
    # CONF.pin_release_version may be unset; .get() then returns None and
    # the unpinned maximum is used.
    release_ver = release_mappings.RELEASE_MAPPING.get(
        CONF.pin_release_version)
    if release_ver:
        return release_ver['api']
    else:
        return _MAX_VERSION_STRING

# --- tar archive member boundary (residue removed):
#     ironic-14.0.1.dev163/ironic/api/controllers/v1/volume.py ---

# Copyright (c) 2017 Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import client as http_client

import pecan
from pecan import rest
import wsme

from ironic import api
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api.controllers.v1 import volume_connector
from ironic.api.controllers.v1 import volume_target
from ironic.api import expose
from ironic.common import exception
from ironic.common import policy


class Volume(base.APIBase):
    """API representation of a volume root.

    This class exists as a root class for the volume connectors and
    volume targets controllers.
    """

    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated volume links"""

    connectors = wsme.wsattr([link.Link], readonly=True)
    """Links to the volume connectors resource"""

    targets = wsme.wsattr([link.Link], readonly=True)
    """Links to the volume targets resource"""

    @staticmethod
    def convert(node_ident=None):
        """Build a Volume with self/bookmark links for the volume root.

        :param node_ident: optional node UUID or name; when given, the links
            are rooted under /nodes/<node_ident>/volume/ instead of /volume.
        :returns: a fully-populated Volume instance.
        """
        url = api.request.public_url
        volume = Volume()
        if node_ident:
            resource = 'nodes'
            args = '%s/volume/' % node_ident
        else:
            resource = 'volume'
            args = ''
        volume.links = [
            link.Link.make_link('self', url, resource, args),
            link.Link.make_link('bookmark', url, resource, args,
                                bookmark=True)]
        volume.connectors = [
            link.Link.make_link('self', url, resource, args + 'connectors'),
            link.Link.make_link('bookmark', url, resource,
                                args + 'connectors', bookmark=True)]
        volume.targets = [
            link.Link.make_link('self', url, resource, args + 'targets'),
            link.Link.make_link('bookmark', url, resource, args + 'targets',
                                bookmark=True)]
        return volume


class VolumeController(rest.RestController):
    """REST controller for volume root"""

    # Maps the next URL path segment to the subcontroller that handles it.
    _subcontroller_map = {
        'connectors': volume_connector.VolumeConnectorsController,
        'targets': volume_target.VolumeTargetsController
    }

    def __init__(self, node_ident=None):
        super(VolumeController, self).__init__()
        self.parent_node_ident = node_ident

    @expose.expose(Volume)
    def get(self):
        """Return the volume root resource.

        :raises: NotFound if the requested API version does not support
                 volume resources.
        :raises: HTTPForbidden if policy forbids access.
        :returns: a Volume with links to the connectors/targets resources.
        """
        if not api_utils.allow_volume():
            raise exception.NotFound()
        cdict = api.request.context.to_policy_values()
        policy.authorize('baremetal:volume:get', cdict, cdict)
        return Volume.convert(self.parent_node_ident)

    @pecan.expose()
    def _lookup(self, subres, *remainder):
        """Route /volume/<subres>/... to the matching subcontroller.

        Returns None for an unknown subresource name; NOTE(review):
        presumably pecan then produces a 404 — confirm against pecan's
        routing behavior.
        """
        if not api_utils.allow_volume():
            pecan.abort(http_client.NOT_FOUND)
        subcontroller = self._subcontroller_map.get(subres)
        if subcontroller:
            return subcontroller(node_ident=self.parent_node_ident), remainder

# --- tar archive member boundary (residue removed):
#     ironic-14.0.1.dev163/ironic/api/controllers/v1/volume_connector.py ---

# Copyright (c) 2017 Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from http import client as http_client

from ironic_lib import metrics_utils
from oslo_utils import uuidutils
from pecan import rest
import wsme
from wsme import types as wtypes

from ironic import api
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import notification_utils as notify
from ironic.api.controllers.v1 import types
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api import expose
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import policy
from ironic import objects

METRICS = metrics_utils.get_metrics_logger(__name__)

# Fields returned by default when neither ?fields nor ?detail is requested.
_DEFAULT_RETURN_FIELDS = ('uuid', 'node_uuid', 'type', 'connector_id')


class VolumeConnector(base.APIBase):
    """API representation of a volume connector.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of a
    volume connector.
    """

    _node_uuid = None

    def _get_node_uuid(self):
        return self._node_uuid

    def _set_node_identifiers(self, value):
        """Set both UUID and ID of a node for VolumeConnector object

        :param value: UUID, ID of a node, or wtypes.Unset
        """
        if value == wtypes.Unset:
            self._node_uuid = wtypes.Unset
        elif value and self._node_uuid != value:
            try:
                node = objects.Node.get(api.request.context, value)
                self._node_uuid = node.uuid
                # NOTE(smoriya): Create the node_id attribute on-the-fly
                #                to satisfy the api -> rpc object conversion.
                self.node_id = node.id
            except exception.NodeNotFound as e:
                # Change error code because 404 (NotFound) is inappropriate
                # response for a POST request to create a VolumeConnector
                e.code = http_client.BAD_REQUEST  # BadRequest
                raise

    uuid = types.uuid
    """Unique UUID for this volume connector"""

    type = wsme.wsattr(str, mandatory=True)
    """The type of volume connector"""

    connector_id = wsme.wsattr(str, mandatory=True)
    """The connector_id for this volume connector"""

    extra = {str: types.jsontype}
    """The metadata for this volume connector"""

    node_uuid = wsme.wsproperty(types.uuid, _get_node_uuid,
                                _set_node_identifiers, mandatory=True)
    """The UUID of the node this volume connector belongs to"""

    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated volume connector links"""

    def __init__(self, **kwargs):
        self.fields = []
        fields = list(objects.VolumeConnector.fields)
        for field in fields:
            # Skip fields we do not expose.
            if not hasattr(self, field):
                continue
            self.fields.append(field)
            setattr(self, field, kwargs.get(field, wtypes.Unset))

        # NOTE(smoriya): node_id is an attribute created on-the-fly
        # by _set_node_uuid(), it needs to be present in the fields so
        # that as_dict() will contain node_id field when converting it
        # before saving it in the database.
        self.fields.append('node_id')
        # NOTE(smoriya): node_uuid is not part of objects.VolumeConnector.-
        #                fields because it's an API-only attribute
        self.fields.append('node_uuid')
        # NOTE(jtaryma): Additionally to node_uuid, node_id is handled as a
        # secondary identifier in case RPC volume connector object dictionary
        # was passed to the constructor.
        self.node_uuid = kwargs.get('node_uuid') or kwargs.get('node_id',
                                                               wtypes.Unset)

    @staticmethod
    def _convert_with_links(connector, url):
        # Attach self/bookmark links pointing at /volume/connectors/<uuid>.
        connector.links = [link.Link.make_link('self', url,
                                               'volume/connectors',
                                               connector.uuid),
                           link.Link.make_link('bookmark', url,
                                               'volume/connectors',
                                               connector.uuid,
                                               bookmark=True)
                           ]
        return connector

    @classmethod
    def convert_with_links(cls, rpc_connector, fields=None, sanitize=True):
        """Convert an RPC connector object to its API representation.

        :param rpc_connector: objects.VolumeConnector to convert.
        :param fields: optional list of fields to preserve after sanitizing.
        :param sanitize: when False, return before sanitizing (caller is
            expected to sanitize later, e.g. after building pagination links).
        """
        connector = VolumeConnector(**rpc_connector.as_dict())

        if fields is not None:
            api_utils.check_for_invalid_fields(fields, connector.as_dict())

        connector = cls._convert_with_links(connector,
                                            api.request.public_url)
        if not sanitize:
            return connector
        connector.sanitize(fields)
        return connector

    def sanitize(self, fields=None):
        """Removes sensitive and unrequested data.

        Will only keep the fields specified in the ``fields`` parameter.

        :param fields:
            list of fields to preserve, or ``None`` to preserve them all
        :type fields: list of str
        """
        if fields is not None:
            self.unset_fields_except(fields)

        # never expose the node_id attribute
        self.node_id = wtypes.Unset

    @classmethod
    def sample(cls, expand=True):
        """Return a sample connector for API documentation purposes.

        NOTE(review): _convert_with_links() takes only (connector, url);
        passing fields= here would raise TypeError if this method were
        actually invoked — confirm and fix upstream.
        """
        time = datetime.datetime(2000, 1, 1, 12, 0, 0)
        sample = cls(uuid='86cfd480-0842-4abb-8386-e46149beb82f',
                     type='iqn',
                     connector_id='iqn.2010-10.org.openstack:51332b70524',
                     extra={'foo': 'bar'},
                     created_at=time,
                     updated_at=time)
        sample._node_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae'
        fields = None if expand else _DEFAULT_RETURN_FIELDS
        return cls._convert_with_links(sample, 'http://localhost:6385',
                                       fields=fields)


class VolumeConnectorPatchType(types.JsonPatchType):

    _api_base = VolumeConnector


class VolumeConnectorCollection(collection.Collection):
    """API representation of a collection of volume connectors."""

    connectors = [VolumeConnector]
    """A list containing volume connector objects"""

    def __init__(self, **kwargs):
        self._type = 'connectors'

    @staticmethod
    def convert_with_links(rpc_connectors, limit, url=None, fields=None,
                           detail=None, **kwargs):
        """Build a collection with pagination links from RPC connectors.

        Sanitizing is deferred until after the 'next' link is computed so
        that pagination can use the unsanitized field values.
        """
        collection = VolumeConnectorCollection()
        collection.connectors = [
            VolumeConnector.convert_with_links(p, fields=fields,
                                               sanitize=False)
            for p in rpc_connectors]
        if detail:
            kwargs['detail'] = detail
        collection.next = collection.get_next(limit, url=url, fields=fields,
                                              **kwargs)
        for connector in collection.connectors:
            connector.sanitize(fields)
        return collection

    @classmethod
    def sample(cls):
        sample = cls()
        sample.connectors = [VolumeConnector.sample(expand=False)]
        return sample


class VolumeConnectorsController(rest.RestController):
    """REST controller for VolumeConnectors."""

    # Keys that cannot be used for sorting (unsortable JSON column).
    invalid_sort_key_list = ['extra']

    def __init__(self, node_ident=None):
        super(VolumeConnectorsController, self).__init__()
        self.parent_node_ident = node_ident

    def _get_volume_connectors_collection(self, node_ident, marker, limit,
                                          sort_key, sort_dir,
                                          resource_url=None,
                                          fields=None, detail=None):
        """Shared list implementation for /volume/connectors and
        /nodes/<node>/volume/connectors.

        :raises: InvalidParameterValue if sort_key is not sortable.
        """
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.VolumeConnector.get_by_uuid(
                api.request.context, marker)

        if sort_key in self.invalid_sort_key_list:
            raise exception.InvalidParameterValue(
                _("The sort_key value %(key)s is an invalid field for "
                  "sorting") % {'key': sort_key})

        # A parent node (nodes/<ident>/volume/connectors) overrides the
        # ?node= query parameter.
        node_ident = self.parent_node_ident or node_ident

        if node_ident:
            # FIXME(comstud): Since all we need is the node ID, we can
            #                 make this more efficient by only querying
            #                 for that column. This will get cleaned up
            #                 as we move to the object interface.
            node = api_utils.get_rpc_node(node_ident)
            connectors = objects.VolumeConnector.list_by_node_id(
                api.request.context, node.id, limit, marker_obj,
                sort_key=sort_key, sort_dir=sort_dir)
        else:
            connectors = objects.VolumeConnector.list(api.request.context,
                                                      limit, marker_obj,
                                                      sort_key=sort_key,
                                                      sort_dir=sort_dir)
        return VolumeConnectorCollection.convert_with_links(connectors, limit,
                                                            url=resource_url,
                                                            fields=fields,
                                                            sort_key=sort_key,
                                                            sort_dir=sort_dir,
                                                            detail=detail)

    @METRICS.timer('VolumeConnectorsController.get_all')
    @expose.expose(VolumeConnectorCollection, types.uuid_or_name,
                   types.uuid, int, str, str, types.listtype,
                   types.boolean)
    def get_all(self, node=None, marker=None, limit=None, sort_key='id',
                sort_dir='asc', fields=None, detail=None):
        """Retrieve a list of volume connectors.

        :param node: UUID or name of a node, to get only volume connectors
                     for that node.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result. This value cannot be larger than the value of
                      max_limit in the [api] section of the ironic
                      configuration, or only max_limit resources will be
                      returned.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: "asc".
        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned.
        :param detail: Optional, whether to retrieve with detail.
        :returns: a list of volume connectors, or an empty list if no volume
                  connector is found.
        :raises: InvalidParameterValue if sort_key does not exist
        :raises: InvalidParameterValue if sort key is invalid for sorting.
        :raises: InvalidParameterValue if both fields and detail are
                 specified.
        """
        cdict = api.request.context.to_policy_values()
        policy.authorize('baremetal:volume:get', cdict, cdict)

        if fields is None and not detail:
            fields = _DEFAULT_RETURN_FIELDS

        if fields and detail:
            raise exception.InvalidParameterValue(
                _("Can't fetch a subset of fields with 'detail' set"))

        resource_url = 'volume/connectors'
        return self._get_volume_connectors_collection(
            node, marker, limit, sort_key, sort_dir,
            resource_url=resource_url, fields=fields, detail=detail)

    @METRICS.timer('VolumeConnectorsController.get_one')
    @expose.expose(VolumeConnector, types.uuid, types.listtype)
    def get_one(self, connector_uuid, fields=None):
        """Retrieve information about the given volume connector.

        :param connector_uuid: UUID of a volume connector.
        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned.
        :returns: API-serializable volume connector object.
        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: VolumeConnectorNotFound if no volume connector exists with
                 the specified UUID.
        """
        cdict = api.request.context.to_policy_values()
        policy.authorize('baremetal:volume:get', cdict, cdict)

        if self.parent_node_ident:
            raise exception.OperationNotPermitted()

        rpc_connector = objects.VolumeConnector.get_by_uuid(
            api.request.context, connector_uuid)
        return VolumeConnector.convert_with_links(rpc_connector,
                                                  fields=fields)

    @METRICS.timer('VolumeConnectorsController.post')
    @expose.expose(VolumeConnector, body=VolumeConnector,
                   status_code=http_client.CREATED)
    def post(self, connector):
        """Create a new volume connector.

        :param connector: a volume connector within the request body.
        :returns: API-serializable volume connector object.
        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: VolumeConnectorTypeAndIdAlreadyExists if a volume connector
                 already exists with the same type and connector_id
        :raises: VolumeConnectorAlreadyExists if a volume connector with the
                 same UUID already exists
        """
        context = api.request.context
        cdict = context.to_policy_values()
        policy.authorize('baremetal:volume:create', cdict, cdict)

        if self.parent_node_ident:
            raise exception.OperationNotPermitted()

        connector_dict = connector.as_dict()
        # NOTE(hshiina): UUID is mandatory for notification payload
        if not connector_dict.get('uuid'):
            connector_dict['uuid'] = uuidutils.generate_uuid()

        new_connector = objects.VolumeConnector(context, **connector_dict)

        notify.emit_start_notification(context, new_connector, 'create',
                                       node_uuid=connector.node_uuid)
        with notify.handle_error_notification(context, new_connector,
                                              'create',
                                              node_uuid=connector.node_uuid):
            new_connector.create()
        notify.emit_end_notification(context, new_connector, 'create',
                                     node_uuid=connector.node_uuid)
        # Set the HTTP Location Header
        api.response.location = link.build_url('volume/connectors',
                                               new_connector.uuid)
        return VolumeConnector.convert_with_links(new_connector)

    @METRICS.timer('VolumeConnectorsController.patch')
    @wsme.validate(types.uuid, [VolumeConnectorPatchType])
    @expose.expose(VolumeConnector, types.uuid,
                   body=[VolumeConnectorPatchType])
    def patch(self, connector_uuid, patch):
        """Update an existing volume connector.

        :param connector_uuid: UUID of a volume connector.
        :param patch: a json PATCH document to apply to this volume
                      connector.
        :returns: API-serializable volume connector object.
        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: PatchError if a given patch can not be applied.
        :raises: VolumeConnectorNotFound if no volume connector exists with
                 the specified UUID.
        :raises: InvalidParameterValue if the volume connector's UUID is
                 being changed
        :raises: NodeLocked if node is locked by another conductor
        :raises: NodeNotFound if the node associated with the connector does
                 not exist
        :raises: VolumeConnectorTypeAndIdAlreadyExists if another connector
                 already exists with the same values for type and
                 connector_id fields
        :raises: InvalidUUID if invalid node UUID is passed in the patch.
        :raises: InvalidStateRequested If a node associated with the
                 volume connector is not powered off.
        """
        context = api.request.context
        cdict = context.to_policy_values()
        policy.authorize('baremetal:volume:update', cdict, cdict)

        if self.parent_node_ident:
            raise exception.OperationNotPermitted()

        # Validate any node_uuid replacement values before fetching anything.
        values = api_utils.get_patch_values(patch, '/node_uuid')
        for value in values:
            if not uuidutils.is_uuid_like(value):
                message = _("Expected a UUID for node_uuid, but received "
                            "%(uuid)s.") % {'uuid': str(value)}
                raise exception.InvalidUUID(message=message)

        rpc_connector = objects.VolumeConnector.get_by_uuid(context,
                                                            connector_uuid)
        connector_dict = rpc_connector.as_dict()
        # NOTE(smoriya):
        # 1) Remove node_id because it's an internal value and
        #    not present in the API object
        # 2) Add node_uuid
        connector_dict['node_uuid'] = connector_dict.pop('node_id', None)
        connector = VolumeConnector(
            **api_utils.apply_jsonpatch(connector_dict, patch))

        # Update only the fields that have changed.
        for field in objects.VolumeConnector.fields:
            try:
                patch_val = getattr(connector, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            if rpc_connector[field] != patch_val:
                rpc_connector[field] = patch_val

        rpc_node = objects.Node.get_by_id(context, rpc_connector.node_id)
        notify.emit_start_notification(context, rpc_connector, 'update',
                                       node_uuid=rpc_node.uuid)
        with notify.handle_error_notification(context, rpc_connector,
                                              'update',
                                              node_uuid=rpc_node.uuid):
            topic = api.request.rpcapi.get_topic_for(rpc_node)
            new_connector = api.request.rpcapi.update_volume_connector(
                context, rpc_connector, topic)
        api_connector = VolumeConnector.convert_with_links(new_connector)
        notify.emit_end_notification(context, new_connector, 'update',
                                     node_uuid=rpc_node.uuid)
        return api_connector

    @METRICS.timer('VolumeConnectorsController.delete')
    @expose.expose(None, types.uuid, status_code=http_client.NO_CONTENT)
    def delete(self, connector_uuid):
        """Delete a volume connector.

        :param connector_uuid: UUID of a volume connector.
        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: NodeLocked if node is locked by another conductor
        :raises: NodeNotFound if the node associated with the connector does
                 not exist
        :raises: VolumeConnectorNotFound if the volume connector cannot be
                 found
        :raises: InvalidStateRequested If a node associated with the
                 volume connector is not powered off.
        """
        context = api.request.context
        cdict = context.to_policy_values()
        policy.authorize('baremetal:volume:delete', cdict, cdict)

        if self.parent_node_ident:
            raise exception.OperationNotPermitted()

        rpc_connector = objects.VolumeConnector.get_by_uuid(context,
                                                            connector_uuid)
        rpc_node = objects.Node.get_by_id(context, rpc_connector.node_id)
        notify.emit_start_notification(context, rpc_connector, 'delete',
                                       node_uuid=rpc_node.uuid)
        with notify.handle_error_notification(context, rpc_connector,
                                              'delete',
                                              node_uuid=rpc_node.uuid):
            topic = api.request.rpcapi.get_topic_for(rpc_node)
            api.request.rpcapi.destroy_volume_connector(context,
                                                        rpc_connector, topic)
        notify.emit_end_notification(context, rpc_connector, 'delete',
                                     node_uuid=rpc_node.uuid)

# --- tar archive member boundary (residue removed):
#     ironic-14.0.1.dev163/ironic/api/controllers/v1/volume_target.py ---

# Copyright (c) 2017 Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from http import client as http_client

from ironic_lib import metrics_utils
from oslo_utils import uuidutils
from pecan import rest
import wsme
from wsme import types as wtypes

from ironic import api
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import collection
from ironic.api.controllers.v1 import notification_utils as notify
from ironic.api.controllers.v1 import types
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api import expose
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import policy
from ironic import objects

METRICS = metrics_utils.get_metrics_logger(__name__)

# Fields returned by default when neither ?fields= nor ?detail= is given.
_DEFAULT_RETURN_FIELDS = ('uuid', 'node_uuid', 'volume_type',
                          'boot_index', 'volume_id')


class VolumeTarget(base.APIBase):
    """API representation of a volume target.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of a
    volume target.
    """

    _node_uuid = None

    def _get_node_uuid(self):
        return self._node_uuid

    def _set_node_identifiers(self, value):
        """Set both UUID and ID of a node for VolumeTarget object

        :param value: UUID, ID of a node, or wtypes.Unset
        """
        if value == wtypes.Unset:
            self._node_uuid = wtypes.Unset
        elif value and self._node_uuid != value:
            try:
                node = objects.Node.get(api.request.context, value)
                self._node_uuid = node.uuid
                # NOTE(smoriya): Create the node_id attribute on-the-fly
                #                to satisfy the api -> rpc object conversion.
                self.node_id = node.id
            except exception.NodeNotFound as e:
                # Change error code because 404 (NotFound) is inappropriate
                # response for a POST request to create a VolumeTarget
                e.code = http_client.BAD_REQUEST  # BadRequest
                raise

    uuid = types.uuid
    """Unique UUID for this volume target"""

    volume_type = wsme.wsattr(str, mandatory=True)
    """The volume_type of volume target"""

    properties = {str: types.jsontype}
    """The properties for this volume target"""

    boot_index = wsme.wsattr(int, mandatory=True)
    """The boot_index of volume target"""

    volume_id = wsme.wsattr(str, mandatory=True)
    """The volume_id for this volume target"""

    extra = {str: types.jsontype}
    """The metadata for this volume target"""

    node_uuid = wsme.wsproperty(types.uuid, _get_node_uuid,
                                _set_node_identifiers, mandatory=True)
    """The UUID of the node this volume target belongs to"""

    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated volume target links"""

    def __init__(self, **kwargs):
        self.fields = []
        fields = list(objects.VolumeTarget.fields)
        for field in fields:
            # Skip fields we do not expose.
            if not hasattr(self, field):
                continue
            self.fields.append(field)
            setattr(self, field, kwargs.get(field, wtypes.Unset))

        # NOTE(smoriya): node_id is an attribute created on-the-fly
        # by _set_node_identifiers(), it needs to be present in the fields
        # so that as_dict() will contain node_id field when converting it
        # before saving it in the database.
        self.fields.append('node_id')
        # NOTE(smoriya): node_uuid is not part of objects.VolumeTarget.-
        # fields because it's an API-only attribute
        self.fields.append('node_uuid')
        # NOTE(jtaryma): Additionally to node_uuid, node_id is handled as a
        # secondary identifier in case RPC volume target object dictionary
        # was passed to the constructor.
        self.node_uuid = kwargs.get('node_uuid') or kwargs.get(
            'node_id', wtypes.Unset)

    @staticmethod
    def _convert_with_links(target, url, fields=None):
        # NOTE(review): ``fields`` is accepted so that sample() may pass it;
        # it previously raised TypeError for sample(expand=False) because the
        # signature was (target, url). The links are built unconditionally —
        # field filtering happens in sanitize().
        target.links = [link.Link.make_link('self', url,
                                            'volume/targets',
                                            target.uuid),
                        link.Link.make_link('bookmark', url,
                                            'volume/targets',
                                            target.uuid,
                                            bookmark=True)
                        ]
        return target

    @classmethod
    def convert_with_links(cls, rpc_target, fields=None, sanitize=True):
        """Build an API object (with links) from an RPC volume target.

        :param rpc_target: objects.VolumeTarget to convert.
        :param fields: optional list of fields to keep; validated here.
        :param sanitize: when False, skip sanitize() so a collection can
            filter fields after pagination links are computed.
        """
        target = VolumeTarget(**rpc_target.as_dict())

        if fields is not None:
            api_utils.check_for_invalid_fields(fields, target.as_dict())

        target = cls._convert_with_links(target, api.request.public_url)

        if not sanitize:
            return target

        target.sanitize(fields)

        return target

    def sanitize(self, fields=None):
        """Removes sensitive and unrequested data.

        Will only keep the fields specified in the ``fields`` parameter.

        :param fields: list of fields to preserve, or ``None`` to preserve
                       them all
        :type fields: list of str
        """
        if fields is not None:
            self.unset_fields_except(fields)

        # never expose the node_id attribute
        self.node_id = wtypes.Unset

    @classmethod
    def sample(cls, expand=True):
        """Return a sample volume target for API documentation."""
        time = datetime.datetime(2000, 1, 1, 12, 0, 0)
        properties = {"auth_method": "CHAP",
                      "auth_username": "XXX",
                      "auth_password": "XXX",
                      "target_iqn": "iqn.2010-10.com.example:vol-X",
                      "target_portal": "192.168.0.123:3260",
                      "volume_id": "a2f3ff15-b3ea-4656-ab90-acbaa1a07607",
                      "target_lun": 0,
                      "access_mode": "rw"}
        sample = cls(uuid='667808d4-622f-4629-b629-07753a19e633',
                     volume_type='iscsi',
                     boot_index=0,
                     volume_id='a2f3ff15-b3ea-4656-ab90-acbaa1a07607',
                     properties=properties,
                     extra={'foo': 'bar'},
                     created_at=time,
                     updated_at=time)
        sample._node_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae'
        fields = None if expand else _DEFAULT_RETURN_FIELDS
        return cls._convert_with_links(sample, 'http://localhost:6385',
                                       fields=fields)


class VolumeTargetPatchType(types.JsonPatchType):

    _api_base = VolumeTarget


class VolumeTargetCollection(collection.Collection):
    """API representation of a collection of volume targets."""

    targets = [VolumeTarget]
    """A list containing volume target objects"""

    def __init__(self, **kwargs):
        self._type = 'targets'

    @staticmethod
    def convert_with_links(rpc_targets, limit, url=None, fields=None,
                           detail=None, **kwargs):
        # NOTE(review): local renamed from ``collection`` to avoid shadowing
        # the ``collection`` module imported at the top of the file.
        target_collection = VolumeTargetCollection()
        target_collection.targets = [
            VolumeTarget.convert_with_links(p, fields=fields, sanitize=False)
            for p in rpc_targets]
        if detail:
            kwargs['detail'] = detail
        target_collection.next = target_collection.get_next(
            limit, url=url, fields=fields, **kwargs)
        # Sanitize after get_next() so pagination sees the full field set.
        for target in target_collection.targets:
            target.sanitize(fields)
        return target_collection

    @classmethod
    def sample(cls):
        sample = cls()
        sample.targets = [VolumeTarget.sample(expand=False)]
        return sample


class VolumeTargetsController(rest.RestController):
    """REST controller for VolumeTargets."""

    invalid_sort_key_list = ['extra', 'properties']

    def __init__(self, node_ident=None):
        super(VolumeTargetsController, self).__init__()
        # When set, this controller is mounted under a node and is read-only.
        self.parent_node_ident = node_ident

    def _get_volume_targets_collection(self, node_ident, marker, limit,
                                       sort_key, sort_dir, resource_url=None,
                                       fields=None, detail=None):
        """Shared listing logic for get_all()."""
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.VolumeTarget.get_by_uuid(
                api.request.context, marker)

        if sort_key in self.invalid_sort_key_list:
            raise exception.InvalidParameterValue(
                _("The sort_key value %(key)s is an invalid field for "
                  "sorting") % {'key': sort_key})

        node_ident = self.parent_node_ident or node_ident

        if node_ident:
            # FIXME(comstud): Since all we need is the node ID, we can
            #                 make this more efficient by only querying
            #                 for that column. This will get cleaned up
            #                 as we move to the object interface.
            node = api_utils.get_rpc_node(node_ident)
            targets = objects.VolumeTarget.list_by_node_id(
                api.request.context, node.id, limit, marker_obj,
                sort_key=sort_key, sort_dir=sort_dir)
        else:
            targets = objects.VolumeTarget.list(api.request.context, limit,
                                                marker_obj, sort_key=sort_key,
                                                sort_dir=sort_dir)
        return VolumeTargetCollection.convert_with_links(targets, limit,
                                                         url=resource_url,
                                                         fields=fields,
                                                         sort_key=sort_key,
                                                         sort_dir=sort_dir,
                                                         detail=detail)

    @METRICS.timer('VolumeTargetsController.get_all')
    @expose.expose(VolumeTargetCollection, types.uuid_or_name, types.uuid,
                   int, str, str, types.listtype, types.boolean)
    def get_all(self, node=None, marker=None, limit=None, sort_key='id',
                sort_dir='asc', fields=None, detail=None):
        """Retrieve a list of volume targets.

        :param node: UUID or name of a node, to get only volume targets
                     for that node.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result. This value cannot be larger than the value of
                      max_limit in the [api] section of the ironic
                      configuration, or only max_limit resources will be
                      returned.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: "asc".
        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned.
        :param detail: Optional, whether to retrieve with detail.

        :returns: a list of volume targets, or an empty list if no volume
                  target is found.

        :raises: InvalidParameterValue if sort_key does not exist
        :raises: InvalidParameterValue if sort key is invalid for sorting.
        :raises: InvalidParameterValue if both fields and detail are
                 specified.
        """
        cdict = api.request.context.to_policy_values()
        policy.authorize('baremetal:volume:get', cdict, cdict)

        if fields is None and not detail:
            fields = _DEFAULT_RETURN_FIELDS

        if fields and detail:
            raise exception.InvalidParameterValue(
                _("Can't fetch a subset of fields with 'detail' set"))

        resource_url = 'volume/targets'
        return self._get_volume_targets_collection(
            node, marker, limit, sort_key, sort_dir,
            resource_url=resource_url, fields=fields, detail=detail)

    @METRICS.timer('VolumeTargetsController.get_one')
    @expose.expose(VolumeTarget, types.uuid, types.listtype)
    def get_one(self, target_uuid, fields=None):
        """Retrieve information about the given volume target.

        :param target_uuid: UUID of a volume target.
        :param fields: Optional, a list with a specified set of fields
                       of the resource to be returned.

        :returns: API-serializable volume target object.

        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: VolumeTargetNotFound if no volume target with this UUID
                 exists
        """
        cdict = api.request.context.to_policy_values()
        policy.authorize('baremetal:volume:get', cdict, cdict)

        if self.parent_node_ident:
            raise exception.OperationNotPermitted()

        rpc_target = objects.VolumeTarget.get_by_uuid(
            api.request.context, target_uuid)
        return VolumeTarget.convert_with_links(rpc_target, fields=fields)

    @METRICS.timer('VolumeTargetsController.post')
    @expose.expose(VolumeTarget, body=VolumeTarget,
                   status_code=http_client.CREATED)
    def post(self, target):
        """Create a new volume target.

        :param target: a volume target within the request body.

        :returns: API-serializable volume target object.

        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: VolumeTargetBootIndexAlreadyExists if a volume target already
                 exists with the same node ID and boot index
        :raises: VolumeTargetAlreadyExists if a volume target with the same
                 UUID exists
        """
        context = api.request.context
        cdict = context.to_policy_values()
        policy.authorize('baremetal:volume:create', cdict, cdict)

        if self.parent_node_ident:
            raise exception.OperationNotPermitted()

        target_dict = target.as_dict()
        # NOTE(hshiina): UUID is mandatory for notification payload
        if not target_dict.get('uuid'):
            target_dict['uuid'] = uuidutils.generate_uuid()

        new_target = objects.VolumeTarget(context, **target_dict)

        notify.emit_start_notification(context, new_target, 'create',
                                       node_uuid=target.node_uuid)
        with notify.handle_error_notification(context, new_target, 'create',
                                              node_uuid=target.node_uuid):
            new_target.create()
        notify.emit_end_notification(context, new_target, 'create',
                                     node_uuid=target.node_uuid)
        # Set the HTTP Location Header
        api.response.location = link.build_url('volume/targets',
                                               new_target.uuid)
        return VolumeTarget.convert_with_links(new_target)

    @METRICS.timer('VolumeTargetsController.patch')
    @wsme.validate(types.uuid, [VolumeTargetPatchType])
    @expose.expose(VolumeTarget, types.uuid,
                   body=[VolumeTargetPatchType])
    def patch(self, target_uuid, patch):
        """Update an existing volume target.

        :param target_uuid: UUID of a volume target.
        :param patch: a json PATCH document to apply to this volume target.

        :returns: API-serializable volume target object.

        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: PatchError if a given patch can not be applied.
        :raises: InvalidParameterValue if the volume target's UUID is being
                 changed
        :raises: NodeLocked if the node is already locked
        :raises: NodeNotFound if the node associated with the volume target
                 does not exist
        :raises: VolumeTargetNotFound if the volume target cannot be found
        :raises: VolumeTargetBootIndexAlreadyExists if a volume target already
                 exists with the same node ID and boot index values
        :raises: InvalidUUID if invalid node UUID is passed in the patch.
        :raises: InvalidStateRequested If a node associated with the
                 volume target is not powered off.
        """
        context = api.request.context
        cdict = context.to_policy_values()
        policy.authorize('baremetal:volume:update', cdict, cdict)

        if self.parent_node_ident:
            raise exception.OperationNotPermitted()

        values = api_utils.get_patch_values(patch, '/node_uuid')
        for value in values:
            if not uuidutils.is_uuid_like(value):
                message = _("Expected a UUID for node_uuid, but received "
                            "%(uuid)s.") % {'uuid': str(value)}
                raise exception.InvalidUUID(message=message)

        rpc_target = objects.VolumeTarget.get_by_uuid(context, target_uuid)
        target_dict = rpc_target.as_dict()
        # NOTE(smoriya):
        # 1) Remove node_id because it's an internal value and
        #    not present in the API object
        # 2) Add node_uuid
        target_dict['node_uuid'] = target_dict.pop('node_id', None)
        target = VolumeTarget(
            **api_utils.apply_jsonpatch(target_dict, patch))

        # Update only the fields that have changed.
        for field in objects.VolumeTarget.fields:
            try:
                patch_val = getattr(target, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            if rpc_target[field] != patch_val:
                rpc_target[field] = patch_val

        rpc_node = objects.Node.get_by_id(context, rpc_target.node_id)
        notify.emit_start_notification(context, rpc_target, 'update',
                                       node_uuid=rpc_node.uuid)
        with notify.handle_error_notification(context, rpc_target, 'update',
                                              node_uuid=rpc_node.uuid):
            topic = api.request.rpcapi.get_topic_for(rpc_node)
            new_target = api.request.rpcapi.update_volume_target(
                context, rpc_target, topic)
        api_target = VolumeTarget.convert_with_links(new_target)
        notify.emit_end_notification(context, new_target, 'update',
                                     node_uuid=rpc_node.uuid)
        return api_target

    @METRICS.timer('VolumeTargetsController.delete')
    @expose.expose(None, types.uuid,
                   status_code=http_client.NO_CONTENT)
    def delete(self, target_uuid):
        """Delete a volume target.

        :param target_uuid: UUID of a volume target.

        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: NodeLocked if node is locked by another conductor
        :raises: NodeNotFound if the node associated with the target does
                 not exist
        :raises: VolumeTargetNotFound if the volume target cannot be found
        :raises: InvalidStateRequested If a node associated with the
                 volume target is not powered off.
        """
        context = api.request.context
        cdict = context.to_policy_values()
        policy.authorize('baremetal:volume:delete', cdict, cdict)

        if self.parent_node_ident:
            raise exception.OperationNotPermitted()

        rpc_target = objects.VolumeTarget.get_by_uuid(context, target_uuid)
        rpc_node = objects.Node.get_by_id(context, rpc_target.node_id)
        notify.emit_start_notification(context, rpc_target, 'delete',
                                       node_uuid=rpc_node.uuid)
        with notify.handle_error_notification(context, rpc_target, 'delete',
                                              node_uuid=rpc_node.uuid):
            topic = api.request.rpcapi.get_topic_for(rpc_node)
            api.request.rpcapi.destroy_volume_target(context,
                                                     rpc_target, topic)
        notify.emit_end_notification(context, rpc_target, 'delete',
                                     node_uuid=rpc_node.uuid)
""" id = str """The ID of the (major) version, also acts as the release number""" links = [link.Link] """A Link that point to a specific version of the API""" status = str """Status of the version. One of: * CURRENT - the latest version of API, * SUPPORTED - supported, but not latest, version of API, * DEPRECATED - supported, but deprecated, version of API. """ version = str """The current, maximum supported (major.minor) version of API.""" min_version = str """Minimum supported (major.minor) version of API.""" def __init__(self, id, min_version, version, status='CURRENT'): self.id = id self.links = [link.Link.make_link('self', api.request.public_url, self.id, '', bookmark=True)] self.status = status self.version = version self.min_version = min_version def default_version(): # NOTE(dtantsur): avoid circular imports from ironic.api.controllers.v1 import versions return Version(ID_VERSION1, versions.min_version_string(), versions.max_version_string()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/api/expose.py0000644000175000017500000000160100000000000020263 0ustar00coreycorey00000000000000# # Copyright 2015 Rackspace, Inc # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import wsmeext.pecan as wsme_pecan


def expose(*args, **kwargs):
    """Ensure that only JSON, and not XML, is supported."""
    # Default the accepted REST content types to JSON only, unless the
    # caller explicitly asked for something else.
    kwargs.setdefault('rest_content_types', ('json',))
    return wsme_pecan.wsexpose(*args, **kwargs)
from http import client as http_client
import re

from oslo_config import cfg
from oslo_log import log
from pecan import hooks

from ironic.common import context
from ironic.common import policy
from ironic.conductor import rpcapi
from ironic.db import api as dbapi

LOG = log.getLogger(__name__)

# Module-level latch so the (expensive) policy-file scan runs only once
# per process.
CHECKED_DEPRECATED_POLICY_ARGS = False

INBOUND_HEADER = 'X-Openstack-Request-Id'
GLOBAL_REQ_ID = 'openstack.global_request_id'
ID_FORMAT = (r'^req-[a-f0-9]{8}-[a-f0-9]{4}-'
             r'[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$')


def policy_deprecation_check():
    """Warn once about deprecated argument names found in policy rules."""
    global CHECKED_DEPRECATED_POLICY_ARGS
    if CHECKED_DEPRECATED_POLICY_ARGS:
        return

    enforcer = policy.get_enforcer()
    # Mapping of deprecated policy-file argument -> its replacement.
    substitution_dict = {
        'user': 'user_id',
        'domain_id': 'user_domain_id',
        'domain_name': 'user_domain_id',
        'tenant': 'project_name',
    }
    for rule in enforcer.file_rules.values():
        str_rule = str(rule)
        for deprecated, replacement in substitution_dict.items():
            if re.search(r'\b%s\b' % deprecated, str_rule):
                LOG.warning(
                    "Deprecated argument %(deprecated)s is used in policy "
                    "file rule (%(rule)s), please use %(replacement)s "
                    "argument instead. The possibility to use deprecated "
                    "arguments will be removed in the Pike release.",
                    {'deprecated': deprecated,
                     'replacement': replacement,
                     'rule': str_rule})
                if deprecated == 'domain_name':
                    LOG.warning(
                        "Please note that user_domain_id is an ID of the "
                        "user domain, while the deprecated domain_name is "
                        "its name. The policy rule has to be updated "
                        "accordingly.")
    CHECKED_DEPRECATED_POLICY_ARGS = True


class ConfigHook(hooks.PecanHook):
    """Attach the config object to the request so controllers can get to it."""

    def before(self, state):
        state.request.cfg = cfg.CONF


class DBHook(hooks.PecanHook):
    """Attach the dbapi object to the request so controllers can get to it."""

    def before(self, state):
        state.request.dbapi = dbapi.get_instance()


class ContextHook(hooks.PecanHook):
    """Configures a request context and attaches it to the request."""

    def __init__(self, public_api_routes):
        self.public_api_routes = public_api_routes
        super(ContextHook, self).__init__()

    def before(self, state):
        is_public_api = state.request.environ.get('is_public_api', False)

        # set the global_request_id if we have an inbound request id
        gr_id = state.request.headers.get(INBOUND_HEADER, "")
        if re.match(ID_FORMAT, gr_id):
            state.request.environ[GLOBAL_REQ_ID] = gr_id

        ctx = context.RequestContext.from_environ(state.request.environ,
                                                  is_public_api=is_public_api)
        # Do not pass any token with context for noauth mode
        if cfg.CONF.auth_strategy == 'noauth':
            ctx.auth_token = None

        creds = ctx.to_policy_values()
        ctx.is_admin = policy.check('is_admin', creds, creds)
        policy_deprecation_check()

        state.request.context = ctx

    def after(self, state):
        if state.request.context == {}:
            # An incorrect url path will not create RequestContext
            return
        # NOTE(lintan): RequestContext will generate a request_id if no one
        # passing outside, so it always contain a request_id.
        request_id = state.request.context.request_id
        state.response.headers['Openstack-Request-Id'] = request_id


class RPCHook(hooks.PecanHook):
    """Attach the rpcapi object to the request so controllers can get to it."""

    def before(self, state):
        state.request.rpcapi = rpcapi.ConductorAPI()


class NoExceptionTracebackHook(hooks.PecanHook):
    """Workaround rpc.common: deserialize_remote_exception.

    deserialize_remote_exception builds rpc exception traceback into error
    message which is then sent to the client. Such behavior is a security
    concern so this hook is aimed to cut-off traceback from the error message.
    """

    # NOTE(max_lobur): 'after' hook used instead of 'on_error' because
    # 'on_error' never fired for wsme+pecan pair. wsme @wsexpose decorator
    # catches and handles all the errors, so 'on_error' dedicated for unhandled
    # exceptions never fired.
    def after(self, state):
        # Omit empty body. Some errors may not have body at this level yet.
        if not state.response.body:
            return

        # Do nothing if there is no error.
        # Status codes in the range 200 (OK) to 399 (400 = BAD_REQUEST) are not
        # an error.
        if (http_client.OK <= state.response.status_int
                < http_client.BAD_REQUEST):
            return

        json_body = state.response.json
        # Do not remove traceback when traceback config is set
        if cfg.CONF.debug_tracebacks_in_api:
            return

        faultstring = json_body.get('faultstring')
        traceback_marker = 'Traceback (most recent call last):'
        if faultstring and traceback_marker in faultstring:
            # Cut-off traceback.
            faultstring = faultstring.split(traceback_marker, 1)[0]
            # Remove trailing newlines and spaces if any.
            json_body['faultstring'] = faultstring.rstrip()
            # Replace the whole json. Cannot change original one because it's
            # generated on the fly.
            state.response.json = json_body


class PublicUrlHook(hooks.PecanHook):
    """Attach the right public_url to the request.

    Attach the right public_url to the request so resources can create
    links even when the API service is behind a proxy or SSL terminator.
    """

    def before(self, state):
        if cfg.CONF.oslo_middleware.enable_proxy_headers_parsing:
            state.request.public_url = state.request.application_url
        else:
            state.request.public_url = (cfg.CONF.api.public_endpoint
                                        or state.request.host_url)
from ironic.api.middleware import auth_token
from ironic.api.middleware import json_ext
from ironic.api.middleware import parsable_error

ParsableErrorMiddleware = parsable_error.ParsableErrorMiddleware
AuthTokenMiddleware = auth_token.AuthTokenMiddleware
JsonExtensionMiddleware = json_ext.JsonExtensionMiddleware

__all__ = ('ParsableErrorMiddleware',
           'AuthTokenMiddleware',
           'JsonExtensionMiddleware')


# ---- ironic/api/middleware/auth_token.py ----

import re

from keystonemiddleware import auth_token

from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import utils


class AuthTokenMiddleware(auth_token.AuthProtocol):
    """A wrapper on Keystone auth_token middleware.

    Does not perform verification of authentication tokens
    for public routes in the API.
    """

    def __init__(self, app, conf, public_api_routes=None):
        api_routes = [] if public_api_routes is None else public_api_routes
        self._ironic_app = app
        # TODO(mrda): Remove .xml and ensure that doesn't result in a
        # 401 Authentication Required instead of 404 Not Found
        route_pattern_tpl = '%s(\\.json|\\.xml)?$'

        try:
            self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl)
                                      for route_tpl in api_routes]
        except re.error as e:
            raise exception.ConfigInvalid(
                error_msg=_('Cannot compile public API routes: %s') % e)

        super(AuthTokenMiddleware, self).__init__(app, conf)

    def __call__(self, env, start_response):
        path = utils.safe_rstrip(env.get('PATH_INFO'), '/')

        # The information whether the API call is being performed against the
        # public API is required for some other components. Saving it to the
        # WSGI environment is reasonable thereby.
        env['is_public_api'] = any(map(lambda pattern: re.match(pattern, path),
                                       self.public_api_routes))

        if env['is_public_api']:
            return self._ironic_app(env, start_response)

        return super(AuthTokenMiddleware, self).__call__(env, start_response)
from oslo_log import log

from ironic.common import utils

LOG = log.getLogger(__name__)


class JsonExtensionMiddleware(object):
    """Simplified processing of .json extension.

    Previously Ironic API used the "guess_content_type_from_ext" feature.
    It was never needed, as we never allowed non-JSON content types anyway.
    Now that it is removed, this middleware strips .json extension for
    backward compatibility.
    """

    def __init__(self, app):
        self.app = app

    def __call__(self, env, start_response):
        path = utils.safe_rstrip(env.get('PATH_INFO'), '/')
        if path and path.endswith('.json'):
            # NOTE(review): message previously said "prefix", but the code
            # strips a trailing ".json" (a suffix) from the path.
            LOG.debug('Stripping .json suffix from %s for compatibility '
                      'with pecan', path)
            env['PATH_INFO'] = path[:-5]
            env['HAS_JSON_SUFFIX'] = True
        else:
            env['HAS_JSON_SUFFIX'] = False

        return self.app(env, start_response)
"""
Middleware to replace the plain text message body of an error
response with one formatted so the client can parse it.

Based on pecan.middleware.errordocument
"""

import json

from oslo_log import log

from ironic.common.i18n import _

LOG = log.getLogger(__name__)


class ParsableErrorMiddleware(object):
    """Replace error body with something the client can parse."""

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        # Request for this state, modified by replace_start_response()
        # and used when an error is being reported.
        state = {}

        def replacement_start_response(status, headers, exc_info=None):
            """Overrides the default response to make errors parsable."""
            try:
                status_code = int(status.split(' ')[0])
                state['status_code'] = status_code
            except (ValueError, TypeError):  # pragma: nocover
                raise Exception(_(
                    'ErrorDocumentMiddleware received an invalid '
                    'status %s') % status)
            else:
                if (state['status_code'] // 100) not in (2, 3):
                    # Remove some headers so we can replace them later
                    # when we have the full error message and can
                    # compute the length.
                    headers = [(h, v) for (h, v) in headers
                               if h not in ('Content-Length', 'Content-Type')]
                    # Save the headers in case we need to modify them.
                    state['headers'] = headers
                return start_response(status, headers, exc_info)

        # The default for ironic is application/json. However, Pecan will try
        # to output HTML errors if no Accept header is provided.
        if 'HTTP_ACCEPT' not in environ or environ['HTTP_ACCEPT'] == '*/*':
            environ['HTTP_ACCEPT'] = 'application/json'

        app_iter = self.app(environ, replacement_start_response)

        if (state['status_code'] // 100) not in (2, 3):
            # Wrap the upstream error body in a JSON envelope the client
            # can parse, and restore the headers removed above.
            app_iter = [i.decode('utf-8') for i in app_iter]
            body = [json.dumps({'error_message': '\n'.join(app_iter)})]
            body = [item.encode('utf-8') for item in body]
            state['headers'].append(('Content-Type', 'application/json'))
            state['headers'].append(('Content-Length', str(len(body[0]))))
        else:
            body = app_iter
        return body


# ---- ironic/api/wsgi.py (module header; function follows below) ----

"""WSGI script for Ironic API, installed by pbr."""

import sys

from oslo_config import cfg
from oslo_log import log

from ironic.api import app
from ironic.common import i18n
from ironic.common import service

CONF = cfg.CONF
LOG = log.getLogger(__name__)
def initialize_wsgi_app(argv=sys.argv): i18n.install('ironic') service.prepare_service(argv) LOG.debug("Configuration:") CONF.log_opt_values(LOG, log.DEBUG) return app.VersionSelectorApplication() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586538406.1983993 ironic-14.0.1.dev163/ironic/cmd/0000755000175000017500000000000000000000000016402 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/cmd/__init__.py0000644000175000017500000000216700000000000020521 0ustar00coreycorey00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(yuriyz): Do eventlet monkey patching here, instead of in # ironic/__init__.py. This allows the API service to run without monkey # patching under Apache (which uses its own concurrency model). Mixing # concurrency models can cause undefined behavior and potentially API timeouts. 
import os os.environ['EVENTLET_NO_GREENDNS'] = 'yes' # noqa E402 import eventlet eventlet.monkey_patch(os=False) from ironic.common import i18n # noqa for I202 due to 'import eventlet' above i18n.install('ironic') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/cmd/api.py0000644000175000017500000000333200000000000017526 0ustar00coreycorey00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The Ironic Service API.""" import sys from oslo_config import cfg from oslo_log import log try: from oslo_reports import guru_meditation_report as gmr except ImportError: gmr = None from ironic.common import profiler from ironic.common import service as ironic_service from ironic.common import wsgi_service from ironic import version CONF = cfg.CONF LOG = log.getLogger(__name__) def main(): # Parse config file and command line options, then start logging ironic_service.prepare_service(sys.argv) if gmr is not None: gmr.TextGuruMeditation.setup_autorun(version) else: LOG.debug('Guru meditation reporting is disabled ' 'because oslo.reports is not installed') profiler.setup('ironic_api', CONF.host) # Build and start the WSGI app launcher = ironic_service.process_launcher() server = wsgi_service.WSGIService('ironic_api', CONF.api.enable_ssl_api) launcher.launch_service(server, workers=server.workers) launcher.wait() if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/cmd/conductor.py0000644000175000017500000000763600000000000020770 0ustar00coreycorey00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" The Ironic Management Service """ import sys from oslo_config import cfg from oslo_log import log try: from oslo_reports import guru_meditation_report as gmr except ImportError: gmr = None from oslo_service import service from ironic.common import profiler from ironic.common import rpc_service from ironic.common import service as ironic_service from ironic import version CONF = cfg.CONF LOG = log.getLogger(__name__) def warn_about_unsafe_shred_parameters(conf): iterations = conf.deploy.shred_random_overwrite_iterations overwrite_with_zeros = conf.deploy.shred_final_overwrite_with_zeros if iterations == 0 and overwrite_with_zeros is False: LOG.warning('With shred_random_overwrite_iterations set to 0 and ' 'shred_final_overwrite_with_zeros set to False, disks ' 'may NOT be shredded at all, unless they support ATA ' 'Secure Erase. This is a possible SECURITY ISSUE!') def warn_about_missing_default_boot_option(conf): if not conf.deploy.default_boot_option: LOG.warning('The default value of default_boot_option ' 'configuration will change eventually from ' '"netboot" to "local". It is recommended to set ' 'an explicit value for it during the transition period') def warn_about_agent_token_deprecation(conf): if not conf.require_agent_token: LOG.warning('The ``[DEFAULT]require_agent_token`` option is not ' 'set and support for ironic-python-agents that do not ' 'utilize agent tokens, along with the configuration ' 'option will be removed in the W development cycle. ' 'Please upgrade your ironic-python-agent version, and ' 'consider adopting the require_agent_token setting ' 'during the Victoria development cycle.') def issue_startup_warnings(conf): warn_about_unsafe_shred_parameters(conf) warn_about_missing_default_boot_option(conf) warn_about_agent_token_deprecation(conf) def main(): # NOTE(lucasagomes): Safeguard to prevent 'ironic.conductor.manager' # from being imported prior to the configuration options being loaded. 
# If this happened, the periodic decorators would always use the # default values of the options instead of the configured ones. For # more information see: https://bugs.launchpad.net/ironic/+bug/1562258 # and https://bugs.launchpad.net/ironic/+bug/1279774. assert 'ironic.conductor.manager' not in sys.modules # Parse config file and command line options, then start logging ironic_service.prepare_service(sys.argv) if gmr is not None: gmr.TextGuruMeditation.setup_autorun(version) else: LOG.debug('Guru meditation reporting is disabled ' 'because oslo.reports is not installed') mgr = rpc_service.RPCService(CONF.host, 'ironic.conductor.manager', 'ConductorManager') issue_startup_warnings(CONF) profiler.setup('ironic_conductor', CONF.host) launcher = service.launch(CONF, mgr, restart_method='mutate') launcher.wait() if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586538401.0 ironic-14.0.1.dev163/ironic/cmd/dbsync.py0000644000175000017500000003541200000000000020243 0ustar00coreycorey00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Run storage database migration. 
""" import sys from oslo_config import cfg from ironic.common import context from ironic.common import exception from ironic.common.i18n import _ from ironic.common import service from ironic.conf import CONF from ironic.db import api as db_api from ironic.db import migration from ironic import version dbapi = db_api.get_instance() # NOTE(rloo): This is a list of functions to perform online data migrations # (from previous releases) for this release, in batches. It may be empty. # The migration functions should be ordered by execution order; from earlier # to later releases. # # Each migration function takes two arguments -- the context and maximum # number of objects to migrate, and returns a 2-tuple -- the total number of # objects that need to be migrated at the beginning of the function, and the # number migrated. If the function determines that no migrations are needed, # it returns (0, 0). # # The last migration step should always remain the last one -- it migrates # all objects to their latest known versions. # # Example of a function docstring: # # def sample_data_migration(context, max_count): # """Sample method to migrate data to new format. # # :param context: an admin context # :param max_count: The maximum number of objects to migrate. Must be # >= 0. If zero, all the objects will be migrated. # :returns: A 2-tuple -- the total number of objects that need to be # migrated (at the beginning of this call) and the number # of migrated objects. # """ # NOTE(vdrok): Do not access objects' attributes, instead only provide object # and attribute name tuples, so that not to trigger the load of the whole # object, in case it is lazy loaded. The attribute will be accessed when needed # by doing getattr on the object ONLINE_MIGRATIONS = ( # NOTE(rloo): Don't remove this; it should always be last (dbapi, 'update_to_latest_versions'), ) # These are the models added in supported releases. 
We skip the version check # for them since the tables do not exist when it happens. NEW_MODELS = [ ] class DBCommand(object): def check_obj_versions(self, ignore_missing_tables=False): """Check the versions of objects. Check that the object versions are compatible with this release of ironic. It does this by comparing the objects' .version field in the database, with the expected versions of these objects. Returns None if compatible; a string describing the issue otherwise. """ if migration.version() is None: # no tables, nothing to check return if ignore_missing_tables: ignore_models = NEW_MODELS else: ignore_models = () msg = None try: if not dbapi.check_versions(ignore_models=ignore_models): msg = (_('The database is not compatible with this ' 'release of ironic (%s). Please run ' '"ironic-dbsync online_data_migrations" using ' 'the previous release.\n') % version.version_info.release_string()) except exception.DatabaseVersionTooOld: msg = (_('The database version is not compatible with this ' 'release of ironic (%s). This can happen if you are ' 'attempting to upgrade from a version older than ' 'the previous release (skip versions upgrade). ' 'This is an unsupported upgrade method. ' 'Please run "ironic-dbsync upgrade" using the previous ' 'releases for a fast-forward upgrade.\n') % version.version_info.release_string()) return msg def _check_versions(self, ignore_missing_tables=False): msg = self.check_obj_versions( ignore_missing_tables=ignore_missing_tables) if not msg: return else: sys.stderr.write(msg) # NOTE(rloo): We return 1 in online_data_migrations() to indicate # that there are more objects to migrate, so don't use 1 here. 
sys.exit(2) def upgrade(self): self._check_versions(ignore_missing_tables=True) migration.upgrade(CONF.command.revision) def revision(self): migration.revision(CONF.command.message, CONF.command.autogenerate) def stamp(self): migration.stamp(CONF.command.revision) def version(self): print(migration.version()) def create_schema(self): migration.create_schema() def online_data_migrations(self): self._check_versions() self._run_online_data_migrations(max_count=CONF.command.max_count, options=CONF.command.options) def _run_migration_functions(self, context, max_count, options): """Runs the migration functions. Runs the data migration functions in the ONLINE_MIGRATIONS list. It makes sure the total number of object migrations doesn't exceed the specified max_count. A migration of an object will typically migrate one row of data inside the database. :param context: an admin context :param max_count: the maximum number of objects (rows) to migrate; a value >= 1. :param options: migration options - dict mapping migration name to a dictionary of options for this migration. :raises: Exception from the migration function :returns: Boolean value indicating whether migrations are done. Returns False if max_count objects have been migrated (since at that point, it is unknown whether all migrations are done). Returns True if migrations are all done (i.e. fewer than max_count objects were migrated when the migrations are done). 
""" total_migrated = 0 for migration_func_obj, migration_func_name in ONLINE_MIGRATIONS: migration_func = getattr(migration_func_obj, migration_func_name) migration_opts = options.get(migration_func_name, {}) num_to_migrate = max_count - total_migrated try: total_to_do, num_migrated = migration_func(context, num_to_migrate, **migration_opts) except Exception as e: print(_("Error while running %(migration)s: %(err)s.") % {'migration': migration_func.__name__, 'err': e}, file=sys.stderr) raise print(_('%(migration)s() migrated %(done)i of %(total)i objects.') % {'migration': migration_func.__name__, 'total': total_to_do, 'done': num_migrated}) total_migrated += num_migrated if total_migrated >= max_count: # NOTE(rloo). max_count objects have been migrated so we have # to stop. We return False because there is no look-ahead so # we don't know if the migrations have been all done. All we # know is that we've migrated max_count. It is possible that # the migrations are done and that there aren't any more to # migrate after this, but that would involve checking: # 1. num_migrated == total_to_do (easy enough), AND # 2. whether there are other migration functions and whether # they need to do any object migrations (not so easy to # check) return False return True def _run_online_data_migrations(self, max_count=None, options=None): """Perform online data migrations for the release. Online data migrations are done by running all the data migration functions in the ONLINE_MIGRATIONS list. If max_count is None, all the functions will be run in batches of 50 objects, until the migrations are done. Otherwise, this will run (some of) the functions until max_count objects have been migrated. :param max_count: the maximum number of individual object migrations or modified rows, a value >= 1. If None, migrations are run in a loop in batches of 50, until completion. :param options: options to pass to migrations. List of values in the form of .